From b9ca0ea47da60426d57aaf0a41a9bc0755c62fa6 Mon Sep 17 00:00:00 2001
From: ZitengXue <2507456075@qq.com>
Date: Mon, 9 Oct 2023 16:57:50 +0800
Subject: [PATCH] test

---
 .circleci/config.yml | 35 + .circleci/docker/Dockerfile | 11 + .circleci/test.yml | 186 ++ .dev_scripts/benchmark_full_models.txt | 26 + .dev_scripts/benchmark_options.py | 11 + .dev_scripts/benchmark_train_models.txt | 14 + .dev_scripts/covignore.cfg | 6 + .dev_scripts/diff_coverage_test.sh | 42 + .dev_scripts/gather_models.py | 229 ++ .dev_scripts/gen_benchmark_script.py | 193 ++ .dev_scripts/linter.sh | 3 + .dev_scripts/test_benchmark.sh | 128 + .dev_scripts/train_benchmark.sh | 128 + .github/CODE_OF_CONDUCT.md | 76 + .github/CONTRIBUTING.md | 1 + .github/ISSUE_TEMPLATE/config.yml | 1 + .github/ISSUE_TEMPLATE/error-report.md | 45 + .github/ISSUE_TEMPLATE/feature_request.md | 21 + .github/ISSUE_TEMPLATE/general_questions.md | 7 + .../reimplementation_questions.md | 67 + .github/pull_request_template.md | 25 + .github/workflows/deploy.yml | 28 + .github/workflows/lint.yml | 27 + .github/workflows/merge_stage_test.yml | 226 ++ .github/workflows/pr_stage_test.yml | 129 + .github/workflows/test_mim.yml | 44 + .gitignore | 141 + .pre-commit-config-zh-cn.yaml | 50 + .pre-commit-config.yaml | 50 + .readthedocs.yml | 9 + CITATION.cff | 8 + LICENSE | 203 ++ MANIFEST.in | 5 + README.md | 336 +++ README_zh-CN.md | 349 +++ configs/3dssd/3dssd_4xb4_kitti-3d-car.py | 119 + configs/3dssd/README.md | 45 + configs/3dssd/metafile.yml | 29 + configs/_base_/datasets/kitti-3d-3class.py | 167 ++ configs/_base_/datasets/kitti-3d-car.py | 165 + configs/_base_/datasets/kitti-mono3d.py | 100 + configs/_base_/datasets/lyft-3d-range100.py | 150 + configs/_base_/datasets/lyft-3d.py | 160 + configs/_base_/datasets/nuim-instance.py | 70 + configs/_base_/datasets/nus-3d.py | 169 ++ configs/_base_/datasets/nus-mono3d.py | 119 + configs/_base_/datasets/s3dis-3d.py | 134 + configs/_base_/datasets/s3dis-seg.py | 159 + configs/_base_/datasets/scannet-3d.py | 141 + configs/_base_/datasets/scannet-seg.py | 154 + configs/_base_/datasets/semantickitti.py | 184 ++ configs/_base_/datasets/sunrgbd-3d.py | 126 + configs/_base_/datasets/waymoD5-3d-3class.py | 177 ++ configs/_base_/datasets/waymoD5-3d-car.py | 174 ++ .../datasets/waymoD5-fov-mono3d-3class.py | 163 + .../datasets/waymoD5-mv-mono3d-3class.py | 163 + .../_base_/datasets/waymoD5-mv3d-3class.py | 166 + configs/_base_/default_runtime.py | 23 + configs/_base_/models/3dssd.py | 76 + .../models/cascade-mask-rcnn_r50_fpn.py | 199 ++ .centerpoint_pillar02_second_secfpn_nus.py | 89 + .centerpoint_voxel01_second_secfpn_nus.py | 89 + configs/_base_/models/cylinder3d.py | 41 + configs/_base_/models/dgcnn.py | 29 + configs/_base_/models/fcaf3d.py | 20 + configs/_base_/models/fcos3d.py | 86 + configs/_base_/models/groupfree3d.py | 75 + configs/_base_/models/h3dnet.py | 351 +++ configs/_base_/models/imvotenet.py | 118 + configs/_base_/models/mask-rcnn_r50_fpn.py | 125 + configs/_base_/models/minkunet.py | 29 + configs/_base_/models/multiview_dfm.py | 104 + configs/_base_/models/paconv_ssg-cuda.py | 7 + configs/_base_/models/paconv_ssg.py | 50 + configs/_base_/models/parta2.py | 207 ++ configs/_base_/models/pgd.py | 56 + configs/_base_/models/point_rcnn.py | 148 + configs/_base_/models/pointnet2_msg.py | 28 + configs/_base_/models/pointnet2_ssg.py | 36 + .../_base_/models/pointpillars_hv_fpn_lyft.py | 23 + .../_base_/models/pointpillars_hv_fpn_nus.py | 100 + .../pointpillars_hv_fpn_range100_lyft.py | 23 +
.../models/pointpillars_hv_secfpn_kitti.py | 98 + .../models/pointpillars_hv_secfpn_waymo.py | 112 + .../_base_/models/second_hv_secfpn_kitti.py | 94 + .../_base_/models/second_hv_secfpn_waymo.py | 108 + configs/_base_/models/smoke.py | 61 + configs/_base_/models/spvcnn.py | 29 + configs/_base_/models/votenet.py | 73 + configs/_base_/schedules/cosine.py | 30 + configs/_base_/schedules/cyclic-20e.py | 65 + configs/_base_/schedules/cyclic-40e.py | 67 + configs/_base_/schedules/mmdet-schedule-1x.py | 28 + configs/_base_/schedules/schedule-2x.py | 36 + configs/_base_/schedules/schedule-3x.py | 31 + configs/_base_/schedules/seg-cosine-100e.py | 27 + configs/_base_/schedules/seg-cosine-150e.py | 27 + configs/_base_/schedules/seg-cosine-200e.py | 27 + configs/_base_/schedules/seg-cosine-50e.py | 27 + ...pn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py | 386 +++ ...lars_secfpn_3x8_100e_det3d_kitti-3d-car.py | 248 ++ ...rs_secfpn_4x8_80e_pcdet_kitti-3d-3class.py | 291 ++ ...nd_secfpn_4x8_80e_pcdet_kitti-3d-3class.py | 281 ++ configs/centerpoint/README.md | 136 + ...02_second_secfpn_8xb4-cyclic-20e_nus-3d.py | 159 + ...n_head-circlenms_8xb4-cyclic-20e_nus-3d.py | 3 + ...ad-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py | 16 + ..._secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py | 15 + ...75_second_secfpn_8xb4-cyclic-20e_nus-3d.py | 145 + ...n_head-circlenms_8xb4-cyclic-20e_nus-3d.py | 3 + ...ad-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py | 16 + ...rclenms_8xb4-flip-tta-cyclic-20e_nus-3d.py | 50 + ..._secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py | 15 + ...ead-dcn_8xb4-flip-tta-cyclic-20e_nus-3d.py | 50 + ...fpn_head-dcn_8xb4-tta-cyclic-20e_nus-3d.py | 52 + ...01_second_secfpn_8xb4-cyclic-20e_nus-3d.py | 160 + ...n_head-circlenms_8xb4-cyclic-20e_nus-3d.py | 3 + ...ad-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py | 16 + ..._secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py | 15 + configs/centerpoint/metafile.yml | 95 + configs/cylinder3d/README.md | 37 + .../cylinder3d_4xb4_3x_semantickitti.py | 39 + configs/cylinder3d/metafile.yml | 29 + ...ew-dfm_r101-dcn_16xb2_waymoD5-3d-3class.py | 49 + ...-dcn_centerhead_16xb2_waymoD5-3d-3class.py | 53 + configs/dgcnn/README.md | 55 + ..._4xb32-cosine-100e_s3dis-seg_test-area1.py | 17 + ..._4xb32-cosine-100e_s3dis-seg_test-area2.py | 17 + ..._4xb32-cosine-100e_s3dis-seg_test-area3.py | 17 + ..._4xb32-cosine-100e_s3dis-seg_test-area4.py | 17 + ..._4xb32-cosine-100e_s3dis-seg_test-area5.py | 21 + ..._4xb32-cosine-100e_s3dis-seg_test-area6.py | 17 + configs/dgcnn/metafile.yml | 89 + configs/dynamic_voxelization/README.md | 40 + configs/dynamic_voxelization/metafile.yml | 53 + ...illars_dv_secfpn_8xb6-160e_kitti-3d-car.py | 21 + ..._secfpn_8xb2-cosine-80e_kitti-3d-3class.py | 24 + .../second_dv_secfpn_8xb6-80e_kitti-3d-car.py | 20 + configs/fcaf3d/README.md | 53 + configs/fcaf3d/fcaf3d_2xb8_s3dis-3d-5class.py | 27 + .../fcaf3d/fcaf3d_2xb8_scannet-3d-18class.py | 94 + .../fcaf3d/fcaf3d_2xb8_sunrgbd-3d-10class.py | 92 + configs/fcaf3d/metafile.yml | 58 + configs/fcos3d/README.md | 75 + ...affe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py | 70 + ...fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py | 8 + configs/fcos3d/metafile.yml | 43 + configs/free_anchor/README.md | 105 + configs/free_anchor/metafile.yml | 122 + ...head-free-anchor_sbn-all_8xb4-2x_nus-3d.py | 49 + ...head-free-anchor_sbn-all_8xb4-2x_nus-3d.py | 18 + ...nchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py | 76 + ...head-free-anchor_sbn-all_8xb4-2x_nus-3d.py | 18 + ...nchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py | 76 + ...head-free-anchor_sbn-all_8xb4-2x_nus-3d.py | 18 
+ configs/groupfree3d/README.md | 45 + ...upfree3d_head-L12-O256_4xb8_scannet-seg.py | 227 ++ ...oupfree3d_head-L6-O256_4xb8_scannet-seg.py | 227 ++ ...ee3d_w2x-head-L12-O256_4xb8_scannet-seg.py | 242 ++ ...ee3d_w2x-head-L12-O512_4xb8_scannet-seg.py | 243 ++ configs/groupfree3d/metafile.yml | 72 + configs/h3dnet/README.md | 44 + configs/h3dnet/h3dnet_8xb3_scannet-seg.py | 74 + configs/h3dnet/metafile.yml | 29 + configs/imvotenet/README.md | 43 + ...net_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py | 69 + .../imvotenet_stage2_8xb16_sunrgbd-3d.py | 228 ++ configs/imvotenet/metafile.yml | 43 + configs/imvoxelnet/README.md | 44 + .../imvoxelnet_2xb4_sunrgbd-3d-10class.py | 137 + .../imvoxelnet_8xb4_kitti-3d-car.py | 176 ++ configs/imvoxelnet/metafile.yml | 29 + configs/minkunet/README.md | 43 + configs/minkunet/metafile.yml | 57 + .../minkunet_w16_8xb2-15e_semantickitti.py | 13 + .../minkunet_w20_8xb2-15e_semantickitti.py | 8 + .../minkunet_w32_8xb2-15e_semantickitti.py | 54 + configs/monoflex/README.md | 48 + configs/monoflex/metafile.yml | 30 + configs/mvxnet/README.md | 38 + configs/mvxnet/metafile.yml | 31 + ..._second_secfpn_8xb2-80e_kitti-3d-3class.py | 271 ++ configs/nuimages/README.md | 69 + ...cascade-mask-rcnn-r50-fpn_coco-20e_nuim.py | 7 + .../cascade-mask-rcnn_r101_fpn_1x_nuim.py | 2 + .../cascade-mask-rcnn_r50_fpn_1x_nuim.py | 60 + ...cade-mask-rcnn_r50_fpn_coco-20e-1x_nuim.py | 3 + ...ascade-mask-rcnn_x101_32x4d_fpn_1x_nuim.py | 13 + configs/nuimages/htc_r50_fpn_1x_nuim.py | 38 + .../nuimages/htc_r50_fpn_coco-20e-1x_nuim.py | 3 + configs/nuimages/htc_r50_fpn_coco-20e_nuim.py | 4 + ...c_r50_fpn_head-without-semantic_1x_nuim.py | 222 ++ ...x4d_fpn_dconv_c3-c5_coco-20e-1xb16_nuim.py | 23 + .../nuimages/mask-rcnn_r101_fpn_1x_nuim.py | 2 + .../mask-rcnn_r50_caffe_fpn_1x_nuim.py | 41 + ...mask-rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py | 43 + ...ask-rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py | 47 + configs/nuimages/mask-rcnn_r50_fpn_1x_nuim.py | 8 + .../mask-rcnn_r50_fpn_coco-2x_1x_nuim.py | 9 + .../mask-rcnn_r50_fpn_coco-2x_1x_nus-2d.py | 32 + .../mask-rcnn_x101_32x4d_fpn_1x_nuim.py | 13 + configs/nuimages/metafile.yml | 279 ++ configs/paconv/README.md | 51 + configs/paconv/metafile.yml | 42 + ...onv_ssg-cuda_8xb8-cosine-200e_s3dis-seg.py | 64 + .../paconv_ssg_8xb8-cosine-150e_s3dis-seg.py | 61 + configs/parta2/README.md | 38 + configs/parta2/metafile.yml | 41 + ..._secfpn_8xb2-cyclic-80e_kitti-3d-3class.py | 160 + ..._hv_secfpn_8xb2-cyclic-80e_kitti-3d-car.py | 154 + configs/pgd/README.md | 69 + configs/pgd/metafile.yml | 83 + ...1-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py | 104 + ...pn_head-gn_16xb2-1x_nus-mono3d_finetune.py | 9 + ...1-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py | 20 + ...pn_head-gn_16xb2-2x_nus-mono3d_finetune.py | 9 + ...-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py | 127 + ...1_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py | 112 + ..._r101_fpn-head_dcn_16xb3_waymoD5-mono3d.py | 111 + ...01_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py | 112 + configs/point_rcnn/README.md | 47 + configs/point_rcnn/metafile.yml | 29 + .../point-rcnn_8xb2_kitti-3d-3class.py | 145 + configs/pointnet2/README.md | 72 + configs/pointnet2/metafile.yml | 95 + ..._2xb16-cosine-250e_scannet-seg-xyz-only.py | 111 + ...tnet2_msg_2xb16-cosine-250e_scannet-seg.py | 35 + ...ointnet2_msg_2xb16-cosine-80e_s3dis-seg.py | 26 + ..._2xb16-cosine-200e_scannet-seg-xyz-only.py | 109 + ...tnet2_ssg_2xb16-cosine-200e_scannet-seg.py | 33 + ...ointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py | 24 + configs/pointpillars/README.md | 78 + 
configs/pointpillars/metafile.yml | 215 ++ ...hv_fpn_sbn-all_8xb2-2x_lyft-3d-range100.py | 10 + ...tpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py | 10 + ...llars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py | 4 + ...ntpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py | 11 + ...ars_hv_secfpn_8xb6-160e_kitti-3d-3class.py | 130 + ...illars_hv_secfpn_8xb6-160e_kitti-3d-car.py | 101 + ...secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py | 14 + ...hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py | 42 + ...cfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py | 6 + ..._secfpn_sbn-all_16xb2-2x_waymoD5-3d-car.py | 39 + ...secfpn_sbn-all_8xb2-2x_lyft-3d-range100.py | 47 + ...llars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py | 48 + ...rs_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py | 4 + ...illars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py | 48 + configs/pv_rcnn/README.md | 42 + configs/pv_rcnn/metafile.yml | 29 + .../pv_rcnn_8xb2-80e_kitti-3d-3class.py | 369 +++ configs/regnet/README.md | 82 + configs/regnet/metafile.yml | 85 + ...regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py | 24 + ...egnet-400mf_fpn_sbn-all_8xb2-2x_lyft-3d.py | 29 + ...regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py | 24 + ...mf_fpn_sbn-all_range100_8xb2-2x_lyft-3d.py | 29 + ...et-400mf_secfpn_sbn-all_8xb2-2x_lyft-3d.py | 39 + ...net-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py | 38 + ...secfpn_sbn-all_range100_8xb2-2x_lyft-3d.py | 40 + configs/sassd/README.md | 28 + .../sassd/sassd_8xb6-80e_kitti-3d-3class.py | 99 + configs/second/README.md | 54 + configs/second/metafile.yml | 97 + ...cond_hv_secfpn_8xb6-80e_kitti-3d-3class.py | 5 + .../second_hv_secfpn_8xb6-80e_kitti-3d-car.py | 30 + ..._hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py | 4 + ...ond_hv_secfpn_8xb6-amp-80e_kitti-3d-car.py | 4 + ...cfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py | 145 + configs/smoke/README.md | 47 + configs/smoke/metafile.yml | 30 + ...a34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py | 63 + configs/spvcnn/README.md | 44 + configs/spvcnn/metafile.yml | 57 + .../spvcnn_w16_8xb2-15e_semantickitti.py | 10 + .../spvcnn_w20_8xb2-15e_semantickitti.py | 8 + .../spvcnn_w32_8xb2-15e_semantickitti.py | 54 + configs/ssn/README.md | 53 + configs/ssn/metafile.yml | 72 + ...t-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py | 21 + ...et-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py | 20 + .../ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py | 244 ++ .../ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py | 256 ++ configs/votenet/README.md | 68 + configs/votenet/metafile.yml | 59 + configs/votenet/votenet_8xb16_sunrgbd-3d.py | 27 + configs/votenet/votenet_8xb8_scannet-3d.py | 39 + .../votenet_head-iouloss_8xb8_scannet-3d.py | 8 + demo/mono_det_demo.py | 70 + demo/multi_modality_demo.py | 71 + demo/pcd_demo.py | 60 + demo/pcd_seg_demo.py | 55 + docker/Dockerfile | 40 + docker/serve/Dockerfile | 65 + docker/serve/config.properties | 5 + docker/serve/entrypoint.sh | 12 + docs/en/Makefile | 20 + docs/en/_static/css/readthedocs.css | 6 + docs/en/advanced_guides/customize_dataset.md | 503 ++++ docs/en/advanced_guides/customize_models.md | 638 ++++ docs/en/advanced_guides/customize_runtime.md | 392 +++ docs/en/advanced_guides/datasets/index.rst | 11 + docs/en/advanced_guides/datasets/kitti_det.md | 206 ++ docs/en/advanced_guides/datasets/lyft_det.md | 207 ++ .../advanced_guides/datasets/nuscenes_det.md | 242 ++ .../advanced_guides/datasets/s3dis_sem_seg.md | 262 ++ .../advanced_guides/datasets/scannet_det.md | 292 ++ .../datasets/scannet_sem_seg.md | 128 + .../advanced_guides/datasets/sunrgbd_det.md | 250 ++ docs/en/advanced_guides/datasets/waymo_det.md | 168 ++ docs/en/advanced_guides/index.rst 
| 27 + .../pure_point_cloud_dataset.md | 461 +++ .../advanced_guides/supported_tasks/index.rst | 6 + .../supported_tasks/lidar_det3d.md | 104 + .../supported_tasks/lidar_sem_seg3d.md | 93 + .../supported_tasks/vision_det3d.md | 125 + docs/en/api.rst | 154 + docs/en/conf.py | 161 + docs/en/get_started.md | 295 ++ docs/en/index.rst | 56 + docs/en/make.bat | 35 + docs/en/migration.md | 33 + docs/en/model_zoo.md | 117 + docs/en/notes/benchmarks.md | 286 ++ docs/en/notes/changelog.md | 268 ++ docs/en/notes/changelog_v1.0.x.md | 930 ++++++ docs/en/notes/compatibility.md | 207 ++ docs/en/notes/contribution_guides.md | 139 + docs/en/notes/faq.md | 58 + docs/en/notes/index.rst | 8 + docs/en/stat.py | 62 + docs/en/switch_language.md | 3 + docs/en/user_guides/2_new_data_model.md | 105 + docs/en/user_guides/backends_support.md | 154 + docs/en/user_guides/config.md | 573 ++++ docs/en/user_guides/coord_sys_tutorial.md | 245 ++ docs/en/user_guides/data_pipeline.md | 199 ++ docs/en/user_guides/dataset_prepare.md | 180 ++ docs/en/user_guides/index.rst | 15 + docs/en/user_guides/inference.md | 89 + docs/en/user_guides/model_deployment.md | 4 + docs/en/user_guides/train_test.md | 264 ++ docs/en/user_guides/useful_tools.md | 220 ++ docs/en/user_guides/visualization.md | 204 ++ docs/zh_cn/Makefile | 20 + docs/zh_cn/_static/css/readthedocs.css | 6 + .../advanced_guides/customize_dataset.md | 500 +++ .../zh_cn/advanced_guides/customize_models.md | 619 ++++ .../advanced_guides/customize_runtime.md | 382 +++ docs/zh_cn/advanced_guides/datasets/index.rst | 11 + .../advanced_guides/datasets/kitti_det.md | 206 ++ .../advanced_guides/datasets/lyft_det.md | 195 ++ .../advanced_guides/datasets/nuscenes_det.md | 233 ++ .../advanced_guides/datasets/s3dis_sem_seg.md | 271 ++ .../advanced_guides/datasets/scannet_det.md | 293 ++ .../datasets/scannet_sem_seg.md | 133 + .../advanced_guides/datasets/sunrgbd_det.md | 250 ++ .../advanced_guides/datasets/waymo_det.md | 168 ++ docs/zh_cn/advanced_guides/index.rst | 27 + .../advanced_guides/supported_tasks/index.rst | 6 + .../supported_tasks/lidar_det3d.md | 83 + .../supported_tasks/lidar_sem_seg3d.md | 78 + .../supported_tasks/vision_det3d.md | 114 + docs/zh_cn/api.rst | 154 + docs/zh_cn/conf.py | 161 + docs/zh_cn/get_started.md | 291 ++ docs/zh_cn/index.rst | 56 + docs/zh_cn/model_zoo.md | 113 + docs/zh_cn/notes/benchmarks.md | 285 ++ docs/zh_cn/notes/changelog.md | 1 + docs/zh_cn/notes/changelog_v1.0.x.md | 1 + docs/zh_cn/notes/compatibility.md | 1 + docs/zh_cn/notes/faq.md | 58 + docs/zh_cn/notes/index.rst | 8 + docs/zh_cn/stat.py | 62 + docs/zh_cn/switch_language.md | 3 + docs/zh_cn/user_guides/2_new_data_model.md | 102 + docs/zh_cn/user_guides/backends_support.md | 154 + docs/zh_cn/user_guides/config.md | 558 ++++ docs/zh_cn/user_guides/coord_sys_tutorial.md | 245 ++ docs/zh_cn/user_guides/data_pipeline.md | 191 ++ docs/zh_cn/user_guides/dataset_prepare.md | 179 ++ docs/zh_cn/user_guides/index.rst | 15 + docs/zh_cn/user_guides/inference.md | 89 + docs/zh_cn/user_guides/model_deployment.md | 4 + docs/zh_cn/user_guides/train_test.md | 260 ++ docs/zh_cn/user_guides/useful_tools.md | 213 ++ docs/zh_cn/user_guides/visualization.md | 204 ++ mmdet3d/__init__.py | 38 + mmdet3d/apis/__init__.py | 16 + mmdet3d/apis/inference.py | 383 +++ mmdet3d/apis/inferencers/__init__.py | 11 + .../apis/inferencers/base_3d_inferencer.py | 312 ++ .../inferencers/lidar_det3d_inferencer.py | 187 ++ .../inferencers/lidar_seg3d_inferencer.py | 195 ++ .../apis/inferencers/mono_det3d_inferencer.py | 178 ++ 
.../multi_modality_det3d_inferencer.py | 233 ++ mmdet3d/datasets/__init__.py | 41 + mmdet3d/datasets/convert_utils.py | 421 +++ mmdet3d/datasets/dataset_wrappers.py | 182 ++ mmdet3d/datasets/det3d_dataset.py | 425 +++ mmdet3d/datasets/kitti2d_dataset.py | 241 ++ mmdet3d/datasets/kitti_dataset.py | 171 ++ mmdet3d/datasets/lyft_dataset.py | 102 + mmdet3d/datasets/nuscenes_dataset.py | 238 ++ mmdet3d/datasets/s3dis_dataset.py | 361 +++ mmdet3d/datasets/scannet_dataset.py | 347 +++ mmdet3d/datasets/seg3d_dataset.py | 337 +++ mmdet3d/datasets/semantickitti_dataset.py | 95 + mmdet3d/datasets/sunrgbd_dataset.py | 143 + mmdet3d/datasets/transforms/__init__.py | 36 + .../datasets/transforms/data_augment_utils.py | 411 +++ mmdet3d/datasets/transforms/dbsampler.py | 345 +++ mmdet3d/datasets/transforms/formating.py | 243 ++ mmdet3d/datasets/transforms/loading.py | 1319 ++++++++ mmdet3d/datasets/transforms/test_time_aug.py | 121 + mmdet3d/datasets/transforms/transforms_3d.py | 2668 +++++++++++++++++ mmdet3d/datasets/utils.py | 123 + mmdet3d/datasets/waymo_dataset.py | 239 ++ mmdet3d/engine/__init__.py | 4 + mmdet3d/engine/hooks/__init__.py | 8 + mmdet3d/engine/hooks/benchmark_hook.py | 38 + .../hooks/disable_object_sample_hook.py | 54 + mmdet3d/engine/hooks/visualization_hook.py | 241 ++ mmdet3d/evaluation/__init__.py | 25 + mmdet3d/evaluation/functional/__init__.py | 20 + mmdet3d/evaluation/functional/indoor_eval.py | 302 ++ .../functional/instance_seg_eval.py | 128 + .../functional/kitti_utils/__init__.py | 4 + .../evaluation/functional/kitti_utils/eval.py | 950 ++++++ .../functional/kitti_utils/rotate_iou.py | 379 +++ mmdet3d/evaluation/functional/lyft_eval.py | 285 ++ .../functional/panoptic_seg_eval.py | 387 +++ .../functional/scannet_utils/__init__.py | 4 + .../evaluate_semantic_instance.py | 347 +++ .../functional/scannet_utils/util_3d.py | 84 + mmdet3d/evaluation/functional/seg_eval.py | 134 + .../functional/waymo_utils/__init__.py | 5 + .../waymo_utils/prediction_to_waymo.py | 419 +++ mmdet3d/evaluation/metrics/__init__.py | 14 + mmdet3d/evaluation/metrics/indoor_metric.py | 169 ++ .../evaluation/metrics/instance_seg_metric.py | 91 + mmdet3d/evaluation/metrics/kitti_metric.py | 650 ++++ mmdet3d/evaluation/metrics/lyft_metric.py | 412 +++ mmdet3d/evaluation/metrics/nuscenes_metric.py | 788 +++++ .../evaluation/metrics/panoptic_seg_metric.py | 96 + mmdet3d/evaluation/metrics/seg_metric.py | 137 + mmdet3d/evaluation/metrics/waymo_metric.py | 710 +++++ mmdet3d/models/__init__.py | 17 + mmdet3d/models/backbones/__init__.py | 21 + mmdet3d/models/backbones/base_pointnet.py | 39 + mmdet3d/models/backbones/cylinder3d.py | 480 +++ mmdet3d/models/backbones/dgcnn.py | 97 + mmdet3d/models/backbones/dla.py | 446 +++ mmdet3d/models/backbones/mink_resnet.py | 137 + mmdet3d/models/backbones/minkunet_backbone.py | 121 + mmdet3d/models/backbones/multi_backbone.py | 127 + mmdet3d/models/backbones/nostem_regnet.py | 85 + mmdet3d/models/backbones/pointnet2_sa_msg.py | 191 ++ mmdet3d/models/backbones/pointnet2_sa_ssg.py | 141 + mmdet3d/models/backbones/second.py | 91 + mmdet3d/models/backbones/spvcnn_backone.py | 237 ++ mmdet3d/models/data_preprocessors/__init__.py | 4 + .../data_preprocessors/data_preprocessor.py | 524 ++++ mmdet3d/models/data_preprocessors/utils.py | 65 + mmdet3d/models/data_preprocessors/voxelize.py | 326 ++ mmdet3d/models/decode_heads/__init__.py | 11 + .../models/decode_heads/cylinder3d_head.py | 158 + mmdet3d/models/decode_heads/decode_head.py | 178 ++ mmdet3d/models/decode_heads/dgcnn_head.py 
| 71 + mmdet3d/models/decode_heads/minkunet_head.py | 80 + mmdet3d/models/decode_heads/paconv_head.py | 73 + mmdet3d/models/decode_heads/pointnet2_head.py | 94 + mmdet3d/models/dense_heads/__init__.py | 28 + mmdet3d/models/dense_heads/anchor3d_head.py | 427 +++ .../dense_heads/anchor_free_mono3d_head.py | 480 +++ .../models/dense_heads/base_3d_dense_head.py | 381 +++ .../models/dense_heads/base_conv_bbox_head.py | 131 + .../dense_heads/base_mono3d_dense_head.py | 186 ++ .../models/dense_heads/centerpoint_head.py | 926 ++++++ mmdet3d/models/dense_heads/fcaf3d_head.py | 696 +++++ .../models/dense_heads/fcos_mono3d_head.py | 958 ++++++ .../models/dense_heads/free_anchor3d_head.py | 291 ++ .../models/dense_heads/groupfree3d_head.py | 1110 +++++++ mmdet3d/models/dense_heads/imvoxel_head.py | 696 +++++ mmdet3d/models/dense_heads/monoflex_head.py | 804 +++++ mmdet3d/models/dense_heads/parta2_rpn_head.py | 398 +++ mmdet3d/models/dense_heads/pgd_head.py | 1241 ++++++++ mmdet3d/models/dense_heads/point_rpn_head.py | 511 ++++ .../models/dense_heads/shape_aware_head.py | 537 ++++ .../models/dense_heads/smoke_mono3d_head.py | 554 ++++ mmdet3d/models/dense_heads/ssd_3d_head.py | 583 ++++ mmdet3d/models/dense_heads/train_mixins.py | 353 +++ mmdet3d/models/dense_heads/vote_head.py | 838 ++++++ mmdet3d/models/detectors/__init__.py | 32 + mmdet3d/models/detectors/base.py | 152 + mmdet3d/models/detectors/centerpoint.py | 65 + mmdet3d/models/detectors/dfm.py | 234 ++ mmdet3d/models/detectors/dynamic_voxelnet.py | 48 + mmdet3d/models/detectors/fcos_mono3d.py | 101 + mmdet3d/models/detectors/groupfree3dnet.py | 87 + mmdet3d/models/detectors/h3dnet.py | 157 + mmdet3d/models/detectors/imvotenet.py | 537 ++++ mmdet3d/models/detectors/imvoxelnet.py | 275 ++ mmdet3d/models/detectors/mink_single_stage.py | 136 + mmdet3d/models/detectors/multiview_dfm.py | 384 +++ mmdet3d/models/detectors/mvx_faster_rcnn.py | 56 + mmdet3d/models/detectors/mvx_two_stage.py | 407 +++ mmdet3d/models/detectors/parta2.py | 66 + mmdet3d/models/detectors/point_rcnn.py | 67 + mmdet3d/models/detectors/pv_rcnn.py | 232 ++ mmdet3d/models/detectors/sassd.py | 98 + mmdet3d/models/detectors/single_stage.py | 163 + .../models/detectors/single_stage_mono3d.py | 99 + mmdet3d/models/detectors/smoke_mono3d.py | 43 + mmdet3d/models/detectors/ssd3dnet.py | 26 + mmdet3d/models/detectors/two_stage.py | 208 ++ mmdet3d/models/detectors/votenet.py | 148 + mmdet3d/models/detectors/voxelnet.py | 48 + mmdet3d/models/language_models/__init__.py | 4 + mmdet3d/models/language_models/bert.py | 233 ++ mmdet3d/models/layers/__init__.py | 32 + mmdet3d/models/layers/box3d_nms.py | 295 ++ .../models/layers/dgcnn_modules/__init__.py | 6 + .../layers/dgcnn_modules/dgcnn_fa_module.py | 72 + .../layers/dgcnn_modules/dgcnn_fp_module.py | 63 + .../layers/dgcnn_modules/dgcnn_gf_module.py | 222 ++ mmdet3d/models/layers/edge_fusion_module.py | 84 + .../models/layers/fusion_layers/__init__.py | 10 + .../layers/fusion_layers/coord_transform.py | 224 ++ .../layers/fusion_layers/point_fusion.py | 418 +++ .../layers/fusion_layers/vote_fusion.py | 207 ++ mmdet3d/models/layers/mlp.py | 58 + mmdet3d/models/layers/norm.py | 152 + mmdet3d/models/layers/paconv/__init__.py | 4 + mmdet3d/models/layers/paconv/paconv.py | 402 +++ mmdet3d/models/layers/paconv/utils.py | 91 + .../layers/pointnet_modules/__init__.py | 13 + .../models/layers/pointnet_modules/builder.py | 45 + .../pointnet_modules/paconv_sa_module.py | 383 +++ .../pointnet_modules/point_fp_module.py | 81 + 
.../pointnet_modules/point_sa_module.py | 354 +++ .../pointnet_modules/stack_point_sa_module.py | 199 ++ mmdet3d/models/layers/sparse_block.py | 209 ++ mmdet3d/models/layers/spconv/__init__.py | 14 + .../spconv/overwrite_spconv/__init__.py | 4 + .../spconv/overwrite_spconv/write_spconv2.py | 104 + mmdet3d/models/layers/torchsparse/__init__.py | 11 + .../layers/torchsparse/torchsparse_wrapper.py | 18 + mmdet3d/models/layers/torchsparse_block.py | 112 + mmdet3d/models/layers/transformer.py | 146 + mmdet3d/models/layers/vote_module.py | 190 ++ mmdet3d/models/losses/__init__.py | 17 + .../models/losses/axis_aligned_iou_loss.py | 85 + mmdet3d/models/losses/chamfer_distance.py | 156 + mmdet3d/models/losses/lovasz_loss.py | 356 +++ mmdet3d/models/losses/multibin_loss.py | 107 + .../losses/paconv_regularization_loss.py | 118 + mmdet3d/models/losses/rotated_iou_loss.py | 91 + .../models/losses/uncertain_smooth_l1_loss.py | 199 ++ mmdet3d/models/middle_encoders/__init__.py | 10 + .../models/middle_encoders/pillar_scatter.py | 100 + .../models/middle_encoders/sparse_encoder.py | 513 ++++ mmdet3d/models/middle_encoders/sparse_unet.py | 299 ++ .../middle_encoders/voxel_set_abstraction.py | 334 +++ mmdet3d/models/necks/__init__.py | 12 + mmdet3d/models/necks/dla_neck.py | 233 ++ mmdet3d/models/necks/imvoxel_neck.py | 230 ++ mmdet3d/models/necks/pointnet2_fp_neck.py | 89 + mmdet3d/models/necks/second_fpn.py | 90 + mmdet3d/models/roi_heads/__init__.py | 15 + mmdet3d/models/roi_heads/base_3droi_head.py | 55 + .../models/roi_heads/bbox_heads/__init__.py | 16 + .../roi_heads/bbox_heads/h3d_bbox_head.py | 990 ++++++ .../roi_heads/bbox_heads/parta2_bbox_head.py | 658 ++++ .../bbox_heads/point_rcnn_bbox_head.py | 604 ++++ .../roi_heads/bbox_heads/pv_rcnn_bbox_head.py | 509 ++++ mmdet3d/models/roi_heads/h3d_roi_head.py | 130 + .../models/roi_heads/mask_heads/__init__.py | 8 + .../foreground_segmentation_head.py | 174 ++ .../mask_heads/pointwise_semantic_head.py | 211 ++ .../roi_heads/mask_heads/primitive_head.py | 1053 +++++++ .../roi_heads/part_aggregation_roi_head.py | 379 +++ .../models/roi_heads/point_rcnn_roi_head.py | 309 ++ mmdet3d/models/roi_heads/pv_rcnn_roi_head.py | 312 ++ .../roi_heads/roi_extractors/__init__.py | 11 + .../batch_roigridpoint_extractor.py | 97 + .../single_roiaware_extractor.py | 61 + .../single_roipoint_extractor.py | 68 + mmdet3d/models/segmentors/__init__.py | 7 + mmdet3d/models/segmentors/base.py | 163 + mmdet3d/models/segmentors/cylinder3d.py | 142 + mmdet3d/models/segmentors/encoder_decoder.py | 552 ++++ mmdet3d/models/segmentors/minkunet.py | 117 + mmdet3d/models/task_modules/__init__.py | 32 + .../models/task_modules/anchor/__init__.py | 12 + .../anchor/anchor_3d_generator.py | 419 +++ mmdet3d/models/task_modules/anchor/builder.py | 22 + .../models/task_modules/assigners/__init__.py | 4 + .../assigners/max_3d_iou_assigner.py | 158 + mmdet3d/models/task_modules/builder.py | 29 + .../models/task_modules/coders/__init__.py | 18 + .../coders/anchor_free_bbox_coder.py | 130 + .../coders/centerpoint_bbox_coders.py | 229 ++ .../coders/delta_xyzwhlr_bbox_coder.py | 91 + .../task_modules/coders/fcos3d_bbox_coder.py | 127 + .../coders/groupfree3d_bbox_coder.py | 191 ++ .../coders/monoflex_bbox_coder.py | 515 ++++ .../coders/partial_bin_based_bbox_coder.py | 241 ++ .../task_modules/coders/pgd_bbox_coder.py | 128 + .../coders/point_xyzwhlr_bbox_coder.py | 117 + .../task_modules/coders/smoke_bbox_coder.py | 208 ++ .../models/task_modules/samplers/__init__.py | 15 + 
.../samplers/iou_neg_piecewise_sampler.py | 187 ++ .../task_modules/samplers/pseudosample.py | 61 + mmdet3d/models/task_modules/voxel/__init__.py | 4 + .../task_modules/voxel/voxel_generator.py | 283 ++ mmdet3d/models/test_time_augs/__init__.py | 4 + mmdet3d/models/test_time_augs/merge_augs.py | 98 + mmdet3d/models/utils/__init__.py | 15 + mmdet3d/models/utils/add_prefix.py | 18 + mmdet3d/models/utils/clip_sigmoid.py | 18 + mmdet3d/models/utils/edge_indices.py | 91 + mmdet3d/models/utils/gaussian.py | 169 ++ mmdet3d/models/utils/gen_keypoints.py | 84 + mmdet3d/models/utils/handle_objs.py | 149 + mmdet3d/models/voxel_encoders/__init__.py | 9 + .../models/voxel_encoders/pillar_encoder.py | 320 ++ mmdet3d/models/voxel_encoders/utils.py | 179 ++ .../models/voxel_encoders/voxel_encoder.py | 640 ++++ mmdet3d/registry.py | 141 + mmdet3d/structures/__init__.py | 49 + mmdet3d/structures/bbox_3d/__init__.py | 18 + mmdet3d/structures/bbox_3d/base_box3d.py | 584 ++++ mmdet3d/structures/bbox_3d/box_3d_mode.py | 258 ++ mmdet3d/structures/bbox_3d/cam_box3d.py | 361 +++ mmdet3d/structures/bbox_3d/coord_3d_mode.py | 235 ++ mmdet3d/structures/bbox_3d/depth_box3d.py | 270 ++ mmdet3d/structures/bbox_3d/lidar_box3d.py | 215 ++ mmdet3d/structures/bbox_3d/utils.py | 357 +++ mmdet3d/structures/det3d_data_sample.py | 213 ++ mmdet3d/structures/ops/__init__.py | 38 + mmdet3d/structures/ops/box_np_ops.py | 828 +++++ mmdet3d/structures/ops/iou3d_calculator.py | 329 ++ mmdet3d/structures/ops/transforms.py | 77 + mmdet3d/structures/point_data.py | 161 + mmdet3d/structures/points/__init__.py | 30 + mmdet3d/structures/points/base_points.py | 440 +++ mmdet3d/structures/points/cam_points.py | 63 + mmdet3d/structures/points/depth_points.py | 58 + mmdet3d/structures/points/lidar_points.py | 58 + mmdet3d/testing/__init__.py | 12 + mmdet3d/testing/data_utils.py | 196 ++ mmdet3d/testing/model_utils.py | 154 + mmdet3d/utils/__init__.py | 17 + mmdet3d/utils/array_converter.py | 351 +++ mmdet3d/utils/collect_env.py | 22 + mmdet3d/utils/compat_cfg.py | 139 + mmdet3d/utils/misc.py | 106 + mmdet3d/utils/setup_env.py | 91 + mmdet3d/utils/typing_utils.py | 26 + mmdet3d/version.py | 28 + mmdet3d/visualization/__init__.py | 11 + mmdet3d/visualization/local_visualizer.py | 833 +++++ mmdet3d/visualization/vis_utils.py | 177 ++ model-index.yml | 30 + projects/BEVFusion/README.md | 126 + projects/BEVFusion/bevfusion/__init__.py | 18 + projects/BEVFusion/bevfusion/bevfusion.py | 242 ++ .../BEVFusion/bevfusion/bevfusion_necks.py | 99 + projects/BEVFusion/bevfusion/depth_lss.py | 354 +++ projects/BEVFusion/bevfusion/loading.py | 208 ++ projects/BEVFusion/bevfusion/ops/__init__.py | 7 + .../bevfusion/ops/bev_pool/__init__.py | 3 + .../bevfusion/ops/bev_pool/bev_pool.py | 94 + .../bevfusion/ops/bev_pool/src/bev_pool.cpp | 94 + .../ops/bev_pool/src/bev_pool_cuda.cu | 98 + .../BEVFusion/bevfusion/ops/voxel/__init__.py | 4 + .../bevfusion/ops/voxel/scatter_points.py | 112 + .../ops/voxel/src/scatter_points_cpu.cpp | 122 + .../ops/voxel/src/scatter_points_cuda.cu | 310 ++ .../bevfusion/ops/voxel/src/voxelization.cpp | 13 + .../bevfusion/ops/voxel/src/voxelization.h | 142 + .../ops/voxel/src/voxelization_cpu.cpp | 173 ++ .../ops/voxel/src/voxelization_cuda.cu | 530 ++++ .../BEVFusion/bevfusion/ops/voxel/voxelize.py | 161 + .../BEVFusion/bevfusion/sparse_encoder.py | 151 + projects/BEVFusion/bevfusion/transformer.py | 110 + projects/BEVFusion/bevfusion/transforms_3d.py | 195 ++ .../BEVFusion/bevfusion/transfusion_head.py | 841 ++++++ 
projects/BEVFusion/bevfusion/utils.py | 306 ++ ...75_second_secfpn_8xb4-cyclic-20e_nus-3d.py | 430 +++ projects/BEVFusion/setup.py | 71 + projects/CenterFormer/README.md | 82 + .../CenterFormer/centerformer/__init__.py | 11 + .../CenterFormer/centerformer/bbox_ops.py | 41 + .../CenterFormer/centerformer/centerformer.py | 180 ++ .../centerformer/centerformer_backbone.py | 980 ++++++ .../centerformer/centerformer_head.py | 582 ++++ projects/CenterFormer/centerformer/losses.py | 58 + .../centerformer/multi_scale_deform_attn.py | 229 ++ .../CenterFormer/centerformer/transformer.py | 261 ++ ...-attn_4xb4-cyclic-20e_waymoD5-3d-3class.py | 308 ++ projects/DETR3D/README.md | 147 + .../DETR3D/configs/detr3d_r101_gridmask.py | 258 ++ .../configs/detr3d_r101_gridmask_cbgs.py | 80 + .../detr3d_r50_bert_gridmask_halfdata.py | 293 ++ .../configs/detr3d_r50_gridmask_halfdata.py | 259 ++ .../detr3d_vovnet_gridmask_trainval_cbgs.py | 52 + projects/DETR3D/detr3d/__init__.py | 13 + projects/DETR3D/detr3d/base.py | 156 + projects/DETR3D/detr3d/bert.py | 232 ++ projects/DETR3D/detr3d/detr3d.py | 571 ++++ projects/DETR3D/detr3d/detr3d_head.py | 447 +++ projects/DETR3D/detr3d/detr3d_transformer.py | 447 +++ projects/DETR3D/detr3d/glip.py | 168 ++ projects/DETR3D/detr3d/grid_mask.py | 142 + .../DETR3D/detr3d/hungarian_assigner_3d.py | 135 + projects/DETR3D/detr3d/match_cost.py | 34 + projects/DETR3D/detr3d/nms_free_coder.py | 118 + projects/DETR3D/detr3d/single_stage.py | 149 + projects/DETR3D/detr3d/util.py | 76 + projects/DETR3D/detr3d/vovnet.py | 442 +++ .../DETR3D/layers/transformer/__init__.py | 41 + .../transformer/conditional_detr_layers.py | 170 ++ .../layers/transformer/dab_detr_layers.py | 298 ++ .../layers/transformer/ddq_detr_layers.py | 223 ++ .../transformer/deformable_detr_layers.py | 265 ++ .../DETR3D/layers/transformer/detr_layers.py | 459 +++ .../DETR3D/layers/transformer/dino_layers.py | 562 ++++ .../transformer/grounding_dino_layers.py | 271 ++ .../layers/transformer/mask2former_layers.py | 135 + .../layers/transformer/positional_encoding.py | 125 + projects/DETR3D/layers/transformer/utils.py | 810 +++++ projects/DETR3D/old_detr3d_converter.py | 25 + projects/PETR/README.md | 63 + .../petr_vovnet_gridmask_p4_800x320.py | 368 +++ projects/PETR/petr/__init__.py | 24 + projects/PETR/petr/cp_fpn.py | 211 ++ projects/PETR/petr/grid_mask.py | 146 + projects/PETR/petr/hungarian_assigner_3d.py | 142 + projects/PETR/petr/match_cost.py | 338 +++ projects/PETR/petr/nms_free_coder.py | 246 ++ projects/PETR/petr/petr.py | 282 ++ projects/PETR/petr/petr_head.py | 825 +++++ projects/PETR/petr/petr_transformer.py | 540 ++++ projects/PETR/petr/positional_encoding.py | 171 ++ projects/PETR/petr/transforms_3d.py | 209 ++ projects/PETR/petr/utils.py | 63 + projects/PETR/petr/vovnetcp.py | 475 +++ projects/TR3D/README.md | 97 + projects/TR3D/configs/tr3d.py | 43 + .../configs/tr3d_1xb16_s3dis-3d-5class.py | 51 + .../configs/tr3d_1xb16_scannet-3d-18class.py | 68 + .../configs/tr3d_1xb16_sunrgbd-3d-10class.py | 62 + projects/TR3D/tr3d/__init__.py | 11 + projects/TR3D/tr3d/axis_aligned_iou_loss.py | 117 + projects/TR3D/tr3d/mink_resnet.py | 54 + projects/TR3D/tr3d/rotated_iou_loss.py | 149 + projects/TR3D/tr3d/tr3d_head.py | 472 +++ projects/TR3D/tr3d/tr3d_neck.py | 111 + projects/TR3D/tr3d/transforms_3d.py | 74 + projects/example_project/README.md | 115 + ...affe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py | 7 + projects/example_project/dummy/__init__.py | 3 + .../example_project/dummy/dummy_resnet.py | 15 + 
requirements.txt | 4 + requirements/build.txt | 0 requirements/docs.txt | 8 + requirements/mminstall.txt | 3 + requirements/optional.txt | 3 + requirements/readthedocs.txt | 5 + requirements/runtime.txt | 11 + requirements/tests.txt | 14 + resources/mmdet3d_outdoor_demo.gif | Bin 0 -> 830279 bytes resources/nuimages_demo.gif | Bin 0 -> 885289 bytes resources/open3d_visual.gif | Bin 0 -> 947665 bytes setup.cfg | 16 + setup.py | 225 ++ .../test_lidar_det3d_inferencer.py | 112 + .../test_lidar_seg3d_inferencer.py | 105 + .../test_mono_det3d_inferencer.py | 112 + .../test_multi_modality_det3d_inferencer.py | 120 + tests/test_datasets/test_dataset_wrappers.py | 122 + tests/test_datasets/test_kitti_dataset.py | 108 + tests/test_datasets/test_lyft_dataset.py | 70 + tests/test_datasets/test_nuscenes_dataset.py | 81 + tests/test_datasets/test_s3dis_dataset.py | 206 ++ tests/test_datasets/test_scannet_dataset.py | 229 ++ .../test_semantickitti_dataset.py | 115 + tests/test_datasets/test_sunrgbd_dataset.py | 97 + .../test_transforms/test_formating.py | 36 + .../test_transforms/test_loading.py | 110 + .../test_transforms/test_transforms_3d.py | 303 ++ tests/test_datasets/test_transforms/utils.py | 192 ++ .../test_disable_object_sample_hook.py | 75 + .../test_hooks/test_visualization_hook.py | 87 + .../test_functional/test_instance_seg_eval.py | 75 + .../test_functional/test_kitti_eval.py | 266 ++ .../test_functional/test_panoptic_seg_eval.py | 101 + .../test_functional/test_seg_eval.py | 39 + .../test_metrics/test_indoor_metric.py | 65 + .../test_metrics/test_instance_seg_metric.py | 75 + .../test_metrics/test_kitti_metric.py | 89 + .../test_metrics/test_panoptic_seg_metric.py | 123 + .../test_metrics/test_seg_metric.py | 54 + .../test_cylinder3d_backbone.py | 32 + .../test_models/test_backbones/test_dgcnn.py | 39 + tests/test_models/test_backbones/test_dla.py | 26 + .../test_backbones/test_mink_resnet.py | 58 + .../test_backbones/test_minkunet_backbone.py | 34 + .../test_backbones/test_multi_backbone.py | 117 + .../test_backbones/test_pointnet2_sa_msg.py | 120 + .../test_backbones/test_pointnet2_sa_ssg.py | 74 + .../test_backbones/test_spvcnn_backbone.py | 34 + .../test_data_preprocessor.py | 128 + .../test_decode_heads/test_cylinder3d_head.py | 67 + .../test_decode_heads/test_dgcnn_head.py | 52 + .../test_decode_heads/test_minkunet_head.py | 54 + .../test_decode_heads/test_paconv_head.py | 68 + .../test_decode_heads/test_pointnet2_head.py | 69 + .../test_dense_heads/test_anchor3d_head.py | 196 ++ .../test_dense_heads/test_fcaf3d_head.py | 84 + .../test_dense_heads/test_fcos_mono3d_head.py | 185 ++ .../test_dense_heads/test_freeanchors.py | 80 + .../test_dense_heads/test_imvoxel_head.py | 62 + .../test_dense_heads/test_monoflex_head.py | 68 + .../test_dense_heads/test_pgd_head.py | 210 ++ .../test_smoke_mono3d_head.py | 130 + .../test_models/test_dense_heads/test_ssn.py | 79 + .../test_models/test_detectors/test_3dssd.py | 39 + .../test_detectors/test_center_point.py | 63 + .../test_models/test_detectors/test_fcaf3d.py | 48 + .../test_detectors/test_groupfree3d.py | 49 + .../test_models/test_detectors/test_h3dnet.py | 46 + .../test_detectors/test_imvotenet.py | 80 + .../test_detectors/test_imvoxelnet.py | 89 + .../test_models/test_detectors/test_mvxnet.py | 47 + .../test_models/test_detectors/test_parta2.py | 61 + .../test_detectors/test_pointrcnn.py | 46 + .../test_models/test_detectors/test_pvrcnn.py | 63 + .../test_models/test_detectors/test_sassd.py | 43 + .../test_detectors/test_votenet.py | 72 
+ .../test_detectors/test_voxelnet.py | 73 + .../test_models/test_layers/test_box3d_nms.py | 114 + .../test_dgcnn_fa_module.py | 18 + .../test_dgcnn_fp_module.py | 24 + .../test_dgcnn_gf_module.py | 57 + .../test_fusion_coord_trans.py | 137 + .../test_fusion_layers/test_point_fusion.py | 61 + .../test_fusion_layers/test_vote_fusion.py | 322 ++ .../test_paconv/test_paconv_modules.py | 300 ++ .../test_paconv/test_paconv_ops.py | 49 + .../test_point_fp_module.py | 30 + .../test_point_sa_module.py | 208 ++ .../test_spconv/test_spconv_module.py | 105 + .../test_torchsparse_module.py | 60 + .../test_layers/test_vote_module.py | 39 + .../test_losses/test_chamfer_disrance.py | 72 + .../test_losses/test_multibin_loss.py | 31 + .../test_paconv_regularization_loss.py | 63 + .../test_losses/test_rotated_iou_loss.py | 27 + .../test_uncertain_smooth_l1_loss.py | 40 + .../test_sparse_encoders.py | 49 + .../test_middle_encoders/test_sparse_unet.py | 56 + tests/test_models/test_necks/test_dla_neck.py | 47 + .../test_necks/test_imvoxel_neck.py | 16 + .../test_necks/test_pointnet2_fp_neck.py | 37 + .../test_models/test_necks/test_second_fpn.py | 82 + .../test_segmentor/test_minkunet.py | 46 + .../test_segmentors/test_cylinder3d.py | 42 + .../test_anchor/test_anchor_3d_generator.py | 263 ++ .../test_coders/test_anchor_free_box_coder.py | 112 + .../test_centerpoint_bbox_coder.py | 32 + .../test_coders/test_fcos3d_bbox_coder.py | 82 + .../test_coders/test_monoflex_bbox_coder.py | 72 + .../test_partial_bin_based_box_coder.py | 219 ++ .../test_coders/test_pgd_bbox_coder.py | 110 + .../test_point_xyzwhlr_bbox_coder.py | 35 + .../test_coders/test_smoke_bbox_coder.py | 36 + .../test_iou_piecewise_sampler.py | 51 + .../test_voxel/test_voxel_generator.py | 20 + tests/test_models/test_utils/test_utils.py | 289 ++ .../test_pillar_encoder.py | 26 + .../test_voxel_encoders.py | 46 + tests/test_samples/parta2_roihead_inputs.npz | Bin 0 -> 36522 bytes tests/test_structures/test_bbox/test_box3d.py | 1796 +++++++++++ .../test_bbox/test_coord_3d_mode.py | 351 +++ .../test_structures/test_det3d_data_sample.py | 154 + .../test_ops/test_box_np_ops.py | 83 + tests/test_structures/test_point_data.py | 95 + .../test_points/test_base_points.py | 268 ++ .../test_points/test_cam_points.py | 559 ++++ .../test_points/test_depth_points.py | 282 ++ tests/test_utils/test_compat_cfg.py | 113 + tests/test_utils/test_setup_env.py | 81 + tools/analysis_tools/analyze_logs.py | 209 ++ tools/analysis_tools/benchmark.py | 97 + tools/analysis_tools/get_flops.py | 83 + tools/create_data.py | 356 +++ tools/create_data.sh | 24 + .../dataset_converters/create_gt_database.py | 636 ++++ tools/dataset_converters/indoor_converter.py | 111 + tools/dataset_converters/kitti_converter.py | 626 ++++ tools/dataset_converters/kitti_data_utils.py | 668 +++++ tools/dataset_converters/lyft_converter.py | 273 ++ tools/dataset_converters/lyft_data_fixer.py | 39 + tools/dataset_converters/nuimage_converter.py | 227 ++ .../dataset_converters/nuscenes_converter.py | 630 ++++ tools/dataset_converters/s3dis_data_utils.py | 247 ++ .../dataset_converters/scannet_data_utils.py | 299 ++ .../semantickitti_converter.py | 103 + .../dataset_converters/sunrgbd_data_utils.py | 227 ++ .../dataset_converters/update_infos_to_v2.py | 1157 +++++++ tools/dataset_converters/waymo_converter.py | 632 ++++ tools/deployment/mmdet3d2torchserve.py | 111 + tools/deployment/mmdet3d_handler.py | 120 + tools/deployment/test_torchserver.py | 56 + tools/dist_test.sh | 22 + tools/dist_train.sh | 19 + 
tools/misc/browse_dataset.py | 149 + tools/misc/fuse_conv_bn.py | 68 + tools/misc/print_config.py | 27 + tools/misc/visualize_results.py | 50 + .../convert_h3dnet_checkpoints.py | 177 ++ .../convert_votenet_checkpoints.py | 153 + tools/model_converters/publish_model.py | 36 + tools/model_converters/regnet2mmdet.py | 90 + tools/slurm_test.sh | 24 + tools/slurm_train.sh | 24 + tools/test.py | 127 + tools/train.py | 135 + tools/update_data_coords.py | 168 ++ tools/update_data_coords.sh | 22 + visual.py | 704 +++++ visual2.py | 477 +++ 935 files changed, 145125 insertions(+) create mode 100755 .circleci/config.yml create mode 100755 .circleci/docker/Dockerfile create mode 100755 .circleci/test.yml create mode 100755 .dev_scripts/benchmark_full_models.txt create mode 100755 .dev_scripts/benchmark_options.py create mode 100755 .dev_scripts/benchmark_train_models.txt create mode 100755 .dev_scripts/covignore.cfg create mode 100755 .dev_scripts/diff_coverage_test.sh create mode 100755 .dev_scripts/gather_models.py create mode 100755 .dev_scripts/gen_benchmark_script.py create mode 100755 .dev_scripts/linter.sh create mode 100755 .dev_scripts/test_benchmark.sh create mode 100755 .dev_scripts/train_benchmark.sh create mode 100755 .github/CODE_OF_CONDUCT.md create mode 100755 .github/CONTRIBUTING.md create mode 100755 .github/ISSUE_TEMPLATE/config.yml create mode 100755 .github/ISSUE_TEMPLATE/error-report.md create mode 100755 .github/ISSUE_TEMPLATE/feature_request.md create mode 100755 .github/ISSUE_TEMPLATE/general_questions.md create mode 100755 .github/ISSUE_TEMPLATE/reimplementation_questions.md create mode 100755 .github/pull_request_template.md create mode 100755 .github/workflows/deploy.yml create mode 100755 .github/workflows/lint.yml create mode 100755 .github/workflows/merge_stage_test.yml create mode 100755 .github/workflows/pr_stage_test.yml create mode 100755 .github/workflows/test_mim.yml create mode 100755 .gitignore create mode 100755 .pre-commit-config-zh-cn.yaml create mode 100755 .pre-commit-config.yaml create mode 100755 .readthedocs.yml create mode 100755 CITATION.cff create mode 100755 LICENSE create mode 100755 MANIFEST.in create mode 100755 README.md create mode 100755 README_zh-CN.md create mode 100755 configs/3dssd/3dssd_4xb4_kitti-3d-car.py create mode 100755 configs/3dssd/README.md create mode 100755 configs/3dssd/metafile.yml create mode 100755 configs/_base_/datasets/kitti-3d-3class.py create mode 100755 configs/_base_/datasets/kitti-3d-car.py create mode 100755 configs/_base_/datasets/kitti-mono3d.py create mode 100755 configs/_base_/datasets/lyft-3d-range100.py create mode 100755 configs/_base_/datasets/lyft-3d.py create mode 100755 configs/_base_/datasets/nuim-instance.py create mode 100755 configs/_base_/datasets/nus-3d.py create mode 100755 configs/_base_/datasets/nus-mono3d.py create mode 100755 configs/_base_/datasets/s3dis-3d.py create mode 100755 configs/_base_/datasets/s3dis-seg.py create mode 100755 configs/_base_/datasets/scannet-3d.py create mode 100755 configs/_base_/datasets/scannet-seg.py create mode 100755 configs/_base_/datasets/semantickitti.py create mode 100755 configs/_base_/datasets/sunrgbd-3d.py create mode 100755 configs/_base_/datasets/waymoD5-3d-3class.py create mode 100755 configs/_base_/datasets/waymoD5-3d-car.py create mode 100755 configs/_base_/datasets/waymoD5-fov-mono3d-3class.py create mode 100755 configs/_base_/datasets/waymoD5-mv-mono3d-3class.py create mode 100755 configs/_base_/datasets/waymoD5-mv3d-3class.py create mode 100755 
configs/_base_/default_runtime.py create mode 100755 configs/_base_/models/3dssd.py create mode 100755 configs/_base_/models/cascade-mask-rcnn_r50_fpn.py create mode 100755 configs/_base_/models/centerpoint_pillar02_second_secfpn_nus.py create mode 100755 configs/_base_/models/centerpoint_voxel01_second_secfpn_nus.py create mode 100755 configs/_base_/models/cylinder3d.py create mode 100755 configs/_base_/models/dgcnn.py create mode 100755 configs/_base_/models/fcaf3d.py create mode 100755 configs/_base_/models/fcos3d.py create mode 100755 configs/_base_/models/groupfree3d.py create mode 100755 configs/_base_/models/h3dnet.py create mode 100755 configs/_base_/models/imvotenet.py create mode 100755 configs/_base_/models/mask-rcnn_r50_fpn.py create mode 100755 configs/_base_/models/minkunet.py create mode 100755 configs/_base_/models/multiview_dfm.py create mode 100755 configs/_base_/models/paconv_ssg-cuda.py create mode 100755 configs/_base_/models/paconv_ssg.py create mode 100755 configs/_base_/models/parta2.py create mode 100755 configs/_base_/models/pgd.py create mode 100755 configs/_base_/models/point_rcnn.py create mode 100755 configs/_base_/models/pointnet2_msg.py create mode 100755 configs/_base_/models/pointnet2_ssg.py create mode 100755 configs/_base_/models/pointpillars_hv_fpn_lyft.py create mode 100755 configs/_base_/models/pointpillars_hv_fpn_nus.py create mode 100755 configs/_base_/models/pointpillars_hv_fpn_range100_lyft.py create mode 100755 configs/_base_/models/pointpillars_hv_secfpn_kitti.py create mode 100755 configs/_base_/models/pointpillars_hv_secfpn_waymo.py create mode 100755 configs/_base_/models/second_hv_secfpn_kitti.py create mode 100755 configs/_base_/models/second_hv_secfpn_waymo.py create mode 100755 configs/_base_/models/smoke.py create mode 100755 configs/_base_/models/spvcnn.py create mode 100755 configs/_base_/models/votenet.py create mode 100755 configs/_base_/schedules/cosine.py create mode 100755 configs/_base_/schedules/cyclic-20e.py create mode 100755 configs/_base_/schedules/cyclic-40e.py create mode 100755 configs/_base_/schedules/mmdet-schedule-1x.py create mode 100755 configs/_base_/schedules/schedule-2x.py create mode 100755 configs/_base_/schedules/schedule-3x.py create mode 100755 configs/_base_/schedules/seg-cosine-100e.py create mode 100755 configs/_base_/schedules/seg-cosine-150e.py create mode 100755 configs/_base_/schedules/seg-cosine-200e.py create mode 100755 configs/_base_/schedules/seg-cosine-50e.py create mode 100755 configs/benchmark/hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py create mode 100755 configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py create mode 100755 configs/benchmark/hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py create mode 100755 configs/benchmark/hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py create mode 100755 configs/centerpoint/README.md create mode 100755 configs/centerpoint/centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py create mode 100755 
configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-flip-tta-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-flip-tta-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-tta-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py create mode 100755 configs/centerpoint/metafile.yml create mode 100755 configs/cylinder3d/README.md create mode 100755 configs/cylinder3d/cylinder3d_4xb4_3x_semantickitti.py create mode 100755 configs/cylinder3d/metafile.yml create mode 100755 configs/dfm/multiview-dfm_r101-dcn_16xb2_waymoD5-3d-3class.py create mode 100755 configs/dfm/multiview-dfm_r101-dcn_centerhead_16xb2_waymoD5-3d-3class.py create mode 100755 configs/dgcnn/README.md create mode 100755 configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py create mode 100755 configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py create mode 100755 configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py create mode 100755 configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py create mode 100755 configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py create mode 100755 configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py create mode 100755 configs/dgcnn/metafile.yml create mode 100755 configs/dynamic_voxelization/README.md create mode 100755 configs/dynamic_voxelization/metafile.yml create mode 100755 configs/dynamic_voxelization/pointpillars_dv_secfpn_8xb6-160e_kitti-3d-car.py create mode 100755 configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py create mode 100755 configs/dynamic_voxelization/second_dv_secfpn_8xb6-80e_kitti-3d-car.py create mode 100755 configs/fcaf3d/README.md create mode 100755 configs/fcaf3d/fcaf3d_2xb8_s3dis-3d-5class.py create mode 100755 configs/fcaf3d/fcaf3d_2xb8_scannet-3d-18class.py create mode 100755 configs/fcaf3d/fcaf3d_2xb8_sunrgbd-3d-10class.py create mode 100755 configs/fcaf3d/metafile.yml create mode 100755 configs/fcos3d/README.md create mode 100755 configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py create mode 100755 configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py create mode 100755 configs/fcos3d/metafile.yml create mode 100755 configs/free_anchor/README.md create mode 100755 configs/free_anchor/metafile.yml create mode 100755 configs/free_anchor/pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py create mode 100755 
configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py create mode 100755 configs/free_anchor/pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/groupfree3d/README.md create mode 100755 configs/groupfree3d/groupfree3d_head-L12-O256_4xb8_scannet-seg.py create mode 100755 configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py create mode 100755 configs/groupfree3d/groupfree3d_w2x-head-L12-O256_4xb8_scannet-seg.py create mode 100755 configs/groupfree3d/groupfree3d_w2x-head-L12-O512_4xb8_scannet-seg.py create mode 100755 configs/groupfree3d/metafile.yml create mode 100755 configs/h3dnet/README.md create mode 100755 configs/h3dnet/h3dnet_8xb3_scannet-seg.py create mode 100755 configs/h3dnet/metafile.yml create mode 100755 configs/imvotenet/README.md create mode 100755 configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py create mode 100755 configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py create mode 100755 configs/imvotenet/metafile.yml create mode 100755 configs/imvoxelnet/README.md create mode 100755 configs/imvoxelnet/imvoxelnet_2xb4_sunrgbd-3d-10class.py create mode 100755 configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py create mode 100755 configs/imvoxelnet/metafile.yml create mode 100755 configs/minkunet/README.md create mode 100755 configs/minkunet/metafile.yml create mode 100755 configs/minkunet/minkunet_w16_8xb2-15e_semantickitti.py create mode 100755 configs/minkunet/minkunet_w20_8xb2-15e_semantickitti.py create mode 100755 configs/minkunet/minkunet_w32_8xb2-15e_semantickitti.py create mode 100755 configs/monoflex/README.md create mode 100755 configs/monoflex/metafile.yml create mode 100755 configs/mvxnet/README.md create mode 100755 configs/mvxnet/metafile.yml create mode 100755 configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py create mode 100755 configs/nuimages/README.md create mode 100755 configs/nuimages/cascade-mask-rcnn-r50-fpn_coco-20e_nuim.py create mode 100755 configs/nuimages/cascade-mask-rcnn_r101_fpn_1x_nuim.py create mode 100755 configs/nuimages/cascade-mask-rcnn_r50_fpn_1x_nuim.py create mode 100755 configs/nuimages/cascade-mask-rcnn_r50_fpn_coco-20e-1x_nuim.py create mode 100755 configs/nuimages/cascade-mask-rcnn_x101_32x4d_fpn_1x_nuim.py create mode 100755 configs/nuimages/htc_r50_fpn_1x_nuim.py create mode 100755 configs/nuimages/htc_r50_fpn_coco-20e-1x_nuim.py create mode 100755 configs/nuimages/htc_r50_fpn_coco-20e_nuim.py create mode 100755 configs/nuimages/htc_r50_fpn_head-without-semantic_1x_nuim.py create mode 100755 configs/nuimages/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e-1xb16_nuim.py create mode 100755 configs/nuimages/mask-rcnn_r101_fpn_1x_nuim.py create mode 100755 configs/nuimages/mask-rcnn_r50_caffe_fpn_1x_nuim.py create mode 100755 configs/nuimages/mask-rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py create mode 100755 configs/nuimages/mask-rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py create mode 100755 configs/nuimages/mask-rcnn_r50_fpn_1x_nuim.py create mode 100755 configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nuim.py create mode 100755 configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nus-2d.py create mode 100755 configs/nuimages/mask-rcnn_x101_32x4d_fpn_1x_nuim.py create mode 100755 configs/nuimages/metafile.yml create mode 100755 configs/paconv/README.md create mode 100755 configs/paconv/metafile.yml 
create mode 100755 configs/paconv/paconv_ssg-cuda_8xb8-cosine-200e_s3dis-seg.py create mode 100755 configs/paconv/paconv_ssg_8xb8-cosine-150e_s3dis-seg.py create mode 100755 configs/parta2/README.md create mode 100755 configs/parta2/metafile.yml create mode 100755 configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py create mode 100755 configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-car.py create mode 100755 configs/pgd/README.md create mode 100755 configs/pgd/metafile.yml create mode 100755 configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py create mode 100755 configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune.py create mode 100755 configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py create mode 100755 configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune.py create mode 100755 configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py create mode 100755 configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py create mode 100755 configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mono3d.py create mode 100755 configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py create mode 100755 configs/point_rcnn/README.md create mode 100755 configs/point_rcnn/metafile.yml create mode 100755 configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py create mode 100755 configs/pointnet2/README.md create mode 100755 configs/pointnet2/metafile.yml create mode 100755 configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py create mode 100755 configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py create mode 100755 configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py create mode 100755 configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py create mode 100755 configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py create mode 100755 configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py create mode 100755 configs/pointpillars/README.md create mode 100755 configs/pointpillars/metafile.yml create mode 100755 configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d-range100.py create mode 100755 configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py create mode 100755 configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py create mode 100755 configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-car.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d-range100.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py create mode 100755 configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/pv_rcnn/README.md create mode 100755 configs/pv_rcnn/metafile.yml create mode 100755 configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py create mode 100755 
configs/regnet/README.md create mode 100755 configs/regnet/metafile.yml create mode 100755 configs/regnet/pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb2-2x_lyft-3d.py create mode 100755 configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_range100_8xb2-2x_lyft-3d.py create mode 100755 configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb2-2x_lyft-3d.py create mode 100755 configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py create mode 100755 configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_range100_8xb2-2x_lyft-3d.py create mode 100755 configs/sassd/README.md create mode 100755 configs/sassd/sassd_8xb6-80e_kitti-3d-3class.py create mode 100755 configs/second/README.md create mode 100755 configs/second/metafile.yml create mode 100755 configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py create mode 100755 configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-car.py create mode 100755 configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py create mode 100755 configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-car.py create mode 100755 configs/second/second_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py create mode 100755 configs/smoke/README.md create mode 100755 configs/smoke/metafile.yml create mode 100755 configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py create mode 100755 configs/spvcnn/README.md create mode 100755 configs/spvcnn/metafile.yml create mode 100755 configs/spvcnn/spvcnn_w16_8xb2-15e_semantickitti.py create mode 100755 configs/spvcnn/spvcnn_w20_8xb2-15e_semantickitti.py create mode 100755 configs/spvcnn/spvcnn_w32_8xb2-15e_semantickitti.py create mode 100755 configs/ssn/README.md create mode 100755 configs/ssn/metafile.yml create mode 100755 configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py create mode 100755 configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py create mode 100755 configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py create mode 100755 configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py create mode 100755 configs/votenet/README.md create mode 100755 configs/votenet/metafile.yml create mode 100755 configs/votenet/votenet_8xb16_sunrgbd-3d.py create mode 100755 configs/votenet/votenet_8xb8_scannet-3d.py create mode 100755 configs/votenet/votenet_head-iouloss_8xb8_scannet-3d.py create mode 100755 demo/mono_det_demo.py create mode 100755 demo/multi_modality_demo.py create mode 100755 demo/pcd_demo.py create mode 100755 demo/pcd_seg_demo.py create mode 100755 docker/Dockerfile create mode 100755 docker/serve/Dockerfile create mode 100755 docker/serve/config.properties create mode 100755 docker/serve/entrypoint.sh create mode 100755 docs/en/Makefile create mode 100755 docs/en/_static/css/readthedocs.css create mode 100755 docs/en/advanced_guides/customize_dataset.md create mode 100755 docs/en/advanced_guides/customize_models.md create mode 100755 docs/en/advanced_guides/customize_runtime.md create mode 100755 docs/en/advanced_guides/datasets/index.rst create mode 100755 docs/en/advanced_guides/datasets/kitti_det.md create mode 100755 docs/en/advanced_guides/datasets/lyft_det.md create mode 100755 docs/en/advanced_guides/datasets/nuscenes_det.md create mode 100755 docs/en/advanced_guides/datasets/s3dis_sem_seg.md create mode 100755 docs/en/advanced_guides/datasets/scannet_det.md 
create mode 100755 docs/en/advanced_guides/datasets/scannet_sem_seg.md create mode 100755 docs/en/advanced_guides/datasets/sunrgbd_det.md create mode 100755 docs/en/advanced_guides/datasets/waymo_det.md create mode 100755 docs/en/advanced_guides/index.rst create mode 100755 docs/en/advanced_guides/pure_point_cloud_dataset.md create mode 100755 docs/en/advanced_guides/supported_tasks/index.rst create mode 100755 docs/en/advanced_guides/supported_tasks/lidar_det3d.md create mode 100755 docs/en/advanced_guides/supported_tasks/lidar_sem_seg3d.md create mode 100755 docs/en/advanced_guides/supported_tasks/vision_det3d.md create mode 100755 docs/en/api.rst create mode 100755 docs/en/conf.py create mode 100755 docs/en/get_started.md create mode 100755 docs/en/index.rst create mode 100755 docs/en/make.bat create mode 100755 docs/en/migration.md create mode 100755 docs/en/model_zoo.md create mode 100755 docs/en/notes/benchmarks.md create mode 100755 docs/en/notes/changelog.md create mode 100755 docs/en/notes/changelog_v1.0.x.md create mode 100755 docs/en/notes/compatibility.md create mode 100755 docs/en/notes/contribution_guides.md create mode 100755 docs/en/notes/faq.md create mode 100755 docs/en/notes/index.rst create mode 100755 docs/en/stat.py create mode 100755 docs/en/switch_language.md create mode 100755 docs/en/user_guides/2_new_data_model.md create mode 100755 docs/en/user_guides/backends_support.md create mode 100755 docs/en/user_guides/config.md create mode 100755 docs/en/user_guides/coord_sys_tutorial.md create mode 100755 docs/en/user_guides/data_pipeline.md create mode 100755 docs/en/user_guides/dataset_prepare.md create mode 100755 docs/en/user_guides/index.rst create mode 100755 docs/en/user_guides/inference.md create mode 100755 docs/en/user_guides/model_deployment.md create mode 100755 docs/en/user_guides/train_test.md create mode 100755 docs/en/user_guides/useful_tools.md create mode 100755 docs/en/user_guides/visualization.md create mode 100755 docs/zh_cn/Makefile create mode 100755 docs/zh_cn/_static/css/readthedocs.css create mode 100755 docs/zh_cn/advanced_guides/customize_dataset.md create mode 100755 docs/zh_cn/advanced_guides/customize_models.md create mode 100755 docs/zh_cn/advanced_guides/customize_runtime.md create mode 100755 docs/zh_cn/advanced_guides/datasets/index.rst create mode 100755 docs/zh_cn/advanced_guides/datasets/kitti_det.md create mode 100755 docs/zh_cn/advanced_guides/datasets/lyft_det.md create mode 100755 docs/zh_cn/advanced_guides/datasets/nuscenes_det.md create mode 100755 docs/zh_cn/advanced_guides/datasets/s3dis_sem_seg.md create mode 100755 docs/zh_cn/advanced_guides/datasets/scannet_det.md create mode 100755 docs/zh_cn/advanced_guides/datasets/scannet_sem_seg.md create mode 100755 docs/zh_cn/advanced_guides/datasets/sunrgbd_det.md create mode 100755 docs/zh_cn/advanced_guides/datasets/waymo_det.md create mode 100755 docs/zh_cn/advanced_guides/index.rst create mode 100755 docs/zh_cn/advanced_guides/supported_tasks/index.rst create mode 100755 docs/zh_cn/advanced_guides/supported_tasks/lidar_det3d.md create mode 100755 docs/zh_cn/advanced_guides/supported_tasks/lidar_sem_seg3d.md create mode 100755 docs/zh_cn/advanced_guides/supported_tasks/vision_det3d.md create mode 100755 docs/zh_cn/api.rst create mode 100755 docs/zh_cn/conf.py create mode 100755 docs/zh_cn/get_started.md create mode 100755 docs/zh_cn/index.rst create mode 100755 docs/zh_cn/model_zoo.md create mode 100755 docs/zh_cn/notes/benchmarks.md create mode 100755 
docs/zh_cn/notes/changelog.md create mode 100755 docs/zh_cn/notes/changelog_v1.0.x.md create mode 100755 docs/zh_cn/notes/compatibility.md create mode 100755 docs/zh_cn/notes/faq.md create mode 100755 docs/zh_cn/notes/index.rst create mode 100755 docs/zh_cn/stat.py create mode 100755 docs/zh_cn/switch_language.md create mode 100755 docs/zh_cn/user_guides/2_new_data_model.md create mode 100755 docs/zh_cn/user_guides/backends_support.md create mode 100755 docs/zh_cn/user_guides/config.md create mode 100755 docs/zh_cn/user_guides/coord_sys_tutorial.md create mode 100755 docs/zh_cn/user_guides/data_pipeline.md create mode 100755 docs/zh_cn/user_guides/dataset_prepare.md create mode 100755 docs/zh_cn/user_guides/index.rst create mode 100755 docs/zh_cn/user_guides/inference.md create mode 100755 docs/zh_cn/user_guides/model_deployment.md create mode 100755 docs/zh_cn/user_guides/train_test.md create mode 100755 docs/zh_cn/user_guides/useful_tools.md create mode 100755 docs/zh_cn/user_guides/visualization.md create mode 100755 mmdet3d/__init__.py create mode 100755 mmdet3d/apis/__init__.py create mode 100755 mmdet3d/apis/inference.py create mode 100755 mmdet3d/apis/inferencers/__init__.py create mode 100755 mmdet3d/apis/inferencers/base_3d_inferencer.py create mode 100755 mmdet3d/apis/inferencers/lidar_det3d_inferencer.py create mode 100755 mmdet3d/apis/inferencers/lidar_seg3d_inferencer.py create mode 100755 mmdet3d/apis/inferencers/mono_det3d_inferencer.py create mode 100755 mmdet3d/apis/inferencers/multi_modality_det3d_inferencer.py create mode 100755 mmdet3d/datasets/__init__.py create mode 100755 mmdet3d/datasets/convert_utils.py create mode 100755 mmdet3d/datasets/dataset_wrappers.py create mode 100755 mmdet3d/datasets/det3d_dataset.py create mode 100755 mmdet3d/datasets/kitti2d_dataset.py create mode 100755 mmdet3d/datasets/kitti_dataset.py create mode 100755 mmdet3d/datasets/lyft_dataset.py create mode 100755 mmdet3d/datasets/nuscenes_dataset.py create mode 100755 mmdet3d/datasets/s3dis_dataset.py create mode 100755 mmdet3d/datasets/scannet_dataset.py create mode 100755 mmdet3d/datasets/seg3d_dataset.py create mode 100755 mmdet3d/datasets/semantickitti_dataset.py create mode 100755 mmdet3d/datasets/sunrgbd_dataset.py create mode 100755 mmdet3d/datasets/transforms/__init__.py create mode 100755 mmdet3d/datasets/transforms/data_augment_utils.py create mode 100755 mmdet3d/datasets/transforms/dbsampler.py create mode 100755 mmdet3d/datasets/transforms/formating.py create mode 100755 mmdet3d/datasets/transforms/loading.py create mode 100755 mmdet3d/datasets/transforms/test_time_aug.py create mode 100755 mmdet3d/datasets/transforms/transforms_3d.py create mode 100755 mmdet3d/datasets/utils.py create mode 100755 mmdet3d/datasets/waymo_dataset.py create mode 100755 mmdet3d/engine/__init__.py create mode 100755 mmdet3d/engine/hooks/__init__.py create mode 100755 mmdet3d/engine/hooks/benchmark_hook.py create mode 100755 mmdet3d/engine/hooks/disable_object_sample_hook.py create mode 100755 mmdet3d/engine/hooks/visualization_hook.py create mode 100755 mmdet3d/evaluation/__init__.py create mode 100755 mmdet3d/evaluation/functional/__init__.py create mode 100755 mmdet3d/evaluation/functional/indoor_eval.py create mode 100755 mmdet3d/evaluation/functional/instance_seg_eval.py create mode 100755 mmdet3d/evaluation/functional/kitti_utils/__init__.py create mode 100755 mmdet3d/evaluation/functional/kitti_utils/eval.py create mode 100755 mmdet3d/evaluation/functional/kitti_utils/rotate_iou.py create mode 
100755 mmdet3d/evaluation/functional/lyft_eval.py create mode 100755 mmdet3d/evaluation/functional/panoptic_seg_eval.py create mode 100755 mmdet3d/evaluation/functional/scannet_utils/__init__.py create mode 100755 mmdet3d/evaluation/functional/scannet_utils/evaluate_semantic_instance.py create mode 100755 mmdet3d/evaluation/functional/scannet_utils/util_3d.py create mode 100755 mmdet3d/evaluation/functional/seg_eval.py create mode 100755 mmdet3d/evaluation/functional/waymo_utils/__init__.py create mode 100755 mmdet3d/evaluation/functional/waymo_utils/prediction_to_waymo.py create mode 100755 mmdet3d/evaluation/metrics/__init__.py create mode 100755 mmdet3d/evaluation/metrics/indoor_metric.py create mode 100755 mmdet3d/evaluation/metrics/instance_seg_metric.py create mode 100755 mmdet3d/evaluation/metrics/kitti_metric.py create mode 100755 mmdet3d/evaluation/metrics/lyft_metric.py create mode 100755 mmdet3d/evaluation/metrics/nuscenes_metric.py create mode 100755 mmdet3d/evaluation/metrics/panoptic_seg_metric.py create mode 100755 mmdet3d/evaluation/metrics/seg_metric.py create mode 100755 mmdet3d/evaluation/metrics/waymo_metric.py create mode 100755 mmdet3d/models/__init__.py create mode 100755 mmdet3d/models/backbones/__init__.py create mode 100755 mmdet3d/models/backbones/base_pointnet.py create mode 100755 mmdet3d/models/backbones/cylinder3d.py create mode 100755 mmdet3d/models/backbones/dgcnn.py create mode 100755 mmdet3d/models/backbones/dla.py create mode 100755 mmdet3d/models/backbones/mink_resnet.py create mode 100755 mmdet3d/models/backbones/minkunet_backbone.py create mode 100755 mmdet3d/models/backbones/multi_backbone.py create mode 100755 mmdet3d/models/backbones/nostem_regnet.py create mode 100755 mmdet3d/models/backbones/pointnet2_sa_msg.py create mode 100755 mmdet3d/models/backbones/pointnet2_sa_ssg.py create mode 100755 mmdet3d/models/backbones/second.py create mode 100755 mmdet3d/models/backbones/spvcnn_backone.py create mode 100755 mmdet3d/models/data_preprocessors/__init__.py create mode 100755 mmdet3d/models/data_preprocessors/data_preprocessor.py create mode 100755 mmdet3d/models/data_preprocessors/utils.py create mode 100755 mmdet3d/models/data_preprocessors/voxelize.py create mode 100755 mmdet3d/models/decode_heads/__init__.py create mode 100755 mmdet3d/models/decode_heads/cylinder3d_head.py create mode 100755 mmdet3d/models/decode_heads/decode_head.py create mode 100755 mmdet3d/models/decode_heads/dgcnn_head.py create mode 100755 mmdet3d/models/decode_heads/minkunet_head.py create mode 100755 mmdet3d/models/decode_heads/paconv_head.py create mode 100755 mmdet3d/models/decode_heads/pointnet2_head.py create mode 100755 mmdet3d/models/dense_heads/__init__.py create mode 100755 mmdet3d/models/dense_heads/anchor3d_head.py create mode 100755 mmdet3d/models/dense_heads/anchor_free_mono3d_head.py create mode 100755 mmdet3d/models/dense_heads/base_3d_dense_head.py create mode 100755 mmdet3d/models/dense_heads/base_conv_bbox_head.py create mode 100755 mmdet3d/models/dense_heads/base_mono3d_dense_head.py create mode 100755 mmdet3d/models/dense_heads/centerpoint_head.py create mode 100755 mmdet3d/models/dense_heads/fcaf3d_head.py create mode 100755 mmdet3d/models/dense_heads/fcos_mono3d_head.py create mode 100755 mmdet3d/models/dense_heads/free_anchor3d_head.py create mode 100755 mmdet3d/models/dense_heads/groupfree3d_head.py create mode 100755 mmdet3d/models/dense_heads/imvoxel_head.py create mode 100755 mmdet3d/models/dense_heads/monoflex_head.py create mode 100755 
mmdet3d/models/dense_heads/parta2_rpn_head.py create mode 100755 mmdet3d/models/dense_heads/pgd_head.py create mode 100755 mmdet3d/models/dense_heads/point_rpn_head.py create mode 100755 mmdet3d/models/dense_heads/shape_aware_head.py create mode 100755 mmdet3d/models/dense_heads/smoke_mono3d_head.py create mode 100755 mmdet3d/models/dense_heads/ssd_3d_head.py create mode 100755 mmdet3d/models/dense_heads/train_mixins.py create mode 100755 mmdet3d/models/dense_heads/vote_head.py create mode 100755 mmdet3d/models/detectors/__init__.py create mode 100755 mmdet3d/models/detectors/base.py create mode 100755 mmdet3d/models/detectors/centerpoint.py create mode 100755 mmdet3d/models/detectors/dfm.py create mode 100755 mmdet3d/models/detectors/dynamic_voxelnet.py create mode 100755 mmdet3d/models/detectors/fcos_mono3d.py create mode 100755 mmdet3d/models/detectors/groupfree3dnet.py create mode 100755 mmdet3d/models/detectors/h3dnet.py create mode 100755 mmdet3d/models/detectors/imvotenet.py create mode 100755 mmdet3d/models/detectors/imvoxelnet.py create mode 100755 mmdet3d/models/detectors/mink_single_stage.py create mode 100755 mmdet3d/models/detectors/multiview_dfm.py create mode 100755 mmdet3d/models/detectors/mvx_faster_rcnn.py create mode 100755 mmdet3d/models/detectors/mvx_two_stage.py create mode 100755 mmdet3d/models/detectors/parta2.py create mode 100755 mmdet3d/models/detectors/point_rcnn.py create mode 100755 mmdet3d/models/detectors/pv_rcnn.py create mode 100755 mmdet3d/models/detectors/sassd.py create mode 100755 mmdet3d/models/detectors/single_stage.py create mode 100755 mmdet3d/models/detectors/single_stage_mono3d.py create mode 100755 mmdet3d/models/detectors/smoke_mono3d.py create mode 100755 mmdet3d/models/detectors/ssd3dnet.py create mode 100755 mmdet3d/models/detectors/two_stage.py create mode 100755 mmdet3d/models/detectors/votenet.py create mode 100755 mmdet3d/models/detectors/voxelnet.py create mode 100644 mmdet3d/models/language_models/__init__.py create mode 100644 mmdet3d/models/language_models/bert.py create mode 100755 mmdet3d/models/layers/__init__.py create mode 100755 mmdet3d/models/layers/box3d_nms.py create mode 100755 mmdet3d/models/layers/dgcnn_modules/__init__.py create mode 100755 mmdet3d/models/layers/dgcnn_modules/dgcnn_fa_module.py create mode 100755 mmdet3d/models/layers/dgcnn_modules/dgcnn_fp_module.py create mode 100755 mmdet3d/models/layers/dgcnn_modules/dgcnn_gf_module.py create mode 100755 mmdet3d/models/layers/edge_fusion_module.py create mode 100755 mmdet3d/models/layers/fusion_layers/__init__.py create mode 100755 mmdet3d/models/layers/fusion_layers/coord_transform.py create mode 100755 mmdet3d/models/layers/fusion_layers/point_fusion.py create mode 100755 mmdet3d/models/layers/fusion_layers/vote_fusion.py create mode 100755 mmdet3d/models/layers/mlp.py create mode 100755 mmdet3d/models/layers/norm.py create mode 100755 mmdet3d/models/layers/paconv/__init__.py create mode 100755 mmdet3d/models/layers/paconv/paconv.py create mode 100755 mmdet3d/models/layers/paconv/utils.py create mode 100755 mmdet3d/models/layers/pointnet_modules/__init__.py create mode 100755 mmdet3d/models/layers/pointnet_modules/builder.py create mode 100755 mmdet3d/models/layers/pointnet_modules/paconv_sa_module.py create mode 100755 mmdet3d/models/layers/pointnet_modules/point_fp_module.py create mode 100755 mmdet3d/models/layers/pointnet_modules/point_sa_module.py create mode 100755 mmdet3d/models/layers/pointnet_modules/stack_point_sa_module.py create mode 100755 
mmdet3d/models/layers/sparse_block.py create mode 100755 mmdet3d/models/layers/spconv/__init__.py create mode 100755 mmdet3d/models/layers/spconv/overwrite_spconv/__init__.py create mode 100755 mmdet3d/models/layers/spconv/overwrite_spconv/write_spconv2.py create mode 100755 mmdet3d/models/layers/torchsparse/__init__.py create mode 100755 mmdet3d/models/layers/torchsparse/torchsparse_wrapper.py create mode 100755 mmdet3d/models/layers/torchsparse_block.py create mode 100755 mmdet3d/models/layers/transformer.py create mode 100755 mmdet3d/models/layers/vote_module.py create mode 100755 mmdet3d/models/losses/__init__.py create mode 100755 mmdet3d/models/losses/axis_aligned_iou_loss.py create mode 100755 mmdet3d/models/losses/chamfer_distance.py create mode 100755 mmdet3d/models/losses/lovasz_loss.py create mode 100755 mmdet3d/models/losses/multibin_loss.py create mode 100755 mmdet3d/models/losses/paconv_regularization_loss.py create mode 100755 mmdet3d/models/losses/rotated_iou_loss.py create mode 100755 mmdet3d/models/losses/uncertain_smooth_l1_loss.py create mode 100755 mmdet3d/models/middle_encoders/__init__.py create mode 100755 mmdet3d/models/middle_encoders/pillar_scatter.py create mode 100755 mmdet3d/models/middle_encoders/sparse_encoder.py create mode 100755 mmdet3d/models/middle_encoders/sparse_unet.py create mode 100755 mmdet3d/models/middle_encoders/voxel_set_abstraction.py create mode 100755 mmdet3d/models/necks/__init__.py create mode 100755 mmdet3d/models/necks/dla_neck.py create mode 100755 mmdet3d/models/necks/imvoxel_neck.py create mode 100755 mmdet3d/models/necks/pointnet2_fp_neck.py create mode 100755 mmdet3d/models/necks/second_fpn.py create mode 100755 mmdet3d/models/roi_heads/__init__.py create mode 100755 mmdet3d/models/roi_heads/base_3droi_head.py create mode 100755 mmdet3d/models/roi_heads/bbox_heads/__init__.py create mode 100755 mmdet3d/models/roi_heads/bbox_heads/h3d_bbox_head.py create mode 100755 mmdet3d/models/roi_heads/bbox_heads/parta2_bbox_head.py create mode 100755 mmdet3d/models/roi_heads/bbox_heads/point_rcnn_bbox_head.py create mode 100755 mmdet3d/models/roi_heads/bbox_heads/pv_rcnn_bbox_head.py create mode 100755 mmdet3d/models/roi_heads/h3d_roi_head.py create mode 100755 mmdet3d/models/roi_heads/mask_heads/__init__.py create mode 100755 mmdet3d/models/roi_heads/mask_heads/foreground_segmentation_head.py create mode 100755 mmdet3d/models/roi_heads/mask_heads/pointwise_semantic_head.py create mode 100755 mmdet3d/models/roi_heads/mask_heads/primitive_head.py create mode 100755 mmdet3d/models/roi_heads/part_aggregation_roi_head.py create mode 100755 mmdet3d/models/roi_heads/point_rcnn_roi_head.py create mode 100755 mmdet3d/models/roi_heads/pv_rcnn_roi_head.py create mode 100755 mmdet3d/models/roi_heads/roi_extractors/__init__.py create mode 100755 mmdet3d/models/roi_heads/roi_extractors/batch_roigridpoint_extractor.py create mode 100755 mmdet3d/models/roi_heads/roi_extractors/single_roiaware_extractor.py create mode 100755 mmdet3d/models/roi_heads/roi_extractors/single_roipoint_extractor.py create mode 100755 mmdet3d/models/segmentors/__init__.py create mode 100755 mmdet3d/models/segmentors/base.py create mode 100755 mmdet3d/models/segmentors/cylinder3d.py create mode 100755 mmdet3d/models/segmentors/encoder_decoder.py create mode 100755 mmdet3d/models/segmentors/minkunet.py create mode 100755 mmdet3d/models/task_modules/__init__.py create mode 100755 mmdet3d/models/task_modules/anchor/__init__.py create mode 100755 
mmdet3d/models/task_modules/anchor/anchor_3d_generator.py create mode 100755 mmdet3d/models/task_modules/anchor/builder.py create mode 100755 mmdet3d/models/task_modules/assigners/__init__.py create mode 100755 mmdet3d/models/task_modules/assigners/max_3d_iou_assigner.py create mode 100755 mmdet3d/models/task_modules/builder.py create mode 100755 mmdet3d/models/task_modules/coders/__init__.py create mode 100755 mmdet3d/models/task_modules/coders/anchor_free_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/coders/centerpoint_bbox_coders.py create mode 100755 mmdet3d/models/task_modules/coders/delta_xyzwhlr_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/coders/fcos3d_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/coders/groupfree3d_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/coders/monoflex_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/coders/partial_bin_based_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/coders/pgd_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/coders/point_xyzwhlr_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/coders/smoke_bbox_coder.py create mode 100755 mmdet3d/models/task_modules/samplers/__init__.py create mode 100755 mmdet3d/models/task_modules/samplers/iou_neg_piecewise_sampler.py create mode 100755 mmdet3d/models/task_modules/samplers/pseudosample.py create mode 100755 mmdet3d/models/task_modules/voxel/__init__.py create mode 100755 mmdet3d/models/task_modules/voxel/voxel_generator.py create mode 100755 mmdet3d/models/test_time_augs/__init__.py create mode 100755 mmdet3d/models/test_time_augs/merge_augs.py create mode 100755 mmdet3d/models/utils/__init__.py create mode 100755 mmdet3d/models/utils/add_prefix.py create mode 100755 mmdet3d/models/utils/clip_sigmoid.py create mode 100755 mmdet3d/models/utils/edge_indices.py create mode 100755 mmdet3d/models/utils/gaussian.py create mode 100755 mmdet3d/models/utils/gen_keypoints.py create mode 100755 mmdet3d/models/utils/handle_objs.py create mode 100755 mmdet3d/models/voxel_encoders/__init__.py create mode 100755 mmdet3d/models/voxel_encoders/pillar_encoder.py create mode 100755 mmdet3d/models/voxel_encoders/utils.py create mode 100755 mmdet3d/models/voxel_encoders/voxel_encoder.py create mode 100755 mmdet3d/registry.py create mode 100755 mmdet3d/structures/__init__.py create mode 100755 mmdet3d/structures/bbox_3d/__init__.py create mode 100755 mmdet3d/structures/bbox_3d/base_box3d.py create mode 100755 mmdet3d/structures/bbox_3d/box_3d_mode.py create mode 100755 mmdet3d/structures/bbox_3d/cam_box3d.py create mode 100755 mmdet3d/structures/bbox_3d/coord_3d_mode.py create mode 100755 mmdet3d/structures/bbox_3d/depth_box3d.py create mode 100755 mmdet3d/structures/bbox_3d/lidar_box3d.py create mode 100755 mmdet3d/structures/bbox_3d/utils.py create mode 100755 mmdet3d/structures/det3d_data_sample.py create mode 100755 mmdet3d/structures/ops/__init__.py create mode 100755 mmdet3d/structures/ops/box_np_ops.py create mode 100755 mmdet3d/structures/ops/iou3d_calculator.py create mode 100755 mmdet3d/structures/ops/transforms.py create mode 100755 mmdet3d/structures/point_data.py create mode 100755 mmdet3d/structures/points/__init__.py create mode 100755 mmdet3d/structures/points/base_points.py create mode 100755 mmdet3d/structures/points/cam_points.py create mode 100755 mmdet3d/structures/points/depth_points.py create mode 100755 mmdet3d/structures/points/lidar_points.py create mode 100755 
mmdet3d/testing/__init__.py create mode 100755 mmdet3d/testing/data_utils.py create mode 100755 mmdet3d/testing/model_utils.py create mode 100755 mmdet3d/utils/__init__.py create mode 100755 mmdet3d/utils/array_converter.py create mode 100755 mmdet3d/utils/collect_env.py create mode 100755 mmdet3d/utils/compat_cfg.py create mode 100755 mmdet3d/utils/misc.py create mode 100755 mmdet3d/utils/setup_env.py create mode 100755 mmdet3d/utils/typing_utils.py create mode 100755 mmdet3d/version.py create mode 100755 mmdet3d/visualization/__init__.py create mode 100755 mmdet3d/visualization/local_visualizer.py create mode 100755 mmdet3d/visualization/vis_utils.py create mode 100755 model-index.yml create mode 100755 projects/BEVFusion/README.md create mode 100755 projects/BEVFusion/bevfusion/__init__.py create mode 100755 projects/BEVFusion/bevfusion/bevfusion.py create mode 100755 projects/BEVFusion/bevfusion/bevfusion_necks.py create mode 100755 projects/BEVFusion/bevfusion/depth_lss.py create mode 100755 projects/BEVFusion/bevfusion/loading.py create mode 100755 projects/BEVFusion/bevfusion/ops/__init__.py create mode 100755 projects/BEVFusion/bevfusion/ops/bev_pool/__init__.py create mode 100755 projects/BEVFusion/bevfusion/ops/bev_pool/bev_pool.py create mode 100755 projects/BEVFusion/bevfusion/ops/bev_pool/src/bev_pool.cpp create mode 100755 projects/BEVFusion/bevfusion/ops/bev_pool/src/bev_pool_cuda.cu create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/__init__.py create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/scatter_points.py create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/src/scatter_points_cpu.cpp create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/src/scatter_points_cuda.cu create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.cpp create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.h create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/src/voxelization_cpu.cpp create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/src/voxelization_cuda.cu create mode 100755 projects/BEVFusion/bevfusion/ops/voxel/voxelize.py create mode 100755 projects/BEVFusion/bevfusion/sparse_encoder.py create mode 100755 projects/BEVFusion/bevfusion/transformer.py create mode 100755 projects/BEVFusion/bevfusion/transforms_3d.py create mode 100755 projects/BEVFusion/bevfusion/transfusion_head.py create mode 100755 projects/BEVFusion/bevfusion/utils.py create mode 100755 projects/BEVFusion/configs/bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py create mode 100755 projects/BEVFusion/setup.py create mode 100755 projects/CenterFormer/README.md create mode 100755 projects/CenterFormer/centerformer/__init__.py create mode 100755 projects/CenterFormer/centerformer/bbox_ops.py create mode 100755 projects/CenterFormer/centerformer/centerformer.py create mode 100755 projects/CenterFormer/centerformer/centerformer_backbone.py create mode 100755 projects/CenterFormer/centerformer/centerformer_head.py create mode 100755 projects/CenterFormer/centerformer/losses.py create mode 100755 projects/CenterFormer/centerformer/multi_scale_deform_attn.py create mode 100755 projects/CenterFormer/centerformer/transformer.py create mode 100755 projects/CenterFormer/configs/centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class.py create mode 100755 projects/DETR3D/README.md create mode 100755 projects/DETR3D/configs/detr3d_r101_gridmask.py create mode 100755 projects/DETR3D/configs/detr3d_r101_gridmask_cbgs.py create mode 
100644 projects/DETR3D/configs/detr3d_r50_bert_gridmask_halfdata.py create mode 100644 projects/DETR3D/configs/detr3d_r50_gridmask_halfdata.py create mode 100755 projects/DETR3D/configs/detr3d_vovnet_gridmask_trainval_cbgs.py create mode 100755 projects/DETR3D/detr3d/__init__.py create mode 100644 projects/DETR3D/detr3d/base.py create mode 100644 projects/DETR3D/detr3d/bert.py create mode 100755 projects/DETR3D/detr3d/detr3d.py create mode 100755 projects/DETR3D/detr3d/detr3d_head.py create mode 100755 projects/DETR3D/detr3d/detr3d_transformer.py create mode 100644 projects/DETR3D/detr3d/glip.py create mode 100755 projects/DETR3D/detr3d/grid_mask.py create mode 100755 projects/DETR3D/detr3d/hungarian_assigner_3d.py create mode 100755 projects/DETR3D/detr3d/match_cost.py create mode 100755 projects/DETR3D/detr3d/nms_free_coder.py create mode 100644 projects/DETR3D/detr3d/single_stage.py create mode 100755 projects/DETR3D/detr3d/util.py create mode 100755 projects/DETR3D/detr3d/vovnet.py create mode 100755 projects/DETR3D/layers/transformer/__init__.py create mode 100755 projects/DETR3D/layers/transformer/conditional_detr_layers.py create mode 100755 projects/DETR3D/layers/transformer/dab_detr_layers.py create mode 100755 projects/DETR3D/layers/transformer/ddq_detr_layers.py create mode 100755 projects/DETR3D/layers/transformer/deformable_detr_layers.py create mode 100755 projects/DETR3D/layers/transformer/detr_layers.py create mode 100755 projects/DETR3D/layers/transformer/dino_layers.py create mode 100755 projects/DETR3D/layers/transformer/grounding_dino_layers.py create mode 100755 projects/DETR3D/layers/transformer/mask2former_layers.py create mode 100644 projects/DETR3D/layers/transformer/positional_encoding.py create mode 100755 projects/DETR3D/layers/transformer/utils.py create mode 100755 projects/DETR3D/old_detr3d_converter.py create mode 100755 projects/PETR/README.md create mode 100755 projects/PETR/configs/petr_vovnet_gridmask_p4_800x320.py create mode 100755 projects/PETR/petr/__init__.py create mode 100755 projects/PETR/petr/cp_fpn.py create mode 100755 projects/PETR/petr/grid_mask.py create mode 100755 projects/PETR/petr/hungarian_assigner_3d.py create mode 100755 projects/PETR/petr/match_cost.py create mode 100755 projects/PETR/petr/nms_free_coder.py create mode 100755 projects/PETR/petr/petr.py create mode 100755 projects/PETR/petr/petr_head.py create mode 100755 projects/PETR/petr/petr_transformer.py create mode 100755 projects/PETR/petr/positional_encoding.py create mode 100755 projects/PETR/petr/transforms_3d.py create mode 100755 projects/PETR/petr/utils.py create mode 100755 projects/PETR/petr/vovnetcp.py create mode 100755 projects/TR3D/README.md create mode 100755 projects/TR3D/configs/tr3d.py create mode 100755 projects/TR3D/configs/tr3d_1xb16_s3dis-3d-5class.py create mode 100755 projects/TR3D/configs/tr3d_1xb16_scannet-3d-18class.py create mode 100755 projects/TR3D/configs/tr3d_1xb16_sunrgbd-3d-10class.py create mode 100755 projects/TR3D/tr3d/__init__.py create mode 100755 projects/TR3D/tr3d/axis_aligned_iou_loss.py create mode 100755 projects/TR3D/tr3d/mink_resnet.py create mode 100755 projects/TR3D/tr3d/rotated_iou_loss.py create mode 100755 projects/TR3D/tr3d/tr3d_head.py create mode 100755 projects/TR3D/tr3d/tr3d_neck.py create mode 100755 projects/TR3D/tr3d/transforms_3d.py create mode 100755 projects/example_project/README.md create mode 100755 projects/example_project/configs/fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py create mode 100755 
projects/example_project/dummy/__init__.py create mode 100755 projects/example_project/dummy/dummy_resnet.py create mode 100755 requirements.txt create mode 100755 requirements/build.txt create mode 100755 requirements/docs.txt create mode 100755 requirements/mminstall.txt create mode 100755 requirements/optional.txt create mode 100755 requirements/readthedocs.txt create mode 100755 requirements/runtime.txt create mode 100755 requirements/tests.txt create mode 100755 resources/mmdet3d_outdoor_demo.gif create mode 100755 resources/nuimages_demo.gif create mode 100755 resources/open3d_visual.gif create mode 100755 setup.cfg create mode 100755 setup.py create mode 100755 tests/test_apis/test_inferencers/test_lidar_det3d_inferencer.py create mode 100755 tests/test_apis/test_inferencers/test_lidar_seg3d_inferencer.py create mode 100755 tests/test_apis/test_inferencers/test_mono_det3d_inferencer.py create mode 100755 tests/test_apis/test_inferencers/test_multi_modality_det3d_inferencer.py create mode 100755 tests/test_datasets/test_dataset_wrappers.py create mode 100755 tests/test_datasets/test_kitti_dataset.py create mode 100755 tests/test_datasets/test_lyft_dataset.py create mode 100755 tests/test_datasets/test_nuscenes_dataset.py create mode 100755 tests/test_datasets/test_s3dis_dataset.py create mode 100755 tests/test_datasets/test_scannet_dataset.py create mode 100755 tests/test_datasets/test_semantickitti_dataset.py create mode 100755 tests/test_datasets/test_sunrgbd_dataset.py create mode 100755 tests/test_datasets/test_transforms/test_formating.py create mode 100755 tests/test_datasets/test_transforms/test_loading.py create mode 100755 tests/test_datasets/test_transforms/test_transforms_3d.py create mode 100755 tests/test_datasets/test_transforms/utils.py create mode 100755 tests/test_engine/test_hooks/test_disable_object_sample_hook.py create mode 100755 tests/test_engine/test_hooks/test_visualization_hook.py create mode 100755 tests/test_evaluation/test_functional/test_instance_seg_eval.py create mode 100755 tests/test_evaluation/test_functional/test_kitti_eval.py create mode 100755 tests/test_evaluation/test_functional/test_panoptic_seg_eval.py create mode 100755 tests/test_evaluation/test_functional/test_seg_eval.py create mode 100755 tests/test_evaluation/test_metrics/test_indoor_metric.py create mode 100755 tests/test_evaluation/test_metrics/test_instance_seg_metric.py create mode 100755 tests/test_evaluation/test_metrics/test_kitti_metric.py create mode 100755 tests/test_evaluation/test_metrics/test_panoptic_seg_metric.py create mode 100755 tests/test_evaluation/test_metrics/test_seg_metric.py create mode 100755 tests/test_models/test_backbones/test_cylinder3d_backbone.py create mode 100755 tests/test_models/test_backbones/test_dgcnn.py create mode 100755 tests/test_models/test_backbones/test_dla.py create mode 100755 tests/test_models/test_backbones/test_mink_resnet.py create mode 100755 tests/test_models/test_backbones/test_minkunet_backbone.py create mode 100755 tests/test_models/test_backbones/test_multi_backbone.py create mode 100755 tests/test_models/test_backbones/test_pointnet2_sa_msg.py create mode 100755 tests/test_models/test_backbones/test_pointnet2_sa_ssg.py create mode 100755 tests/test_models/test_backbones/test_spvcnn_backbone.py create mode 100755 tests/test_models/test_data_preprocessors/test_data_preprocessor.py create mode 100755 tests/test_models/test_decode_heads/test_cylinder3d_head.py create mode 100755 tests/test_models/test_decode_heads/test_dgcnn_head.py 
create mode 100755 tests/test_models/test_decode_heads/test_minkunet_head.py create mode 100755 tests/test_models/test_decode_heads/test_paconv_head.py create mode 100755 tests/test_models/test_decode_heads/test_pointnet2_head.py create mode 100755 tests/test_models/test_dense_heads/test_anchor3d_head.py create mode 100755 tests/test_models/test_dense_heads/test_fcaf3d_head.py create mode 100755 tests/test_models/test_dense_heads/test_fcos_mono3d_head.py create mode 100755 tests/test_models/test_dense_heads/test_freeanchors.py create mode 100755 tests/test_models/test_dense_heads/test_imvoxel_head.py create mode 100755 tests/test_models/test_dense_heads/test_monoflex_head.py create mode 100755 tests/test_models/test_dense_heads/test_pgd_head.py create mode 100755 tests/test_models/test_dense_heads/test_smoke_mono3d_head.py create mode 100755 tests/test_models/test_dense_heads/test_ssn.py create mode 100755 tests/test_models/test_detectors/test_3dssd.py create mode 100755 tests/test_models/test_detectors/test_center_point.py create mode 100755 tests/test_models/test_detectors/test_fcaf3d.py create mode 100755 tests/test_models/test_detectors/test_groupfree3d.py create mode 100755 tests/test_models/test_detectors/test_h3dnet.py create mode 100755 tests/test_models/test_detectors/test_imvotenet.py create mode 100755 tests/test_models/test_detectors/test_imvoxelnet.py create mode 100755 tests/test_models/test_detectors/test_mvxnet.py create mode 100755 tests/test_models/test_detectors/test_parta2.py create mode 100755 tests/test_models/test_detectors/test_pointrcnn.py create mode 100755 tests/test_models/test_detectors/test_pvrcnn.py create mode 100755 tests/test_models/test_detectors/test_sassd.py create mode 100755 tests/test_models/test_detectors/test_votenet.py create mode 100755 tests/test_models/test_detectors/test_voxelnet.py create mode 100755 tests/test_models/test_layers/test_box3d_nms.py create mode 100755 tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_fa_module.py create mode 100755 tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_fp_module.py create mode 100755 tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_gf_module.py create mode 100755 tests/test_models/test_layers/test_fusion_layers/test_fusion_coord_trans.py create mode 100755 tests/test_models/test_layers/test_fusion_layers/test_point_fusion.py create mode 100755 tests/test_models/test_layers/test_fusion_layers/test_vote_fusion.py create mode 100755 tests/test_models/test_layers/test_paconv/test_paconv_modules.py create mode 100755 tests/test_models/test_layers/test_paconv/test_paconv_ops.py create mode 100755 tests/test_models/test_layers/test_pointnet_modules/test_point_fp_module.py create mode 100755 tests/test_models/test_layers/test_pointnet_modules/test_point_sa_module.py create mode 100755 tests/test_models/test_layers/test_spconv/test_spconv_module.py create mode 100755 tests/test_models/test_layers/test_torchsparse/test_torchsparse_module.py create mode 100755 tests/test_models/test_layers/test_vote_module.py create mode 100755 tests/test_models/test_losses/test_chamfer_disrance.py create mode 100755 tests/test_models/test_losses/test_multibin_loss.py create mode 100755 tests/test_models/test_losses/test_paconv_regularization_loss.py create mode 100755 tests/test_models/test_losses/test_rotated_iou_loss.py create mode 100755 tests/test_models/test_losses/test_uncertain_smooth_l1_loss.py create mode 100755 tests/test_models/test_middle_encoders/test_sparse_encoders.py create mode 
100755 tests/test_models/test_middle_encoders/test_sparse_unet.py create mode 100755 tests/test_models/test_necks/test_dla_neck.py create mode 100755 tests/test_models/test_necks/test_imvoxel_neck.py create mode 100755 tests/test_models/test_necks/test_pointnet2_fp_neck.py create mode 100755 tests/test_models/test_necks/test_second_fpn.py create mode 100755 tests/test_models/test_segmentor/test_minkunet.py create mode 100755 tests/test_models/test_segmentors/test_cylinder3d.py create mode 100755 tests/test_models/test_task_modules/test_anchor/test_anchor_3d_generator.py create mode 100755 tests/test_models/test_task_modules/test_coders/test_anchor_free_box_coder.py create mode 100755 tests/test_models/test_task_modules/test_coders/test_centerpoint_bbox_coder.py create mode 100755 tests/test_models/test_task_modules/test_coders/test_fcos3d_bbox_coder.py create mode 100755 tests/test_models/test_task_modules/test_coders/test_monoflex_bbox_coder.py create mode 100755 tests/test_models/test_task_modules/test_coders/test_partial_bin_based_box_coder.py create mode 100755 tests/test_models/test_task_modules/test_coders/test_pgd_bbox_coder.py create mode 100755 tests/test_models/test_task_modules/test_coders/test_point_xyzwhlr_bbox_coder.py create mode 100755 tests/test_models/test_task_modules/test_coders/test_smoke_bbox_coder.py create mode 100755 tests/test_models/test_task_modules/test_samplers/test_iou_piecewise_sampler.py create mode 100755 tests/test_models/test_task_modules/test_voxel/test_voxel_generator.py create mode 100755 tests/test_models/test_utils/test_utils.py create mode 100755 tests/test_models/test_voxel_encoders/test_pillar_encoder.py create mode 100755 tests/test_models/test_voxel_encoders/test_voxel_encoders.py create mode 100755 tests/test_samples/parta2_roihead_inputs.npz create mode 100755 tests/test_structures/test_bbox/test_box3d.py create mode 100755 tests/test_structures/test_bbox/test_coord_3d_mode.py create mode 100755 tests/test_structures/test_det3d_data_sample.py create mode 100755 tests/test_structures/test_ops/test_box_np_ops.py create mode 100755 tests/test_structures/test_point_data.py create mode 100755 tests/test_structures/test_points/test_base_points.py create mode 100755 tests/test_structures/test_points/test_cam_points.py create mode 100755 tests/test_structures/test_points/test_depth_points.py create mode 100755 tests/test_utils/test_compat_cfg.py create mode 100755 tests/test_utils/test_setup_env.py create mode 100755 tools/analysis_tools/analyze_logs.py create mode 100755 tools/analysis_tools/benchmark.py create mode 100755 tools/analysis_tools/get_flops.py create mode 100755 tools/create_data.py create mode 100755 tools/create_data.sh create mode 100755 tools/dataset_converters/create_gt_database.py create mode 100755 tools/dataset_converters/indoor_converter.py create mode 100755 tools/dataset_converters/kitti_converter.py create mode 100755 tools/dataset_converters/kitti_data_utils.py create mode 100755 tools/dataset_converters/lyft_converter.py create mode 100755 tools/dataset_converters/lyft_data_fixer.py create mode 100755 tools/dataset_converters/nuimage_converter.py create mode 100755 tools/dataset_converters/nuscenes_converter.py create mode 100755 tools/dataset_converters/s3dis_data_utils.py create mode 100755 tools/dataset_converters/scannet_data_utils.py create mode 100755 tools/dataset_converters/semantickitti_converter.py create mode 100755 tools/dataset_converters/sunrgbd_data_utils.py create mode 100755 
tools/dataset_converters/update_infos_to_v2.py create mode 100755 tools/dataset_converters/waymo_converter.py create mode 100755 tools/deployment/mmdet3d2torchserve.py create mode 100755 tools/deployment/mmdet3d_handler.py create mode 100755 tools/deployment/test_torchserver.py create mode 100755 tools/dist_test.sh create mode 100755 tools/dist_train.sh create mode 100755 tools/misc/browse_dataset.py create mode 100755 tools/misc/fuse_conv_bn.py create mode 100755 tools/misc/print_config.py create mode 100755 tools/misc/visualize_results.py create mode 100755 tools/model_converters/convert_h3dnet_checkpoints.py create mode 100755 tools/model_converters/convert_votenet_checkpoints.py create mode 100755 tools/model_converters/publish_model.py create mode 100755 tools/model_converters/regnet2mmdet.py create mode 100755 tools/slurm_test.sh create mode 100755 tools/slurm_train.sh create mode 100755 tools/test.py create mode 100755 tools/train.py create mode 100755 tools/update_data_coords.py create mode 100755 tools/update_data_coords.sh create mode 100644 visual.py create mode 100644 visual2.py diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100755 index 0000000..af5086c --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,35 @@ +version: 2.1 + +# this allows you to use CircleCI's dynamic configuration feature +setup: true + +# the path-filtering orb is required to continue a pipeline based on +# the path of an updated fileset +orbs: + path-filtering: circleci/path-filtering@0.1.2 + +workflows: + # the always-run workflow is always triggered, regardless of the pipeline parameters. + always-run: + jobs: + # the path-filtering/filter job determines which pipeline + # parameters to update. + - path-filtering/filter: + name: check-updated-files + # 3-column, whitespace-delimited mapping. One mapping per + # line: + # + mapping: | + mmdet3d/.* lint_only false + requirements/.* lint_only false + tests/.* lint_only false + tools/.* lint_only false + configs/.* lint_only false + .circleci/.* lint_only false + projects/.* lint_only false + base-revision: dev-1.x + # this is the path of the configuration we should trigger once + # path filtering and pipeline parameter value updates are + # complete. In this case, we are using the parent dynamic + # configuration itself. 
+ config-path: .circleci/test.yml diff --git a/.circleci/docker/Dockerfile b/.circleci/docker/Dockerfile new file mode 100755 index 0000000..d9cf8cc --- /dev/null +++ b/.circleci/docker/Dockerfile @@ -0,0 +1,11 @@ +ARG PYTORCH="1.8.1" +ARG CUDA="10.2" +ARG CUDNN="7" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +# To fix GPG key error when running apt-get update +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + +RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx diff --git a/.circleci/test.yml b/.circleci/test.yml new file mode 100755 index 0000000..0c9a6d8 --- /dev/null +++ b/.circleci/test.yml @@ -0,0 +1,186 @@ +version: 2.1 + +# the default pipeline parameters, which will be updated according to +# the results of the path-filtering orb +parameters: + lint_only: + type: boolean + default: true + +jobs: + lint: + docker: + - image: cimg/python:3.7.4 + steps: + - checkout + - run: + name: Install pre-commit hook + command: | + pip install pre-commit + pre-commit install + - run: + name: Linting + command: pre-commit run --all-files + - run: + name: Check docstring coverage + command: | + pip install interrogate + interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 90 mmdet3d + + build_cpu: + parameters: + # The python version must match available image tags in + # https://circleci.com/developer/images/image/cimg/python + python: + type: string + torch: + type: string + torchvision: + type: string + docker: + - image: cimg/python:<< parameters.python >> + resource_class: large + steps: + - checkout + - run: + name: Install Libraries + command: | + sudo apt-get update + sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5 + - run: + name: Configure Python & pip + command: | + pip install --upgrade pip + pip install wheel + - run: + name: Install PyTorch + command: | + python -V + python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html + - when: + condition: + equal: ["3.9.0", << parameters.python >>] + steps: + - run: pip install "protobuf <= 3.20.1" && sudo apt-get update && sudo apt-get -y install libprotobuf-dev protobuf-compiler cmake + - run: + name: Install mmdet3d dependencies + command: | + python -m pip install git+ssh://git@github.com/open-mmlab/mmengine.git@main + pip install -U openmim + mim install 'mmcv >= 2.0.0rc4' + pip install git+ssh://git@github.com/open-mmlab/mmdetection.git@dev-3.x + pip install -r requirements.txt + - run: + name: Build and install + command: | + pip install -e . 
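
As a side note, the mapping block added to .circleci/config.yml above is what decides whether a pipeline runs in lint-only mode. The following is a rough Python sketch of that decision, using the same patterns as the mapping (illustrative only, not part of the patch; the real evaluation is performed by CircleCI's path-filtering orb, and the function name and sample paths below are made up):

    import re

    # Patterns copied from the `mapping` block in .circleci/config.yml: a change
    # to any file matching one of them flips the `lint_only` parameter to false.
    CODE_PATTERNS = [
        'mmdet3d/.*', 'requirements/.*', 'tests/.*', 'tools/.*',
        'configs/.*', '.circleci/.*', 'projects/.*'
    ]


    def lint_only(changed_files):
        """Return True when no changed file touches a code path."""
        return not any(
            re.fullmatch(pattern, path)
            for path in changed_files for pattern in CODE_PATTERNS)


    # A docs-only change keeps the pipeline in lint-only mode; a model change does not.
    print(lint_only(['docs/en/get_started.md']))     # True
    print(lint_only(['mmdet3d/apis/inference.py']))  # False
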
+ - run: + name: Run unittests + command: | + python -m coverage run --branch --source mmdet3d -m pytest tests/ + python -m coverage xml + python -m coverage report -m + + build_cuda: + parameters: + torch: + type: string + cuda: + type: enum + enum: ["10.1", "10.2", "11.1"] + cudnn: + type: integer + default: 7 + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + # docker_layer_caching: true + resource_class: gpu.nvidia.small + steps: + - checkout + - run: + # Cloning repos in VM since Docker doesn't have access to the private key + name: Clone Repos + command: | + git clone -b main --depth 1 ssh://git@github.com/open-mmlab/mmengine.git /home/circleci/mmengine + git clone -b dev-3.x --depth 1 ssh://git@github.com/open-mmlab/mmdetection.git /home/circleci/mmdetection + - run: + name: Build Docker image + command: | + docker build .circleci/docker -t mmdet3d:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >> + docker run --gpus all -t -d -v /home/circleci/project:/mmdetection3d -v /home/circleci/mmengine:/mmengine -v /home/circleci/mmdetection:/mmdetection -w /mmdetection3d --name mmdet3d mmdet3d:gpu + docker exec mmdet3d apt-get install -y git + - run: + name: Install mmdet3d dependencies + command: | + docker exec mmdet3d pip install -e /mmengine + docker exec mmdet3d pip install -U openmim + docker exec mmdet3d mim install 'mmcv >= 2.0.0rc4' + docker exec mmdet3d pip install -e /mmdetection + docker exec mmdet3d pip install -r requirements.txt + - run: + name: Build and install + command: | + docker exec mmdet3d pip install -e . + - run: + name: Run unittests + command: | + docker exec mmdet3d python -m pytest tests/ + +workflows: + pr_stage_lint: + when: << pipeline.parameters.lint_only >> + jobs: + - lint: + name: lint + filters: + branches: + ignore: + - dev-1.x + pr_stage_test: + when: + not: << pipeline.parameters.lint_only >> + jobs: + - lint: + name: lint + filters: + branches: + ignore: + - dev-1.x + - build_cpu: + name: minimum_version_cpu + torch: 1.6.0 + torchvision: 0.7.0 + python: 3.7.4 # The lowest python 3.7.x version available on CircleCI images + requires: + - lint + - build_cpu: + name: maximum_version_cpu + torch: 1.13.0 + torchvision: 0.14.0 + python: 3.9.0 + requires: + - minimum_version_cpu + - hold: + type: approval + requires: + - maximum_version_cpu + - build_cuda: + name: mainstream_version_gpu + torch: 1.8.1 + # Use double quotation mark to explicitly specify its type + # as string instead of number + cuda: "10.2" + requires: + - hold + merge_stage_test: + when: + not: << pipeline.parameters.lint_only >> + jobs: + - build_cuda: + name: minimum_version_gpu + torch: 1.6.0 + cuda: "10.1" + filters: + branches: + only: + - dev-1.x diff --git a/.dev_scripts/benchmark_full_models.txt b/.dev_scripts/benchmark_full_models.txt new file mode 100755 index 0000000..80b7e2a --- /dev/null +++ b/.dev_scripts/benchmark_full_models.txt @@ -0,0 +1,26 @@ +configs/3dssd/3dssd_4xb4_kitti-3d-car.py +configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py +configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py +configs/fcaf3d/fcaf3d_2xb8_s3dis-3d-5class.py +configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py +configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py +configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py +configs/h3dnet/h3dnet_8xb3_scannet-seg.py 
+configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py +configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py +configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py +configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py +configs/paconv/paconv_ssg_8xb8-cosine-150e_s3dis-seg.py +configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py +configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py +configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py +configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py +configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py +configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py +configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py +configs/regnet/pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py +configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py +configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py +configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py +configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py +configs/votenet/votenet_8xb8_scannet-3d.py diff --git a/.dev_scripts/benchmark_options.py b/.dev_scripts/benchmark_options.py new file mode 100755 index 0000000..a8cc338 --- /dev/null +++ b/.dev_scripts/benchmark_options.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +third_part_libs = [ + 'conda install openblas-devel -c anaconda', + "pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps --install-option='--blas_include_dirs=/opt/conda/include' --install-option='--blas=openblas'" # noqa +] +default_floating_range = 0.5 +model_floating_ranges = { + 'configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py': # noqa + 0.7 +} diff --git a/.dev_scripts/benchmark_train_models.txt b/.dev_scripts/benchmark_train_models.txt new file mode 100755 index 0000000..45e03c3 --- /dev/null +++ b/.dev_scripts/benchmark_train_models.txt @@ -0,0 +1,14 @@ +configs/3dssd/3dssd_4xb4_kitti-3d-car.py +configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py +configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py +configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py +configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py +configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py +configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py +configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py +configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py +configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py +configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py +configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py +configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py +configs/votenet/votenet_8xb8_scannet-3d.py diff --git a/.dev_scripts/covignore.cfg b/.dev_scripts/covignore.cfg new file mode 100755 index 0000000..64e01e9 --- /dev/null +++ b/.dev_scripts/covignore.cfg @@ -0,0 +1,6 @@ +# Each line should be the relative path to the root directory +# of this repo. Support regular expression as well. 
+# For example:
+# .*/utils.py
+
+.*/__init__.py
diff --git a/.dev_scripts/diff_coverage_test.sh b/.dev_scripts/diff_coverage_test.sh
new file mode 100755
index 0000000..b87690b
--- /dev/null
+++ b/.dev_scripts/diff_coverage_test.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+readarray -t IGNORED_FILES < $( dirname "$0" )/covignore.cfg
+
+
+REUSE_COVERAGE_REPORT=${REUSE_COVERAGE_REPORT:-0}
+REPO=${1:-"origin"}
+BRANCH=${2:-"refactor_dev"}
+
+git fetch $REPO $BRANCH
+
+PY_FILES=""
+for FILE_NAME in $(git diff --name-only ${REPO}/${BRANCH}); do
+    # Only test python files in mmdet3d/ existing in current branch, and not ignored in covignore.cfg
+    if [ ${FILE_NAME: -3} == ".py" ] && [ ${FILE_NAME:0:8} == "mmdet3d/" ] && [ -f "$FILE_NAME" ]; then
+        IGNORED=false
+        for IGNORED_FILE_NAME in "${IGNORED_FILES[@]}"; do
+            # Skip blank lines
+            if [ -z "$IGNORED_FILE_NAME" ]; then
+                continue
+            fi
+            if [ "${IGNORED_FILE_NAME::1}" != "#" ] && [[ "$FILE_NAME" =~ $IGNORED_FILE_NAME ]]; then
+                echo "Ignoring $FILE_NAME"
+                IGNORED=true
+                break
+            fi
+        done
+        if [ "$IGNORED" = false ]; then
+            PY_FILES="$PY_FILES $FILE_NAME"
+        fi
+    fi
+done
+
+# Only test the coverage when PY_FILES are not empty, otherwise they will test the entire project
+if [ ! -z "${PY_FILES}" ]
+then
+    if [ "$REUSE_COVERAGE_REPORT" == "0" ]; then
+        coverage run --branch --source mmdet3d -m pytest tests/
+    fi
+    coverage report --fail-under 80 -m $PY_FILES
+    interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 95 $PY_FILES
+fi
diff --git a/.dev_scripts/gather_models.py b/.dev_scripts/gather_models.py
new file mode 100755
index 0000000..768a86c
--- /dev/null
+++ b/.dev_scripts/gather_models.py
@@ -0,0 +1,229 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+"""Script to gather benchmarked models and prepare them for upload.
+
+Usage:
+python gather_models.py ${root_path} ${out_dir}
+
+Example:
+python gather_models.py \
+work_dirs/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d \
+work_dirs/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d
+
+Note that before running the above command, rename the directory with the
+config name if you did not use the default directory name, create
+a corresponding directory 'pgd' under the above path and put the used config
+into it.
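
As a side note, the file-selection logic in .dev_scripts/diff_coverage_test.sh above can be approximated in Python roughly as follows (an illustrative sketch only, not part of the patch; the function name and sample paths are made up):

    import re

    def select_files_for_coverage(changed_files, ignore_patterns):
        """Keep changed .py files under mmdet3d/ that do not match any
        covignore.cfg pattern (the shell script above additionally checks
        that each file still exists in the working tree)."""
        selected = []
        for name in changed_files:
            if not (name.endswith('.py') and name.startswith('mmdet3d/')):
                continue
            if any(pattern and not pattern.startswith('#')
                   and re.search(pattern, name) for pattern in ignore_patterns):
                continue
            selected.append(name)
        return selected

    # Example with the single default pattern shipped in covignore.cfg.
    print(select_files_for_coverage(
        ['mmdet3d/models/detectors/votenet.py',
         'mmdet3d/datasets/__init__.py', 'tools/train.py'],
        ['.*/__init__.py']))
    # -> ['mmdet3d/models/detectors/votenet.py']
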
+""" + +import argparse +import glob +import json +import shutil +import subprocess +from os import path as osp + +import mmengine +import torch + +# build schedule look-up table to automatically find the final model +SCHEDULES_LUT = { + '_1x_': 12, + '_2x_': 24, + '_20e_': 20, + '_3x_': 36, + '_4x_': 48, + '_24e_': 24, + '_6x_': 73, + '_50e_': 50, + '_80e_': 80, + '_100e_': 100, + '_150e_': 150, + '_200e_': 200, + '_250e_': 250, + '_400e_': 400 +} + +# TODO: add support for lyft dataset +RESULTS_LUT = { + 'coco': ['bbox_mAP', 'segm_mAP'], + 'nus': ['pts_bbox_NuScenes/NDS', 'NDS'], + 'kitti-3d-3class': ['KITTI/Overall_3D_moderate', 'Overall_3D_moderate'], + 'kitti-3d-car': ['KITTI/Car_3D_moderate_strict', 'Car_3D_moderate_strict'], + 'lyft': ['score'], + 'scannet_seg': ['miou'], + 's3dis_seg': ['miou'], + 'scannet': ['mAP_0.50'], + 'sunrgbd': ['mAP_0.50'], + 'kitti-mono3d': [ + 'img_bbox/KITTI/Car_3D_AP40_moderate_strict', + 'Car_3D_AP40_moderate_strict' + ], + 'nus-mono3d': ['img_bbox_NuScenes/NDS', 'NDS'] +} + + +def get_model_dataset(log_json_path): + for key in RESULTS_LUT: + if log_json_path.find(key) != -1: + return key + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) + subprocess.Popen(['mv', out_file, final_file]) + return final_file + + +def get_final_epoch(config): + if config.find('grid_rcnn') != -1 and config.find('2x') != -1: + # grid_rcnn 2x trains 25 epochs + return 25 + + for schedule_name, epoch_num in SCHEDULES_LUT.items(): + if config.find(schedule_name) != -1: + return epoch_num + + +def get_best_results(log_json_path): + dataset = get_model_dataset(log_json_path) + max_dict = dict() + max_memory = 0 + with open(log_json_path, 'r') as f: + for line in f.readlines(): + log_line = json.loads(line) + if 'mode' not in log_line.keys(): + continue + + # record memory and find best results & epochs + if log_line['mode'] == 'train' \ + and max_memory <= log_line['memory']: + max_memory = log_line['memory'] + + elif log_line['mode'] == 'val': + result_dict = { + key: log_line[key] + for key in RESULTS_LUT[dataset] if key in log_line + } + if len(max_dict) == 0: + max_dict = result_dict + max_dict['epoch'] = log_line['epoch'] + elif all( + [max_dict[key] <= result_dict[key] + for key in result_dict]): + max_dict.update(result_dict) + max_dict['epoch'] = log_line['epoch'] + + max_dict['memory'] = max_memory + return max_dict + + +def parse_args(): + parser = argparse.ArgumentParser(description='Gather benchmarked models') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + 'out', type=str, help='output path of gathered models to be stored') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + models_root = args.root + models_out = args.out + mmengine.mkdir_or_exist(models_out) + + # find all models in the root directory to be gathered + raw_configs = list(mmengine.scandir('./configs', '.py', recursive=True)) + + # filter configs that is not trained in the experiments dir + used_configs = [] + for raw_config in raw_configs: + if 
osp.exists(osp.join(models_root, raw_config)): + used_configs.append(raw_config) + print(f'Find {len(used_configs)} models to be gathered') + + # find final_ckpt and log file for trained each config + # and parse the best performance + model_infos = [] + for used_config in used_configs: + # get logs + log_json_path = glob.glob(osp.join(models_root, '*.log.json'))[0] + log_txt_path = glob.glob(osp.join(models_root, '*.log'))[0] + model_performance = get_best_results(log_json_path) + final_epoch = model_performance['epoch'] + final_model = 'epoch_{}.pth'.format(final_epoch) + model_path = osp.join(models_root, final_model) + + # skip if the model is still training + if not osp.exists(model_path): + print(f'Expected {model_path} does not exist!') + continue + + if model_performance is None: + print(f'Obtained no performance for model {used_config}') + continue + + model_time = osp.split(log_txt_path)[-1].split('.')[0] + model_infos.append( + dict( + config=used_config, + results=model_performance, + epochs=final_epoch, + model_time=model_time, + log_json_path=osp.split(log_json_path)[-1])) + + # publish model for each checkpoint + publish_model_infos = [] + for model in model_infos: + model_publish_dir = osp.join(models_out, model['config'].rstrip('.py')) + mmengine.mkdir_or_exist(model_publish_dir) + + model_name = model['config'].split('/')[-1].rstrip( + '.py') + '_' + model['model_time'] + publish_model_path = osp.join(model_publish_dir, model_name) + trained_model_path = osp.join(models_root, + 'epoch_{}.pth'.format(model['epochs'])) + + # convert model + final_model_path = process_checkpoint(trained_model_path, + publish_model_path) + + # copy log + shutil.copy( + osp.join(models_root, model['log_json_path']), + osp.join(model_publish_dir, f'{model_name}.log.json')) + shutil.copy( + osp.join(models_root, model['log_json_path'].rstrip('.json')), + osp.join(model_publish_dir, f'{model_name}.log')) + + # copy config to guarantee reproducibility + config_path = model['config'] + config_path = osp.join( + 'configs', + config_path) if 'configs' not in config_path else config_path + target_cconfig_path = osp.split(config_path)[-1] + shutil.copy(config_path, + osp.join(model_publish_dir, target_cconfig_path)) + + model['model_path'] = final_model_path + publish_model_infos.append(model) + + models = dict(models=publish_model_infos) + print(f'Totally gathered {len(publish_model_infos)} models') + mmengine.dump(models, osp.join(models_out, 'model_info.json')) + + +if __name__ == '__main__': + main() diff --git a/.dev_scripts/gen_benchmark_script.py b/.dev_scripts/gen_benchmark_script.py new file mode 100755 index 0000000..3ae1128 --- /dev/null +++ b/.dev_scripts/gen_benchmark_script.py @@ -0,0 +1,193 @@ +import argparse +import re +from os import path as osp + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate benchmark training/testing scripts') + parser.add_argument( + '--input_file', + required=False, + type=str, + help='Input file containing the paths ' + 'of configs to be trained/tested.') + parser.add_argument( + '--output_file', + required=True, + type=str, + help='Output file containing the ' + 'commands to train/test selected models.') + parser.add_argument( + '--gpus_per_node', + type=int, + default=8, + help='GPUs per node config for slurm, ' + 'should be set according to your slurm environment') + parser.add_argument( + '--cpus_per_task', + type=int, + default=5, + help='CPUs per task config for slurm, ' + 'should be set according to your slurm 
environment') + parser.add_argument( + '--gpus', + type=int, + default=8, + help='Totally used num of GPUs config for slurm (in testing), ' + 'should be set according to your slurm environment') + parser.add_argument( + '--mode', type=str, default='train', help='Train or test') + parser.add_argument( + '--long_work_dir', + action='store_true', + help='Whether use full relative path of config as work dir') + parser.add_argument( + '--max_keep_ckpts', + type=int, + default=1, + help='The max number of checkpoints saved in training') + parser.add_argument( + '--full_log', + action='store_true', + help='Whether save full log in a file') + + args = parser.parse_args() + return args + + +args = parse_args() +assert args.mode in ['train', 'test'], 'Currently we only support ' \ + 'automatically generating training or testing scripts.' + +config_paths = [] + +if args.input_file is not None: + with open(args.input_file, 'r') as fi: + config_paths = fi.read().strip().split('\n') +else: + while True: + print('Please type a config path and ' + 'press enter (press enter directly to exit):') + config_path = input() + if config_path != '': + config_paths.append(config_path) + else: + break + +script = '''PARTITION=$1 +CHECKPOINT_DIR=$2 + +''' + +if args.mode == 'train': + for i, config_path in enumerate(config_paths): + root_dir = osp.dirname(osp.dirname(osp.abspath(__file__))) + if not osp.exists(osp.join(root_dir, config_path)): + print(f'Invalid config path (does not exist):\n{config_path}') + continue + + config_name = config_path.split('/')[-1][:-3] + match_obj = re.match(r'^.*_[0-9]+x([0-9]+)_.*$', config_name) + if match_obj is None: + print(f'Invalid config path (no GPU num in ' + f'config name):\n{config_path}') + continue + + gpu_num = int(match_obj.group(1)) + work_dir_name = config_path if args.long_work_dir else config_name + + script += f"echo '{config_path}' &\n" + if args.full_log: + script += f'mkdir -p $CHECKPOINT_DIR/{work_dir_name}\n' + + # training commands + script += f'GPUS={gpu_num} GPUS_PER_NODE={args.gpus_per_node} ' \ + f'CPUS_PER_TASK={args.cpus_per_task} ' \ + f'./tools/slurm_train.sh $PARTITION {config_name} ' \ + f'{config_path} \\\n' + script += f'$CHECKPOINT_DIR/{work_dir_name} --cfg-options ' \ + f'checkpoint_config.max_keep_ckpts=' \ + f'{args.max_keep_ckpts} \\\n' \ + + # if output full log, redirect stdout and stderr to + # another log file in work dir + if args.full_log: + script += f'2>&1|tee $CHECKPOINT_DIR/{work_dir_name}' \ + f'/FULL_LOG.txt &\n' + else: + script += '>/dev/null &\n' + + if i != len(config_paths) - 1: + script += '\n' + + print(f'Successfully generated script for {config_name}') + + with open(args.output_file, 'w') as fo: + fo.write(script) + +elif args.mode == 'test': + for i, config_path in enumerate(config_paths): + root_dir = osp.dirname(osp.dirname(osp.abspath(__file__))) + if not osp.exists(osp.join(root_dir, config_path)): + print(f'Invalid config path (does not exist):\n{config_path}') + continue + + config_name = config_path.split('/')[-1][:-3] + + tasks = { + 'scannet_seg', 'scannet', 's3dis_seg', 'sunrgbd', 'kitti', 'nus', + 'lyft', 'waymo' + } + eval_option = None + for task in tasks: + if task in config_name: + eval_option = task + break + if eval_option is None: + print(f'Invalid config path (invalid task):\n{config_path}') + continue + + work_dir_name = config_path if args.long_work_dir else config_name + + script += f"echo '{config_path}' &\n" + if args.full_log: + script += f'mkdir -p $CHECKPOINT_DIR/{work_dir_name}\n' + + # 
training commands + script += f'GPUS={args.gpus} GPUS_PER_NODE={args.gpus_per_node} ' \ + f'CPUS_PER_TASK={args.cpus_per_task} ' \ + f'./tools/slurm_test.sh $PARTITION {config_name} ' \ + f'{config_path} \\\n' + script += f'$CHECKPOINT_DIR/{work_dir_name}/latest.pth ' \ + + if eval_option in ['scannet_seg', 's3dis_seg']: + script += '--eval mIoU \\\n' + elif eval_option in ['scannet', 'sunrgbd', 'kitti', 'nus']: + script += '--eval map \\\n' + elif eval_option in ['lyft']: + script += f'--format-only --eval-options jsonfile_prefix=' \ + f'$CHECKPOINT_DIR/{work_dir_name}/results_challenge ' \ + f'csv_savepath=$CHECKPOINT_DIR/{work_dir_name}/' \ + f'results_challenge.csv \\\n' + elif eval_option in ['waymo']: + script += f'--eval waymo --eval-options pklfile_prefix=' \ + f'$CHECKPOINT_DIR/{work_dir_name}/kitti_results ' \ + f'submission_prefix=$CHECKPOINT_DIR/{work_dir_name}/' \ + f'kitti_results \\\n' + + # if output full log, redirect stdout and stderr to + # another log file in work dir + if args.full_log: + script += f'2>&1|tee $CHECKPOINT_DIR/{work_dir_name}' \ + f'/FULL_LOG.txt &\n' + else: + script += '>/dev/null &\n' + + if i != len(config_paths) - 1: + script += '\n' + + print(f'Successfully generated script for {config_name}') + + with open(args.output_file, 'w') as fo: + fo.write(script) diff --git a/.dev_scripts/linter.sh b/.dev_scripts/linter.sh new file mode 100755 index 0000000..64161ca --- /dev/null +++ b/.dev_scripts/linter.sh @@ -0,0 +1,3 @@ +yapf -r -i mmdet3d/ configs/ tests/ tools/ +isort mmdet3d/ configs/ tests/ tools/ +flake8 . diff --git a/.dev_scripts/test_benchmark.sh b/.dev_scripts/test_benchmark.sh new file mode 100755 index 0000000..d95a66c --- /dev/null +++ b/.dev_scripts/test_benchmark.sh @@ -0,0 +1,128 @@ +PARTITION=$1 +CHECKPOINT_DIR=$2 + +echo 'configs/3dssd/3dssd_4xb4_kitti-3d-car.py' & +mkdir -p $CHECKPOINT_DIR/configs/3dssd/3dssd_4xb4_kitti-3d-car.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION 3dssd_4x4_kitti-3d-car configs/3dssd/3dssd_4xb4_kitti-3d-car.py \ +$CHECKPOINT_DIR/configs/3dssd/3dssd_4xb4_kitti-3d-car.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/3dssd/3dssd_4xb4_kitti-3d-car.py/FULL_LOG.txt & + +echo 'configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION centerpoint_02pillar_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py \ +$CHECKPOINT_DIR/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py/FULL_LOG.txt & + +echo 'configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py/latest.pth --eval map \ +2>&1|tee 
$CHECKPOINT_DIR/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/fcos3d/fcos3d_r101-caffe-fpn-head-gn-dcn_8xb2-1x_nus-mono3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d configs/fcos3d/fcos3d_r101-caffe-fpn-head-gn-dcn_8xb2-1x_nus-mono3d.py \ +$CHECKPOINT_DIR/configs/fcos3d/fcos3d_r101-caffe-fpn-head-gn-dcn_8xb2-1x_nus-mono3d.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/fcos3d/fcos3d_r101-caffe-fpn-head-gn-dcn_8xb2-1x_nus-mono3d.py/FULL_LOG.txt & + +echo 'configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py \ +$CHECKPOINT_DIR/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py/FULL_LOG.txt & + +echo 'configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py' & +mkdir -p $CHECKPOINT_DIR/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION groupfree3d_8x4_scannet-3d-18class-L6-O256 configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py \ +$CHECKPOINT_DIR/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py/FULL_LOG.txt & + +echo 'configs/h3dnet/h3dnet_8xb3_scannet-seg.py' & +mkdir -p $CHECKPOINT_DIR/configs/h3dnet/h3dnet_8xb3_scannet-seg.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION h3dnet_3x8_scannet-3d-18class configs/h3dnet/h3dnet_8xb3_scannet-seg.py \ +$CHECKPOINT_DIR/configs/h3dnet/h3dnet_8xb3_scannet-seg.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/h3dnet/h3dnet_8xb3_scannet-seg.py/FULL_LOG.txt & + +echo 'configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py \ +$CHECKPOINT_DIR/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py/latest.pth --eval map 
\ +2>&1|tee $CHECKPOINT_DIR/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py/FULL_LOG.txt & + +echo 'configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION imvotenet_stage2_16x8_sunrgbd-3d-10class configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py \ +$CHECKPOINT_DIR/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py/FULL_LOG.txt & + +echo 'configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py' & +mkdir -p $CHECKPOINT_DIR/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION imvoxelnet_4x8_kitti-3d-car configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py \ +$CHECKPOINT_DIR/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py/FULL_LOG.txt & + +echo 'configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py' & +mkdir -p $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py \ +$CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py/latest.pth --eval mIoU \ +2>&1|tee $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py/FULL_LOG.txt & + +echo 'configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py' & +mkdir -p $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py \ +$CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py/FULL_LOG.txt & + +echo 'configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py' & +mkdir -p 
$CHECKPOINT_DIR/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py \ +$CHECKPOINT_DIR/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py/latest.pth --format-only --eval-options jsonfile_prefix=$CHECKPOINT_DIR/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py/results_challenge csv_savepath=$CHECKPOINT_DIR/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py/results_challenge.csv \ +2>&1|tee $CHECKPOINT_DIR/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py/FULL_LOG.txt & + +echo 'configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py \ +$CHECKPOINT_DIR/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py/latest.pth --eval waymo --eval-options pklfile_prefix=$CHECKPOINT_DIR/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py/kitti_results submission_prefix=$CHECKPOINT_DIR/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py/kitti_results \ +2>&1|tee $CHECKPOINT_DIR/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py/FULL_LOG.txt & + +echo 'configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py \ +$CHECKPOINT_DIR/configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py/FULL_LOG.txt & + +echo 'configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_second_secfpn_6x8_80e_kitti-3d-3class configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py \ +$CHECKPOINT_DIR/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py/latest.pth --format-only --eval-options jsonfile_prefix=$CHECKPOINT_DIR/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py/results_challenge csv_savepath=$CHECKPOINT_DIR/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py/results_challenge.csv \ +2>&1|tee $CHECKPOINT_DIR/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py/FULL_LOG.txt & + +echo 
'configs/votenet/votenet_8xb8_scannet-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/votenet/votenet_8xb8_scannet-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_test.sh $PARTITION votenet_8x8_scannet-3d-18class configs/votenet/votenet_8xb8_scannet-3d.py \ +$CHECKPOINT_DIR/configs/votenet/votenet_8xb8_scannet-3d.py/latest.pth --eval map \ +2>&1|tee $CHECKPOINT_DIR/configs/votenet/votenet_8xb8_scannet-3d.py/FULL_LOG.txt & diff --git a/.dev_scripts/train_benchmark.sh b/.dev_scripts/train_benchmark.sh new file mode 100755 index 0000000..9efa320 --- /dev/null +++ b/.dev_scripts/train_benchmark.sh @@ -0,0 +1,128 @@ +PARTITION=$1 +CHECKPOINT_DIR=$2 + +echo 'configs/3dssd/3dssd_4xb4_kitti-3d-car.py' & +mkdir -p $CHECKPOINT_DIR/configs/3dssd/3dssd_4xb4_kitti-3d-car.py +GPUS=4 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION 3dssd_4x4_kitti-3d-car configs/3dssd/3dssd_4xb4_kitti-3d-car.py \ +$CHECKPOINT_DIR/configs/3dssd/3dssd_4xb4_kitti-3d-car.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/3dssd/3dssd_4xb4_kitti-3d-car.py/FULL_LOG.txt & + +echo 'configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION centerpoint_02pillar_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py \ +$CHECKPOINT_DIR/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py/FULL_LOG.txt & + +echo 'configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/fcos3d/fcos3d_r101-caffe-fpn-head-gn-dcn_8xb2-1x_nus-mono3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d configs/fcos3d/fcos3d_r101-caffe-fpn-head-gn-dcn_8xb2-1x_nus-mono3d.py \ +$CHECKPOINT_DIR/configs/fcos3d/fcos3d_r101-caffe-fpn-head-gn-dcn_8xb2-1x_nus-mono3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/fcos3d/fcos3d_r101-caffe-fpn-head-gn-dcn_8xb2-1x_nus-mono3d.py/FULL_LOG.txt & + +echo 'configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class 
configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/second/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py \ +$CHECKPOINT_DIR/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py/FULL_LOG.txt & + +echo 'configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py' & +mkdir -p $CHECKPOINT_DIR/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py +GPUS=4 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION groupfree3d_8x4_scannet-3d-18class-L6-O256 configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py \ +$CHECKPOINT_DIR/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py/FULL_LOG.txt & + +echo 'configs/h3dnet/h3dnet_8xb3_scannet-seg.py' & +mkdir -p $CHECKPOINT_DIR/configs/h3dnet/h3dnet_8xb3_scannet-seg.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION h3dnet_3x8_scannet-3d-18class configs/h3dnet/h3dnet_8xb3_scannet-seg.py \ +$CHECKPOINT_DIR/configs/h3dnet/h3dnet_8xb3_scannet-seg.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/h3dnet/h3dnet_8xb3_scannet-seg.py/FULL_LOG.txt & + +echo 'configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py +GPUS=4 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py \ +$CHECKPOINT_DIR/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py/FULL_LOG.txt & + +echo 'configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION imvotenet_stage2_16x8_sunrgbd-3d-10class configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py \ +$CHECKPOINT_DIR/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py/FULL_LOG.txt & + +echo 'configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py' & +mkdir -p $CHECKPOINT_DIR/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh 
$PARTITION imvoxelnet_4x8_kitti-3d-car configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py \ +$CHECKPOINT_DIR/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py/FULL_LOG.txt & + +echo 'configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py' & +mkdir -p $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py +GPUS=2 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py \ +$CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py/FULL_LOG.txt & + +echo 'configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py' & +mkdir -p $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py +GPUS=2 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py \ +$CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py/FULL_LOG.txt & + +echo 'configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py \ +$CHECKPOINT_DIR/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py/FULL_LOG.txt & + +echo 'configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py +GPUS=16 GPUS_PER_NODE=8 
CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py \ +$CHECKPOINT_DIR/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py/FULL_LOG.txt & + +echo 'configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py \ +$CHECKPOINT_DIR/configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py/FULL_LOG.txt & + +echo 'configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py' & +mkdir -p $CHECKPOINT_DIR/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_second_secfpn_6x8_80e_kitti-3d-3class configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py \ +$CHECKPOINT_DIR/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py/FULL_LOG.txt & + +echo 'configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py +GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py \ +$CHECKPOINT_DIR/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py/FULL_LOG.txt & + +echo 'configs/votenet/votenet_8xb8_scannet-3d.py' & +mkdir -p $CHECKPOINT_DIR/configs/votenet/votenet_8xb8_scannet-3d.py +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=5 ./tools/slurm_train.sh $PARTITION votenet_8x8_scannet-3d-18class configs/votenet/votenet_8xb8_scannet-3d.py \ +$CHECKPOINT_DIR/configs/votenet/votenet_8xb8_scannet-3d.py --cfg-options checkpoint_config.max_keep_ckpts=1 \ +2>&1|tee $CHECKPOINT_DIR/configs/votenet/votenet_8xb8_scannet-3d.py/FULL_LOG.txt & diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100755 index 0000000..92afad1 --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at chenkaidev@gmail.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq + +[homepage]: https://www.contributor-covenant.org diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100755 index 0000000..9b01502 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1 @@ +We appreciate all contributions to improve MMDetection3D. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline. 
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100755
index 0000000..3ba13e0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: false
diff --git a/.github/ISSUE_TEMPLATE/error-report.md b/.github/ISSUE_TEMPLATE/error-report.md
new file mode 100755
index 0000000..ee65286
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/error-report.md
@@ -0,0 +1,45 @@
+---
+name: Error report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+---
+
+Thanks for your error report and we appreciate it a lot.
+
+**Checklist**
+
+1. I have searched related issues but cannot get the expected help.
+2. The bug has not been fixed in the latest version.
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**Reproduction**
+
+1. What command or script did you run?
+
+```
+A placeholder for the command.
+```
+
+2. Did you make any modifications to the code or config? Did you understand what you have modified?
+3. What dataset did you use?
+
+**Environment**
+
+1. Please run `python mmdet3d/utils/collect_env.py` to collect necessary environment information and paste it here.
+2. You may add additional information that may be helpful for locating the problem, such as
+   - How you installed PyTorch \[e.g., pip, conda, source\]
+   - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
+
+**Error traceback**
+If applicable, paste the error traceback here.
+
+```
+A placeholder for the traceback.
+```
+
+**Bug fix**
+If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100755
index 0000000..7bf92e8
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,21 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+---
+
+**Describe the feature**
+
+**Motivation**
+A clear and concise description of the motivation of the feature.
+Ex1. It is inconvenient when \[....\].
+Ex2. There is a recent paper \[....\], which is very helpful for \[....\].
+
+**Related resources**
+If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
+If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated.
diff --git a/.github/ISSUE_TEMPLATE/general_questions.md b/.github/ISSUE_TEMPLATE/general_questions.md
new file mode 100755
index 0000000..f02dd63
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/general_questions.md
@@ -0,0 +1,7 @@
+---
+name: General questions
+about: Ask general questions to get help
+title: ''
+labels: ''
+assignees: ''
+---
diff --git a/.github/ISSUE_TEMPLATE/reimplementation_questions.md b/.github/ISSUE_TEMPLATE/reimplementation_questions.md
new file mode 100755
index 0000000..68637b6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/reimplementation_questions.md
@@ -0,0 +1,67 @@
+---
+name: Reimplementation Questions
+about: Ask questions about model reimplementation
+title: ''
+labels: reimplementation
+assignees: ''
+---
+
+**Notice**
+
+There are several common situations in reimplementation issues, as listed below:
+
+1. Reimplement a model in the model zoo using the provided configs
+2. Reimplement a model in the model zoo on another dataset (e.g., custom datasets)
+3. Reimplement a custom model but all the components are implemented in MMDetection3D
+4. Reimplement a custom model with new modules implemented by yourself
+
+There are different things to do for these cases, as described below.
+
+- For case 1 & 3, please follow the steps in the following sections so that we can quickly identify the issue.
+- For case 2 & 4, please understand that we are not able to provide much help here because we usually do not know the full code, and users should be responsible for the code they write.
+- One suggestion for case 2 & 4 is that users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtained in the issue, follow the steps in the following sections, and be as clear as possible so that we can better help you.
+
+**Checklist**
+
+1. I have searched related issues but cannot get the expected help.
+2. The issue has not been fixed in the latest version.
+
+**Describe the issue**
+
+A clear and concise description of the problem you met and what you have done.
+
+**Reproduction**
+
+1. What command or script did you run?
+
+```
+A placeholder for the command.
+```
+
+2. Which config did you run?
+
+```
+A placeholder for the config.
+```
+
+3. Did you make any modifications to the code or config? Did you understand what you have modified?
+4. What dataset did you use?
+
+**Environment**
+
+1. Please run `python mmdet3d/utils/collect_env.py` to collect necessary environment information and paste it here.
+2. You may add additional information that may be helpful for locating the problem, such as
+   - How you installed PyTorch \[e.g., pip, conda, source\]
+   - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
+
+**Results**
+
+If applicable, paste the related results here, e.g., what you expected and what you got.
+
+```
+A placeholder for results comparison
+```
+
+**Issue fix**
+
+If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100755
index 0000000..3668d83
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,25 @@
+Thanks for your contribution and we appreciate it a lot.
The following instructions would make your pull request more healthy and more easily get feedback. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers. + +## Motivation + +Please describe the motivation of this PR and the goal you want to achieve through this PR. + +## Modification + +Please briefly describe what modification is made in this PR. + +## BC-breaking (Optional) + +Does the modification introduce changes that break the back-compatibility of the downstream repos? +If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR. + +## Use cases (Optional) + +If this PR introduces a new feature, it is better to list some use cases here, and update the documentation. + +## Checklist + +1. Pre-commit or other linting tools are used to fix the potential lint issues. +2. The modification is covered by complete unit tests. If not, please add more unit test to ensure the correctness. +3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects. +4. The documentation has been modified accordingly, like docstring or example tutorials. diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100755 index 0000000..a5f8dbe --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,28 @@ +name: deploy + +on: push + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-n-publish: + runs-on: ubuntu-latest + if: startsWith(github.event.ref, 'refs/tags') + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install torch + run: pip install torch + - name: Install wheel + run: pip install wheel + - name: Build MMDet3D + run: python setup.py sdist bdist_wheel + - name: Publish distribution to PyPI + run: | + pip install twine + twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100755 index 0000000..62a6ac1 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,27 @@ +name: lint + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install pre-commit hook + run: | + pip install pre-commit + pre-commit install + - name: Linting + run: pre-commit run --all-files + - name: Check docstring coverage + run: | + pip install interrogate + interrogate -v --ignore-init-method --ignore-magic --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 90 mmdet3d diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml new file mode 100755 index 0000000..7394caa --- /dev/null +++ b/.github/workflows/merge_stage_test.yml @@ -0,0 +1,226 @@ +name: merge_stage_test + +on: + push: + paths-ignore: + - 'README.md' + - 'README_zh-CN.md' + - 'docs/**' + - 'demo/**' + - '.dev_scripts/**' + - '.circleci/**' + branches: + - dev-1.x + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_cpu_py: + runs-on: ubuntu-22.04 + strategy: + matrix: + python-version: [3.7, 3.8, 3.9] + torch: [1.8.1] + include: + - 
torch: 1.8.1 + torchvision: 0.9.1 + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: pip install pip --upgrade + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + - name: Install MMEngine + run: pip install git+https://github.com/open-mmlab/mmengine.git@main + - name: Install MMCV + run: | + pip install -U openmim + mim install 'mmcv >= 2.0.0rc4' + - name: Install MMDet + run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install other dependencies + run: pip install -r requirements/tests.txt + - name: Build and install + run: rm -rf .eggs && pip install -e . + - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmdet3d -m pytest tests/ + coverage xml + coverage report -m + + build_cpu_pt: + runs-on: ubuntu-22.04 + strategy: + matrix: + python-version: [3.7] + torch: [1.6.0, 1.7.1, 1.8.1, 1.9.1, 1.10.1, 1.11.0, 1.12.1, 1.13.0] + include: + - torch: 1.6.0 + torchvision: 0.7.0 + - torch: 1.7.1 + torchvision: 0.8.2 + - torch: 1.8.1 + torchvision: 0.9.1 + - torch: 1.9.1 + torchvision: 0.10.1 + - torch: 1.10.1 + torchvision: 0.11.2 + - torch: 1.11.0 + torchvision: 0.12.0 + - torch: 1.12.1 + torchvision: 0.13.1 + - torch: 1.13.0 + torchvision: 0.14.0 + - torch: 2.0.0 + torchvision: 0.15.1 + python-version: 3.8 + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: pip install pip --upgrade + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + - name: Install MMEngine + run: pip install git+https://github.com/open-mmlab/mmengine.git@main + - name: Install MMCV + run: | + pip install -U openmim + mim install 'mmcv >= 2.0.0rc4' + - name: Install MMDet + run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + - name: Install other dependencies + run: pip install -r requirements/tests.txt + - name: Build and install + run: rm -rf .eggs && pip install -e . 
+ - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmdet3d -m pytest tests/ + coverage xml + coverage report -m + # Only upload coverage report for python3.7 && pytorch1.8.1 cpu + - name: Upload coverage to Codecov + if: ${{matrix.torch == '1.8.1' && matrix.python-version == '3.7'}} + uses: codecov/codecov-action@v1.0.14 + with: + file: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + build_cu102: + runs-on: ubuntu-22.04 + container: + image: pytorch/pytorch:1.8.1-cuda10.2-cudnn7-devel + strategy: + matrix: + python-version: [3.7] + include: + - torch: 1.8.1 + cuda: 10.2 + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: pip install pip --upgrade + - name: Fetch GPG keys + run: | + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + - name: Install system dependencies + run: apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 + - name: Install mmdet3d dependencies + run: | + pip install git+https://github.com/open-mmlab/mmengine.git@main + pip install -U openmim + mim install 'mmcv >= 2.0.0rc4' + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install -r requirements/tests.txt + - name: Build and install + run: pip install -e . + - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmdet3d -m pytest tests/ + coverage xml + coverage report -m + + build_cu116: + runs-on: ubuntu-22.04 + container: + image: pytorch/pytorch:1.13.0-cuda11.6-cudnn8-devel + strategy: + matrix: + python-version: [3.7] + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: pip install pip --upgrade + - name: Fetch GPG keys + run: | + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + - name: Install system dependencies + run: apt-get update && apt-get install -y git ffmpeg libturbojpeg + - name: Install mmdet3d dependencies + run: | + pip install git+https://github.com/open-mmlab/mmengine.git@main + pip install -U openmim + mim install 'mmcv >= 2.0.0rc4' + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install -r requirements/tests.txt + - name: Build and install + run: pip install -e . 
+      - name: Run unittests and generate coverage report
+        run: |
+          coverage run --branch --source mmdet3d -m pytest tests
+          coverage xml
+          coverage report -m
+
+  build_windows:
+    runs-on: windows-2022
+    strategy:
+      matrix:
+        python-version: [3.7]
+        platform: [cpu, cu111]
+        torch: [1.8.1]
+        torchvision: [0.9.1]
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Upgrade pip
+        run: pip install pip --upgrade
+      - name: Install lmdb
+        run: pip install lmdb
+      - name: Install PyTorch
+        run: pip install torch==${{matrix.torch}}+${{matrix.platform}} torchvision==${{matrix.torchvision}}+${{matrix.platform}} -f https://download.pytorch.org/whl/${{matrix.platform}}/torch_stable.html
+      - name: Install mmdet3d dependencies
+        run: |
+          pip install git+https://github.com/open-mmlab/mmengine.git@main
+          pip install -U openmim
+          mim install 'mmcv >= 2.0.0rc4'
+          pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+          pip install -r requirements/tests.txt
+      - name: Build and install
+        run: pip install -e .
+      - name: Run unittests and generate coverage report
+        run: pytest tests/
diff --git a/.github/workflows/pr_stage_test.yml b/.github/workflows/pr_stage_test.yml
new file mode 100755
index 0000000..7ae1851
--- /dev/null
+++ b/.github/workflows/pr_stage_test.yml
@@ -0,0 +1,129 @@
+name: pr_stage_test
+
+on:
+  pull_request:
+    paths-ignore:
+      - 'README.md'
+      - 'README_zh-CN.md'
+      - 'docs/**'
+      - 'demo/**'
+      - '.dev_scripts/**'
+      - '.circleci/**'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build_cpu:
+    runs-on: ubuntu-22.04
+    strategy:
+      matrix:
+        python-version: [3.7]
+        include:
+          - torch: 1.8.1
+            torchvision: 0.9.1
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Upgrade pip
+        run: python -m pip install pip --upgrade
+      - name: Install PyTorch
+        run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html
+      - name: Install MMEngine
+        run: pip install git+https://github.com/open-mmlab/mmengine.git@main
+      - name: Install MMCV
+        run: |
+          pip install -U openmim
+          mim install 'mmcv >= 2.0.0rc4'
+      - name: Install MMDet
+        run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+      - name: Install other dependencies
+        run: pip install -r requirements/tests.txt
+      - name: Build and install
+        run: rm -rf .eggs && pip install -e .
+ - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmdet3d -m pytest tests/ + coverage xml + coverage report -m + # Upload coverage report for python3.7 && pytorch1.8.1 cpu + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1.0.14 + with: + file: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + build_cu102: + runs-on: ubuntu-22.04 + container: + image: pytorch/pytorch:1.8.1-cuda10.2-cudnn7-devel + strategy: + matrix: + python-version: [3.7] + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: pip install pip --upgrade + - name: Fetch GPG keys + run: | + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + - name: Install system dependencies + run: apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 + - name: Install mmdet3d dependencies + run: | + pip install git+https://github.com/open-mmlab/mmengine.git@main + pip install -U openmim + mim install 'mmcv >= 2.0.0rc4' + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install -r requirements/tests.txt + - name: Build and install + run: pip install -e . + - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmdet3d -m pytest tests/ + coverage xml + coverage report -m + + build_windows: + runs-on: windows-2022 + strategy: + matrix: + python-version: [3.7] + platform: [cpu, cu111] + torch: [1.8.1] + torchvision: [0.9.1] + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: python -m pip install pip --upgrade + - name: Install lmdb + run: pip install lmdb + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+${{matrix.platform}} torchvision==${{matrix.torchvision}}+${{matrix.platform}} -f https://download.pytorch.org/whl/${{matrix.platform}}/torch_stable.html + - name: Install mmdet3d dependencies + run: | + pip install git+https://github.com/open-mmlab/mmengine.git@main + pip install -U openmim + mim install 'mmcv >= 2.0.0rc4' + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install -r requirements/tests.txt + - name: Build and install + run: pip install -e . 
+ - name: Run unittests and generate coverage report + run: pytest tests/ diff --git a/.github/workflows/test_mim.yml b/.github/workflows/test_mim.yml new file mode 100755 index 0000000..98d6d1c --- /dev/null +++ b/.github/workflows/test_mim.yml @@ -0,0 +1,44 @@ +name: test-mim + +on: + push: + paths: + - 'model-index.yml' + - 'configs/**' + + pull_request: + paths: + - 'model-index.yml' + - 'configs/**' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_cpu: + runs-on: ubuntu-22.04 + strategy: + matrix: + python-version: [3.7] + torch: [1.8.0] + include: + - torch: 1.8.0 + torch_version: torch1.8 + torchvision: 0.9.0 + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: pip install pip --upgrade + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + - name: Install openmim + run: pip install openmim + - name: Build and install + run: rm -rf .eggs && mim install -e . + - name: test commands of mim + run: mim search mmdet3d diff --git a/.gitignore b/.gitignore new file mode 100755 index 0000000..96be919 --- /dev/null +++ b/.gitignore @@ -0,0 +1,141 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +*.ipynb + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/en/_build/ +docs/zh_cn/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# cython generated cpp +data +.vscode +.idea + +# custom +*.pkl +*.pkl.json +*.log.json +work_dirs/ +exps/ +*~ +mmdet3d/.mim + +# Pytorch +*.pth + +# demo +*.jpg +*.png +data/s3dis/Stanford3dDataset_v1.2_Aligned_Version/ +data/scannet/scans/ +data/sunrgbd/OFFICIAL_SUNRGBD/ +*.obj +*.ply + +# Waymo evaluation +mmdet3d/evaluation/functional/waymo_utils/compute_detection_metrics_main +mmdet3d/evaluation/functional/waymo_utils/compute_detection_let_metrics_main +# ignore +data/nuscenes/ +work_dirs/ +output/ +text/ \ No newline at end of file diff --git a/.pre-commit-config-zh-cn.yaml b/.pre-commit-config-zh-cn.yaml new file mode 100755 index 0000000..1c78ad1 --- /dev/null +++ b/.pre-commit-config-zh-cn.yaml @@ -0,0 +1,50 @@ +repos: + - repo: https://gitee.com/openmmlab/mirrors-flake8 + rev: 5.0.4 + hooks: + - id: flake8 + - repo: https://gitee.com/openmmlab/mirrors-isort + rev: 5.11.5 + hooks: + - id: isort + - repo: https://gitee.com/openmmlab/mirrors-yapf + rev: v0.32.0 + hooks: + - id: yapf + - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks + rev: v4.3.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: double-quote-string-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: ["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] + - repo: https://gitee.com/openmmlab/mirrors-codespell + rev: v2.2.1 + hooks: + - id: codespell + - repo: https://gitee.com/openmmlab/mirrors-mdformat + rev: 0.7.9 + hooks: + - id: mdformat + args: ["--number"] + additional_dependencies: + - mdformat-openmmlab + - mdformat_frontmatter + - linkify-it-py + - repo: https://gitee.com/openmmlab/mirrors-docformatter + rev: v1.3.1 + hooks: + - id: docformatter + args: ["--in-place", "--wrap-descriptions", "79"] + - repo: https://gitee.com/openmmlab/pre-commit-hooks + rev: v0.2.0 + hooks: + - id: check-algo-readme + - id: check-copyright + args: ["mmdet3d"] diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100755 index 0000000..b0d7231 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,50 @@ +repos: + - repo: https://github.com/PyCQA/flake8 + rev: 5.0.4 + hooks: + - id: flake8 + - repo: https://github.com/PyCQA/isort + rev: 5.11.5 + hooks: + - id: isort + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.32.0 + hooks: + - id: yapf + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: double-quote-string-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: 
["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] + - repo: https://github.com/codespell-project/codespell + rev: v2.2.1 + hooks: + - id: codespell + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.9 + hooks: + - id: mdformat + args: [ "--number" ] + additional_dependencies: + - mdformat-openmmlab + - mdformat_frontmatter + - linkify-it-py + - repo: https://github.com/myint/docformatter + rev: v1.3.1 + hooks: + - id: docformatter + args: ["--in-place", "--wrap-descriptions", "79"] + - repo: https://github.com/open-mmlab/pre-commit-hooks + rev: v0.2.0 # Use the ref you want to point at + hooks: + - id: check-algo-readme + - id: check-copyright + args: ["mmdet3d"] # replace the dir_to_check with your expected directory to check diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100755 index 0000000..6cfbf5d --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,9 @@ +version: 2 + +formats: all + +python: + version: 3.7 + install: + - requirements: requirements/docs.txt + - requirements: requirements/readthedocs.txt diff --git a/CITATION.cff b/CITATION.cff new file mode 100755 index 0000000..958f6f3 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,8 @@ +cff-version: 1.2.0 +message: "If you use this software, please cite it as below." +authors: + - name: "MMDetection3D Contributors" +title: "OpenMMLab's Next-generation Platform for General 3D Object Detection" +date-released: 2020-07-23 +url: "https://github.com/open-mmlab/mmdetection3d" +license: Apache-2.0 diff --git a/LICENSE b/LICENSE new file mode 100755 index 0000000..04adf5c --- /dev/null +++ b/LICENSE @@ -0,0 +1,203 @@ +Copyright 2018-2019 Open-MMLab. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2019 Open-MMLab. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100755 index 0000000..7b9cae6 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include mmdet3d/.mim/model-index.yml +include requirements/*.txt +recursive-include mmdet3d/.mim/ops *.cpp *.cu *.h *.cc +recursive-include mmdet3d/.mim/configs *.py *.yml +recursive-include mmdet3d/.mim/tools *.sh *.py diff --git a/README.md b/README.md new file mode 100755 index 0000000..c277436 --- /dev/null +++ b/README.md @@ -0,0 +1,336 @@ +
+ +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection3d.readthedocs.io/en/latest/) +[![badge](https://github.com/open-mmlab/mmdetection3d/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection3d/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmdetection3d/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection3d) +[![license](https://img.shields.io/github/license/open-mmlab/mmdetection3d.svg)](https://github.com/open-mmlab/mmdetection3d/blob/master/LICENSE) + +
+ +**News**: + +**We have renamed the branch `1.1` to `main` and switched the default branch from `master` to `main`. We encourage +users to migrate to the latest version, though it comes with some cost. Please refer to [Migration Guide](docs/en/migration.md) for more details.** + +**v1.1.0** was released in 6/4/2023 + +We have supported more LiDAR-based segmentation methods, including Cylinder3D, MinkUNet and SPVCNN. More new features about 3D perception are on the way. Please stay tuned! + +**v1.1.0rc3** was released in 7/1/2023 + +The compatibilities of models are broken due to the unification and simplification of coordinate systems after v1.0.0rc0. For now, most models are benchmarked with similar performance, though few models are still being benchmarked. In the following release, we will update all the model checkpoints and benchmarks. See more details in the [Changelog](docs/en/notes/changelog.md) and [Changelog-v1.0.x](docs/en/notes/changelog_v1.0.x.md). + +Documentation: https://mmdetection3d.readthedocs.io/ + +## Introduction + +English | [简体中文](README_zh-CN.md) + +The master branch works with **PyTorch 1.6+**. + +MMDetection3D is an open source object detection toolbox based on PyTorch, towards the next-generation platform for general 3D detection. It is +a part of the OpenMMLab project developed by [MMLab](http://mmlab.ie.cuhk.edu.hk/). + +![demo image](resources/mmdet3d_outdoor_demo.gif) + +### Major features + +- **Support multi-modality/single-modality detectors out of box** + + It directly supports multi-modality/single-modality detectors including MVXNet, VoteNet, PointPillars, etc. + +- **Support indoor/outdoor 3D detection out of box** + + It directly supports popular indoor and outdoor 3D detection datasets, including ScanNet, SUNRGB-D, Waymo, nuScenes, Lyft, and KITTI. + For nuScenes dataset, we also support [nuImages dataset](https://github.com/open-mmlab/mmdetection3d/tree/latest/configs/nuimages). + +- **Natural integration with 2D detection** + + All the about **300+ models, methods of 40+ papers**, and modules supported in [MMDetection](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/en/model_zoo.md) can be trained or used in this codebase. + +- **High efficiency** + + It trains faster than other codebases. The main results are as below. Details can be found in [benchmark.md](./docs/en/notes/benchmarks.md). We compare the number of samples trained per second (the higher, the better). The models that are not supported by other codebases are marked by `✗`. + + | Methods | MMDetection3D | [OpenPCDet](https://github.com/open-mmlab/OpenPCDet) | [votenet](https://github.com/facebookresearch/votenet) | [Det3D](https://github.com/poodarchu/Det3D) | + | :-----------------: | :-----------: | :--------------------------------------------------: | :----------------------------------------------------: | :-----------------------------------------: | + | VoteNet | 358 | ✗ | 77 | ✗ | + | PointPillars-car | 141 | ✗ | ✗ | 140 | + | PointPillars-3class | 107 | 44 | ✗ | ✗ | + | SECOND | 40 | 30 | ✗ | ✗ | + | Part-A2 | 17 | 14 | ✗ | ✗ | + +Like [MMDetection](https://github.com/open-mmlab/mmdetection) and [MMCV](https://github.com/open-mmlab/mmcv), MMDetection3D can also be used as a library to support different projects on top of it. + +## License + +This project is released under the [Apache 2.0 license](LICENSE). + +## Changelog + +**1.1.0** was released in 6/4/2023. + +Please refer to [changelog.md](docs/en/notes/changelog.md) for details and release history. 
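+
+For users migrating to the 1.1 series described above, the dependency pins used by this repository's CI translate into a short environment setup. The commands below are only a sketch: the upstream branches and version constraints are copied from the CI workflows in this patch, the demo invocation assumes the `demo/pcd_demo.py` script that ships with MMDetection3D, and the sample point cloud and checkpoint paths are placeholders to replace with your own files.
+
+```shell
+# upstream dependencies, pinned as in the CI workflows
+pip install -U openmim
+pip install git+https://github.com/open-mmlab/mmengine.git@main
+mim install 'mmcv >= 2.0.0rc4'
+pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+
+# install mmdetection3d itself from the source root in editable mode
+pip install -e .
+
+# optional smoke test with the bundled point-cloud demo script
+# (the .bin sample and the checkpoints/ path are placeholders)
+python demo/pcd_demo.py demo/data/kitti/000008.bin \
+    configs/3dssd/3dssd_4xb4_kitti-3d-car.py \
+    checkpoints/3dssd_4x4_kitti-3d-car_20210818_203828-b89c8fc4.pth
+```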
+
+## Benchmark and model zoo
+
+Results and models are available in the [model zoo](docs/en/model_zoo.md).
+
+Supported components: **Backbones**, **Heads**, **Features**.
+
+Supported architectures, grouped by task and scene type:
+
+- **3D Object Detection**: Outdoor, Indoor
+- **Monocular 3D Object Detection**: Outdoor, Indoor
+- **Multi-modal 3D Object Detection**: Outdoor, Indoor
+- **3D Semantic Segmentation**: Outdoor, Indoor
    + +| | ResNet | PointNet++ | SECOND | DGCNN | RegNetX | DLA | MinkResNet | Cylinder3D | MinkUNet | +| :-----------: | :----: | :--------: | :----: | :---: | :-----: | :-: | :--------: | :--------: | :------: | +| SECOND | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| PointPillars | ✗ | ✗ | ✓ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | +| FreeAnchor | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | +| VoteNet | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| H3DNet | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| 3DSSD | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| Part-A2 | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| MVXNet | ✓ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| CenterPoint | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| SSN | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | +| ImVoteNet | ✓ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| FCOS3D | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| PointNet++ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| Group-Free-3D | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| ImVoxelNet | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| PAConv | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| DGCNN | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | +| SMOKE | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | +| PGD | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| MonoFlex | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | +| SA-SSD | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| FCAF3D | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | +| PV-RCNN | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| Cylinder3D | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | +| MinkUNet | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | +| SPVCNN | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | + +**Note:** All the about **300+ models, methods of 40+ papers** in 2D detection supported by [MMDetection](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/en/model_zoo.md) can be trained or used in this codebase. + +## Installation + +Please refer to [get_started.md](docs/en/get_started.md) for installation. + +## Get Started + +Please see [get_started.md](docs/en/get_started.md) for the basic usage of MMDetection3D. We provide guidance for quick run [with existing dataset](docs/en/user_guides/train_test.md) and [with new dataset](docs/en/user_guides/2_new_data_model.md) for beginners. There are also tutorials for [learning configuration systems](docs/en/user_guides/config.md), [customizing dataset](docs/en/advanced_guides/customize_dataset.md), [designing data pipeline](docs/en/user_guides/data_pipeline.md), [customizing models](docs/en/advanced_guides/customize_models.md), [customizing runtime settings](docs/en/advanced_guides/customize_runtime.md) and [Waymo dataset](docs/en/advanced_guides/datasets/waymo_det.md). + +Please refer to [FAQ](docs/en/notes/faq.md) for frequently asked questions. When updating the version of MMDetection3D, please also check the [compatibility doc](docs/en/notes/compatibility.md) to be aware of the BC-breaking updates introduced in each version. + +## Citation + +If you find this project useful in your research, please consider cite: + +```latex +@misc{mmdet3d2020, + title={{MMDetection3D: OpenMMLab} next-generation platform for general {3D} object detection}, + author={MMDetection3D Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmdetection3d}}, + year={2020} +} +``` + +## Contributing + +We appreciate all contributions to improve MMDetection3D. Please refer to [CONTRIBUTING.md](./docs/en/notes/contribution_guides.md) for the contributing guideline. + +## Acknowledgement + +MMDetection3D is an open source project that is contributed by researchers and engineers from various colleges and companies. 
We appreciate all the contributors as well as users who give valuable feedbacks. +We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new 3D detectors. + +## Projects in OpenMMLab + +- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models. +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. +- [MMEval](https://github.com/open-mmlab/mmeval): A unified evaluation library for multiple machine learning libraries. +- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. +- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark. +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. +- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark. +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark. +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework. diff --git a/README_zh-CN.md b/README_zh-CN.md new file mode 100755 index 0000000..034c87e --- /dev/null +++ b/README_zh-CN.md @@ -0,0 +1,349 @@ +
    + +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection3d.readthedocs.io/zh_CN/latest/) +[![badge](https://github.com/open-mmlab/mmdetection3d/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection3d/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmdetection3d/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection3d) +[![license](https://img.shields.io/github/license/open-mmlab/mmdetection3d.svg)](https://github.com/open-mmlab/mmdetection3d/blob/master/LICENSE) + +
    + +**新闻**: + +**我们将 `1.1` 分支重命名为 `main` 并将默认分支从 `master` 切换到 `main`。我们鼓励用户迁移到最新版本,请参考 [迁移指南](docs/en/migration.md) 以了解更多细节。** + +**v1.1.0** 版本已经在 2023.4.6 发布。 + +我们已经支持了更多基于 LiDAR 的 3D 分割算法。更多关于 3D 感知的新特性正在开发中,请拭目以待! + +**v1.1.0rc3** 版本已经在 2023.1.7 发布。 + +由于坐标系的统一和简化,模型的兼容性会受到影响。目前,大多数模型都以类似的性能对齐了精度,但仍有少数模型在进行基准测试。在接下来的版本中,我们将更新所有的模型权重文件和基准。您可以在[变更日志](docs/zh_cn/notes/changelog.md)和 [v1.0.x 版本变更日志](docs/zh_cn/notes/changelog_v1.0.x.md)中查看更多详细信息。 + +文档:https://mmdetection3d.readthedocs.io/ + +## 简介 + +[English](README.md) | 简体中文 + +主分支代码目前支持 PyTorch 1.6 以上的版本。 + +MMDetection3D 是一个基于 PyTorch 的目标检测开源工具箱,下一代面向 3D 检测的平台。它是 OpenMMlab 项目的一部分,这个项目由香港中文大学多媒体实验室和商汤科技联合发起。 + +![demo image](resources/mmdet3d_outdoor_demo.gif) + +### 主要特性 + +- **支持多模态/单模态的检测器** + + 支持多模态/单模态检测器,包括 MVXNet,VoteNet,PointPillars 等。 + +- **支持户内/户外的数据集** + + 支持室内/室外的 3D 检测数据集,包括 ScanNet,SUNRGB-D,Waymo,nuScenes,Lyft,KITTI。 + 对于 nuScenes 数据集,我们也支持 [nuImages 数据集](https://github.com/open-mmlab/mmdetection3d/tree/latest/configs/nuimages)。 + +- **与 2D 检测器的自然整合** + + [MMDetection](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/zh_cn/model_zoo.md) 支持的 **300+ 个模型,40+ 的论文算法**,和相关模块都可以在此代码库中训练或使用。 + +- **性能高** + + 训练速度比其他代码库更快。下表可见主要的对比结果。更多的细节可见[基准测评文档](./docs/zh_cn/notes/benchmarks.md)。我们对比了每秒训练的样本数(值越高越好)。其他代码库不支持的模型被标记为 `✗`。 + + | Methods | MMDetection3D | [OpenPCDet](https://github.com/open-mmlab/OpenPCDet) | [votenet](https://github.com/facebookresearch/votenet) | [Det3D](https://github.com/poodarchu/Det3D) | + | :-----------------: | :-----------: | :--------------------------------------------------: | :----------------------------------------------------: | :-----------------------------------------: | + | VoteNet | 358 | ✗ | 77 | ✗ | + | PointPillars-car | 141 | ✗ | ✗ | 140 | + | PointPillars-3class | 107 | 44 | ✗ | ✗ | + | SECOND | 40 | 30 | ✗ | ✗ | + | Part-A2 | 17 | 14 | ✗ | ✗ | + +和 [MMDetection](https://github.com/open-mmlab/mmdetection),[MMCV](https://github.com/open-mmlab/mmcv) 一样,MMDetection3D 也可以作为一个库去支持各式各样的项目。 + +## 开源许可证 + +该项目采用 [Apache 2.0 开源许可证](LICENSE)。 + +## 更新日志 + +我们在 2023.1.7 发布了 **1.1.0rc3** 版本。 + +更多细节和版本发布历史可以参考 [changelog.md](docs/zh_cn/notes/changelog.md)。 + +## 基准测试和模型库 + +测试结果和模型可以在[模型库](docs/zh_cn/model_zoo.md)中找到。 + +
+
+支持的模块组件:**主干网络**、**检测头**、**特性**。
+
+支持的算法模型(按任务与场景划分):
+
+- **3D 目标检测**:室外、室内
+- **单目 3D 目标检测**:室外、室内
+- **多模态 3D 目标检测**:室外、室内
+- **3D 语义分割**:室外、室内
    + +| | ResNet | PointNet++ | SECOND | DGCNN | RegNetX | DLA | MinkResNet | Cylinder3D | MinkUNet | +| :-----------: | :----: | :--------: | :----: | :---: | :-----: | :-: | :--------: | :--------: | :------: | +| SECOND | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| PointPillars | ✗ | ✗ | ✓ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | +| FreeAnchor | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | +| VoteNet | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| H3DNet | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| 3DSSD | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| Part-A2 | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| MVXNet | ✓ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| CenterPoint | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| SSN | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | +| ImVoteNet | ✓ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| FCOS3D | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| PointNet++ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| Group-Free-3D | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| ImVoxelNet | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| PAConv | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| DGCNN | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | +| SMOKE | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | +| PGD | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| MonoFlex | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | +| SA-SSD | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| FCAF3D | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | +| PV-RCNN | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | +| Cylinder3D | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | +| MinkUNet | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | +| SPVCNN | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | + +**注意:**[MMDetection](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/zh_cn/model_zoo.md) 支持的基于 2D 检测的 **300+ 个模型,40+ 的论文算法**在 MMDetection3D 中都可以被训练或使用。 + +## 安装 + +请参考[快速入门文档](docs/zh_cn/get_started.md)进行安装。 + +## 快速入门 + +请参考[快速入门文档](docs/zh_cn/get_started.md)学习 MMDetection3D 的基本使用。我们为新手提供了分别针对[已有数据集](docs/zh_cn/user_guides/train_test.md)和[新数据集](docs/zh_cn/user_guides/2_new_data_model.md)的使用指南。我们也提供了一些进阶教程,内容覆盖了[学习配置文件](docs/zh_cn/user_guides/config.md),[增加自定义数据集](docs/zh_cn/advanced_guides/customize_dataset.md),[设计新的数据预处理流程](docs/zh_cn/user_guides/data_pipeline.md),[增加自定义模型](docs/zh_cn/advanced_guides/customize_models.md),[增加自定义的运行时配置](docs/zh_cn/advanced_guides/customize_runtime.md)和 [Waymo 数据集](docs/zh_cn/advanced_guides/datasets/waymo_det.md)。 + +请参考 [FAQ](docs/zh_cn/notes/faq.md) 查看一些常见的问题与解答。在升级 MMDetection3D 的版本时,请查看[兼容性文档](docs/zh_cn/notes/compatibility.md)以知晓每个版本引入的不与之前版本兼容的更新。 + +## 引用 + +如果你觉得本项目对你的研究工作有所帮助,请参考如下 bibtex 引用 MMdetection3D + +```latex +@misc{mmdet3d2020, + title={{MMDetection3D: OpenMMLab} next-generation platform for general {3D} object detection}, + author={MMDetection3D Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmdetection3d}}, + year={2020} +} +``` + +## 贡献指南 + +我们感谢所有的贡献者为改进和提升 MMDetection3D 所作出的努力。请参考[贡献指南](.github/CONTRIBUTING.md)来了解参与项目贡献的相关指引。 + +## 致谢 + +MMDetection3D 是一款由来自不同高校和企业的研发人员共同参与贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。我们希望这个工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现已有算法并开发自己的新的 3D 检测模型。 + +## OpenMMLab 的其他项目 + +- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab 深度学习模型训练基础库 +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库 +- [MMEval](https://github.com/open-mmlab/mmeval): 统一开放的跨框架算法评测库 +- [MIM](https://github.com/open-mmlab/mim): MIM 是 OpenMMlab 项目、算法、模型的统一入口 +- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab 图像分类工具箱 +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱 +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台 +- 
[MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准 +- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO 系列工具箱与测试基准 +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱 +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包 +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱 +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准 +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准 +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准 +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准 +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱 +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台 +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准 +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱 +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱 +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架 + +## 欢迎加入 OpenMMLab 社区 + +扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=aCvMxdr3) + +
    + +我们会在 OpenMMLab 社区为大家 + +- 📢 分享 AI 框架的前沿核心技术 +- 💻 解读 PyTorch 常用模块源码 +- 📰 发布 OpenMMLab 的相关新闻 +- 🚀 介绍 OpenMMLab 开发的前沿算法 +- 🏃 获取更高效的问题答疑和意见反馈 +- 🔥 提供与各行各业开发者充分交流的平台 + +干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬 diff --git a/configs/3dssd/3dssd_4xb4_kitti-3d-car.py b/configs/3dssd/3dssd_4xb4_kitti-3d-car.py new file mode 100755 index 0000000..6fbdfec --- /dev/null +++ b/configs/3dssd/3dssd_4xb4_kitti-3d-car.py @@ -0,0 +1,119 @@ +_base_ = [ + '../_base_/models/3dssd.py', '../_base_/datasets/kitti-3d-car.py', + '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Car'] +point_cloud_range = [0, -40, -5, 70, 40, 3] +input_modality = dict(use_lidar=True, use_camera=False) +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + classes=class_names, + sample_groups=dict(Car=15), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectSample', db_sampler=db_sampler), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0], + global_rot_range=[0.0, 0.0], + rot_range=[-1.0471975511965976, 1.0471975511965976]), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.9, 1.1]), + # 3DSSD can get a higher performance without this transform + # dict(type='BackgroundPointsFilter', bbox_enlarge_range=(0.5, 2.0, 0.5)), + dict(type='PointSample', num_points=16384), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointSample', num_points=16384), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=4, dataset=dict(dataset=dict(pipeline=train_pipeline, ))) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# model settings +model = dict( + bbox_head=dict( + num_classes=1, + bbox_coder=dict( + type='AnchorFreeBBoxCoder', num_dir_bins=12, with_rot=True))) + +# optimizer +lr = 0.002 # max learning rate +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.), + clip_grad=dict(max_norm=35, norm_type=2), +) + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=80, val_interval=2) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + 
type='MultiStepLR', + begin=0, + end=80, + by_epoch=True, + milestones=[45, 60], + gamma=0.1) +] diff --git a/configs/3dssd/README.md b/configs/3dssd/README.md new file mode 100755 index 0000000..d394696 --- /dev/null +++ b/configs/3dssd/README.md @@ -0,0 +1,45 @@ +# 3DSSD: Point-based 3D Single Stage Object Detector + +> [3DSSD: Point-based 3D Single Stage Object Detector](https://arxiv.org/abs/2002.10187) + + + +## Abstract + +Currently, there have been many kinds of voxel-based 3D single stage detectors, while point-based single stage methods are still underexplored. In this paper, we first present a lightweight and effective point-based 3D single stage object detector, named 3DSSD, achieving a good balance between accuracy and efficiency. In this paradigm, all upsampling layers and refinement stage, which are indispensable in all existing point-based methods, are abandoned to reduce the large computation cost. We novelly propose a fusion sampling strategy in downsampling process to make detection on less representative points feasible. A delicate box prediction network including a candidate generation layer, an anchor-free regression head with a 3D center-ness assignment strategy is designed to meet with our demand of accuracy and speed. Our paradigm is an elegant single stage anchor-free framework, showing great superiority to other existing methods. We evaluate 3DSSD on widely used KITTI dataset and more challenging nuScenes dataset. Our method outperforms all state-of-the-art voxel-based single stage methods by a large margin, and has comparable performance to two stage point-based methods as well, with inference speed more than 25 FPS, 2x faster than former state-of-the-art point-based methods. + +
    + +## Introduction + +We implement 3DSSD and provide the results and checkpoints on KITTI datasets. + +Some settings in our implementation are different from the [official implementation](https://github.com/Jia-Research-Lab/3DSSD), which bring marginal differences to the performance on KITTI datasets in our experiments. To simplify and unify the models of our implementation, we skip them in our models. These differences are listed as below: + +1. We keep the scenes without any object while the official code skips these scenes in training. In the official implementation, only 3229 and 3394 samples are used as training and validation sets, respectively. In our implementation, we keep using 3712 and 3769 samples as training and validation sets, respectively, as those used for all the other models in our implementation on KITTI datasets. +2. We do not modify the decay of `batch normalization` during training. +3. While using [`DataBaseSampler`](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/pipelines/dbsampler.py#L80) for data augmentation, the official code uses road planes as reference to place the sampled objects while we do not. +4. We perform detection using LIDAR coordinates while the official code uses camera coordinates. + +## Results and models + +### KITTI + +| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :--------------------------------------------: | :---: | :-----: | :------: | :------------: | :----------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [PointNet2SAMSG](./3dssd_4xb4_kitti-3d-car.py) | Car | 72e | 4.7 | | 78.58(81.27)1 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/3dssd/3dssd_4x4_kitti-3d-car/3dssd_4x4_kitti-3d-car_20210818_203828-b89c8fc4.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/3dssd/3dssd_4x4_kitti-3d-car/3dssd_4x4_kitti-3d-car_20210818_203828.log.json) | + +\[1\]: We report two different 3D object detection performance here. 78.58mAP is evaluated by our evaluation code and 81.27mAP is evaluated by the official development kit (so as that used in the paper and official code of 3DSSD ). We found that the commonly used Python implementation of [`rotate_iou`](https://github.com/traveller59/second.pytorch/blob/e42e4a0e17262ab7d180ee96a0a36427f2c20a44/second/core/non_max_suppression/nms_gpu.py#L605) which is used in our KITTI dataset evaluation, is different from the official implementation in [KITTI benchmark](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d). 
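+
+The config and checkpoint listed above can be exercised with the usual MMDetection3D entry points. The commands below are a sketch only, assuming the `tools/dist_train.sh` and `tools/test.py` scripts from this repository, 4 GPUs (matching the `4xb4` schedule encoded in the config name), and that the released checkpoint has been downloaded into the working directory:
+
+```shell
+# distributed training on 4 GPUs
+bash tools/dist_train.sh configs/3dssd/3dssd_4xb4_kitti-3d-car.py 4
+
+# evaluate the released checkpoint on the KITTI val split
+python tools/test.py configs/3dssd/3dssd_4xb4_kitti-3d-car.py \
+    3dssd_4x4_kitti-3d-car_20210818_203828-b89c8fc4.pth
+```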
+ +## Citation + +```latex +@inproceedings{yang20203dssd, + author = {Zetong Yang and Yanan Sun and Shu Liu and Jiaya Jia}, + title = {3DSSD: Point-based 3D Single Stage Object Detector}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + year = {2020} +} +``` diff --git a/configs/3dssd/metafile.yml b/configs/3dssd/metafile.yml new file mode 100755 index 0000000..bd2d146 --- /dev/null +++ b/configs/3dssd/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: 3DSSD + Metadata: + Training Data: KITTI + Training Techniques: + - AdamW + Training Resources: 4x TITAN X + Architecture: + - PointNet++ + Paper: + URL: https://arxiv.org/abs/2002.10187 + Title: '3DSSD: Point-based 3D Single Stage Object Detector' + README: configs/3dssd/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/ssd3dnet.py#L7 + Version: v0.6.0 + +Models: + - Name: 3dssd_4x4_kitti-3d-car + In Collection: 3DSSD + Config: configs/3dssd/3dssd_4xb4_kitti-3d-car.py + Metadata: + Training Memory (GB): 4.7 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 78.58 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/3dssd/3dssd_4x4_kitti-3d-car/3dssd_4x4_kitti-3d-car_20210818_203828-b89c8fc4.pth diff --git a/configs/_base_/datasets/kitti-3d-3class.py b/configs/_base_/datasets/kitti-3d-3class.py new file mode 100755 index 0000000..6c40509 --- /dev/null +++ b/configs/_base_/datasets/kitti-3d-3class.py @@ -0,0 +1,167 @@ +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +input_modality = dict(use_lidar=True, use_camera=False) +metainfo = dict(classes=class_names) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/kitti/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=12, Pedestrian=6, Cyclist=6), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, # x, y, z, intensity + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + 
dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=6, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/kitti-3d-car.py b/configs/_base_/datasets/kitti-3d-car.py new file mode 100755 index 0000000..daea720 --- /dev/null +++ b/configs/_base_/datasets/kitti-3d-car.py @@ -0,0 +1,165 @@ +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Car'] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +input_modality = dict(use_lidar=True, use_camera=False) +metainfo = dict(classes=class_names) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/kitti/' + +# Method 2: Use backend_args, file_client_args in 
versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + classes=class_names, + sample_groups=dict(Car=15), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, # x, y, z, intensity + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=6, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR', + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/kitti-mono3d.py b/configs/_base_/datasets/kitti-mono3d.py new file mode 100755 index 0000000..d5cd611 --- /dev/null +++ b/configs/_base_/datasets/kitti-mono3d.py @@ -0,0 +1,100 @@ +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +input_modality = dict(use_lidar=False, use_camera=True) +metainfo = dict(classes=class_names) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/kitti/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=False, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='Resize', scale=(1242, 375), keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_bboxes_3d', + 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='Resize', scale=(1242, 375), keep_ratio=True), + dict(type='Pack3DDetInputs', keys=['img']) +] +eval_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['img']) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(img='training/image_2'), + pipeline=train_pipeline, + modality=input_modality, + load_type='fov_image_based', + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='Camera' in monocular 3d + # detection task + box_type_3d='Camera', + 
backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img='training/image_2'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + load_type='fov_image_based', + metainfo=metainfo, + test_mode=True, + box_type_3d='Camera', + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox', + backend_args=backend_args) + +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/lyft-3d-range100.py b/configs/_base_/datasets/lyft-3d-range100.py new file mode 100755 index 0000000..58d63cd --- /dev/null +++ b/configs/_base_/datasets/lyft-3d-range100.py @@ -0,0 +1,150 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-100, -100, -5, 100, 100, 3] +# For Lyft we usually do 9-class detection +class_names = [ + 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', + 'bicycle', 'pedestrian', 'animal' +] +dataset_type = 'LyftDataset' +data_root = 'data/lyft/' +data_prefix = dict(pts='v1.01-train/lidar', img='', sweeps='v1.01-train/lidar') +# Input modality for Lyft dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/lyft/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + 
dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='lyft_infos_train.pkl', + pipeline=train_pipeline, + metainfo=dict(classes=class_names), + modality=input_modality, + data_prefix=data_prefix, + test_mode=False, + box_type_3d='LiDAR', + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='lyft_infos_val.pkl', + pipeline=test_pipeline, + metainfo=dict(classes=class_names), + modality=input_modality, + test_mode=True, + data_prefix=data_prefix, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='LyftMetric', + data_root=data_root, + ann_file='lyft_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/lyft-3d.py b/configs/_base_/datasets/lyft-3d.py new file mode 100755 index 0000000..a9e1c4c --- /dev/null +++ b/configs/_base_/datasets/lyft-3d.py @@ -0,0 +1,160 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-80, -80, -5, 80, 80, 3] +# For Lyft we usually do 9-class detection +class_names = [ + 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', + 'bicycle', 'pedestrian', 'animal' +] +dataset_type = 'LyftDataset' +data_root = 'data/lyft/' +# Input modality for Lyft dataset, this is consistent with the submission +# format which requires the information in input_modality. 
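The comment at the top of these Lyft configs notes that the model's point cloud range has to track the dataset's. As a reading aid for the `PointsRangeFilter`/`ObjectRangeFilter` entries in the pipelines, here is a minimal standalone NumPy sketch of that kind of range crop using the 80 m Lyft range defined just above; it illustrates the idea only and is not the mmdet3d transform itself, and the helper name and toy points are made up:

import numpy as np

# [x_min, y_min, z_min, x_max, y_max, z_max], as in the Lyft config above.
point_cloud_range = np.array([-80.0, -80.0, -5.0, 80.0, 80.0, 3.0])

def filter_points(points: np.ndarray, pcd_range: np.ndarray) -> np.ndarray:
    """Keep only points whose x/y/z coordinates fall inside the range."""
    in_range = np.all(
        (points[:, :3] >= pcd_range[:3]) & (points[:, :3] < pcd_range[3:]),
        axis=1)
    return points[in_range]

pts = np.array([[10.0, -5.0, 0.5, 0.2, 0.0],    # inside the range
                [120.0, 0.0, 0.0, 0.1, 0.0]])   # outside along x
print(filter_points(pts, point_cloud_range).shape)  # -> (1, 5)

If the range is tightened or widened here, the anchor/voxel settings on the model side need the matching values, which is what the config comment is warning about.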
+input_modality = dict(use_lidar=True, use_camera=False) +data_prefix = dict(pts='v1.01-train/lidar', img='', sweeps='v1.01-train/lidar') + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/lyft/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='lyft_infos_train.pkl', + pipeline=train_pipeline, + metainfo=dict(classes=class_names), + modality=input_modality, + data_prefix=data_prefix, + test_mode=False, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='lyft_infos_val.pkl', + pipeline=test_pipeline, + metainfo=dict(classes=class_names), + modality=input_modality, + data_prefix=data_prefix, + test_mode=True, + box_type_3d='LiDAR', + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='lyft_infos_val.pkl', + pipeline=test_pipeline, + metainfo=dict(classes=class_names), + modality=input_modality, + test_mode=True, + data_prefix=data_prefix, + box_type_3d='LiDAR', + backend_args=backend_args)) + +val_evaluator = dict( + type='LyftMetric', + data_root=data_root, + ann_file='lyft_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/nuim-instance.py b/configs/_base_/datasets/nuim-instance.py new file mode 100755 index 0000000..913e507 --- /dev/null +++ b/configs/_base_/datasets/nuim-instance.py @@ -0,0 +1,70 @@ +dataset_type = 'CocoDataset' +data_root = 'data/nuimages/' +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/nuimages/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1280, 720), (1920, 1080)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PackDetInputs'), +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1600, 900), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')), 
+] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-train.json', + img_prefix=data_root, + classes=class_names, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-val.json', + img_prefix=data_root, + classes=class_names, + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-val.json', + img_prefix=data_root, + classes=class_names, + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/configs/_base_/datasets/nus-3d.py b/configs/_base_/datasets/nus-3d.py new file mode 100755 index 0000000..46fa854 --- /dev/null +++ b/configs/_base_/datasets/nus-3d.py @@ -0,0 +1,169 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-50, -50, -5, 50, 50, 3] +# Using calibration info convert the Lidar-coordinate point cloud range to the +# ego-coordinate point cloud range could bring a little promotion in nuScenes. +# point_cloud_range = [-50, -50.8, -5, 50, 49.2, 3] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +metainfo = dict(classes=class_names) +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict(use_lidar=True, use_camera=False) +data_prefix = dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP') + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/nuscenes/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + test_mode=True, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), 
+ dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + test_mode=True, + backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_train.pkl', + pipeline=train_pipeline, + metainfo=metainfo, + modality=input_modality, + test_mode=False, + data_prefix=data_prefix, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + modality=input_modality, + data_prefix=data_prefix, + test_mode=True, + box_type_3d='LiDAR', + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + modality=input_modality, + test_mode=True, + data_prefix=data_prefix, + box_type_3d='LiDAR', + backend_args=backend_args)) + +val_evaluator = dict( + type='NuScenesMetric', + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/nus-mono3d.py b/configs/_base_/datasets/nus-mono3d.py new file mode 100755 index 0000000..9a1d226 --- /dev/null +++ b/configs/_base_/datasets/nus-mono3d.py @@ -0,0 +1,119 @@ +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +metainfo = dict(classes=class_names) +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. 
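Every file in this patch repeats the same "Method 1 / Method 2" note about switching the file backend. As a self-contained sketch of what the commented-out `path_mapping` in those "Method 2" blocks is getting at, the snippet below rewrites a local path to an object-storage URI; it is plain Python for illustration only, not the mmengine file I/O code, and the bucket prefixes are just the ones shown in the comments:

# Prefix mapping copied from the commented-out petrel example.
path_mapping = {
    './data/': 's3://openmmlab/datasets/detection3d/',
    'data/': 's3://openmmlab/datasets/detection3d/',
}

def remap_path(filepath: str, mapping: dict) -> str:
    """Return the path with the first matching local prefix rewritten."""
    for local_prefix, remote_prefix in mapping.items():
        if filepath.startswith(local_prefix):
            return remote_prefix + filepath[len(local_prefix):]
    return filepath

print(remap_path('data/nuscenes/nuscenes_infos_val.pkl', path_mapping))
# -> s3://openmmlab/datasets/detection3d/nuscenes/nuscenes_infos_val.pkl

Leaving `backend_args = None`, as all of these configs do by default, simply means paths are read from the local filesystem without any such remapping.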
+input_modality = dict(use_lidar=False, use_camera=True) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/nuscenes/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=True, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='Resize', scale=(1600, 900), keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'attr_labels', + 'gt_bboxes_3d', 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] + +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='mmdet.Resize', scale=(1600, 900), keep_ratio=True), + dict(type='Pack3DDetInputs', keys=['img']) +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + pts='', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT'), + ann_file='nuscenes_infos_train.pkl', + load_type='mv_image_based', + pipeline=train_pipeline, + metainfo=metainfo, + modality=input_modality, + test_mode=False, + # we use box_type_3d='Camera' in monocular 3d + # detection task + box_type_3d='Camera', + use_valid_flag=True, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + pts='', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT'), + ann_file='nuscenes_infos_val.pkl', + load_type='mv_image_based', + pipeline=test_pipeline, + modality=input_modality, + metainfo=metainfo, + test_mode=True, + box_type_3d='Camera', + use_valid_flag=True, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='NuScenesMetric', + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + metric='bbox', + backend_args=backend_args) + +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/s3dis-3d.py b/configs/_base_/datasets/s3dis-3d.py new file mode 100755 index 0000000..0428033 --- /dev/null +++ b/configs/_base_/datasets/s3dis-3d.py @@ -0,0 +1,134 @@ +# dataset settings +dataset_type = 'S3DISDataset' +data_root = 'data/s3dis/' + +# Example to use different file client +# Method 1: simply set the data 
root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/s3dis/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +metainfo = dict(classes=('table', 'chair', 'sofa', 'bookcase', 'board')) +train_area = [1, 2, 3, 4, 6] +test_area = 5 + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='PointSample', num_points=100000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[0.9, 1.1], + translation_std=[.1, .1, .1], + shift_height=False), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=100000), + dict(type='NormalizePointsColor', color_mean=None), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=13, + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type=dataset_type, + data_root=data_root, + ann_file=f's3dis_infos_Area_{i}.pkl', + pipeline=train_pipeline, + filter_empty_gt=True, + metainfo=metainfo, + box_type_3d='Depth', + backend_args=backend_args) for i in train_area + ]))) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +val_evaluator = dict(type='IndoorMetric') +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/s3dis-seg.py b/configs/_base_/datasets/s3dis-seg.py new file mode 100755 index 0000000..0158e8b --- /dev/null +++ b/configs/_base_/datasets/s3dis-seg.py @@ -0,0 +1,159 @@ 
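The s3dis-3d config above builds its training set by concatenating one dataset per area (1, 2, 3, 4 and 6) and wrapping the result in `RepeatDataset(times=13)`, while area 5 is held out for validation and testing. A small standalone sketch of how those pieces expand; the per-area sample counts below are hypothetical and only there to show the arithmetic:

train_area = [1, 2, 3, 4, 6]
test_area = 5

train_ann_files = [f's3dis_infos_Area_{i}.pkl' for i in train_area]
print(train_ann_files)   # ['s3dis_infos_Area_1.pkl', ..., 's3dis_infos_Area_6.pkl']
print(f's3dis_infos_Area_{test_area}.pkl')  # held-out area for val/test

# Hypothetical per-area sample counts, purely for illustration.
samples_per_area = {1: 44, 2: 40, 3: 23, 4: 49, 6: 48}
concat_len = sum(samples_per_area[i] for i in train_area)
print(concat_len, 13 * concat_len)  # ConcatDataset length vs. RepeatDataset length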
+# For S3DIS seg we usually do 13-class segmentation +class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter') +metainfo = dict(classes=class_names) +dataset_type = 'S3DISSegDataset' +data_root = 'data/s3dis/' +input_modality = dict(use_lidar=True, use_camera=False) +data_prefix = dict( + pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask') + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/s3dis/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +num_points = 4096 +train_area = [1, 2, 3, 4, 6] +test_area = 5 +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict(type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.0, + ignore_index=len(class_names), + use_normalized_coord=True, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict(type='NormalizePointsColor', color_mean=None), + dict( + # a wrapper in order to successfully call test function + # actually we don't perform test-time-aug + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +# we need to load gt seg_mask! 
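Both the S3DIS and ScanNet segmentation configs pass `ignore_index=len(class_names)`, i.e. the id one past the last real class is reserved for unlabeled points, which are then excluded from losses and metrics. A short sketch of that convention with the 13 S3DIS classes listed above; the label array is made up for illustration:

import numpy as np

class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',
               'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')
ignore_index = len(class_names)          # -> 13, one past the last valid class id

labels = np.array([0, 5, 12, ignore_index, 3, ignore_index])
valid = labels != ignore_index           # mask out unlabeled points
print(ignore_index, valid.sum(), labels[valid])  # 13, 4 valid points kept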
+eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='Pack3DDetInputs', keys=['points']) +] + +# train on area 1, 2, 3, 4, 6 +# test on area 5 +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area], + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=train_pipeline, + modality=input_modality, + ignore_index=len(class_names), + scene_idxs=[ + f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area + ], + test_mode=False, + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_files=f's3dis_infos_Area_{test_area}.pkl', + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=test_pipeline, + modality=input_modality, + ignore_index=len(class_names), + scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy', + test_mode=True, + backend_args=backend_args)) +val_dataloader = test_dataloader + +val_evaluator = dict(type='SegMetric') +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/scannet-3d.py b/configs/_base_/datasets/scannet-3d.py new file mode 100755 index 0000000..67a39ef --- /dev/null +++ b/configs/_base_/datasets/scannet-3d.py @@ -0,0 +1,141 @@ +# dataset settings +dataset_type = 'ScanNetDataset' +data_root = 'data/scannet/' + +metainfo = dict( + classes=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin')) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/scannet/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True, + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=40000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0], + shift_height=True), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +test_pipeline = [ + dict( + 
type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=40000), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_train.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth', + backend_args=backend_args))) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +val_evaluator = dict(type='IndoorMetric') +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/scannet-seg.py b/configs/_base_/datasets/scannet-seg.py new file mode 100755 index 0000000..6e94b34 --- /dev/null +++ b/configs/_base_/datasets/scannet-seg.py @@ -0,0 +1,154 @@ +# For ScanNet seg we usually do 20-class segmentation +class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', + 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', + 'bathtub', 'otherfurniture') +metainfo = dict(classes=class_names) +dataset_type = 'ScanNetSegDataset' +data_root = 'data/scannet/' +input_modality = dict(use_lidar=True, use_camera=False) +data_prefix = dict( + pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask') + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/scannet/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +num_points = 8192 +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict( + 
type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict(type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.5, + ignore_index=len(class_names), + use_normalized_coord=False, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict(type='NormalizePointsColor', color_mean=None), + dict( + # a wrapper in order to successfully call test function + # actually we don't perform test-time-aug + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +# we need to load gt seg_mask! +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_train.pkl', + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=train_pipeline, + modality=input_modality, + ignore_index=len(class_names), + scene_idxs=data_root + 'seg_info/train_resampled_scene_idxs.npy', + test_mode=False, + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=test_pipeline, + modality=input_modality, + ignore_index=len(class_names), + test_mode=True, + backend_args=backend_args)) +val_dataloader = test_dataloader + +val_evaluator = dict(type='SegMetric') +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/semantickitti.py b/configs/_base_/datasets/semantickitti.py new file mode 100755 index 0000000..aa253e5 --- /dev/null +++ b/configs/_base_/datasets/semantickitti.py @@ -0,0 +1,184 @@ +# For SemanticKitti we usually do 19-class segmentation. +# For labels_map we follow the uniform format of MMDetection & MMSegmentation +# i.e. we consider the unlabeled class as the last one, which is different +# from the original implementation of some methods e.g. 
Cylinder3D. +dataset_type = 'SemanticKittiDataset' +data_root = 'data/semantickitti/' +class_names = [ + 'car', 'bicycle', 'motorcycle', 'truck', 'bus', 'person', 'bicyclist', + 'motorcyclist', 'road', 'parking', 'sidewalk', 'other-ground', 'building', + 'fence', 'vegetation', 'trunck', 'terrian', 'pole', 'traffic-sign' +] +labels_map = { + 0: 19, # "unlabeled" + 1: 19, # "outlier" mapped to "unlabeled" --------------mapped + 10: 0, # "car" + 11: 1, # "bicycle" + 13: 4, # "bus" mapped to "other-vehicle" --------------mapped + 15: 2, # "motorcycle" + 16: 4, # "on-rails" mapped to "other-vehicle" ---------mapped + 18: 3, # "truck" + 20: 4, # "other-vehicle" + 30: 5, # "person" + 31: 6, # "bicyclist" + 32: 7, # "motorcyclist" + 40: 8, # "road" + 44: 9, # "parking" + 48: 10, # "sidewalk" + 49: 11, # "other-ground" + 50: 12, # "building" + 51: 13, # "fence" + 52: 19, # "other-structure" mapped to "unlabeled" ------mapped + 60: 8, # "lane-marking" to "road" ---------------------mapped + 70: 14, # "vegetation" + 71: 15, # "trunk" + 72: 16, # "terrain" + 80: 17, # "pole" + 81: 18, # "traffic-sign" + 99: 19, # "other-object" to "unlabeled" ----------------mapped + 252: 0, # "moving-car" to "car" ------------------------mapped + 253: 6, # "moving-bicyclist" to "bicyclist" ------------mapped + 254: 5, # "moving-person" to "person" ------------------mapped + 255: 7, # "moving-motorcyclist" to "motorcyclist" ------mapped + 256: 4, # "moving-on-rails" mapped to "other-vehic------mapped + 257: 4, # "moving-bus" mapped to "other-vehicle" -------mapped + 258: 3, # "moving-truck" to "truck" --------------------mapped + 259: 4 # "moving-other"-vehicle to "other-vehicle"-----mapped +} + +metainfo = dict( + classes=class_names, seg_label_mapping=labels_map, max_label=259) + +input_modality = dict(use_lidar=True, use_camera=False) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/semantickitti/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_seg_3d=True, + seg_3d_dtype='np.int32', + seg_offset=2**16, + dataset_type='semantickitti', + backend_args=backend_args), + dict(type='PointSegClassMapping', ), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05], + translation_std=[0.1, 0.1, 0.1], + ), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_seg_3d=True, + seg_3d_dtype='np.int32', + seg_offset=2**16, + dataset_type='semantickitti', + backend_args=backend_args), + dict(type='PointSegClassMapping', ), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +# 
construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_seg_3d=True, + seg_3d_dtype='np.int32', + seg_offset=2**16, + dataset_type='semantickitti', + backend_args=backend_args), + dict(type='PointSegClassMapping', ), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] + +train_dataloader = dict( + batch_size=2, + num_workers=4, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=1, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='semantickitti_infos_train.pkl', + pipeline=train_pipeline, + metainfo=metainfo, + modality=input_modality, + ignore_index=19, + backend_args=backend_args)), +) + +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='RepeatDataset', + times=1, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='semantickitti_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + modality=input_modality, + ignore_index=19, + test_mode=True, + backend_args=backend_args)), +) + +val_dataloader = test_dataloader + +val_evaluator = dict(type='SegMetric') +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/sunrgbd-3d.py b/configs/_base_/datasets/sunrgbd-3d.py new file mode 100755 index 0000000..2857834 --- /dev/null +++ b/configs/_base_/datasets/sunrgbd-3d.py @@ -0,0 +1,126 @@ +dataset_type = 'SUNRGBDDataset' +data_root = 'data/sunrgbd/' +class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', + 'night_stand', 'bookshelf', 'bathtub') + +metainfo = dict(classes=class_names) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/sunrgbd/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict(type='LoadAnnotations3D'), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + shift_height=True), + dict(type='PointSample', num_points=20000), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 
0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict(type='PointSample', num_points=20000) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=16, + num_workers=4, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='sunrgbd_infos_train.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth', + backend_args=backend_args))) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='sunrgbd_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='sunrgbd_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +val_evaluator = dict(type='IndoorMetric') +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/waymoD5-3d-3class.py b/configs/_base_/datasets/waymoD5-3d-3class.py new file mode 100755 index 0000000..e5240b6 --- /dev/null +++ b/configs/_base_/datasets/waymoD5-3d-3class.py @@ -0,0 +1,177 @@ +# dataset settings +# D5 in the config name means the whole dataset is divided into 5 folds +# We only use one fold for efficient experiments +dataset_type = 'WaymoDataset' +# data_root = 's3://openmmlab/datasets/detection3d/waymo/kitti_format/' +data_root = 'data/waymo/kitti_format/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/waymo/kitti_format/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +class_names = ['Car', 'Pedestrian', 'Cyclist'] +metainfo = dict(classes=class_names) + +point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + # dict(type='ObjectSample', 
db_sampler=db_sampler), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['points']), +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_train.pkl', + data_prefix=dict( + pts='training/velodyne', sweeps='training/velodyne'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR', + # load one frame every five frames + load_interval=5, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne', sweeps='training/velodyne'), + ann_file='waymo_infos_val.pkl', + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) + +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne', sweeps='training/velodyne'), + ann_file='waymo_infos_val.pkl', + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) + +val_evaluator = dict( + type='WaymoMetric', + ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl', + waymo_bin_file='./data/waymo/waymo_format/gt.bin', + data_root='./data/waymo/waymo_format', + backend_args=backend_args, + convert_kitti_format=False) +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/waymoD5-3d-car.py b/configs/_base_/datasets/waymoD5-3d-car.py new file mode 100755 index 0000000..f95ac1d --- /dev/null +++ b/configs/_base_/datasets/waymoD5-3d-car.py @@ -0,0 +1,174 @@ +# dataset settings +# D5 in the config name means the whole dataset is divided into 5 folds +# We only use one fold for efficient experiments +dataset_type = 'WaymoDataset' +data_root = 'data/waymo/kitti_format/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/waymo/kitti_format/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +class_names = ['Car'] +metainfo = dict(classes=class_names) + +point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + classes=class_names, + sample_groups=dict(Car=15), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + 
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['points']), +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_train.pkl', + data_prefix=dict( + pts='training/velodyne', sweeps='training/velodyne'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + # load one frame every five frames + load_interval=5, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne', sweeps='training/velodyne'), + ann_file='waymo_infos_val.pkl', + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) + +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne', sweeps='training/velodyne'), + ann_file='waymo_infos_val.pkl', + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) + +val_evaluator = dict( + type='WaymoMetric', + ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl', + waymo_bin_file='./data/waymo/waymo_format/gt.bin', + data_root='./data/waymo/waymo_format', + convert_kitti_format=False, + backend_args=backend_args) +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/_base_/datasets/waymoD5-fov-mono3d-3class.py b/configs/_base_/datasets/waymoD5-fov-mono3d-3class.py new file mode 100755 index 0000000..614b6a9 --- /dev/null +++ b/configs/_base_/datasets/waymoD5-fov-mono3d-3class.py @@ -0,0 +1,163 @@ +# dataset settings +# D3 in the config name means the whole dataset is divided into 3 folds +# We only use one fold 
for efficient experiments +dataset_type = 'WaymoDataset' +data_root = 'data/waymo/kitti_format/' +class_names = ['Car', 'Pedestrian', 'Cyclist'] +input_modality = dict(use_lidar=False, use_camera=True) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/waymo/kitti_format/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=False, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + # base shape (1248, 832), scale (0.95, 1.05) + dict( + type='RandomResize3D', + scale=(1284, 832), + ratio_range=(0.95, 1.05), + keep_ratio=True, + ), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_bboxes_3d', + 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] + +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='RandomResize3D', + scale=(1248, 832), + ratio_range=(1., 1.), + keep_ratio=True), + dict(type='Pack3DDetInputs', keys=['img']), +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='RandomResize3D', + scale=(1248, 832), + ratio_range=(1., 1.), + keep_ratio=True), + dict(type='Pack3DDetInputs', keys=['img']), +] + +metainfo = dict(CLASSES=class_names) + +train_dataloader = dict( + batch_size=3, + num_workers=3, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_train.pkl', + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Camera', + load_type='fov_image_based', + # load one frame every three frames + load_interval=5, + backend_args=backend_args)) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + ann_file='waymo_infos_val.pkl', + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
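+ # note on the setting below: 'fov_image_based' loads only the instances
+ # visible in the front camera's field of view, so each frame yields a
+ # single monocular sample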
+ box_type_3d='Camera', + load_type='fov_image_based', + backend_args=backend_args)) + +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + ann_file='waymo_infos_val.pkl', + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Camera', + load_type='fov_image_based', + backend_args=backend_args)) + +val_evaluator = dict( + type='WaymoMetric', + ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl', + waymo_bin_file='./data/waymo/waymo_format/fov_gt.bin', + data_root='./data/waymo/waymo_format', + metric='LET_mAP', + load_type='fov_image_based', + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/waymoD5-mv-mono3d-3class.py b/configs/_base_/datasets/waymoD5-mv-mono3d-3class.py new file mode 100755 index 0000000..0840d5e --- /dev/null +++ b/configs/_base_/datasets/waymoD5-mv-mono3d-3class.py @@ -0,0 +1,163 @@ +# dataset settings +# D3 in the config name means the whole dataset is divided into 3 folds +# We only use one fold for efficient experiments +dataset_type = 'WaymoDataset' +data_root = 'data/waymo/kitti_format/' +class_names = ['Car', 'Pedestrian', 'Cyclist'] +input_modality = dict(use_lidar=False, use_camera=True) + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/waymo/kitti_format/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=False, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + # base shape (1248, 832), scale (0.95, 1.05) + dict( + type='RandomResize3D', + scale=(1284, 832), + ratio_range=(0.95, 1.05), + keep_ratio=True, + ), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_bboxes_3d', + 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] + +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='RandomResize3D', + scale=(1248, 832), + ratio_range=(1., 1.), + keep_ratio=True), + dict(type='Pack3DDetInputs', keys=['img']), +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='RandomResize3D', + scale=(1248, 832), + ratio_range=(1., 1.), + keep_ratio=True), + dict(type='Pack3DDetInputs', keys=['img']), +] + +metainfo = dict(classes=class_names) + +train_dataloader = dict( + batch_size=3, + num_workers=3, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_train.pkl', + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Camera', + load_type='mv_image_based', + # load one frame every three frames + load_interval=5, + backend_args=backend_args)) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + ann_file='waymo_infos_val.pkl', + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Camera', + load_type='mv_image_based', + backend_args=backend_args)) + +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + ann_file='waymo_infos_val.pkl', + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
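+ # note on the setting below: 'mv_image_based' treats each of the five
+ # camera images as its own monocular sample (five image samples per frame)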
+ box_type_3d='Camera', + load_type='mv_image_based', + backend_args=backend_args)) + +val_evaluator = dict( + type='WaymoMetric', + ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl', + waymo_bin_file='./data/waymo/waymo_format/cam_gt.bin', + data_root='./data/waymo/waymo_format', + metric='LET_mAP', + load_type='mv_image_based', + backend_args=backend_args) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/waymoD5-mv3d-3class.py b/configs/_base_/datasets/waymoD5-mv3d-3class.py new file mode 100755 index 0000000..a9cd619 --- /dev/null +++ b/configs/_base_/datasets/waymoD5-mv3d-3class.py @@ -0,0 +1,166 @@ +# dataset settings +# D3 in the config name means the whole dataset is divided into 3 folds +# We only use one fold for efficient experiments +dataset_type = 'WaymoDataset' +data_root = 'data/waymo/kitti_format/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection3d/waymo/kitti_format/' + +# Method 2: Use backend_args, file_client_args in versions before 1.1.0 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection3d/', +# 'data/': 's3://openmmlab/datasets/detection3d/' +# })) +backend_args = None + +class_names = ['Car', 'Pedestrian', 'Cyclist'] +input_modality = dict(use_lidar=False, use_camera=True) +point_cloud_range = [-35.0, -75.0, -2, 75.0, 75.0, 4] + +train_transforms = [ + dict(type='PhotoMetricDistortion3D'), + dict( + type='RandomResize3D', + scale=(1248, 832), + ratio_range=(0.95, 1.05), + keep_ratio=True), + dict(type='RandomCrop3D', crop_size=(720, 1080)), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5, flip_box3d=False), +] + +train_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=False, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='MultiViewWrapper', transforms=train_transforms), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict( + type='Pack3DDetInputs', keys=[ + 'img', + 'gt_bboxes_3d', + 'gt_labels_3d', + ]), +] +test_transforms = [ + dict( + type='RandomResize3D', + scale=(1248, 832), + ratio_range=(1., 1.), + keep_ratio=True) +] +test_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + backend_args=backend_args), + dict(type='MultiViewWrapper', transforms=test_transforms), + dict(type='Pack3DDetInputs', keys=['img']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + backend_args=backend_args), + dict(type='MultiViewWrapper', transforms=test_transforms), + dict(type='Pack3DDetInputs', keys=['img']) +] +metainfo = dict(classes=class_names) + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_train.pkl', + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + box_type_3d='Lidar', + load_interval=5, + backend_args=backend_args)) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_val.pkl', + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='Lidar', + backend_args=backend_args)) + +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_val.pkl', + data_prefix=dict( + pts='training/velodyne', + CAM_FRONT='training/image_0', + CAM_FRONT_LEFT='training/image_1', + CAM_FRONT_RIGHT='training/image_2', + CAM_SIDE_LEFT='training/image_3', + CAM_SIDE_RIGHT='training/image_4'), + pipeline=eval_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='Lidar', + backend_args=backend_args)) +val_evaluator = dict( + type='WaymoMetric', + ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl', + waymo_bin_file='./data/waymo/waymo_format/cam_gt.bin', + data_root='./data/waymo/waymo_format', + metric='LET_mAP', + backend_args=backend_args) + +test_evaluator = val_evaluator diff --git a/configs/_base_/default_runtime.py b/configs/_base_/default_runtime.py new file mode 100755 index 0000000..9249ab9 --- /dev/null +++ b/configs/_base_/default_runtime.py @@ -0,0 +1,23 @@ +default_scope = 'mmdet3d' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=-1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='Det3DVisualizationHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False + +# TODO: support auto scaling lr diff --git a/configs/_base_/models/3dssd.py b/configs/_base_/models/3dssd.py new file mode 100755 index 0000000..3232861 --- /dev/null +++ b/configs/_base_/models/3dssd.py @@ -0,0 +1,76 @@ +model = dict( + type='SSD3DNet', + 
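+ # 3DSSD: single-stage, point-based detector; the multi-scale PointNet++
+ # backbone below feeds an SSD3DHead with a vote (candidate generation)
+ # module, and no voxelization is used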
data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='PointNet2SAMSG', + in_channels=4, + num_points=(4096, 512, (256, 256)), + radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)), + num_samples=((32, 32, 64), (32, 32, 64), (32, 32, 32)), + sa_channels=(((16, 16, 32), (16, 16, 32), (32, 32, 64)), + ((64, 64, 128), (64, 64, 128), (64, 96, 128)), + ((128, 128, 256), (128, 192, 256), (128, 256, 256))), + aggregation_channels=(64, 128, 256), + fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')), + fps_sample_range_lists=((-1), (-1), (512, -1)), + norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)), + bbox_head=dict( + type='SSD3DHead', + vote_module_cfg=dict( + in_channels=256, + num_points=256, + gt_per_seed=1, + conv_channels=(128, ), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1), + with_res_feat=False, + vote_xyz_range=(3.0, 3.0, 2.0)), + vote_aggregation_cfg=dict( + type='PointSAModuleMSG', + num_point=256, + radii=(4.8, 6.4), + sample_nums=(16, 32), + mlp_channels=((256, 256, 256, 512), (256, 256, 512, 1024)), + norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1), + use_xyz=True, + normalize_xyz=False, + bias=True), + pred_layer_cfg=dict( + in_channels=1536, + shared_conv_channels=(512, 128), + cls_conv_channels=(128, ), + reg_conv_channels=(128, ), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1), + bias=True), + objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + center_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=1.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=1.0), + corner_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=1.0), + vote_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + sample_mode='spec', pos_distance_thr=10.0, expand_dims_length=0.05), + test_cfg=dict( + nms_cfg=dict(type='nms', iou_thr=0.1), + sample_mode='spec', + score_thr=0.0, + per_class_proposal=True, + max_output_num=100)) diff --git a/configs/_base_/models/cascade-mask-rcnn_r50_fpn.py b/configs/_base_/models/cascade-mask-rcnn_r50_fpn.py new file mode 100755 index 0000000..147b5f0 --- /dev/null +++ b/configs/_base_/models/cascade-mask-rcnn_r50_fpn.py @@ -0,0 +1,199 @@ +# model settings +model = dict( + type='CascadeRCNN', + pretrained='torchvision://resnet50', + _scope_='mmdet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, 
loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + nms_post=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', 
iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/configs/_base_/models/centerpoint_pillar02_second_secfpn_nus.py b/configs/_base_/models/centerpoint_pillar02_second_secfpn_nus.py new file mode 100755 index 0000000..233b912 --- /dev/null +++ b/configs/_base_/models/centerpoint_pillar02_second_secfpn_nus.py @@ -0,0 +1,89 @@ +voxel_size = [0.2, 0.2, 8] +model = dict( + type='CenterPoint', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=20, + voxel_size=voxel_size, + max_voxels=(30000, 40000))), + pts_voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=5, + feat_channels=[64], + with_distance=False, + voxel_size=(0.2, 0.2, 8), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + legacy=False), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=(512, 512)), + pts_backbone=dict( + type='SECOND', + in_channels=64, + out_channels=[64, 128, 256], + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + out_channels=[128, 128, 128], + upsample_strides=[0.5, 1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + pts_bbox_head=dict( + type='CenterHead', + in_channels=sum([128, 128, 128]), + tasks=[ + dict(num_class=1, class_names=['car']), + dict(num_class=2, class_names=['truck', 'construction_vehicle']), + dict(num_class=2, class_names=['bus', 'trailer']), + dict(num_class=1, class_names=['barrier']), + dict(num_class=2, class_names=['motorcycle', 'bicycle']), + dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), + ], + common_heads=dict( + reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), + share_conv_channel=64, + bbox_coder=dict( + type='CenterPointBBoxCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_num=500, + score_threshold=0.1, + out_size_factor=4, + voxel_size=voxel_size[:2], + code_size=9), + separate_head=dict( + type='SeparateHead', init_bias=-2.19, final_kernel=3), + loss_cls=dict(type='mmdet.GaussianFocalLoss', reduction='mean'), + loss_bbox=dict( + type='mmdet.L1Loss', reduction='mean', loss_weight=0.25), + norm_bbox=True), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + out_size_factor=4, + dense_reg=1, + gaussian_overlap=0.1, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])), + test_cfg=dict( + pts=dict( + post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_per_img=500, + max_pool_nms=False, + min_radius=[4, 12, 10, 1, 0.85, 0.175], + score_threshold=0.1, + pc_range=[-51.2, -51.2], + out_size_factor=4, + voxel_size=voxel_size[:2], + nms_type='rotate', + pre_max_size=1000, + post_max_size=83, + nms_thr=0.2))) diff --git a/configs/_base_/models/centerpoint_voxel01_second_secfpn_nus.py b/configs/_base_/models/centerpoint_voxel01_second_secfpn_nus.py new file mode 100755 index 0000000..91dcd17 --- /dev/null +++ b/configs/_base_/models/centerpoint_voxel01_second_secfpn_nus.py @@ -0,0 +1,89 @@ +voxel_size = [0.1, 0.1, 0.2] +model = dict( + type='CenterPoint', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + 
voxel=True, + voxel_layer=dict( + max_num_points=10, + voxel_size=voxel_size, + max_voxels=(90000, 120000))), + pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + pts_middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[41, 1024, 1024], + output_channels=128, + order=('conv', 'norm', 'act'), + encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128, + 128)), + encoder_paddings=((0, 0, 1), (0, 0, 1), (0, 0, [0, 1, 1]), (0, 0)), + block_type='basicblock'), + pts_backbone=dict( + type='SECOND', + in_channels=256, + out_channels=[128, 256], + layer_nums=[5, 5], + layer_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + out_channels=[256, 256], + upsample_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + pts_bbox_head=dict( + type='CenterHead', + in_channels=sum([256, 256]), + tasks=[ + dict(num_class=1, class_names=['car']), + dict(num_class=2, class_names=['truck', 'construction_vehicle']), + dict(num_class=2, class_names=['bus', 'trailer']), + dict(num_class=1, class_names=['barrier']), + dict(num_class=2, class_names=['motorcycle', 'bicycle']), + dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), + ], + common_heads=dict( + reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), + share_conv_channel=64, + bbox_coder=dict( + type='CenterPointBBoxCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_num=500, + score_threshold=0.1, + out_size_factor=8, + voxel_size=voxel_size[:2], + code_size=9), + separate_head=dict( + type='SeparateHead', init_bias=-2.19, final_kernel=3), + loss_cls=dict(type='mmdet.GaussianFocalLoss', reduction='mean'), + loss_bbox=dict( + type='mmdet.L1Loss', reduction='mean', loss_weight=0.25), + norm_bbox=True), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[1024, 1024, 40], + voxel_size=voxel_size, + out_size_factor=8, + dense_reg=1, + gaussian_overlap=0.1, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])), + test_cfg=dict( + pts=dict( + post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_per_img=500, + max_pool_nms=False, + min_radius=[4, 12, 10, 1, 0.85, 0.175], + score_threshold=0.1, + out_size_factor=8, + voxel_size=voxel_size[:2], + nms_type='rotate', + pre_max_size=1000, + post_max_size=83, + nms_thr=0.2))) diff --git a/configs/_base_/models/cylinder3d.py b/configs/_base_/models/cylinder3d.py new file mode 100755 index 0000000..02e8323 --- /dev/null +++ b/configs/_base_/models/cylinder3d.py @@ -0,0 +1,41 @@ +grid_shape = [480, 360, 32] +model = dict( + type='Cylinder3D', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_type='cylindrical', + voxel_layer=dict( + grid_shape=grid_shape, + point_cloud_range=[0, -3.14159265359, -4, 50, 3.14159265359, 2], + max_num_points=-1, + max_voxels=-1, + ), + ), + voxel_encoder=dict( + type='SegVFE', + feat_channels=[64, 128, 256, 256], + in_channels=6, + with_voxel_center=True, + feat_compression=16, + return_point_feats=False), + backbone=dict( + type='Asymm3DSpconv', + grid_size=grid_shape, + input_channels=16, + base_channels=32, + norm_cfg=dict(type='BN1d', eps=1e-5, momentum=0.1)), + decode_head=dict( + type='Cylinder3DHead', + channels=128, + num_classes=20, + loss_ce=dict( + 
type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, + loss_weight=1.0), + loss_lovasz=dict(type='LovaszLoss', loss_weight=1.0, reduction='none'), + ), + train_cfg=None, + test_cfg=dict(mode='whole'), +) diff --git a/configs/_base_/models/dgcnn.py b/configs/_base_/models/dgcnn.py new file mode 100755 index 0000000..cdafa50 --- /dev/null +++ b/configs/_base_/models/dgcnn.py @@ -0,0 +1,29 @@ +# model settings +model = dict( + type='EncoderDecoder3D', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='DGCNNBackbone', + in_channels=9, # [xyz, rgb, normal_xyz], modified with dataset + num_samples=(20, 20, 20), + knn_modes=('D-KNN', 'F-KNN', 'F-KNN'), + radius=(None, None, None), + gf_channels=((64, 64), (64, 64), (64, )), + fa_channels=(1024, ), + act_cfg=dict(type='LeakyReLU', negative_slope=0.2)), + decode_head=dict( + type='DGCNNHead', + fp_channels=(1216, 512), + channels=256, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='LeakyReLU', negative_slope=0.2), + loss_decode=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, # modified with dataset + loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide')) diff --git a/configs/_base_/models/fcaf3d.py b/configs/_base_/models/fcaf3d.py new file mode 100755 index 0000000..ae47827 --- /dev/null +++ b/configs/_base_/models/fcaf3d.py @@ -0,0 +1,20 @@ +model = dict( + type='MinkSingleStage3DDetector', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict(type='MinkResNet', in_channels=3, depth=34), + bbox_head=dict( + type='FCAF3DHead', + in_channels=(64, 128, 256, 512), + out_channels=128, + voxel_size=.01, + pts_prune_threshold=100000, + pts_assign_threshold=27, + pts_center_threshold=18, + num_classes=18, + num_reg_outs=6, + center_loss=dict(type='mmdet.CrossEntropyLoss', use_sigmoid=True), + bbox_loss=dict(type='AxisAlignedIoULoss'), + cls_loss=dict(type='mmdet.FocalLoss'), + ), + train_cfg=dict(), + test_cfg=dict(nms_pre=1000, iou_thr=.5, score_thr=.01)) diff --git a/configs/_base_/models/fcos3d.py b/configs/_base_/models/fcos3d.py new file mode 100755 index 0000000..fbb20ef --- /dev/null +++ b/configs/_base_/models/fcos3d.py @@ -0,0 +1,86 @@ +# model settings +model = dict( + type='FCOSMono3D', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='mmdet.ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe')), + neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSMono3DHead', + num_classes=10, + in_channels=256, + stacked_convs=2, + feat_channels=256, + use_direction_classifier=True, + diff_rad_by_sin=True, + pred_attrs=True, + pred_velo=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + strides=[8, 16, 32, 64, 128], + group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo + cls_branch=(256, ), + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + () # velo + ), + dir_branch=(256, ), + 
attr_branch=(256, ), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_attr=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_centerness=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + bbox_coder=dict(type='FCOS3DBBoxCoder', code_size=9), + norm_on_bbox=True, + centerness_on_reg=True, + center_sampling=True, + conv_bias=True, + dcn_on_last_conv=True), + train_cfg=dict( + allowed_border=0, + code_weight=[1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05], + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=1000, + nms_thr=0.8, + score_thr=0.05, + min_bbox_size=0, + max_per_img=200)) diff --git a/configs/_base_/models/groupfree3d.py b/configs/_base_/models/groupfree3d.py new file mode 100755 index 0000000..9627575 --- /dev/null +++ b/configs/_base_/models/groupfree3d.py @@ -0,0 +1,75 @@ +model = dict( + type='GroupFree3DNet', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='PointNet2SASSG', + in_channels=3, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 288)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True)), + bbox_head=dict( + type='GroupFree3DHead', + in_channels=288, + num_decoder_layers=6, + num_proposal=256, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='GroupFree3DMHA', + embed_dims=288, + num_heads=8, + attn_drop=0.1, + dropout_layer=dict(type='Dropout', drop_prob=0.1)), + ffn_cfgs=dict( + embed_dims=288, + feedforward_channels=2048, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', + 'norm')), + pred_layer_cfg=dict( + in_channels=288, shared_conv_channels=(288, 288), bias=True), + sampling_objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=8.0), + objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + center_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', + beta=1.0, + reduction='sum', + loss_weight=10.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(sample_mode='kps'), + test_cfg=dict( + sample_mode='kps', + nms_thr=0.25, + score_thr=0.0, + per_class_proposal=True, + prediction_stages='last')) diff --git a/configs/_base_/models/h3dnet.py b/configs/_base_/models/h3dnet.py new file mode 100755 index 0000000..559b06c --- /dev/null +++ b/configs/_base_/models/h3dnet.py @@ -0,0 +1,351 @@ +primitive_z_cfg = dict( + type='PrimitiveHead', + num_dims=2, + num_classes=18, + primitive_mode='z', + 
upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_cls_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + train_cfg=dict( + sample_mode='vote', + dist_thresh=0.2, + var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2), + test_cfg=dict(sample_mode='seed')) + +primitive_xy_cfg = dict( + type='PrimitiveHead', + num_dims=1, + num_classes=18, + primitive_mode='xy', + upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_cls_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + train_cfg=dict( + sample_mode='vote', + dist_thresh=0.2, + var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2), + test_cfg=dict(sample_mode='seed')) + +primitive_line_cfg = dict( + type='PrimitiveHead', + num_dims=0, + num_classes=18, + primitive_mode='line', + upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + 
reduction='sum', + loss_src_weight=1.0, + loss_dst_weight=1.0), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=1.0, + loss_dst_weight=1.0), + semantic_cls_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=2.0), + train_cfg=dict( + sample_mode='vote', + dist_thresh=0.2, + var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2), + test_cfg=dict(sample_mode='seed')) + +model = dict( + type='H3DNet', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='MultiBackbone', + num_streams=4, + suffixes=['net0', 'net1', 'net2', 'net3'], + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-5, momentum=0.01), + act_cfg=dict(type='ReLU'), + backbones=dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True))), + rpn_head=dict( + type='VoteHead', + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=3, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=256, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + pred_layer_cfg=dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True), + objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + roi_head=dict( + type='H3DRoIHead', + primitive_list=[primitive_z_cfg, primitive_xy_cfg, primitive_line_cfg], + bbox_head=dict( + type='H3DBboxHead', + gt_per_seed=3, + num_proposal=256, + suface_matching_cfg=dict( + type='PointSAModule', + num_point=256 * 6, + radius=0.5, + num_sample=32, + mlp_channels=[128 + 6, 128, 64, 32], + use_xyz=True, + normalize_xyz=True), + line_matching_cfg=dict( + type='PointSAModule', + num_point=256 * 12, + radius=0.5, + num_sample=32, + mlp_channels=[128 + 12, 128, 64, 32], + use_xyz=True, + normalize_xyz=True), + primitive_refine_channels=[128, 128, 128], + upper_thresh=100.0, + surface_thresh=0.5, + line_thresh=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', + reduction='sum', + 
loss_weight=0.1), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', + reduction='sum', + loss_weight=0.1), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', + reduction='sum', + loss_weight=0.1), + cues_objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.3, 0.7], + reduction='mean', + loss_weight=5.0), + cues_semantic_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.3, 0.7], + reduction='mean', + loss_weight=5.0), + proposal_objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='none', + loss_weight=5.0), + primitive_center_loss=dict( + type='mmdet.MSELoss', reduction='none', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mode='vote'), + rpn_proposal=dict(use_nms=False), + rcnn=dict( + pos_distance_thr=0.3, + neg_distance_thr=0.6, + sample_mode='vote', + far_threshold=0.6, + near_threshold=0.3, + mask_surface_threshold=0.3, + label_surface_threshold=0.3, + mask_line_threshold=0.3, + label_line_threshold=0.3)), + test_cfg=dict( + rpn=dict( + sample_mode='seed', + nms_thr=0.25, + score_thr=0.05, + per_class_proposal=True, + use_nms=False), + rcnn=dict( + sample_mode='seed', + nms_thr=0.25, + score_thr=0.05, + per_class_proposal=True))) diff --git a/configs/_base_/models/imvotenet.py b/configs/_base_/models/imvotenet.py new file mode 100755 index 0000000..2946300 --- /dev/null +++ b/configs/_base_/models/imvotenet.py @@ -0,0 +1,118 @@ +model = dict( + type='ImVoteNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + # use caffe img_norm + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + img_backbone=dict( + type='mmdet.ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe'), + img_neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + img_rpn_head=dict( + _scope_='mmdet', + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + img_roi_head=dict( + _scope_='mmdet', + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + + # model training and testing settings + train_cfg=dict( + _scope_='mmdet', + img_rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, 
+ match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + img_rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + img_rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + img_rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + img_rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/configs/_base_/models/mask-rcnn_r50_fpn.py b/configs/_base_/models/mask-rcnn_r50_fpn.py new file mode 100755 index 0000000..881d4df --- /dev/null +++ b/configs/_base_/models/mask-rcnn_r50_fpn.py @@ -0,0 +1,125 @@ +# model settings +model = dict( + type='MaskRCNN', + pretrained='torchvision://resnet50', + _scope_='mmdet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=1000, + max_per_img=1000, + 
nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/configs/_base_/models/minkunet.py b/configs/_base_/models/minkunet.py new file mode 100755 index 0000000..0a691d8 --- /dev/null +++ b/configs/_base_/models/minkunet.py @@ -0,0 +1,29 @@ +model = dict( + type='MinkUNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_type='minkunet', + voxel_layer=dict( + max_num_points=-1, + point_cloud_range=[-100, -100, -20, 100, 100, 20], + voxel_size=[0.05, 0.05, 0.05], + max_voxels=(-1, -1)), + ), + backbone=dict( + type='MinkUNetBackbone', + in_channels=4, + base_channels=32, + encoder_channels=[32, 64, 128, 256], + decoder_channels=[256, 128, 96, 96], + num_stages=4, + init_cfg=None), + decode_head=dict( + type='MinkUNetHead', + channels=96, + num_classes=19, + dropout_ratio=0, + loss_decode=dict(type='mmdet.CrossEntropyLoss', avg_non_ignore=True), + ignore_index=19), + train_cfg=dict(), + test_cfg=dict()) diff --git a/configs/_base_/models/multiview_dfm.py b/configs/_base_/models/multiview_dfm.py new file mode 100755 index 0000000..e6f4d27 --- /dev/null +++ b/configs/_base_/models/multiview_dfm.py @@ -0,0 +1,104 @@ +model = dict( + type='MultiViewDfM', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='mmdet.ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=64, + num_outs=4), + neck_2d=None, + bbox_head_2d=None, + backbone_stereo=None, + depth_head=None, + backbone_3d=None, + neck_3d=dict(type='OutdoorImVoxelNeck', in_channels=64, out_channels=256), + valid_sample=True, + voxel_size=(0.5, 0.5, 0.5), # n_voxels=[240, 300, 12] + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-35.0, -75.0, -2, 75.0, 75.0, 4]], + rotations=[.0]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=256, + feat_channels=256, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-35.0, -75.0, -0.0345, 75.0, 75.0, -0.0345], + [-35.0, -75.0, 0, 75.0, 75.0, 0], + [-35.0, -75.0, -0.1188, 75.0, 75.0, -0.1188]], + sizes=[ + [4.73, 2.08, 1.77], # car + [0.91, 0.84, 1.74], # pedestrian + [1.81, 0.84, 1.77], # cyclist + ], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + dir_offset=-0.7854, # -pi / 4 + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, 
+ alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + train_cfg=dict( + assigner=[ + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.05, + score_thr=0.001, + min_bbox_size=0, + nms_pre=500, + max_num=100)) diff --git a/configs/_base_/models/paconv_ssg-cuda.py b/configs/_base_/models/paconv_ssg-cuda.py new file mode 100755 index 0000000..f513bd4 --- /dev/null +++ b/configs/_base_/models/paconv_ssg-cuda.py @@ -0,0 +1,7 @@ +_base_ = './paconv_ssg.py' + +model = dict( + backbone=dict( + sa_cfg=dict( + type='PAConvCUDASAModule', + scorenet_cfg=dict(mlp_channels=[8, 16, 16])))) diff --git a/configs/_base_/models/paconv_ssg.py b/configs/_base_/models/paconv_ssg.py new file mode 100755 index 0000000..4f6991f --- /dev/null +++ b/configs/_base_/models/paconv_ssg.py @@ -0,0 +1,50 @@ +# model settings +model = dict( + type='EncoderDecoder3D', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='PointNet2SASSG', + in_channels=9, # [xyz, rgb, normalized_xyz] + num_points=(1024, 256, 64, 16), + radius=(None, None, None, None), # use kNN instead of ball query + num_samples=(32, 32, 32, 32), + sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, + 512)), + fp_channels=(), + norm_cfg=dict(type='BN2d', momentum=0.1), + sa_cfg=dict( + type='PAConvSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=False, + paconv_num_kernels=[16, 16, 16], + paconv_kernel_input='w_neighbor', + scorenet_input='w_neighbor_dist', + scorenet_cfg=dict( + mlp_channels=[16, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False))), + decode_head=dict( + type='PAConvHead', + # PAConv model's decoder takes skip connections from beckbone + # different from PointNet++, it also concats input features in the last + # level of decoder, leading to `128 + 6` as the channel number + fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), + (128 + 6, 128, 128, 128)), + channels=128, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + loss_decode=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, # should be modified with dataset + loss_weight=1.0)), + # correlation loss to regularize PAConv's kernel weights + loss_regularization=dict( + type='PAConvRegularizationLoss', reduction='sum', loss_weight=10.0), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide')) diff --git a/configs/_base_/models/parta2.py b/configs/_base_/models/parta2.py new file mode 100755 index 0000000..7db477a --- /dev/null +++ b/configs/_base_/models/parta2.py @@ -0,0 +1,207 @@ +# model settings +voxel_size = [0.05, 0.05, 0.1] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] + +model = dict( + 
type='PartA2', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=5, # max_points_per_voxel + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseUNet', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + rpn_head=dict( + type='PartA2RPNHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[[0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78]], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + assigner_per_size=True, + assign_per_class=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + roi_head=dict( + type='PartAggregationROIHead', + num_classes=3, + semantic_head=dict( + type='PointwiseSemanticHead', + in_channels=16, + extra_width=0.2, + seg_score_thr=0.3, + num_classes=3, + loss_seg=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_part=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0)), + seg_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='max')), + bbox_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='avg')), + bbox_head=dict( + type='PartA2BboxHead', + num_classes=3, + seg_in_channels=16, + part_in_channels=4, + seg_conv_channels=[64, 64], + part_conv_channels=[64, 64], + merge_conv_channels=[128, 128], + down_conv_channels=[128, 256], + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + shared_fc_channels=[256, 512, 512, 512], + cls_channels=[256, 256], + reg_channels=[256, 256], + dropout_ratio=0.1, + roi_feat_size=14, + with_corner_loss=True, + loss_bbox=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1) + ], + allowed_border=0, + 
pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=9000, + nms_post=512, + max_num=512, + nms_thr=0.8, + score_thr=0, + use_rotate_nms=False), + rcnn=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1) + ], + sampler=dict( + type='IoUNegPiecewiseSampler', + num=128, + pos_fraction=0.55, + neg_piece_fractions=[0.8, 0.2], + neg_iou_piece_thrs=[0.55, 0.1], + neg_pos_ub=-1, + add_gt_as_proposals=False, + return_iou=True), + cls_pos_thr=0.75, + cls_neg_thr=0.25)), + test_cfg=dict( + rpn=dict( + nms_pre=1024, + nms_post=100, + max_num=100, + nms_thr=0.7, + score_thr=0, + use_rotate_nms=True), + rcnn=dict( + use_rotate_nms=True, + use_raw_score=True, + nms_thr=0.01, + score_thr=0.1))) diff --git a/configs/_base_/models/pgd.py b/configs/_base_/models/pgd.py new file mode 100755 index 0000000..f7c098d --- /dev/null +++ b/configs/_base_/models/pgd.py @@ -0,0 +1,56 @@ +_base_ = './fcos3d.py' +# model settings +model = dict( + bbox_head=dict( + _delete_=True, + type='PGDHead', + num_classes=10, + in_channels=256, + stacked_convs=2, + feat_channels=256, + use_direction_classifier=True, + diff_rad_by_sin=True, + pred_attrs=True, + pred_velo=True, + pred_bbox2d=True, + pred_keypoints=False, + dir_offset=0.7854, # pi/4 + strides=[8, 16, 32, 64, 128], + group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo + cls_branch=(256, ), + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + () # velo + ), + dir_branch=(256, ), + attr_branch=(256, ), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_attr=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_centerness=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + norm_on_bbox=True, + centerness_on_reg=True, + center_sampling=True, + conv_bias=True, + dcn_on_last_conv=True, + use_depth_classifier=True, + depth_branch=(256, ), + depth_range=(0, 50), + depth_unit=10, + division='uniform', + depth_bins=6, + bbox_coder=dict(type='PGDBBoxCoder', code_size=9)), + test_cfg=dict(nms_pre=1000, nms_thr=0.8, score_thr=0.01, max_per_img=200)) diff --git a/configs/_base_/models/point_rcnn.py b/configs/_base_/models/point_rcnn.py new file mode 100755 index 0000000..c23a78b --- /dev/null +++ b/configs/_base_/models/point_rcnn.py @@ -0,0 +1,148 @@ +model = dict( + type='PointRCNN', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='PointNet2SAMSG', + in_channels=4, + num_points=(4096, 1024, 256, 64), + radii=((0.1, 0.5), (0.5, 1.0), (1.0, 2.0), (2.0, 4.0)), + num_samples=((16, 32), (16, 32), (16, 32), (16, 32)), + sa_channels=(((16, 16, 32), (32, 32, 64)), ((64, 64, 128), (64, 96, + 128)), + ((128, 196, 256), (128, 196, 256)), ((256, 256, 512), + (256, 
384, 512))), + fps_mods=(('D-FPS'), ('D-FPS'), ('D-FPS'), ('D-FPS')), + fps_sample_range_lists=((-1), (-1), (-1), (-1)), + aggregation_channels=(None, None, None, None), + dilated_group=(False, False, False, False), + out_indices=(0, 1, 2, 3), + norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)), + neck=dict( + type='PointNetFPNeck', + fp_channels=((1536, 512, 512), (768, 512, 512), (608, 256, 256), + (257, 128, 128))), + rpn_head=dict( + type='PointRPNHead', + num_classes=3, + enlarge_width=0.1, + pred_layer_cfg=dict( + in_channels=128, + cls_linear_channels=(256, 256), + reg_linear_channels=(256, 256)), + cls_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + bbox_loss=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + bbox_coder=dict( + type='PointXYZWHLRBBoxCoder', + code_size=8, + # code_size: (center residual (3), size regression (3), + # torch.cos(yaw) (1), torch.sin(yaw) (1) + use_mean_size=True, + mean_size=[[3.9, 1.6, 1.56], [0.8, 0.6, 1.73], [1.76, 0.6, + 1.73]])), + roi_head=dict( + type='PointRCNNRoIHead', + bbox_roi_extractor=dict( + type='Single3DRoIPointExtractor', + roi_layer=dict(type='RoIPointPool3d', num_sampled_points=512)), + bbox_head=dict( + type='PointRCNNBboxHead', + num_classes=1, + loss_bbox=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + pred_layer_cfg=dict( + in_channels=512, + cls_conv_channels=(256, 256), + reg_conv_channels=(256, 256), + bias=True), + in_channels=5, + # 5 = 3 (xyz) + scores + depth + mlp_channels=[128, 128], + num_points=(128, 32, -1), + radius=(0.2, 0.4, 100), + num_samples=(16, 16, 16), + sa_channels=((128, 128, 128), (128, 128, 256), (256, 256, 512)), + with_corner_loss=True), + depth_normalizer=70.0), + # model training and testing settings + train_cfg=dict( + pos_distance_thr=10.0, + rpn=dict( + rpn_proposal=dict( + use_rotate_nms=True, + score_thr=None, + iou_thr=0.8, + nms_pre=9000, + nms_post=512)), + rcnn=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1, + match_low_quality=False), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1, + match_low_quality=False), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1, + match_low_quality=False) + ], + sampler=dict( + type='IoUNegPiecewiseSampler', + num=128, + pos_fraction=0.5, + neg_piece_fractions=[0.8, 0.2], + neg_iou_piece_thrs=[0.55, 0.1], + neg_pos_ub=-1, + add_gt_as_proposals=False, + return_iou=True), + cls_pos_thr=0.7, + cls_neg_thr=0.25)), + test_cfg=dict( + rpn=dict( + nms_cfg=dict( + use_rotate_nms=True, + iou_thr=0.85, + nms_pre=9000, + nms_post=512, + score_thr=None)), + rcnn=dict(use_rotate_nms=True, nms_thr=0.1, score_thr=0.1))) diff --git a/configs/_base_/models/pointnet2_msg.py b/configs/_base_/models/pointnet2_msg.py new file mode 100755 index 
0000000..222ab88 --- /dev/null +++ b/configs/_base_/models/pointnet2_msg.py @@ -0,0 +1,28 @@ +_base_ = './pointnet2_ssg.py' + +# model settings +model = dict( + backbone=dict( + _delete_=True, + type='PointNet2SAMSG', + in_channels=6, # [xyz, rgb], should be modified with dataset + num_points=(1024, 256, 64, 16), + radii=((0.05, 0.1), (0.1, 0.2), (0.2, 0.4), (0.4, 0.8)), + num_samples=((16, 32), (16, 32), (16, 32), (16, 32)), + sa_channels=(((16, 16, 32), (32, 32, 64)), ((64, 64, 128), (64, 96, + 128)), + ((128, 196, 256), (128, 196, 256)), ((256, 256, 512), + (256, 384, 512))), + aggregation_channels=(None, None, None, None), + fps_mods=(('D-FPS'), ('D-FPS'), ('D-FPS'), ('D-FPS')), + fps_sample_range_lists=((-1), (-1), (-1), (-1)), + dilated_group=(False, False, False, False), + out_indices=(0, 1, 2, 3), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)), + decode_head=dict( + fp_channels=((1536, 256, 256), (512, 256, 256), (352, 256, 128), + (128, 128, 128, 128)))) diff --git a/configs/_base_/models/pointnet2_ssg.py b/configs/_base_/models/pointnet2_ssg.py new file mode 100755 index 0000000..386fe82 --- /dev/null +++ b/configs/_base_/models/pointnet2_ssg.py @@ -0,0 +1,36 @@ +# model settings +model = dict( + type='EncoderDecoder3D', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='PointNet2SASSG', + in_channels=6, # [xyz, rgb], should be modified with dataset + num_points=(1024, 256, 64, 16), + radius=(0.1, 0.2, 0.4, 0.8), + num_samples=(32, 32, 32, 32), + sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, + 512)), + fp_channels=(), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)), + decode_head=dict( + type='PointNet2Head', + fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), + (128, 128, 128, 128)), + channels=128, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + loss_decode=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, # should be modified with dataset + loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide')) diff --git a/configs/_base_/models/pointpillars_hv_fpn_lyft.py b/configs/_base_/models/pointpillars_hv_fpn_lyft.py new file mode 100755 index 0000000..6a00b76 --- /dev/null +++ b/configs/_base_/models/pointpillars_hv_fpn_lyft.py @@ -0,0 +1,23 @@ +_base_ = './pointpillars_hv_fpn_nus.py' + +# model settings (based on nuScenes model settings) +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. 
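+# As a quick sanity check on that coupling (values taken from this file and its
+# nuScenes base config): with the Lyft range [-80, -80, -5, 80, 80, 3] used below
+# and the inherited 0.25 m pillar size, the BEV grid is (80 - (-80)) / 0.25 = 640
+# cells per axis, which is why the middle encoder's `output_shape` is [640, 640].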
+model = dict( + data_preprocessor=dict( + voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-80, -80, -5, 80, 80, 3], + max_voxels=(60000, 60000))), + pts_voxel_encoder=dict( + feat_channels=[64], point_cloud_range=[-80, -80, -5, 80, 80, 3]), + pts_middle_encoder=dict(output_shape=[640, 640]), + pts_bbox_head=dict( + num_classes=9, + anchor_generator=dict( + ranges=[[-80, -80, -1.8, 80, 80, -1.8]], custom_values=[]), + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), + # model training settings (based on nuScenes model settings) + train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) diff --git a/configs/_base_/models/pointpillars_hv_fpn_nus.py b/configs/_base_/models/pointpillars_hv_fpn_nus.py new file mode 100755 index 0000000..694e69e --- /dev/null +++ b/configs/_base_/models/pointpillars_hv_fpn_nus.py @@ -0,0 +1,100 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +voxel_size = [0.25, 0.25, 8] +model = dict( + type='MVXFasterRCNN', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=64, + point_cloud_range=[-50, -50, -5, 50, 50, 3], + voxel_size=voxel_size, + max_voxels=(30000, 40000))), + pts_voxel_encoder=dict( + type='HardVFE', + in_channels=4, + feat_channels=[64, 64], + with_distance=False, + voxel_size=voxel_size, + with_cluster_center=True, + with_voxel_center=True, + point_cloud_range=[-50, -50, -5, 50, 50, 3], + norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[400, 400]), + pts_backbone=dict( + type='SECOND', + in_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + pts_neck=dict( + type='mmdet.FPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + act_cfg=dict(type='ReLU'), + in_channels=[64, 128, 256], + out_channels=256, + start_level=0, + num_outs=3), + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=10, + in_channels=256, + feat_channels=256, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-50, -50, -1.8, 50, 50, -1.8]], + scales=[1, 2, 4], + sizes=[ + [2.5981, 0.8660, 1.], # 1.5 / sqrt(3) + [1.7321, 0.5774, 1.], # 1 / sqrt(3) + [1., 1., 1.], + [0.4, 0.4, 1], + ], + custom_values=[0, 0], + rotations=[0, 1.57], + reshape_out=True), + assigner_per_size=False, + diff_rad_by_sin=True, + dir_offset=-0.7854, # -pi / 4 + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=9), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict( + assigner=dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + pos_weight=-1, + debug=False)), + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=1000, + nms_thr=0.2, + 
score_thr=0.05, + min_bbox_size=0, + max_num=500))) diff --git a/configs/_base_/models/pointpillars_hv_fpn_range100_lyft.py b/configs/_base_/models/pointpillars_hv_fpn_range100_lyft.py new file mode 100755 index 0000000..2e56144 --- /dev/null +++ b/configs/_base_/models/pointpillars_hv_fpn_range100_lyft.py @@ -0,0 +1,23 @@ +_base_ = './pointpillars_hv_fpn_nus.py' + +# model settings (based on nuScenes model settings) +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +model = dict( + data_preprocessor=dict( + voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-100, -100, -5, 100, 100, 3], + max_voxels=(60000, 60000))), + pts_voxel_encoder=dict( + feat_channels=[64], point_cloud_range=[-100, -100, -5, 100, 100, 3]), + pts_middle_encoder=dict(output_shape=[800, 800]), + pts_bbox_head=dict( + num_classes=9, + anchor_generator=dict( + ranges=[[-100, -100, -1.8, 100, 100, -1.8]], custom_values=[]), + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), + # model training settings (based on nuScenes model settings) + train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) diff --git a/configs/_base_/models/pointpillars_hv_secfpn_kitti.py b/configs/_base_/models/pointpillars_hv_secfpn_kitti.py new file mode 100755 index 0000000..09933c3 --- /dev/null +++ b/configs/_base_/models/pointpillars_hv_secfpn_kitti.py @@ -0,0 +1,98 @@ +voxel_size = [0.16, 0.16, 4] + +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=32, # max_points_per_voxel + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1], + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1]), + middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + assign_per_class=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[ + [0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -1.78, 69.12, 39.68, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='mmdet3d.BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + 
iou_calculator=dict(type='mmdet3d.BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='mmdet3d.BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) diff --git a/configs/_base_/models/pointpillars_hv_secfpn_waymo.py b/configs/_base_/models/pointpillars_hv_secfpn_waymo.py new file mode 100755 index 0000000..5e7fd55 --- /dev/null +++ b/configs/_base_/models/pointpillars_hv_secfpn_waymo.py @@ -0,0 +1,112 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +voxel_size = [0.32, 0.32, 6] +model = dict( + type='MVXFasterRCNN', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-74.88, -74.88, -2, 74.88, 74.88, 4], + voxel_size=voxel_size, + max_voxels=(32000, 32000))), + pts_voxel_encoder=dict( + type='HardVFE', + in_channels=5, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + with_cluster_center=True, + with_voxel_center=True, + point_cloud_range=[-74.88, -74.88, -2, 74.88, 74.88, 4], + norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[468, 468]), + pts_backbone=dict( + type='SECOND', + in_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[3, 5, 5], + layer_strides=[1, 2, 2], + out_channels=[64, 128, 256]), + pts_neck=dict( + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345], + [-74.88, -74.88, 0, 74.88, 74.88, 0], + [-74.88, -74.88, -0.1188, 74.88, 74.88, -0.1188]], + sizes=[ + [4.73, 2.08, 1.77], # car + [0.91, 0.84, 1.74], # pedestrian + [1.81, 0.84, 1.77] # cyclist + ], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + dir_offset=-0.7854, # -pi / 4 + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict( + assigner=[ + dict( # car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + dict( # cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + 
pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ], + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False)), + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=4096, + nms_thr=0.25, + score_thr=0.1, + min_bbox_size=0, + max_num=500))) diff --git a/configs/_base_/models/second_hv_secfpn_kitti.py b/configs/_base_/models/second_hv_secfpn_kitti.py new file mode 100755 index 0000000..8e1a6b0 --- /dev/null +++ b/configs/_base_/models/second_hv_secfpn_kitti.py @@ -0,0 +1,94 @@ +voxel_size = [0.05, 0.05, 0.1] + +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=5, + point_cloud_range=[0, -40, -3, 70.4, 40, 1], + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseEncoder', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) diff --git a/configs/_base_/models/second_hv_secfpn_waymo.py b/configs/_base_/models/second_hv_secfpn_waymo.py new file mode 100755 index 0000000..f641125 --- /dev/null +++ b/configs/_base_/models/second_hv_secfpn_waymo.py @@ -0,0 +1,108 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. 
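+# As a rough cross-check using the values below: the x extent (-76.8, 76.8) at a
+# 0.08 m voxel size gives 1920 columns, the y extent (-51.2, 51.2) gives 1280
+# rows, and the z extent (-2, 4) at 0.1 m gives 60 slices, one less than the 61
+# used in the middle encoder's `sparse_shape=[61, 1280, 1920]`.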
+voxel_size = [0.08, 0.08, 0.1] +model = dict( + type='MVXFasterRCNN', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-76.8, -51.2, -2, 76.8, 51.2, 4], + voxel_size=voxel_size, + max_voxels=(80000, 90000))), + pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + pts_middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[61, 1280, 1920], + order=('conv', 'norm', 'act')), + pts_backbone=dict( + type='SECOND', + in_channels=384, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + pts_neck=dict( + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[ + [-76.8, -51.2, -0.0345, 76.8, 51.2, -0.0345], + [-76.8, -51.2, -0.1188, 76.8, 51.2, -0.1188], + [-76.8, -51.2, 0, 76.8, 51.2, 0], + ], + sizes=[ + [4.73, 2.08, 1.77], # car + [1.81, 0.84, 1.77], # pedestrian + [0.91, 0.84, 1.74], # cyclist + ], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + dir_offset=-0.7854, # -pi / 4 + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict( + assigner=[ + dict( # car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + dict( # cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ], + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False)), + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=4096, + nms_thr=0.25, + score_thr=0.1, + min_bbox_size=0, + max_num=500))) diff --git a/configs/_base_/models/smoke.py b/configs/_base_/models/smoke.py new file mode 100755 index 0000000..a36456c --- /dev/null +++ b/configs/_base_/models/smoke.py @@ -0,0 +1,61 @@ +# model settings +model = dict( + type='SMOKEMono3D', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='DLANet', + depth=34, + in_channels=3, + norm_cfg=dict(type='GN', num_groups=32), + init_cfg=dict( + type='Pretrained', + checkpoint='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth' + )), + neck=dict( + type='DLANeck', + in_channels=[16, 32, 64, 128, 256, 512], + start_level=2, + end_level=5, + norm_cfg=dict(type='GN', num_groups=32)), + bbox_head=dict( + type='SMOKEMono3DHead', + num_classes=3, + in_channels=64, + dim_channel=[3, 4, 5], + 
ori_channel=[6, 7], + stacked_convs=0, + feat_channels=64, + use_direction_classifier=False, + diff_rad_by_sin=False, + pred_attrs=False, + pred_velo=False, + dir_offset=0, + strides=None, + group_reg_dims=(8, ), + cls_branch=(256, ), + reg_branch=((256, ), ), + num_attrs=0, + bbox_code_size=7, + dir_branch=(), + attr_branch=(), + bbox_coder=dict( + type='SMOKECoder', + base_depth=(28.01, 16.32), + base_dims=((0.88, 1.73, 0.67), (1.78, 1.70, 0.58), (3.88, 1.63, + 1.53)), + code_size=7), + loss_cls=dict(type='mmdet.GaussianFocalLoss', loss_weight=1.0), + loss_bbox=dict( + type='mmdet.L1Loss', reduction='sum', loss_weight=1 / 300), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_attr=None, + conv_bias=True, + dcn_on_last_conv=False), + train_cfg=None, + test_cfg=dict(topK=100, local_maximum_kernel=3, max_per_img=100)) diff --git a/configs/_base_/models/spvcnn.py b/configs/_base_/models/spvcnn.py new file mode 100755 index 0000000..335407d --- /dev/null +++ b/configs/_base_/models/spvcnn.py @@ -0,0 +1,29 @@ +model = dict( + type='MinkUNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_type='minkunet', + voxel_layer=dict( + max_num_points=-1, + point_cloud_range=[-100, -100, -20, 100, 100, 20], + voxel_size=[0.05, 0.05, 0.05], + max_voxels=(-1, -1)), + ), + backbone=dict( + type='SPVCNNBackbone', + in_channels=4, + base_channels=32, + encoder_channels=[32, 64, 128, 256], + decoder_channels=[256, 128, 96, 96], + num_stages=4, + drop_ratio=0.3), + decode_head=dict( + type='MinkUNetHead', + channels=96, + num_classes=19, + dropout_ratio=0, + loss_decode=dict(type='mmdet.CrossEntropyLoss', avg_non_ignore=True), + ignore_index=19), + train_cfg=dict(), + test_cfg=dict()) diff --git a/configs/_base_/models/votenet.py b/configs/_base_/models/votenet.py new file mode 100755 index 0000000..3e72c12 --- /dev/null +++ b/configs/_base_/models/votenet.py @@ -0,0 +1,73 @@ +model = dict( + type='VoteNet', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True)), + bbox_head=dict( + type='VoteHead', + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=3, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=256, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + pred_layer_cfg=dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True), + objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', 
reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', + loss_weight=10.0 / 3.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mode='vote'), + test_cfg=dict( + sample_mode='seed', + nms_thr=0.25, + score_thr=0.05, + per_class_proposal=True)) diff --git a/configs/_base_/schedules/cosine.py b/configs/_base_/schedules/cosine.py new file mode 100755 index 0000000..d800bf8 --- /dev/null +++ b/configs/_base_/schedules/cosine.py @@ -0,0 +1,30 @@ +# This schedule is mainly used by models with dynamic voxelization +# optimizer +lr = 0.003 # max learning rate +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=lr, weight_decay=0.001, betas=(0.95, 0.99)), + clip_grad=dict(max_norm=10, norm_type=2), +) + +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), + dict( + type='CosineAnnealingLR', + begin=0, + T_max=40, + end=40, + by_epoch=True, + eta_min=1e-5) +] +# training schedule for 40e +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=40, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/_base_/schedules/cyclic-20e.py b/configs/_base_/schedules/cyclic-20e.py new file mode 100755 index 0000000..caff691 --- /dev/null +++ b/configs/_base_/schedules/cyclic-20e.py @@ -0,0 +1,65 @@ +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained for 20 epochs by default, we set the evaluation +# interval to 20. Please change the interval accordingly if you do not +# use a default schedule. +# optimizer +lr = 1e-4 +# This schedule is mainly used by models on nuScenes dataset +# max_norm=10 is better for SECOND +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01), + clip_grad=dict(max_norm=35, norm_type=2)) +# learning rate +param_scheduler = [ + # learning rate scheduler + # During the first 8 epochs, learning rate increases from 0 to lr * 10 + # during the next 12 epochs, learning rate decreases from lr * 10 to + # lr * 1e-4 + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=lr * 10, + begin=0, + end=8, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=12, + eta_min=lr * 1e-4, + begin=8, + end=20, + by_epoch=True, + convert_to_iter_based=True), + # momentum scheduler + # During the first 8 epochs, momentum increases from 0 to 0.85 / 0.95 + # during the next 12 epochs, momentum increases from 0.85 / 0.95 to 1 + dict( + type='CosineAnnealingMomentum', + T_max=8, + eta_min=0.85 / 0.95, + begin=0, + end=8, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=12, + eta_min=1, + begin=8, + end=20, + by_epoch=True, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=20) +val_cfg = dict() +test_cfg = dict() + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (4 samples per GPU). 
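+# When enabled, the learning rate is expected to be scaled linearly by the ratio
+# of the actual total batch size to `base_batch_size`, e.g. 16 GPUs x 4 samples
+# per GPU (64 in total) would roughly double the lr above; treat this as the
+# usual linear scaling rule rather than a guaranteed runner behaviour.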
+auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/_base_/schedules/cyclic-40e.py b/configs/_base_/schedules/cyclic-40e.py new file mode 100755 index 0000000..58618f6 --- /dev/null +++ b/configs/_base_/schedules/cyclic-40e.py @@ -0,0 +1,67 @@ +# The schedule is usually used by models trained on KITTI dataset +# The learning rate set in the cyclic schedule is the initial learning rate +# rather than the max learning rate. Since the target_ratio is (10, 1e-4), +# the learning rate will change from 0.0018 to 0.018, then go to 0.0018*1e-4 +lr = 0.0018 +# The optimizer follows the setting in SECOND.Pytorch, but here we use +# the official AdamW optimizer implemented by PyTorch. +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01), + clip_grad=dict(max_norm=10, norm_type=2)) +# learning rate +param_scheduler = [ + # learning rate scheduler + # During the first 16 epochs, learning rate increases from 0 to lr * 10 + # during the next 24 epochs, learning rate decreases from lr * 10 to + # lr * 1e-4 + dict( + type='CosineAnnealingLR', + T_max=16, + eta_min=lr * 10, + begin=0, + end=16, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=24, + eta_min=lr * 1e-4, + begin=16, + end=40, + by_epoch=True, + convert_to_iter_based=True), + # momentum scheduler + # During the first 16 epochs, momentum increases from 0 to 0.85 / 0.95 + # during the next 24 epochs, momentum increases from 0.85 / 0.95 to 1 + dict( + type='CosineAnnealingMomentum', + T_max=16, + eta_min=0.85 / 0.95, + begin=0, + end=16, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=24, + eta_min=1, + begin=16, + end=40, + by_epoch=True, + convert_to_iter_based=True) +] + +# Runtime settings, training schedule for 40e +# Although the max_epochs is 40, this schedule is usually used with +# RepeatDataset with repeat ratio N, thus the actual max epoch +# number could be Nx40 +train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (6 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=48) diff --git a/configs/_base_/schedules/mmdet-schedule-1x.py b/configs/_base_/schedules/mmdet-schedule-1x.py new file mode 100755 index 0000000..95f30be --- /dev/null +++ b/configs/_base_/schedules/mmdet-schedule-1x.py @@ -0,0 +1,28 @@ +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/_base_/schedules/schedule-2x.py b/configs/_base_/schedules/schedule-2x.py new file mode 100755 index 0000000..c6f0a96 --- /dev/null +++ b/configs/_base_/schedules/schedule-2x.py @@ -0,0 +1,36 @@ +# optimizer +# This schedule is mainly used by models on nuScenes dataset +lr = 0.001 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01), + # max_norm=10 is better for SECOND + clip_grad=dict(max_norm=35, norm_type=2)) + +# training schedule for 2x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=24) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[20, 23], + gamma=0.1) +] + +# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (4 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/_base_/schedules/schedule-3x.py b/configs/_base_/schedules/schedule-3x.py new file mode 100755 index 0000000..4e5789f --- /dev/null +++ b/configs/_base_/schedules/schedule-3x.py @@ -0,0 +1,31 @@ +# optimizer +# This schedule is mainly used by models on indoor dataset, +# e.g., VoteNet on SUNRGBD and ScanNet +lr = 0.008 # max learning rate +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01), + clip_grad=dict(max_norm=10, norm_type=2), +) + +# training schedule for 3x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=36, + by_epoch=True, + milestones=[24, 32], + gamma=0.1) +] + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (4 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/_base_/schedules/seg-cosine-100e.py b/configs/_base_/schedules/seg-cosine-100e.py new file mode 100755 index 0000000..efc0754 --- /dev/null +++ b/configs/_base_/schedules/seg-cosine-100e.py @@ -0,0 +1,27 @@ +# optimizer +# This schedule is mainly used on S3DIS dataset in segmentation task +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.001), + clip_grad=None) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=100, + eta_min=1e-5, + by_epoch=True, + begin=0, + end=100) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (4 GPUs) x (32 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/_base_/schedules/seg-cosine-150e.py b/configs/_base_/schedules/seg-cosine-150e.py new file mode 100755 index 0000000..9119017 --- /dev/null +++ b/configs/_base_/schedules/seg-cosine-150e.py @@ -0,0 +1,27 @@ +# optimizer +# This schedule is mainly used on S3DIS dataset in segmentation task +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=0.0001), + clip_grad=None) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=150, + eta_min=0.002, + by_epoch=True, + begin=0, + end=150) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/_base_/schedules/seg-cosine-200e.py b/configs/_base_/schedules/seg-cosine-200e.py new file mode 100755 index 0000000..a702168 --- /dev/null +++ b/configs/_base_/schedules/seg-cosine-200e.py @@ -0,0 +1,27 @@ +# optimizer +# This schedule is mainly used on S3DIS dataset in segmentation task +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='Adam', lr=0.001, weight_decay=0.01), + clip_grad=None) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=200, + eta_min=1e-5, + by_epoch=True, + begin=0, + end=200) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=200, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (2 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/_base_/schedules/seg-cosine-50e.py b/configs/_base_/schedules/seg-cosine-50e.py new file mode 100755 index 0000000..fd31219 --- /dev/null +++ b/configs/_base_/schedules/seg-cosine-50e.py @@ -0,0 +1,27 @@ +# optimizer +# This schedule is mainly used on S3DIS dataset in segmentation task +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='Adam', lr=0.001, weight_decay=0.001), + clip_grad=None) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=50, + eta_min=1e-5, + by_epoch=True, + begin=0, + end=50) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=50, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (2 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/benchmark/hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py b/configs/benchmark/hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py new file mode 100755 index 0000000..baec55f --- /dev/null +++ b/configs/benchmark/hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py @@ -0,0 +1,386 @@ +# model settings +voxel_size = [0.05, 0.05, 0.1] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] # velodyne coordinates, x, y, z + +model = dict( + type='PartA2', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=5, # max_points_per_voxel + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseUNet', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + rpn_head=dict( + type='PartA2RPNHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[[0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78]], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + assigner_per_size=True, + assign_per_class=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + roi_head=dict( + type='PartAggregationROIHead', + num_classes=3, + semantic_head=dict( + type='PointwiseSemanticHead', + in_channels=16, + extra_width=0.2, + seg_score_thr=0.3, + num_classes=3, + loss_seg=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_part=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0)), + seg_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='max')), + bbox_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='avg')), + bbox_head=dict( + type='PartA2BboxHead', + num_classes=3, + seg_in_channels=16, + part_in_channels=4, + seg_conv_channels=[64, 64], + part_conv_channels=[64, 64], + merge_conv_channels=[128, 128], + down_conv_channels=[128, 256], + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + shared_fc_channels=[256, 512, 512, 512], + cls_channels=[256, 256], + reg_channels=[256, 256], + dropout_ratio=0.1, + roi_feat_size=14, + with_corner_loss=True, + loss_bbox=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + 
iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1) + ], + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=9000, + nms_post=512, + max_num=512, + nms_thr=0.8, + score_thr=0, + use_rotate_nms=False), + rcnn=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1) + ], + sampler=dict( + type='IoUNegPiecewiseSampler', + num=128, + pos_fraction=0.55, + neg_piece_fractions=[0.8, 0.2], + neg_iou_piece_thrs=[0.55, 0.1], + neg_pos_ub=-1, + add_gt_as_proposals=False, + return_iou=True), + cls_pos_thr=0.75, + cls_neg_thr=0.25)), + test_cfg=dict( + rpn=dict( + nms_pre=1024, + nms_post=100, + max_num=100, + nms_thr=0.7, + score_thr=0, + use_rotate_nms=True), + rcnn=dict( + use_rotate_nms=True, + use_raw_score=True, + nms_thr=0.01, + score_thr=0.1))) + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +metainfo = dict(classes=class_names) +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + classes=class_names, + sample_groups=dict(Car=20, Pedestrian=15, Cyclist=15)) + +train_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show 
function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR')) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox') +test_evaluator = val_evaluator + +# optimizer +lr = 0.001 # max learning rate +epoch_num = 80 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.4, + eta_min=lr * 10, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.6, + eta_min=lr * 1e-4, + begin=epoch_num * 0.4, + end=epoch_num * 1, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.4, + eta_min=0.85 / 0.95, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.6, + eta_min=1, + begin=epoch_num * 0.4, + end=epoch_num * 1, + convert_to_iter_based=True) +] + +train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=50) +val_cfg = dict() +test_cfg = dict() +auto_scale_lr = dict(enable=False, base_batch_size=32) + +default_scope = 'mmdet3d' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='Det3DVisualizationHook')) + +custom_hooks = [ + dict(type='BenchmarkHook'), +] + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False +find_unused_parameters = True +work_dir = './work_dirs/parta2_secfpn_80e' diff --git a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py new file mode 100755 index 
0000000..dc59480 --- /dev/null +++ b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py @@ -0,0 +1,248 @@ +# model settings +voxel_size = [0.16, 0.16, 4] +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=64, + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(12000, 20000))), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range), + middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=1, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]], + sizes=[[3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=True), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Car'] +metainfo = dict(classes=class_names) +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + sample_groups=dict(Car=15), + classes=class_names) + +train_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[0.25, 0.25, 0.25], + global_rot_range=[0.0, 0.0], + rot_range=[-0.15707963267, 0.15707963267]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + 
dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=3, + num_workers=3, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR'))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox') +test_evaluator = val_evaluator + +# optimizer +lr = 0.001 # max learning rate +epoch_num = 50 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.4, + eta_min=lr * 10, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.6, + eta_min=lr * 1e-4, + begin=epoch_num * 0.4, + end=epoch_num * 1, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.4, + eta_min=0.85 / 0.95, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.6, + eta_min=1, + begin=epoch_num * 0.4, + end=epoch_num * 1, + convert_to_iter_based=True) +] + +train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=50) +val_cfg = dict() +test_cfg = dict() +auto_scale_lr = dict(enable=False, base_batch_size=24) + +default_scope = 'mmdet3d' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='Det3DVisualizationHook')) + +custom_hooks = [ + dict(type='BenchmarkHook'), +] + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False +work_dir = './work_dirs/pp_secfpn_100e' diff --git 
a/configs/benchmark/hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py b/configs/benchmark/hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py new file mode 100755 index 0000000..01dc8b5 --- /dev/null +++ b/configs/benchmark/hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py @@ -0,0 +1,291 @@ +# model settings +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] +voxel_size = [0.16, 0.16, 4] +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=32, # max_points_per_voxel + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + ), + middle_encoder=dict( + type='PointPillarsScatter', + in_channels=64, + output_shape=[496, 432], + ), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256], + ), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128], + ), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2), + ), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +metainfo = dict(classes=class_names) + +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + Car=5, + Pedestrian=5, + Cyclist=5, + )), + classes=class_names, + sample_groups=dict( + Car=15, + Pedestrian=15, + Cyclist=15, + )) + +train_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + 
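+    # `ObjectSample` below pastes ground-truth objects (boxes plus their points)
+    # sampled from `kitti_dbinfos_train.pkl` via `db_sampler` into the scene
+    # (GT-sampling augmentation); `sample_groups` roughly tops each class up to
+    # the listed number of instances per frame.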
dict(type='ObjectSample', db_sampler=db_sampler), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR')) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox') +test_evaluator = val_evaluator + +# optimizer +lr = 0.0003 # max learning rate +epoch_num = 80 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.4, + eta_min=lr * 10, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.6, + eta_min=lr * 1e-4, + begin=epoch_num * 0.4, + end=epoch_num * 1, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.4, + eta_min=0.85 / 0.95, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.6, + eta_min=1, + begin=epoch_num * 0.4, + end=epoch_num * 1, + convert_to_iter_based=True) +] + +train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=50) +val_cfg = dict() +test_cfg = dict() +auto_scale_lr = dict(enable=False, base_batch_size=32) + +default_scope = 'mmdet3d' + +default_hooks = dict( + 
timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='Det3DVisualizationHook')) + +custom_hooks = [ + dict(type='BenchmarkHook'), +] + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False +work_dir = './work_dirs/pp_secfpn_80e' diff --git a/configs/benchmark/hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py b/configs/benchmark/hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py new file mode 100755 index 0000000..f8dd0d2 --- /dev/null +++ b/configs/benchmark/hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py @@ -0,0 +1,281 @@ +# model settings +voxel_size = [0.05, 0.05, 0.1] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] + +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=5, + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseEncoder', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' 
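+# The paths below assume the standard mmdet3d KITTI layout under `data_root`
+# (typically generated with `python tools/create_data.py kitti ...`):
+# `kitti_infos_{train,val}.pkl` annotation infos, `kitti_dbinfos_train.pkl`
+# GT database used by `ObjectSample`, and `training/velodyne_reduced/` points.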
+class_names = ['Pedestrian', 'Cyclist', 'Car'] +metainfo = dict(classes=class_names) +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + Car=5, + Pedestrian=5, + Cyclist=5, + )), + classes=class_names, + sample_groups=dict( + Car=20, + Pedestrian=15, + Cyclist=15, + )) + +train_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
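+        # `box_type_3d` selects the box convention/class the dataset returns,
+        # e.g. 'LiDAR' -> LiDARInstance3DBoxes and 'Depth' -> DepthInstance3DBoxes.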
+ box_type_3d='LiDAR')) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox') +test_evaluator = val_evaluator + +# optimizer +lr = 0.0003 # max learning rate +epoch_num = 80 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.4, + eta_min=lr * 10, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.6, + eta_min=lr * 1e-4, + begin=epoch_num * 0.4, + end=epoch_num * 1, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.4, + eta_min=0.85 / 0.95, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.6, + eta_min=1, + begin=epoch_num * 0.4, + end=epoch_num * 1, + convert_to_iter_based=True) +] + +train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=50) +val_cfg = dict() +test_cfg = dict() +auto_scale_lr = dict(enable=False, base_batch_size=32) + +default_scope = 'mmdet3d' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='Det3DVisualizationHook')) + +custom_hooks = [ + dict(type='BenchmarkHook'), +] + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False +work_dir = './work_dirs/pp_secfpn_100e' diff --git a/configs/centerpoint/README.md b/configs/centerpoint/README.md new file mode 100755 index 0000000..6fd7af7 --- /dev/null +++ b/configs/centerpoint/README.md @@ -0,0 +1,136 @@ +# Center-based 3D Object Detection and Tracking + +> [Center-based 3D Object Detection and Tracking](https://arxiv.org/abs/2006.11275) + + + +## Abstract + +Three-dimensional objects are commonly represented as 3D boxes in a point-cloud. This representation mimics the well-studied image-based 2D bounding-box detection but comes with additional challenges. Objects in a 3D world do not follow any particular orientation, and box-based detectors have difficulties enumerating all orientations or fitting an axis-aligned bounding box to rotated objects. In this paper, we instead propose to represent, detect, and track 3D objects as points. 
Our framework, CenterPoint, first detects centers of objects using a keypoint detector and regresses to other attributes, including 3D size, 3D orientation, and velocity. In a second stage, it refines these estimates using additional point features on the object. In CenterPoint, 3D object tracking simplifies to greedy closest-point matching. The resulting detection and tracking algorithm is simple, efficient, and effective. CenterPoint achieved state-of-the-art performance on the nuScenes benchmark for both 3D detection and tracking, with 65.5 NDS and 63.8 AMOTA for a single model. On the Waymo Open Dataset, CenterPoint outperforms all previous single-model methods by a large margin and ranks first among all Lidar-only submissions. + +
    + +
+ +## Introduction + +We implement CenterPoint and provide the results and checkpoints on the nuScenes dataset. + +We follow the style below to name config files. Contributors are advised to follow the same style. +`{xxx}` is a required field and `[yyy]` is optional. + +`{model}`: model type like `centerpoint`. + +`{model setting}`: voxel size and voxel type like `01voxel`, `02pillar`. + +`{backbone}`: backbone type like `second`. + +`{neck}`: neck type like `secfpn`. + +`[dcn]`: Whether to use deformable convolution. + +`[circle]`: Whether to use circular nms. + +`[batch_per_gpu x gpu]`: samples per GPU and number of GPUs; `4x8` (4 samples per GPU on 8 GPUs) is used by default. + +`{schedule}`: training schedule, options are 1x, 2x, 20e, etc. 1x and 2x mean 12 epochs and 24 epochs, respectively. 20e is adopted in cascade models and denotes 20 epochs. For 1x/2x, the initial learning rate decays by a factor of 10 at the 8th/16th and 11th/22nd epochs. For 20e, the initial learning rate decays by a factor of 10 at the 16th and 19th epochs. + +`{dataset}`: dataset like nus-3d, kitti-3d, lyft-3d, scannet-3d, sunrgbd-3d. We also indicate the number of classes we are using if there are multiple settings, e.g., kitti-3d-3class and kitti-3d-car mean training on the KITTI dataset with 3 classes and a single class, respectively. + +## Usage + +### Test time augmentation + +We support double-flip and scale augmentation at test time. To use test time augmentation, users need to modify the +`test_pipeline` and `test_cfg` in the config. +For example, we change `centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus.py` to the following. + +```python +_base_ = './centerpoint_0075voxel_second_secfpn_circlenms' \ + '_4x8_cyclic_20e_nus.py' + +model = dict( + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + max_num=83))) + +point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0] +backend_args = None +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] + +test_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args, + pad_empty_sweeps=True, + remove_close=True), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=[0.95, 1.0, 1.05], + flip=True, + pcd_horizontal_flip=True, + pcd_vertical_flip=True, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', sync_2d=False), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +data = dict( + val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) + +``` + +## Results and models + +### CenterPoint + +| Backbone | Voxel type (voxel size) | Dcn | Circular nms | Mem (GB) | Inf time (fps) | mAP | NDS | Download | +| :------------------------------------------------------------------------------------------: | :---------------------: | :-: | :----------: | :------: | :------------: | :---: | :---: | 
:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py) | voxel (0.1) | ✗ | ✓ | 5.2 | | 56.11 | 64.61 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_030004-9061688e.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_030004.log) | +| above w/o circle nms | voxel (0.1) | ✗ | ✗ | | | x | x | | +| [SECFPN](./centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py) | voxel (0.1) | ✓ | ✓ | 5.5 | | 56.10 | 64.69 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20220810_052355-a6928835.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20220810_052355.log) | +| above w/o circle nms | voxel (0.1) | ✓ | ✗ | | | x | x | | +| [SECFPN](./centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py) | voxel (0.075) | ✗ | ✓ | 8.2 | | 56.54 | 65.17 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_011659-04cb3a3b.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_011659.log) | +| above w/o circle nms | voxel (0.075) | ✗ | ✗ | | | 57.63 | 65.39 | | +| [SECFPN](./centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py) | voxel (0.075) | ✓ | ✓ | 8.7 | | 56.92 | 65.27 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20220810_025930-657f67e0.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20220810_025930.log) | +| above w/o circle nms | voxel (0.075) | ✓ | ✗ | | | 57.43 | 65.63 | | +| above w/ double flip | voxel (0.075) | ✓ | ✗ | | | 59.73 | 67.39 | | +| above w/ scale tta | voxel (0.075) | ✓ | ✗ | | | 60.43 | 67.65 | | +| above w/ circle nms w/o scale tta | voxel (0.075) | ✓ | ✗ | | | 59.52 | 67.24 | | +| [SECFPN](./centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py) | pillar 
(0.2) | ✗ | ✓ | 4.6 | | 48.70 | 59.62 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220811_031844-191a3822.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220811_031844.log) | +| above w/o circle nms | pillar (0.2) | ✗ | ✗ | | | 49.12 | 59.66 | | +| [SECFPN](./centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py) | pillar (0.2) | ✓ | ✗ | 4.9 | | 48.38 | 59.79 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus_20220811_045458-808e69ad.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus_20220811_045458.log) | +| above w/ circle nms | pillar (0.2) | ✓ | ✓ | | | 48.79 | 59.65 | | + +**Note:** The model performance after coordinate refactor is slightly different (+/- 0.5 - 1 mAP/NDS) from the performance before coordinate refactor in v0.x branch. We are exploring the reason behind. | + +## Citation + +```latex +@article{yin2021center, + title={Center-based 3D Object Detection and Tracking}, + author={Yin, Tianwei and Zhou, Xingyi and Kr{\"a}henb{\"u}hl, Philipp}, + journal={CVPR}, + year={2021}, +} +``` diff --git a/configs/centerpoint/centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..a6854b2 --- /dev/null +++ b/configs/centerpoint/centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,159 @@ +_base_ = [ + '../_base_/datasets/nus-3d.py', + '../_base_/models/centerpoint_pillar02_second_secfpn_nus.py', + '../_base_/schedules/cyclic-20e.py', '../_base_/default_runtime.py' +] + +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +# Using calibration info convert the Lidar-coordinate point cloud range to the +# ego-coordinate point cloud range could bring a little promotion in nuScenes. 
+# point_cloud_range = [-51.2, -52, -5.0, 51.2, 50.4, 3.0] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] +data_prefix = dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP') +model = dict( + data_preprocessor=dict( + voxel_layer=dict(point_cloud_range=point_cloud_range)), + pts_voxel_encoder=dict(point_cloud_range=point_cloud_range), + pts_bbox_head=dict(bbox_coder=dict(pc_range=point_cloud_range[:2])), + # model training and testing settings + train_cfg=dict(pts=dict(point_cloud_range=point_cloud_range)), + test_cfg=dict(pts=dict(pc_range=point_cloud_range[:2]))) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'nuscenes_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + car=5, + truck=5, + bus=5, + trailer=5, + construction_vehicle=5, + traffic_cone=5, + barrier=5, + motorcycle=5, + bicycle=5, + pedestrian=5)), + classes=class_names, + sample_groups=dict( + car=2, + truck=3, + construction_vehicle=7, + bus=4, + trailer=6, + barrier=2, + motorcycle=6, + bicycle=6, + pedestrian=2, + traffic_cone=2), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D') + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + _delete_=True, + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CBGSDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_train.pkl', + pipeline=train_pipeline, + metainfo=dict(classes=class_names), + test_mode=False, + data_prefix=data_prefix, + use_valid_flag=True, + # we use 
box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + backend_args=backend_args))) +test_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) + +train_cfg = dict(val_interval=20) diff --git a/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..5f6b4b4 --- /dev/null +++ b/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,3 @@ +_base_ = ['./centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict(test_cfg=dict(pts=dict(nms_type='circle'))) diff --git a/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..974aa63 --- /dev/null +++ b/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,16 @@ +_base_ = ['./centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict( + pts_bbox_head=dict( + separate_head=dict( + type='DCNSeparateHead', + dcn_config=dict( + type='DCN', + in_channels=64, + out_channels=64, + kernel_size=3, + padding=1, + groups=4), + init_bias=-2.19, + final_kernel=3)), + test_cfg=dict(pts=dict(nms_type='circle'))) diff --git a/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..9d1466a --- /dev/null +++ b/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,15 @@ +_base_ = ['./centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict( + pts_bbox_head=dict( + separate_head=dict( + type='DCNSeparateHead', + dcn_config=dict( + type='DCN', + in_channels=64, + out_channels=64, + kernel_size=3, + padding=1, + groups=4), + init_bias=-2.19, + final_kernel=3))) diff --git a/configs/centerpoint/centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..a7d6755 --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,145 @@ +_base_ = ['./centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +voxel_size = [0.075, 0.075, 0.2] +point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0] +# Using calibration info convert the Lidar-coordinate point cloud range to the +# ego-coordinate point cloud range could bring a little promotion in nuScenes. 
+# point_cloud_range = [-54, -54.8, -5.0, 54, 53.2, 3.0] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] +data_prefix = dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP') +model = dict( + data_preprocessor=dict( + voxel_layer=dict( + voxel_size=voxel_size, point_cloud_range=point_cloud_range)), + pts_middle_encoder=dict(sparse_shape=[41, 1440, 1440]), + pts_bbox_head=dict( + bbox_coder=dict( + voxel_size=voxel_size[:2], pc_range=point_cloud_range[:2])), + train_cfg=dict( + pts=dict( + grid_size=[1440, 1440, 40], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range)), + test_cfg=dict( + pts=dict(voxel_size=voxel_size[:2], pc_range=point_cloud_range[:2]))) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'nuscenes_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + car=5, + truck=5, + bus=5, + trailer=5, + construction_vehicle=5, + traffic_cone=5, + barrier=5, + motorcycle=5, + bicycle=5, + pedestrian=5)), + classes=class_names, + sample_groups=dict( + car=2, + truck=3, + construction_vehicle=7, + bus=4, + trailer=6, + barrier=2, + motorcycle=6, + bicycle=6, + pedestrian=2, + traffic_cone=2), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + dataset=dict( + dataset=dict( + pipeline=train_pipeline, metainfo=dict(classes=class_names)))) +test_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) +val_dataloader = dict( + 
dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) diff --git a/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..46280c6 --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,3 @@ +_base_ = ['./centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict(test_cfg=dict(pts=dict(nms_type='circle'))) diff --git a/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..035cfc2 --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,16 @@ +_base_ = ['./centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict( + pts_bbox_head=dict( + separate_head=dict( + type='DCNSeparateHead', + dcn_config=dict( + type='DCN', + in_channels=64, + out_channels=64, + kernel_size=3, + padding=1, + groups=4), + init_bias=-2.19, + final_kernel=3)), + test_cfg=dict(pts=dict(nms_type='circle'))) diff --git a/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-flip-tta-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-flip-tta-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..5625883 --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-flip-tta-cyclic-20e_nus-3d.py @@ -0,0 +1,50 @@ +_base_ = './centerpoint_voxel0075_second_secfpn_' \ + 'head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py' + +point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0] +# Using calibration info convert the Lidar-coordinate point cloud range to the +# ego-coordinate point cloud range could bring a little promotion in nuScenes. 
+# point_cloud_range = [-54, -54.8, -5.0, 54, 53.2, 3.0] +backend_args = None +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] + +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + # Add double-flip augmentation + flip=True, + pcd_horizontal_flip=True, + pcd_vertical_flip=True, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', sync_2d=False), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +data = dict( + val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) diff --git a/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..185676b --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,15 @@ +_base_ = ['./centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict( + pts_bbox_head=dict( + separate_head=dict( + type='DCNSeparateHead', + dcn_config=dict( + type='DCN', + in_channels=64, + out_channels=64, + kernel_size=3, + padding=1, + groups=4), + init_bias=-2.19, + final_kernel=3))) diff --git a/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-flip-tta-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-flip-tta-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..dc0986f --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-flip-tta-cyclic-20e_nus-3d.py @@ -0,0 +1,50 @@ +_base_ = './centerpoint_voxel0075_second_secfpn' \ + '_head-dcn_8xb4-cyclic-20e_nus-3d.py' + +point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0] +# Using calibration info convert the Lidar-coordinate point cloud range to the +# ego-coordinate point cloud range could bring a little promotion in nuScenes. 
+# point_cloud_range = [-54, -54.8, -5.0, 54, 53.2, 3.0] +backend_args = None +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] + +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + # Add double-flip augmentation + flip=True, + pcd_horizontal_flip=True, + pcd_vertical_flip=True, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', sync_2d=False), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +data = dict( + val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) diff --git a/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-tta-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-tta-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..d65585d --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-tta-cyclic-20e_nus-3d.py @@ -0,0 +1,52 @@ +_base_ = './centerpoint_voxel0075_second_secfpn' \ + '_head-dcn_8xb4-cyclic-20e_nus-3d.py' + +model = dict(test_cfg=dict(pts=dict(use_rotate_nms=True, max_num=500))) + +point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0] +# Using calibration info convert the Lidar-coordinate point cloud range to the +# ego-coordinate point cloud range could bring a little promotion in nuScenes. 
+# point_cloud_range = [-54, -54.8, -5.0, 54, 53.2, 3.0] +backend_args = None +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] + +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=[0.95, 1.0, 1.05], + # Add double-flip augmentation + flip=True, + pcd_horizontal_flip=True, + pcd_vertical_flip=True, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', sync_2d=False), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +data = dict( + val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) diff --git a/configs/centerpoint/centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..0a44c14 --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,160 @@ +_base_ = [ + '../_base_/datasets/nus-3d.py', + '../_base_/models/centerpoint_voxel01_second_secfpn_nus.py', + '../_base_/schedules/cyclic-20e.py', '../_base_/default_runtime.py' +] + +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +# Using calibration info convert the Lidar-coordinate point cloud range to the +# ego-coordinate point cloud range could bring a little promotion in nuScenes. 
+# point_cloud_range = [-51.2, -52, -5.0, 51.2, 50.4, 3.0] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] +data_prefix = dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP') +model = dict( + data_preprocessor=dict( + voxel_layer=dict(point_cloud_range=point_cloud_range)), + pts_bbox_head=dict(bbox_coder=dict(pc_range=point_cloud_range[:2])), + # model training and testing settings + train_cfg=dict(pts=dict(point_cloud_range=point_cloud_range)), + test_cfg=dict(pts=dict(pc_range=point_cloud_range[:2]))) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'nuscenes_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + car=5, + truck=5, + bus=5, + trailer=5, + construction_vehicle=5, + traffic_cone=5, + barrier=5, + motorcycle=5, + bicycle=5, + pedestrian=5)), + classes=class_names, + sample_groups=dict( + car=2, + truck=3, + construction_vehicle=7, + bus=4, + trailer=6, + barrier=2, + motorcycle=6, + bicycle=6, + pedestrian=2, + traffic_cone=2), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + _delete_=True, + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CBGSDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_train.pkl', + pipeline=train_pipeline, + metainfo=dict(classes=class_names), + test_mode=False, + data_prefix=data_prefix, + use_valid_flag=True, + 
# we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + backend_args=backend_args))) +test_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) + +train_cfg = dict(val_interval=20) diff --git a/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..47c552a --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,3 @@ +_base_ = ['./centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict(test_cfg=dict(pts=dict(nms_type='circle'))) diff --git a/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..d684516 --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,16 @@ +_base_ = ['./centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict( + pts_bbox_head=dict( + separate_head=dict( + type='DCNSeparateHead', + dcn_config=dict( + type='DCN', + in_channels=64, + out_channels=64, + kernel_size=3, + padding=1, + groups=4), + init_bias=-2.19, + final_kernel=3)), + test_cfg=dict(pts=dict(nms_type='circle'))) diff --git a/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py b/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..bd571ba --- /dev/null +++ b/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,15 @@ +_base_ = ['./centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py'] + +model = dict( + pts_bbox_head=dict( + separate_head=dict( + type='DCNSeparateHead', + dcn_config=dict( + type='DCN', + in_channels=64, + out_channels=64, + kernel_size=3, + padding=1, + groups=4), + init_bias=-2.19, + final_kernel=3))) diff --git a/configs/centerpoint/metafile.yml b/configs/centerpoint/metafile.yml new file mode 100755 index 0000000..aca5f7c --- /dev/null +++ b/configs/centerpoint/metafile.yml @@ -0,0 +1,95 @@ +Collections: + - Name: CenterPoint + Metadata: + Training Data: nuScenes + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Hard Voxelization + Paper: + URL: https://arxiv.org/abs/2006.11275 + Title: 'Center-based 3D Object Detection and Tracking' + README: configs/centerpoint/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/centerpoint.py#L10 + Version: v0.6.0 + +Models: + - Name: centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d + In Collection: CenterPoint + Config: configs/centerpoint/centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py + metadata: + Training Memory (GB): 5.2 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 56.11 + NDS: 64.61 + Weights: 
https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_030004-9061688e.pth + + - Name: centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d + In Collection: CenterPoint + Config: configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py + Metadata: + Training Memory (GB): 5.5 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 56.10 + NDS: 64.69 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20220810_052355-a6928835.pth + + - Name: centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d + In Collection: CenterPoint + Config: configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py + Metadata: + Training Memory (GB): 8.2 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 56.54 + NDS: 65.17 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_011659-04cb3a3b.pth + + - Name: centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d + In Collection: CenterPoint + Config: configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py + Metadata: + Training Memory (GB): 8.7 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 56.92 + NDS: 65.27 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus_20220810_025930-657f67e0.pth + + - Name: centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d + In Collection: CenterPoint + Config: configs/centerpoint/centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py + Metadata: + Training Memory (GB): 4.6 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 48.70 + NDS: 59.62 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220811_031844-191a3822.pth + + - Name: centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d + In Collection: CenterPoint + Config: configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py + Metadata: + Training Memory (GB): 4.9 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 48.38 + NDS: 59.79 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/centerpoint/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus_20220811_045458-808e69ad.pth diff --git a/configs/cylinder3d/README.md b/configs/cylinder3d/README.md new file mode 100755 index 0000000..8183d08 --- /dev/null +++ b/configs/cylinder3d/README.md @@ -0,0 +1,37 @@ +# Cylindrical and Asymmetrical 3D Convolution Networks for LiDAR Segmentation + +> [Cylindrical and Asymmetrical 3D Convolution Networks for LiDAR 
Segmentation](https://arxiv.org/abs/2011.10033) + + + +## Abstract + +State-of-the-art methods for large-scale driving-scene LiDAR segmentation often project the point clouds to 2D space and then process them via 2D convolution. Although this approach shows competitiveness in the point cloud domain, it inevitably alters and abandons the 3D topology and geometric relations. A natural remedy is to utilize the 3D voxelization and 3D convolution network. However, we found that in the outdoor point cloud, the improvement obtained in this way is quite limited. An important reason is the property of the outdoor point cloud, namely sparsity and varying density. Motivated by this investigation, we propose a new framework for the outdoor LiDAR segmentation, where cylindrical partition and asymmetrical 3D convolution networks are designed to explore the 3D geometric pattern while maintaining these inherent properties. Moreover, a point-wise refinement module is introduced to alleviate the interference of lossy voxel-based label encoding. We evaluate the proposed model on two large-scale datasets, i.e., SemanticKITTI and nuScenes. Our method achieves the 1st place in the leaderboard of SemanticKITTI and outperforms existing methods on nuScenes with a noticeable margin, about 4%. Furthermore, the proposed 3D framework also generalizes well to LiDAR panoptic segmentation and LiDAR 3D detection. + +![overview](https://user-images.githubusercontent.com/45515569/228523861-2923082c-37d9-4d4f-aa59-746a8d9284c2.png) + +## Introduction + +We implement Cylinder3D and provide the results and checkpoints on the SemanticKITTI dataset. + +## Results and models + +### SemanticKITTI + +| Method | Lr schd | Mem (GB) | mIOU | Download | +| :--------: | :-----: | :------: | :------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Cylinder3D | 3x | 10.2 | 63.1±0.5 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/cylinder3d/cylinder3d_4xb4_3x_semantickitti/cylinder3d_4xb4_3x_semantickitti_20230318_191107-822a8c31.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/cylinder3d/cylinder3d_4xb4_3x_semantickitti/cylinder3d_4xb4_3x_semantickitti_20230318_191107.json) | + +Note: We reproduce performance comparable to that of the [official repo](https://github.com/xinge008/Cylinder3D). It is slightly lower than the performance (65.9 mIOU) reported in the paper due to the lack of point-wise refinement and shorter training time.
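+For a concrete picture of the cylindrical partition described in the abstract above, the short sketch below maps Cartesian LiDAR points to (radius, azimuth, height) voxel indices. It is only a conceptual illustration, not the code used by the config in this folder; the grid size and coordinate ranges are illustrative assumptions rather than values read from the config.
+
+```python
+import numpy as np
+
+
+def cylindrical_voxel_indices(points,
+                              rho_range=(0.0, 50.0),
+                              z_range=(-4.0, 2.0),
+                              grid_size=(480, 360, 32)):
+    """Map Cartesian LiDAR points (N, 3) to cylindrical voxel indices.
+
+    The scene is partitioned over (radius, azimuth, height) instead of a
+    uniform Cartesian grid, so sparse far-away regions get larger cells.
+    """
+    x, y, z = points[:, 0], points[:, 1], points[:, 2]
+    rho = np.sqrt(x ** 2 + y ** 2)  # radial distance
+    phi = np.arctan2(y, x)          # azimuth in [-pi, pi]
+
+    # normalize every coordinate to [0, 1) and discretize
+    rho_n = (rho - rho_range[0]) / (rho_range[1] - rho_range[0])
+    phi_n = (phi + np.pi) / (2 * np.pi)
+    z_n = (z - z_range[0]) / (z_range[1] - z_range[0])
+    coords = np.clip(np.stack([rho_n, phi_n, z_n], axis=1), 0, 1 - 1e-6)
+    return (coords * np.array(grid_size)).astype(np.int64)
+
+
+# toy usage: 1000 random points in a 100 m x 100 m x 6 m volume
+pts = np.random.uniform([-50, -50, -4], [50, 50, 2], size=(1000, 3))
+print(cylindrical_voxel_indices(pts).max(axis=0))
+```
+
+Each occupied cylindrical voxel is then processed by the asymmetrical sparse 3D convolutions defined in the model config of this folder.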
+ +## Citation + +```latex +@inproceedings{zhu2021cylindrical, + title={Cylindrical and asymmetrical 3d convolution networks for lidar segmentation}, + author={Zhu, Xinge and Zhou, Hui and Wang, Tai and Hong, Fangzhou and Ma, Yuexin and Li, Wei and Li, Hongsheng and Lin, Dahua}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition}, + pages={9939--9948}, + year={2021} +} +``` diff --git a/configs/cylinder3d/cylinder3d_4xb4_3x_semantickitti.py b/configs/cylinder3d/cylinder3d_4xb4_3x_semantickitti.py new file mode 100755 index 0000000..f147c58 --- /dev/null +++ b/configs/cylinder3d/cylinder3d_4xb4_3x_semantickitti.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/semantickitti.py', '../_base_/models/cylinder3d.py', + '../_base_/default_runtime.py' +] + +# optimizer +# This schedule is mainly used by models on nuScenes dataset +lr = 0.001 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01)) + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=36, + by_epoch=True, + milestones=[30], + gamma=0.1) +] + +train_dataloader = dict(batch_size=4, ) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (4 samples per GPU). +# auto_scale_lr = dict(enable=False, base_batch_size=32) + +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5)) diff --git a/configs/cylinder3d/metafile.yml b/configs/cylinder3d/metafile.yml new file mode 100755 index 0000000..e8241cb --- /dev/null +++ b/configs/cylinder3d/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: Cylinder3D + Metadata: + Training Techniques: + - AdamW + Training Resources: 4x A100 GPUs + Architecture: + - Cylinder3D + Paper: + URL: https://arxiv.org/abs/2011.10033 + Title: 'Cylindrical and Asymmetrical 3D Convolution Networks for LiDAR Segmentation' + README: configs/cylinder3d/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/models/segmentors/cylinder3d.py#L13 + Version: v1.1.0 + +Models: + - Name: + In Collection: Cylinder3D + Config: configs/cylinder3d/cylinder3d_4xb4_3x_semantickitti.py + Metadata: + Training Data: SemanticKITTI + Training Memory (GB): 10.2 + Results: + - Task: 3D Semantic Segmentation + Dataset: SemanticKITTI + Metrics: + mIOU: 63.1 + Weights: diff --git a/configs/dfm/multiview-dfm_r101-dcn_16xb2_waymoD5-3d-3class.py b/configs/dfm/multiview-dfm_r101-dcn_16xb2_waymoD5-3d-3class.py new file mode 100755 index 0000000..b75a6db --- /dev/null +++ b/configs/dfm/multiview-dfm_r101-dcn_16xb2_waymoD5-3d-3class.py @@ -0,0 +1,49 @@ +_base_ = [ + '../_base_/datasets/waymoD5-mv3d-3class.py', + '../_base_/models/multiview_dfm.py' +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0005, weight_decay=0.0001), + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}), + clip_grad=dict(max_norm=35., norm_type=2)) +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +# hooks +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + 
param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1), + sampler_seed=dict(type='DistSamplerSeedHook'), +) + +# training schedule for 2x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=24) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# runtime +default_scope = 'mmdet3d' + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +log_level = 'INFO' +load_from = None +resume = False +find_unused_parameters = True # only 1 of 4 FPN outputs is used diff --git a/configs/dfm/multiview-dfm_r101-dcn_centerhead_16xb2_waymoD5-3d-3class.py b/configs/dfm/multiview-dfm_r101-dcn_centerhead_16xb2_waymoD5-3d-3class.py new file mode 100755 index 0000000..998beb6 --- /dev/null +++ b/configs/dfm/multiview-dfm_r101-dcn_centerhead_16xb2_waymoD5-3d-3class.py @@ -0,0 +1,53 @@ +_base_ = ['./multiview-dfm_r101_dcn_2x16_waymoD5-3d-3class.py'] + +model = dict( + bbox_head=dict( + _delete_=True, + type='CenterHead', + in_channels=256, + tasks=[ + dict(num_class=1, class_names=['Pedestrian']), + dict(num_class=1, class_names=['Cyclist']), + dict(num_class=1, class_names=['Car']), + ], + common_heads=dict(reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2)), + share_conv_channel=64, + bbox_coder=dict( + type='CenterPointBBoxCoder', + post_center_range=[-35.0, -75.0, -2, 75.0, 75.0, 4], + pc_range=[-35.0, -75.0, -2, 75.0, 75.0, 4], + max_num=2000, + score_threshold=0, + out_size_factor=1, + voxel_size=(.50, .50), + code_size=7), + separate_head=dict( + type='SeparateHead', init_bias=-2.19, final_kernel=3), + loss_cls=dict(type='mmdet.GaussianFocalLoss', reduction='mean'), + loss_bbox=dict( + type='mmdet.L1Loss', reduction='mean', loss_weight=0.25), + norm_bbox=True), + train_cfg=dict( + _delete_=True, + grid_size=[220, 300, 1], + voxel_size=(0.5, 0.5, 6), + out_size_factor=1, + dense_reg=1, + gaussian_overlap=0.1, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + point_cloud_range=[-35.0, -75.0, -2, 75.0, 75.0, 4]), + test_cfg=dict( + _delete_=True, + post_center_limit_range=[-35.0, -75.0, -2, 75.0, 75.0, 4], + max_per_img=4096, + max_pool_nms=False, + min_radius=[0.5, 2, 6], + score_threshold=0, + out_size_factor=1, + voxel_size=(0.5, 0.5), + nms_type='circle', + pre_max_size=2000, + post_max_size=200, + nms_thr=0.2)) diff --git a/configs/dgcnn/README.md b/configs/dgcnn/README.md new file mode 100755 index 0000000..46c243a --- /dev/null +++ b/configs/dgcnn/README.md @@ -0,0 +1,55 @@ +# Dynamic Graph CNN for Learning on Point Clouds + +> [Dynamic Graph CNN for Learning on Point Clouds](https://arxiv.org/abs/1801.07829) + + + +## Abstract + +Point clouds provide a flexible geometric representation suitable for countless applications in computer graphics; they also comprise the raw output of most 3D data acquisition devices. While hand-designed features on point clouds have long been proposed in graphics and vision, however, the recent overwhelming success of convolutional neural networks (CNNs) for image analysis suggests the value of adapting insight from CNN to the point cloud world. Point clouds inherently lack topological information so designing a model to recover topology can enrich the representation power of point clouds. 
To this end, we propose a new neural network module dubbed EdgeConv suitable for CNN-based high-level tasks on point clouds including classification and segmentation. EdgeConv acts on graphs dynamically computed in each layer of the network. It is differentiable and can be plugged into existing architectures. Compared to existing modules operating in extrinsic space or treating each point independently, EdgeConv has several appealing properties: It incorporates local neighborhood information; it can be stacked applied to learn global shape properties; and in multi-layer systems affinity in feature space captures semantic characteristics over potentially long distances in the original embedding. We show the performance of our model on standard benchmarks including ModelNet40, ShapeNetPart, and S3DIS. + +
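+The EdgeConv operation summarized above can be sketched in a few lines of PyTorch. This is a conceptual, self-contained example rather than the backbone used by the configs in this folder: it rebuilds a kNN graph from the current point features, forms edge features [x_i, x_j - x_i], applies a shared MLP and max-aggregates over the neighbours.
+
+```python
+import torch
+import torch.nn as nn
+
+
+class EdgeConv(nn.Module):
+    """Minimal EdgeConv: h_i = max_j MLP([x_i, x_j - x_i]) over a kNN graph."""
+
+    def __init__(self, in_dim, out_dim, k=20):
+        super().__init__()
+        self.k = k
+        self.mlp = nn.Sequential(
+            nn.Conv2d(2 * in_dim, out_dim, 1, bias=False),
+            nn.BatchNorm2d(out_dim),
+            nn.LeakyReLU(0.2))
+
+    def forward(self, x):  # x: (B, C, N) point features
+        B, C, N = x.shape
+        # kNN indices recomputed from the *current* features, hence the
+        # "dynamic" graph in Dynamic Graph CNN
+        dist = torch.cdist(x.transpose(1, 2), x.transpose(1, 2))   # (B, N, N)
+        idx = dist.topk(self.k, dim=-1, largest=False).indices     # (B, N, k)
+
+        feats = x.transpose(1, 2)                                  # (B, N, C)
+        neighbors = torch.gather(
+            feats.unsqueeze(1).expand(B, N, N, C), 2,
+            idx.unsqueeze(-1).expand(B, N, self.k, C))             # (B, N, k, C)
+        center = feats.unsqueeze(2).expand_as(neighbors)
+        edge = torch.cat([center, neighbors - center], dim=-1)     # (B, N, k, 2C)
+        out = self.mlp(edge.permute(0, 3, 1, 2))                   # (B, out, N, k)
+        return out.max(dim=-1).values                              # (B, out, N)
+
+
+# toy usage
+layer = EdgeConv(3, 64, k=8)
+print(layer(torch.randn(2, 3, 128)).shape)  # torch.Size([2, 64, 128])
+```
+
+Because the graph is recomputed in feature space at every layer, points that are far apart in 3D can become neighbours in deeper layers, which is what the abstract means by capturing semantic characteristics over long distances.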
    + +## Introduction + +We implement DGCNN and provide the results and checkpoints on S3DIS dataset. + +**Notice**: We follow the implementations in the original DGCNN paper and a PyTorch implementation of DGCNN [code](https://github.com/AnTao97/dgcnn.pytorch). + +## Results and models + +### S3DIS + +| Method | Split | Lr schd | Mem (GB) | Inf time (fps) | mIoU (Val set) | Download | +| :--------------------------------------------------------: | :----: | :---------: | :------: | :------------: | :------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py) | Area_1 | cosine 100e | 13.1 | | 68.33 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area1/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_000734-39658f14.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area1/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_000734.log.json) | +| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py) | Area_2 | cosine 100e | 13.1 | | 40.68 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area2/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_144648-aea9ecb6.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area2/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_144648.log.json) | +| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py) | Area_3 | cosine 100e | 13.1 | | 69.38 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area3/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210801_154629-2ff50ee0.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area3/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210801_154629.log.json) | +| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py) | Area_4 | cosine 100e | 13.1 | | 50.07 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area4/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_073551-dffab9cd.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area4/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_073551.log.json) | +| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py) | Area_5 | cosine 100e | 13.1 | | 50.59 | [model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area5/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210730_235824-f277e0c5.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area5/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210730_235824.log.json) | +| [DGCNN](./dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py) | Area_6 | cosine 100e | 13.1 | | 77.94 | 
[model](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area6/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_154317-e3511b32.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area6/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_154317.log.json) | +| DGCNN | 6-fold | | | | 59.43 | | + +**Notes:** + +- We use XYZ+Color+Normalized_XYZ as input in all the experiments on S3DIS datasets. +- `Area_5` Split means training the model on Area_1, 2, 3, 4, 6 and testing on Area_5. +- `6-fold` Split means the overall result of 6 different splits (Area_1, Area_2, Area_3, Area_4, Area_5 and Area_6 Splits). +- Users need to modify `train_area` and `test_area` in the S3DIS dataset's [config](./configs/_base_/datasets/s3dis_seg-3d-13class.py) to set the training and testing areas, respectively. + +## Indeterminism + +Since DGCNN testing adopts sliding patch inference which involves random point sampling, and the test script uses fixed random seeds while the random seeds of validation in training are not fixed, the test results may be slightly different from the results reported above. + +## Citation + +```latex +@article{dgcnn, + title={Dynamic Graph CNN for Learning on Point Clouds}, + author={Wang, Yue and Sun, Yongbin and Liu, Ziwei and Sarma, Sanjay E. and Bronstein, Michael M. and Solomon, Justin M.}, + journal={ACM Transactions on Graphics (TOG)}, + year={2019} +} +``` diff --git a/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py new file mode 100755 index 0000000..e9b40af --- /dev/null +++ b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py @@ -0,0 +1,17 @@ +_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py' + +# data settings +train_area = [2, 3, 4, 5, 6] +test_area = 1 +train_dataloader = dict( + batch_size=32, + dataset=dict( + ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area], + scene_idxs=[ + f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area + ])) +test_dataloader = dict( + dataset=dict( + ann_files=f's3dis_infos_Area_{test_area}.pkl', + scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy')) +val_dataloader = test_dataloader diff --git a/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py new file mode 100755 index 0000000..c7a1366 --- /dev/null +++ b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py @@ -0,0 +1,17 @@ +_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py' + +# data settings +train_area = [1, 3, 4, 5, 6] +test_area = 2 +train_dataloader = dict( + batch_size=32, + dataset=dict( + ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area], + scene_idxs=[ + f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area + ])) +test_dataloader = dict( + dataset=dict( + ann_files=f's3dis_infos_Area_{test_area}.pkl', + scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy')) +val_dataloader = test_dataloader diff --git a/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py new file mode 100755 index 0000000..56cbd98 --- /dev/null +++ b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py @@ -0,0 +1,17 @@ +_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py' + +# data settings +train_area = [1, 2, 4, 5, 6] +test_area 
= 3 +train_dataloader = dict( + batch_size=32, + dataset=dict( + ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area], + scene_idxs=[ + f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area + ])) +test_dataloader = dict( + dataset=dict( + ann_files=f's3dis_infos_Area_{test_area}.pkl', + scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy')) +val_dataloader = test_dataloader diff --git a/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py new file mode 100755 index 0000000..842f1e1 --- /dev/null +++ b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py @@ -0,0 +1,17 @@ +_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py' + +# data settings +train_area = [1, 2, 3, 5, 6] +test_area = 4 +train_dataloader = dict( + batch_size=32, + dataset=dict( + ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area], + scene_idxs=[ + f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area + ])) +test_dataloader = dict( + dataset=dict( + ann_files=f's3dis_infos_Area_{test_area}.pkl', + scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy')) +val_dataloader = test_dataloader diff --git a/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py new file mode 100755 index 0000000..fba7a97 --- /dev/null +++ b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py @@ -0,0 +1,21 @@ +_base_ = [ + '../_base_/datasets/s3dis-seg.py', '../_base_/models/dgcnn.py', + '../_base_/schedules/seg-cosine-100e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz] + decode_head=dict( + num_classes=13, ignore_index=13, + loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight + test_cfg=dict( + num_points=4096, + block_size=1.0, + sample_rate=0.5, + use_normalized_coord=True, + batch_size=24)) + +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2)) +train_dataloader = dict(batch_size=32) +train_cfg = dict(val_interval=2) diff --git a/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py new file mode 100755 index 0000000..c4f50cd --- /dev/null +++ b/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py @@ -0,0 +1,17 @@ +_base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py' + +# data settings +train_area = [1, 2, 3, 4, 5] +test_area = 6 +train_dataloader = dict( + batch_size=32, + dataset=dict( + ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area], + scene_idxs=[ + f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area + ])) +test_dataloader = dict( + dataset=dict( + ann_files=f's3dis_infos_Area_{test_area}.pkl', + scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy')) +val_dataloader = test_dataloader diff --git a/configs/dgcnn/metafile.yml b/configs/dgcnn/metafile.yml new file mode 100755 index 0000000..c383576 --- /dev/null +++ b/configs/dgcnn/metafile.yml @@ -0,0 +1,89 @@ +Collections: + - Name: DGCNN + Metadata: + Training Techniques: + - SGD + Training Resources: 4x Titan XP GPUs + Architecture: + - DGCNN + Paper: https://arxiv.org/abs/1801.07829 + README: configs/dgcnn/README.md + +Models: + - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py + In Collection: DGCNN + Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py + Metadata: + Training Data: S3DIS 
+ Training Memory (GB): 13.3 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS Area1 + Metrics: + mIoU: 68.33 + Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area1/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_000734-39658f14.pth + + - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py + In Collection: DGCNN + Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 13.3 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS Area2 + Metrics: + mIoU: 40.68 + Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area2/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210731_144648-aea9ecb6.pth + + - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py + In Collection: DGCNN + Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 13.3 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS Area3 + Metrics: + mIoU: 69.38 + Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area3/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210801_154629-2ff50ee0.pth + + - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py + In Collection: DGCNN + Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 13.3 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS Area4 + Metrics: + mIoU: 50.07 + Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area4/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_073551-dffab9cd.pth + + - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py + In Collection: DGCNN + Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 13.3 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS Area5 + Metrics: + mIoU: 50.59 + Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area5/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210730_235824-f277e0c5.pth + + - Name: dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py + In Collection: DGCNN + Config: configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 13.3 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS Area6 + Metrics: + mIoU: 77.94 + Weights: https://download.openmmlab.com/mmdetection3d/v0.17.0_models/dgcnn/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class/area6/dgcnn_32x4_cosine_100e_s3dis_seg-3d-13class_20210802_154317-e3511b32.pth diff --git a/configs/dynamic_voxelization/README.md b/configs/dynamic_voxelization/README.md new file mode 100755 index 0000000..835551c --- /dev/null +++ b/configs/dynamic_voxelization/README.md @@ -0,0 +1,40 @@ +# Dynamic Voxelization + +> [End-to-End Multi-View Fusion for 3D Object Detection in LiDAR Point Clouds](https://arxiv.org/abs/1910.06528) + + + +## Abstract + +Recent work on 3D object detection advocates point cloud voxelization in birds-eye view, where objects preserve their physical dimensions and are naturally separable. 
When represented in this view, however, point clouds are sparse and have highly variable point density, which may cause detectors difficulties in detecting distant or small objects (pedestrians, traffic signs, etc.). On the other hand, perspective view provides dense observations, which could allow more favorable feature encoding for such cases. In this paper, we aim to synergize the birds-eye view and the perspective view and propose a novel end-to-end multi-view fusion (MVF) algorithm, which can effectively learn to utilize the complementary information from both. Specifically, we introduce dynamic voxelization, which has four merits compared to existing voxelization methods, i) removing the need of pre-allocating a tensor with fixed size; ii) overcoming the information loss due to stochastic point/voxel dropout; iii) yielding deterministic voxel embeddings and more stable detection outcomes; iv) establishing the bi-directional relationship between points and voxels, which potentially lays a natural foundation for cross-view feature fusion. By employing dynamic voxelization, the proposed feature fusion architecture enables each point to learn to fuse context information from different views. MVF operates on points and can be naturally extended to other approaches using LiDAR point clouds. We evaluate our MVF model extensively on the newly released Waymo Open Dataset and on the KITTI dataset and demonstrate that it significantly improves detection accuracy over the comparable single-view PointPillars baseline. + +
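+The difference between hard and dynamic voxelization summarized above can be sketched as follows. This is a simplified, self-contained illustration, not the `DynamicVoxelNet` code used by the configs in this folder; the voxel size and point cloud range are the KITTI-style values that also appear in those configs.
+
+```python
+import torch
+
+
+def dynamic_voxelize(points, voxel_size=(0.16, 0.16, 4.0),
+                     pc_range=(0, -39.68, -3, 69.12, 39.68, 1)):
+    """Assign every point to a voxel and average features per voxel.
+
+    Unlike hard voxelization there is no max_num_points / max_voxels cap,
+    so no point is dropped and no fixed-size buffer is pre-allocated.
+    """
+    voxel_size = torch.tensor(voxel_size)
+    lower, upper = torch.tensor(pc_range[:3]), torch.tensor(pc_range[3:])
+
+    # keep only points inside the range, then compute integer voxel coords
+    mask = ((points[:, :3] >= lower) & (points[:, :3] < upper)).all(dim=1)
+    pts = points[mask]
+    coords = ((pts[:, :3] - lower) / voxel_size).long()             # (M, 3)
+
+    # group points sharing a voxel and take the mean feature per voxel
+    unique_coords, inverse = torch.unique(coords, dim=0, return_inverse=True)
+    num_voxels = unique_coords.shape[0]
+    feats = torch.zeros(num_voxels, pts.shape[1]).index_add_(0, inverse, pts)
+    counts = torch.zeros(num_voxels).index_add_(0, inverse,
+                                                torch.ones(len(pts)))
+    return unique_coords, feats / counts.unsqueeze(1)
+
+
+# toy usage with KITTI-style (x, y, z, intensity) points
+points = torch.rand(5000, 4) * torch.tensor([69.12, 79.36, 4.0, 1.0]) \
+    + torch.tensor([0.0, -39.68, -3.0, 0.0])
+coords, voxel_feats = dynamic_voxelize(points)
+print(coords.shape, voxel_feats.shape)
+```
+
+Because every in-range point keeps its voxel assignment, the resulting voxel embeddings are deterministic, which is one of the merits listed in the abstract.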
    + +## Introduction + +We implement Dynamic Voxelization proposed in and provide its results and models on KITTI dataset. + +## Results and models + +### KITTI + +| Model | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :----------------------------------------------------------------: | :-----: | :--------: | :------: | :------------: | :---: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECOND](./second_dv_secfpn_8xb6-80e_kitti-3d-car.py) | Car | cyclic 80e | 5.5 | | 78.83 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/dynamic_voxelization/dv_second_secfpn_6x8_80e_kitti-3d-car/dv_second_secfpn_6x8_80e_kitti-3d-car_20200620_235228-ac2c1c0c.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/dynamic_voxelization/dv_second_secfpn_6x8_80e_kitti-3d-car/dv_second_secfpn_6x8_80e_kitti-3d-car_20200620_235228.log.json) | +| [SECOND](./second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py) | 3 Class | cosine 80e | 5.5 | | 65.27 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/dynamic_voxelization/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class_20210831_054106-e742d163.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/dynamic_voxelization/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class_20210831_054106.log.json) | +| [PointPillars](./pointpillars_dv_secfpn_8xb6-160e_kitti-3d-car.py) | Car | cyclic 80e | 4.7 | | 77.76 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/dynamic_voxelization/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20200620_230844-ee7b75c9.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/dynamic_voxelization/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20200620_230844.log.json) | + +## Citation + +```latex +@article{zhou2019endtoend, + title={End-to-End Multi-View Fusion for 3D Object Detection in LiDAR Point Clouds}, + author={Yin Zhou and Pei Sun and Yu Zhang and Dragomir Anguelov and Jiyang Gao and Tom Ouyang and James Guo and Jiquan Ngiam and Vijay Vasudevan}, + year={2019}, + eprint={1910.06528}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/dynamic_voxelization/metafile.yml b/configs/dynamic_voxelization/metafile.yml new file mode 100755 index 0000000..171a8c3 --- /dev/null +++ b/configs/dynamic_voxelization/metafile.yml @@ -0,0 +1,53 @@ +Collections: + - Name: Dynamic Voxelization + Metadata: + Training Data: KITTI + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Dynamic Voxelization + Paper: + URL: https://arxiv.org/abs/1910.06528 + Title: 'End-to-End Multi-View Fusion for 3D Object Detection in LiDAR Point Clouds' + README: configs/dynamic_voxelization/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/dynamic_voxelnet.py#L11 + Version: v0.5.0 + +Models: + - Name: dv_second_secfpn_6x8_80e_kitti-3d-car + In Collection: Dynamic Voxelization + 
Config: configs/dynamic_voxelization/second_dv_secfpn_8xb6-80e_kitti-3d-car.py + Metadata: + Training Memory (GB): 5.5 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 78.83 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/dynamic_voxelization/dv_second_secfpn_6x8_80e_kitti-3d-car/dv_second_secfpn_6x8_80e_kitti-3d-car_20200620_235228-ac2c1c0c.pth + + - Name: dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class + In Collection: Dynamic Voxelization + Config: configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py + Metadata: + Training Memory (GB): 5.5 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 65.27 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/dynamic_voxelization/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class_20210831_054106-e742d163.pth + + - Name: dv_pointpillars_secfpn_6x8_160e_kitti-3d-car + In Collection: Dynamic Voxelization + Config: configs/dynamic_voxelization/pointpillars_dv_secfpn_8xb6-160e_kitti-3d-car.py + Metadata: + Training Memory (GB): 4.7 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 77.76 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/dynamic_voxelization/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20200620_230844-ee7b75c9.pth diff --git a/configs/dynamic_voxelization/pointpillars_dv_secfpn_8xb6-160e_kitti-3d-car.py b/configs/dynamic_voxelization/pointpillars_dv_secfpn_8xb6-160e_kitti-3d-car.py new file mode 100755 index 0000000..3999aa5 --- /dev/null +++ b/configs/dynamic_voxelization/pointpillars_dv_secfpn_8xb6-160e_kitti-3d-car.py @@ -0,0 +1,21 @@ +_base_ = '../pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py' + +voxel_size = [0.16, 0.16, 4] +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] + +model = dict( + type='DynamicVoxelNet', + data_preprocessor=dict( + voxel_type='dynamic', + voxel_layer=dict( + max_num_points=-1, + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(-1, -1))), + voxel_encoder=dict( + type='DynamicPillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range)) diff --git a/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py b/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py new file mode 100755 index 0000000..3c0b92a --- /dev/null +++ b/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/second_hv_secfpn_kitti.py', + '../_base_/datasets/kitti-3d-3class.py', '../_base_/schedules/cosine.py', + '../_base_/default_runtime.py' +] + +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +voxel_size = [0.05, 0.05, 0.1] + +model = dict( + type='DynamicVoxelNet', + data_preprocessor=dict( + voxel_type='dynamic', + voxel_layer=dict( + _delete_=True, + max_num_points=-1, + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(-1, -1))), + voxel_encoder=dict( + _delete_=True, + type='DynamicSimpleVFE', + voxel_size=voxel_size, + point_cloud_range=point_cloud_range)) diff --git a/configs/dynamic_voxelization/second_dv_secfpn_8xb6-80e_kitti-3d-car.py b/configs/dynamic_voxelization/second_dv_secfpn_8xb6-80e_kitti-3d-car.py new file mode 100755 index 0000000..8849403 --- /dev/null +++ 
b/configs/dynamic_voxelization/second_dv_secfpn_8xb6-80e_kitti-3d-car.py @@ -0,0 +1,20 @@ +_base_ = '../second/hv_second_secfpn_6x8_80e_kitti-3d-car.py' + +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +voxel_size = [0.05, 0.05, 0.1] + +model = dict( + type='DynamicVoxelNet', + data_preprocessor=dict( + voxel_type='dynamic', + voxel_layer=dict( + _delete_=True, + max_num_points=-1, + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(-1, -1))), + voxel_encoder=dict( + _delete_=True, + type='DynamicSimpleVFE', + voxel_size=voxel_size, + point_cloud_range=point_cloud_range)) diff --git a/configs/fcaf3d/README.md b/configs/fcaf3d/README.md new file mode 100755 index 0000000..a561249 --- /dev/null +++ b/configs/fcaf3d/README.md @@ -0,0 +1,53 @@ +# FCAF3D: Fully Convolutional Anchor-Free 3D Object Detection + +> [FCAF3D: Fully Convolutional Anchor-Free 3D Object Detection](https://arxiv.org/abs/2112.00322) + + + +## Abstract + +Recently, promising applications in robotics and augmented reality have attracted considerable attention to 3D object detection from point clouds. In this paper, we present FCAF3D --- a first-in-class fully convolutional anchor-free indoor 3D object detection method. It is a simple yet effective method that uses a voxel representation of a point cloud and processes voxels with sparse convolutions. FCAF3D can handle large-scale scenes with minimal runtime through a single fully convolutional feed-forward pass. Existing 3D object detection methods make prior assumptions on the geometry of objects, and we argue that it limits their generalization ability. To eliminate prior assumptions, we propose a novel parametrization of oriented bounding boxes that allows obtaining better results in a purely data-driven way. The proposed method achieves state-of-the-art 3D object detection results in terms of mAP@0.5 on ScanNet V2 (+4.5), SUN RGB-D (+3.5), and S3DIS (+20.5) datasets. + +
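+The anchor-free formulation mentioned in the abstract boils down to letting every 3D location regress its distances to the six faces of a ground-truth box instead of offsets relative to predefined anchors. The sketch below illustrates that target construction for axis-aligned boxes only; it is a conceptual simplification, while the actual head additionally handles heading angles and operates on sparse voxel features.
+
+```python
+import torch
+
+
+def face_distance_targets(locations, boxes):
+    """Anchor-free 3D regression targets.
+
+    locations: (N, 3) voxel / feature-point centers.
+    boxes:     (M, 6) axis-aligned boxes as (cx, cy, cz, dx, dy, dz).
+    Returns (N, M, 6) distances to the min-x, min-y, min-z, max-x, max-y,
+    max-z faces; a location lies inside box m iff all six are positive.
+    """
+    centers, dims = boxes[:, :3], boxes[:, 3:]
+    lo = centers - dims / 2   # (M, 3) min corners
+    hi = centers + dims / 2   # (M, 3) max corners
+
+    # broadcast every location against every box
+    p = locations[:, None, :]                                  # (N, 1, 3)
+    return torch.cat([p - lo[None], hi[None] - p], dim=-1)     # (N, M, 6)
+
+
+# toy usage: two boxes, three candidate locations
+boxes = torch.tensor([[0., 0., 0., 2., 2., 2.],
+                      [5., 5., 0., 1., 1., 1.]])
+locs = torch.tensor([[0.2, -0.3, 0.1], [5.1, 5.0, 0.0], [9., 9., 9.]])
+targets = face_distance_targets(locs, boxes)
+print((targets > 0).all(dim=-1))  # which location falls inside which box
+```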
    + +## Introduction + +We implement FCAF3D and provide the result and checkpoints on the ScanNet and SUN RGB-D dataset. + +## Results and models + +### ScanNet + +| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :------------------------------------------------: | :------: | :------------: | :----------: | :----------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [MinkResNet34](./fcaf3d_8x2_scannet-3d-18class.py) | 10.5 | 15.7 | 69.7(70.7\*) | 55.2(56.0\*) | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_scannet-3d-18class/fcaf3d_8x2_scannet-3d-18class_20220805_084956.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_scannet-3d-18class/fcaf3d_8x2_scannet-3d-18class_20220805_084956.log.json) | + +### SUN RGB-D + +| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :------------------------------------------------: | :------: | :------------: | :----------: | :----------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [MinkResNet34](./fcaf3d_8x2_sunrgbd-3d-10class.py) | 6.3 | 17.9 | 63.8(63.8\*) | 47.3(48.2\*) | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_sunrgbd-3d-10class/fcaf3d_8x2_sunrgbd-3d-10class_20220805_165017.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_sunrgbd-3d-10class/fcaf3d_8x2_sunrgbd-3d-10class_20220805_165017.log.json) | + +### S3DIS + +| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :----------------------------------------------: | :------: | :------------: | :----------: | :----------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [MinkResNet34](./fcaf3d_2xb8_s3dis-3d-5class.py) | 23.5 | 10.9 | 67.4(64.9\*) | 45.7(43.8\*) | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_s3dis-3d-5class/fcaf3d_8x2_s3dis-3d-5class_20220805_121957.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_s3dis-3d-5class/fcaf3d_8x2_s3dis-3d-5class_20220805_121957.log.json) | + +**Note** + +- We report the results across 5 train runs followed by 5 test runs. * means the results reported in the paper. +- Inference time is given for a single NVidia RTX 4090 GPU. All models are trained on 2 GPUs. 
+ +## Citation + +```latex +@inproceedings{rukhovich2022fcaf3d, + title={FCAF3D: Fully Convolutional Anchor-Free 3D Object Detection}, + author={Danila Rukhovich, Anna Vorontsova, Anton Konushin}, + booktitle={European conference on computer vision}, + year={2022} +} +``` diff --git a/configs/fcaf3d/fcaf3d_2xb8_s3dis-3d-5class.py b/configs/fcaf3d/fcaf3d_2xb8_s3dis-3d-5class.py new file mode 100755 index 0000000..9edd32f --- /dev/null +++ b/configs/fcaf3d/fcaf3d_2xb8_s3dis-3d-5class.py @@ -0,0 +1,27 @@ +_base_ = [ + '../_base_/models/fcaf3d.py', '../_base_/default_runtime.py', + '../_base_/datasets/s3dis-3d.py' +] + +model = dict(bbox_head=dict(num_classes=5)) + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning rate +param_scheduler = dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) + +custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)] + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/configs/fcaf3d/fcaf3d_2xb8_scannet-3d-18class.py b/configs/fcaf3d/fcaf3d_2xb8_scannet-3d-18class.py new file mode 100755 index 0000000..49a0297 --- /dev/null +++ b/configs/fcaf3d/fcaf3d_2xb8_scannet-3d-18class.py @@ -0,0 +1,94 @@ +_base_ = [ + '../_base_/models/fcaf3d.py', '../_base_/default_runtime.py', + '../_base_/datasets/scannet-3d.py' +] +n_points = 100000 +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict(type='LoadAnnotations3D'), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSample', num_points=n_points), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[.9, 1.1], + translation_std=[.1, .1, .1], + shift_height=False), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=n_points), + dict(type='NormalizePointsColor', color_mean=None), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + dataset=dict( + type='RepeatDataset', + times=10, + dataset=dict(pipeline=train_pipeline, filter_empty_gt=True))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning rate +param_scheduler = dict( + type='MultiStepLR', + begin=0, 
+ end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) + +custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)] + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/configs/fcaf3d/fcaf3d_2xb8_sunrgbd-3d-10class.py b/configs/fcaf3d/fcaf3d_2xb8_sunrgbd-3d-10class.py new file mode 100755 index 0000000..3ebe705 --- /dev/null +++ b/configs/fcaf3d/fcaf3d_2xb8_sunrgbd-3d-10class.py @@ -0,0 +1,92 @@ +_base_ = [ + '../_base_/models/fcaf3d.py', '../_base_/default_runtime.py', + '../_base_/datasets/sunrgbd-3d.py' +] +n_points = 100000 +backend_args = None + +model = dict( + bbox_head=dict( + num_classes=10, + num_reg_outs=8, + bbox_loss=dict(type='RotatedIoU3DLoss'))) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict(type='LoadAnnotations3D'), + dict(type='PointSample', num_points=n_points), + dict(type='RandomFlip3D', sync_2d=False, flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + translation_std=[.1, .1, .1], + shift_height=False), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=n_points) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=8, + dataset=dict( + type='RepeatDataset', + times=3, + dataset=dict(pipeline=train_pipeline, filter_empty_gt=True))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning rate +param_scheduler = dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) + +custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)] + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/configs/fcaf3d/metafile.yml b/configs/fcaf3d/metafile.yml new file mode 100755 index 0000000..c3294bd --- /dev/null +++ b/configs/fcaf3d/metafile.yml @@ -0,0 +1,58 @@ +Collections: + - Name: FCAF3D + Metadata: + Training Techniques: + - AdamW + Training Resources: 2x V100 GPUs + Architecture: + - MinkResNet + Paper: + URL: https://arxiv.org/abs/2112.00322 + Title: 'FCAF3D: Fully Convolutional Anchor-Free 3D Object Detection' + README: configs/fcaf3d/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/mink_single_stage.py#L15 + Version: v1.0.0rc4 + +Models: + - Name: fcaf3d_2xb8_scannet-3d-18class + In Collection: FCAF3D + Config: 
configs/fcaf3d/fcaf3d_2xb8_scannet-3d-18class.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 10.7 + Results: + - Task: 3D Object Detection + Dataset: ScanNet + Metrics: + AP@0.25: 69.7 + AP@0.5: 55.2 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_scannet-3d-18class/fcaf3d_8x2_scannet-3d-18class_20220805_084956.pth + + - Name: fcaf3d_2xb8_sunrgbd-3d-10class + In Collection: FCAF3D + Config: configs/fcaf3d/fcaf3d_2xb8_sunrgbd-3d-10class.py + Metadata: + Training Data: SUNRGBD + Training Memory (GB): 6.5 + Results: + - Task: 3D Object Detection + Dataset: SUNRGBD + Metrics: + AP@0.25: 63.76 + AP@0.5: 47.31 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_sunrgbd-3d-10class/fcaf3d_8x2_sunrgbd-3d-10class_20220805_165017.pth + + - Name: fcaf3d_2xb8_s3dis-3d-5class + In Collection: FCAF3D + Config: configs/fcaf3d/fcaf3d_2xb8_s3dis-3d-5class.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 23.5 + Results: + - Task: 3D Object Detection + Dataset: S3DIS + Metrics: + AP@0.25: 67.36 + AP@0.5: 45.74 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/fcaf3d/fcaf3d_8x2_s3dis-3d-5class/fcaf3d_8x2_s3dis-3d-5class_20220805_121957.pth diff --git a/configs/fcos3d/README.md b/configs/fcos3d/README.md new file mode 100755 index 0000000..3f27b5e --- /dev/null +++ b/configs/fcos3d/README.md @@ -0,0 +1,75 @@ +# FCOS3D: Fully Convolutional One-Stage Monocular 3D Object Detection + +> [FCOS3D: Fully Convolutional One-Stage Monocular 3D Object Detection](https://arxiv.org/abs/2104.10956) + + + +## Abstract + +Monocular 3D object detection is an important task for autonomous driving considering its advantage of low cost. It is much more challenging than conventional 2D cases due to its inherent ill-posed property, which is mainly reflected in the lack of depth information. Recent progress on 2D detection offers opportunities to better solving this problem. However, it is non-trivial to make a general adapted 2D detector work in this 3D task. In this paper, we study this problem with a practice built on a fully convolutional single-stage detector and propose a general framework FCOS3D. Specifically, we first transform the commonly defined 7-DoF 3D targets to the image domain and decouple them as 2D and 3D attributes. Then the objects are distributed to different feature levels with consideration of their 2D scales and assigned only according to the projected 3D-center for the training procedure. Furthermore, the center-ness is redefined with a 2D Gaussian distribution based on the 3D-center to fit the 3D target formulation. All of these make this framework simple yet effective, getting rid of any 2D detection or 2D-3D correspondence priors. Our solution achieves 1st place out of all the vision-only methods in the nuScenes 3D detection challenge of NeurIPS 2020. + +
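+The two key steps described in the abstract, projecting the 3D center into the image and deriving a Gaussian-style centerness from it, can be sketched as follows. This is a conceptual example only; the camera intrinsics, the bandwidth `alpha` and the stride normalization are illustrative assumptions, not the exact values used by the configs.
+
+```python
+import numpy as np
+
+
+def project_center(center_3d, cam_intrinsic):
+    """Project a 3D object center (camera coords, z forward) to pixels."""
+    x, y, z = center_3d
+    fx, fy = cam_intrinsic[0, 0], cam_intrinsic[1, 1]
+    cx, cy = cam_intrinsic[0, 2], cam_intrinsic[1, 2]
+    return np.array([fx * x / z + cx, fy * y / z + cy])  # pixel (u, v)
+
+
+def centerness_2d(pixel, projected_center, alpha=2.5, stride=8):
+    """Gaussian-style centerness of a feature-map pixel w.r.t. the
+    projected 3D center: close to 1 on the center, decaying with distance.
+    Both `alpha` and the stride normalization are illustrative choices."""
+    delta = (np.asarray(pixel, dtype=float) - projected_center) / stride
+    return float(np.exp(-alpha * np.sum(delta ** 2)))
+
+
+# toy usage with a made-up 1600x900 pinhole camera
+K = np.array([[1260., 0., 800.],
+              [0., 1260., 450.],
+              [0., 0., 1.]])
+uv = project_center(np.array([2.0, 1.0, 20.0]), K)  # an object 20 m ahead
+print(uv, centerness_2d(uv + np.array([4.0, 0.0]), uv))
+```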
    + +## Introduction + +FCOS3D is a general anchor-free, one-stage monocular 3D object detector adapted from the original 2D version FCOS. +It serves as a baseline built on top of mmdetection and mmdetection3d for 3D detection based on monocular vision. + +Currently we first support the benchmark on the large-scale nuScenes dataset, which achieved 1st place out of all the vision-only methods in the [nuScenes 3D detecton challenge](https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera) of NeurIPS 2020. + +![demo image](../../resources/browse_dataset_mono.png) + +## Usage + +### Data Preparation + +After supporting FCOS3D and monocular 3D object detection in v0.13.0, the coco-style 2D json info files will include related annotations by default +(see [here](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/dataset_converters/nuscenes_converter.py#L333) if you would like to change the parameter). +So you can just follow the data preparation steps given in the documentation, then all the needed infos are ready together. + +### Training and Inference + +The way to training and inference a monocular 3D object detector is the same as others in mmdetection and mmdetection3d. You can basically follow the [documentation](https://mmdetection3d.readthedocs.io/en/latest/1_exist_data_model.html#train-predefined-models-on-standard-datasets) and change the `config`, `work_dirs`, etc. accordingly. + +### Test time augmentation + +We implement test time augmentation for the dense outputs of detection heads, which is more effective than merging predicted boxes at last. +You can turn on it by setting `flip=True` in the `test_pipeline`. + +### Training with finetune + +Due to the scale and measurements of depth is different from those of other regression targets, we first train the model with depth weight equal to 0.2 for a more stable training procedure. For a stronger detector with better performance, please finetune the model with depth weight changed to 1.0 as shown in the [config](./fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py). Note that the path of `load_from` needs to be changed to yours accordingly. + +### Visualizing prediction results + +We also provide visualization functions to show the monocular 3D detection results. Simply follow the [documentation](https://mmdetection3d.readthedocs.io/en/latest/1_exist_data_model.html#test-existing-models-on-standard-datasets) and use the `single-gpu testing` command. You only need to add the `--show` flag and specify `--show-dir` to store the visualization results. 
+ +## Results and models + +### NuScenes + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | NDS | Download | +| :-------------------------------------------------------------------------------------: | :-----: | :------: | :------------: | :--: | :--: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [ResNet101 w/ DCN](./fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py) | 1x | 8.69 | | 29.8 | 37.7 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_20210715_235813-4bed5239.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_20210715_235813.log.json) | +| [above w/ finetune](./fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py) | 1x | 8.69 | | 32.1 | 39.5 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune_20210717_095645-8d806dc2.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune_20210717_095645.log.json) | +| above w/ tta | 1x | 8.69 | | 33.1 | 40.3 | | + +## Citation + +```latex +@inproceedings{wang2021fcos3d, + title={{FCOS3D: Fully} Convolutional One-Stage Monocular 3D Object Detection}, + author={Wang, Tai and Zhu, Xinge and Pang, Jiangmiao and Lin, Dahua}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops}, + year={2021} +} +# For the original 2D version +@inproceedings{tian2019fcos, + title = {{FCOS: Fully} Convolutional One-Stage Object Detection}, + author = {Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + year = {2019} +} +``` diff --git a/configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py b/configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py new file mode 100755 index 0000000..f7ba665 --- /dev/null +++ b/configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py @@ -0,0 +1,70 @@ +_base_ = [ + '../_base_/datasets/nus-mono3d.py', '../_base_/models/fcos3d.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True))) + +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=True, + with_bbox_3d=True, + 
with_label_3d=True, + with_bbox_depth=True), + dict(type='mmdet.Resize', scale=(1600, 900), keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'attr_labels', + 'gt_bboxes_3d', 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='mmdet.Resize', scale_factor=1.0), + dict(type='Pack3DDetInputs', keys=['img']) +] + +train_dataloader = dict( + batch_size=2, num_workers=2, dataset=dict(pipeline=train_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.002), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] diff --git a/configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py b/configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py new file mode 100755 index 0000000..d8ea7a0 --- /dev/null +++ b/configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py @@ -0,0 +1,8 @@ +_base_ = './fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py' +# model settings +model = dict( + train_cfg=dict( + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05])) +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.001)) +load_from = 'work_dirs/fcos3d_nus/latest.pth' diff --git a/configs/fcos3d/metafile.yml b/configs/fcos3d/metafile.yml new file mode 100755 index 0000000..a5fc343 --- /dev/null +++ b/configs/fcos3d/metafile.yml @@ -0,0 +1,43 @@ +Collections: + - Name: FCOS3D + Metadata: + Training Data: NuScenes + Training Techniques: + - SGD + Training Resources: 8x GeForce RTX 2080 Ti + Architecture: + - FCOSMono3DHead + Paper: + URL: https://arxiv.org/abs/2104.10956 + Title: 'FCOS3D: Fully Convolutional One-Stage Monocular 3D Object Detection' + README: configs/fcos3d/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/fcos_mono3d.py#L7 + Version: v0.13.0 + +Models: + - Name: fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d + In Collection: FCOS3D + Config: configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py + Metadata: + Training Memory (GB): 8.7 + Results: + - Task: 3D Object Detection + Dataset: NuScenes + Metrics: + mAP: 29.9 + NDS: 37.3 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_20210715_235813-4bed5239.pth + + - Name: fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune + In Collection: FCOS3D + Config: configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py + Metadata: + Training Memory (GB): 8.7 + Results: + - Task: 3D Object Detection + Dataset: NuScenes + Metrics: + mAP: 32.1 + NDS: 39.3 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune_20210717_095645-8d806dc2.pth diff --git a/configs/free_anchor/README.md 
b/configs/free_anchor/README.md
new file mode 100755
index 0000000..37a3e1f
--- /dev/null
+++ b/configs/free_anchor/README.md
@@ -0,0 +1,105 @@
+# FreeAnchor for 3D Object Detection
+
+> [FreeAnchor: Learning to Match Anchors for Visual Object Detection](https://arxiv.org/abs/1909.02466)
+
+
+
+## Abstract
+
+Modern CNN-based object detectors assign anchors for ground-truth objects under the restriction of object-anchor Intersection-over-Union (IoU). In this study, we propose a learning-to-match approach to break IoU restriction, allowing objects to match anchors in a flexible manner. Our approach, referred to as FreeAnchor, updates hand-crafted anchor assignment to "free" anchor matching by formulating detector training as a maximum likelihood estimation (MLE) procedure. FreeAnchor targets at learning features which best explain a class of objects in terms of both classification and localization. FreeAnchor is implemented by optimizing detection customized likelihood and can be fused with CNN-based detectors in a plug-and-play manner. Experiments on COCO demonstrate that FreeAnchor consistently outperforms the counterparts with significant margins.
+
    + +
+
+## Introduction
+
+We implement FreeAnchor in 3D detection systems and provide the first results with PointPillars on the nuScenes dataset.
+With the implemented `FreeAnchor3DHead`, a PointPillars detector with a large backbone (e.g., RegNet-3.2GF) achieves top performance
+on the nuScenes benchmark.
+
+## Usage
+
+### Modify config
+
+As in the [baseline config](pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py), we only need to replace the head of an existing one-stage detector with the FreeAnchor head.
+Since the config inherits from a base config with a common detector head, `_delete_=True` is necessary to avoid conflicts.
+The hyperparameters are specifically tuned according to the original paper.
+
+```python
+_base_ = [
+    '../_base_/models/pointpillars_hv_fpn_nus.py',
+    '../_base_/datasets/nus-3d.py', '../_base_/schedules/schedule-2x.py',
+    '../_base_/default_runtime.py'
+]
+
+model = dict(
+    pts_bbox_head=dict(
+        _delete_=True,
+        type='FreeAnchor3DHead',
+        num_classes=10,
+        in_channels=256,
+        feat_channels=256,
+        use_direction_classifier=True,
+        pre_anchor_topk=25,
+        bbox_thr=0.5,
+        gamma=2.0,
+        alpha=0.5,
+        anchor_generator=dict(
+            type='AlignedAnchor3DRangeGenerator',
+            ranges=[[-50, -50, -1.8, 50, 50, -1.8]],
+            scales=[1, 2, 4],
+            sizes=[
+                [2.5981, 0.8660, 1.],  # 1.5 / sqrt(3)
+                [1.7321, 0.5774, 1.],  # 1 / sqrt(3)
+                [1., 1., 1.],
+                [0.4, 0.4, 1],
+            ],
+            custom_values=[0, 0],
+            rotations=[0, 1.57],
+            reshape_out=True),
+        assigner_per_size=False,
+        diff_rad_by_sin=True,
+        dir_offset=-0.7854,  # -pi / 4
+        bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=9),
+        loss_cls=dict(
+            type='mmdet.FocalLoss',
+            use_sigmoid=True,
+            gamma=2.0,
+            alpha=0.25,
+            loss_weight=1.0),
+        loss_bbox=dict(
+            type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.8),
+        loss_dir=dict(
+            type='mmdet.CrossEntropyLoss', use_sigmoid=False,
+            loss_weight=0.2)),
+    # model training and testing settings
+    train_cfg=dict(
+        pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.25, 0.25])))
+```
+
+## Results and models
+
+### PointPillars
+
+| Backbone | FreeAnchor | Lr schd | Mem (GB) | Inf time (fps) | mAP | NDS | Download |
+| :------------------: | :--------: | :-----: | :------: | :------------: | :---: | :---: | :------------------: |
+| [FPN](../pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py) | ✗ | 2x | 17.1 | | 40.0 | 53.3 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405-2fa62f3d.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405.log.json) |
+| [FPN](./pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py) | ✓ | 2x | 16.3 | | 43.82 | 54.86 | 
[model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210816_163441-ae0897e7.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210816_163441.log.json) | +| [RegNetX-400MF-FPN](../regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py) | ✗ | 2x | 17.3 | | 44.8 | 56.4 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d_20200620_230239-c694dce7.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d_20200620_230239.log.json) | +| [RegNetX-400MF-FPN](./pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py) | ✓ | 2x | 17.6 | | 48.3 | 58.65 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-400mf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210827_213939-a2dd3fff.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-400mf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210827_213939.log.json) | +| [RegNetX-1.6GF-FPN](./pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py) | ✓ | 2x | 24.3 | | 52.04 | 61.49 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210828_025608-bfbd506e.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210828_025608.log.json) | +| [RegNetX-1.6GF-FPN](./pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py)\* | ✓ | 3x | 24.4 | | 52.69 | 62.45 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d_20210827_184909-14d2dbd1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d_20210827_184909.log.json) | +| [RegNetX-3.2GF-FPN](./pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py) | ✓ | 2x | 29.4 | | 52.4 | 61.94 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210827_181237-e385c35a.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210827_181237.log.json) | +| 
[RegNetX-3.2GF-FPN](./pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py)\* | ✓ | 3x | 29.2 | | 54.23 | 63.41 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d_20210828_030816-06708918.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d_20210828_030816.log.json) | + +**Note**: Models noted by `*` means it is trained using stronger augmentation with vertical flip under bird-eye-view, global translation, and larger range of global rotation. + +## Citation + +```latex +@inproceedings{zhang2019freeanchor, + title = {{FreeAnchor}: Learning to Match Anchors for Visual Object Detection}, + author = {Zhang, Xiaosong and Wan, Fang and Liu, Chang and Ji, Rongrong and Ye, Qixiang}, + booktitle = {Neural Information Processing Systems}, + year = {2019} +} +``` diff --git a/configs/free_anchor/metafile.yml b/configs/free_anchor/metafile.yml new file mode 100755 index 0000000..10d9970 --- /dev/null +++ b/configs/free_anchor/metafile.yml @@ -0,0 +1,122 @@ +Collections: + - Name: FreeAnchor + Metadata: + Training Data: nuScenes + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Hard Voxelization + - Free Anchor + Paper: + URL: https://arxiv.org/abs/1909.02466 + Title: 'FreeAnchor: Learning to Match Anchors for Visual Object Detection' + README: configs/free_anchor/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/dense_heads/free_anchor3d_head.py#L13 + Version: v0.5.0 + +Models: + - Name: pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d + In Collection: FreeAnchor + Config: pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Memory (GB): 17.1 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 40.0 + NDS: 53.3 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405-2fa62f3d.pth + + - Name: pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d + In Collection: FreeAnchor + Config: free_anchor/pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Memory (GB): 16.3 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 43.82 + NDS: 54.86 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210816_163441-ae0897e7.pth + + - Name: pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d + In Collection: FreeAnchor + Config: configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Memory (GB): 17.3 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 44.8 + NDS: 56.4 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210816_163441-ae0897e7.pth + + - Name: pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d + In Collection: FreeAnchor + Config: 
configs/free_anchor/pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Memory (GB): 17.6 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 48.3 + NDS: 58.65 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-400mf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210827_213939-a2dd3fff.pth + + - Name: hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d + In Collection: FreeAnchor + Config: configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Memory (GB): 24.3 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 52.04 + NDS: 61.49 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210828_025608-bfbd506e.pth + + - Name: pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d + In Collection: FreeAnchor + Config: configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py + Metadata: + Training Memory (GB): 24.4 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 52.69 + NDS: 62.45 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d_20210827_184909-14d2dbd1.pth + + - Name: pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d + In Collection: FreeAnchor + Config: configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Memory (GB): 29.4 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 52.4 + NDS: 61.94 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d_20210827_181237-e385c35a.pth + + - Name: pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d + In Collection: FreeAnchor + Config: configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py + Metadata: + Training Memory (GB): 29.2 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 54.23 + NDS: 63.41 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/free_anchor/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d_20210828_030816-06708918.pth diff --git a/configs/free_anchor/pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py b/configs/free_anchor/pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..e9fa321 --- /dev/null +++ b/configs/free_anchor/pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,49 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_nus.py', + '../_base_/datasets/nus-3d.py', '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py' +] + +model = dict( + pts_bbox_head=dict( + _delete_=True, + 
type='FreeAnchor3DHead', + num_classes=10, + in_channels=256, + feat_channels=256, + use_direction_classifier=True, + pre_anchor_topk=25, + bbox_thr=0.5, + gamma=2.0, + alpha=0.5, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-50, -50, -1.8, 50, 50, -1.8]], + scales=[1, 2, 4], + sizes=[ + [2.5981, 0.8660, 1.], # 1.5 / sqrt(3) + [1.7321, 0.5774, 1.], # 1 / sqrt(3) + [1., 1., 1.], + [0.4, 0.4, 1], + ], + custom_values=[0, 0], + rotations=[0, 1.57], + reshape_out=True), + assigner_per_size=False, + diff_rad_by_sin=True, + dir_offset=-0.7854, # -pi / 4 + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=9), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.8), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.25, 0.25]))) diff --git a/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py b/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..8968b39 --- /dev/null +++ b/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,18 @@ +_base_ = './pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py' + +model = dict( + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch='regnetx_1.6gf', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[168, 408, 912])) diff --git a/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py b/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py new file mode 100755 index 0000000..4d5a485 --- /dev/null +++ b/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py @@ -0,0 +1,76 @@ +_base_ = './pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py' + +model = dict( + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch='regnetx_1.6gf', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[168, 408, 912])) + +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-50, -50, -5, 50, 50, 3] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, 
with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.7854, 0.7854], + scale_ratio_range=[0.95, 1.05], + translation_std=[0.2, 0.2, 0.2]), + dict( + type='RandomFlip3D', + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +train_cfg = dict(max_epochs=36, val_interval=36) +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py b/configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..079328f --- /dev/null +++ b/configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,18 @@ +_base_ = './pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py' + +model = dict( + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch='regnetx_3.2gf', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[192, 432, 1008])) diff --git a/configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py b/configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py new file mode 100755 index 0000000..2e4e02f --- /dev/null +++ b/configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py @@ -0,0 +1,76 @@ +_base_ = './pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py' + +model = dict( + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch='regnetx_3.2gf', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[192, 432, 1008])) + +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-50, -50, -5, 50, 50, 3] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.7854, 0.7854], + scale_ratio_range=[0.9, 1.1], + translation_std=[0.2, 0.2, 
0.2]), + dict( + type='RandomFlip3D', + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +train_cfg = dict(max_epochs=36, val_interval=36) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 1000, + by_epoch=False, + begin=0, + end=1000), + dict( + type='MultiStepLR', + begin=0, + end=36, + by_epoch=True, + milestones=[28, 34], + gamma=0.1) +] diff --git a/configs/free_anchor/pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py b/configs/free_anchor/pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..e8198ed --- /dev/null +++ b/configs/free_anchor/pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,18 @@ +_base_ = './pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py' + +model = dict( + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch='regnetx_400mf', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[64, 160, 384])) diff --git a/configs/groupfree3d/README.md b/configs/groupfree3d/README.md new file mode 100755 index 0000000..3133420 --- /dev/null +++ b/configs/groupfree3d/README.md @@ -0,0 +1,45 @@ +# Group-Free 3D Object Detection via Transformers + +> [Group-Free 3D Object Detection via Transformers](https://arxiv.org/abs/2104.00678) + + + +## Abstract + +Recently, directly detecting 3D objects from 3D point clouds has received increasing attention. To extract object representation from an irregular point cloud, existing methods usually take a point grouping step to assign the points to an object candidate so that a PointNet-like network could be used to derive object features from the grouped points. However, the inaccurate point assignments caused by the hand-crafted grouping scheme decrease the performance of 3D object detection. In this paper, we present a simple yet effective method for directly detecting 3D objects from the 3D point cloud. Instead of grouping local points to each object candidate, our method computes the feature of an object from all the points in the point cloud with the help of an attention mechanism in the Transformers, where the contribution of each point is automatically learned in the network training. With an improved attention stacking scheme, our method fuses object features in different stages and generates more accurate object detection results. With few bells and whistles, the proposed method achieves state-of-the-art 3D object detection performance on two widely used benchmarks, ScanNet V2 and SUN RGB-D. + +
    + +
    + +## Introduction + +We implement Group-Free-3D and provide the result and checkpoints on ScanNet datasets. + +## Results and models + +### ScanNet + +| Method | Backbone | Lr schd | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :--------------------------------------------------------------: | :-----------: | :-----: | :------: | :------------: | :-------------: | :-------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [L6, O256](./groupfree3d_head-L6-O256_4xb8_scannet-seg.py) | PointNet++ | 3x | 6.7 | | 66.17 (65.67\*) | 48.47 (47.74\*) | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-L6-O256/groupfree3d_8x4_scannet-3d-18class-L6-O256_20210702_145347-3499eb55.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-L6-O256/groupfree3d_8x4_scannet-3d-18class-L6-O256_20210702_145347.log.json) | +| [L12, O256](./groupfree3d_head-L12-O256_4xb8_scannet-seg.py) | PointNet++ | 3x | 9.4 | | 66.57 (66.22\*) | 48.21 (48.95\*) | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-L12-O256/groupfree3d_8x4_scannet-3d-18class-L12-O256_20210702_150907-1c5551ad.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-L12-O256/groupfree3d_8x4_scannet-3d-18class-L12-O256_20210702_150907.log.json) | +| [L12, O256](./groupfree3d_w2x-head-L12-O256_4xb8_scannet-seg.py) | PointNet++w2x | 3x | 13.3 | | 68.20 (67.30\*) | 51.02 (50.44\*) | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O256/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O256_20210702_200301-944f0ac0.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O256/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O256_20210702_200301.log.json) | +| [L12, O512](./groupfree3d_w2x-head-L12-O512_4xb8_scannet-seg.py) | PointNet++w2x | 3x | 18.8 | | 68.22 (68.20\*) | 52.61 (51.31\*) | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O512/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O512_20210702_220204-187b71c7.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O512/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O512_20210702_220204.log.json) | + +**Notes:** + +- We defined L6-O256 represent num_layers=6 and num_proposals=256. And w2x means that the model backbone weight is twice the original. +- We report the best results (AP@0.50) on validation set during each training. * means the evaluation method in the paper: we train each setting 5 times and test each training trial 5 times, then the average performance of these 25 trials is reported to account for algorithm randomness. +- We use 4 GPUs for training by default as the original code. 
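+
+For reference, the `L*-O*` tags in the table map directly to head settings in the configs added in this PR: `num_decoder_layers` and `num_proposal` in `bbox_head`. A minimal, hypothetical override on top of the provided base config could look like the sketch below (the w2x variants additionally widen the PointNet++ backbone, which is omitted here):
+
+```python
+# Hypothetical override that only illustrates the naming convention.
+_base_ = './groupfree3d_head-L6-O256_4xb8_scannet-seg.py'
+
+model = dict(
+    bbox_head=dict(
+        num_decoder_layers=12,  # "L12": number of transformer decoder layers
+        num_proposal=512))  # "O512": number of object proposals
+```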
+ +## Citation + +```latex +@article{liu2021, + title={Group-Free 3D Object Detection via Transformers}, + author={Liu, Ze and Zhang, Zheng and Cao, Yue and Hu, Han and Tong, Xin}, + journal={Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + year={2021} +} +``` diff --git a/configs/groupfree3d/groupfree3d_head-L12-O256_4xb8_scannet-seg.py b/configs/groupfree3d/groupfree3d_head-L12-O256_4xb8_scannet-seg.py new file mode 100755 index 0000000..f53b2bb --- /dev/null +++ b/configs/groupfree3d/groupfree3d_head-L12-O256_4xb8_scannet-seg.py @@ -0,0 +1,227 @@ +_base_ = [ + '../_base_/datasets/scannet-3d.py', '../_base_/models/groupfree3d.py', + '../_base_/schedules/schedule-3x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + bbox_head=dict( + num_classes=18, + num_decoder_layers=12, + size_cls_agnostic=False, + bbox_coder=dict( + type='GroupFree3DBBoxCoder', + num_sizes=18, + num_dir_bins=1, + with_rot=False, + size_cls_agnostic=False, + mean_sizes=[[0.76966727, 0.8116021, 0.92573744], + [1.876858, 1.8425595, 1.1931566], + [0.61328, 0.6148609, 0.7182701], + [1.3955007, 1.5121545, 0.83443564], + [0.97949594, 1.0675149, 0.6329687], + [0.531663, 0.5955577, 1.7500148], + [0.9624706, 0.72462326, 1.1481868], + [0.83221924, 1.0490936, 1.6875663], + [0.21132214, 0.4206159, 0.5372846], + [1.4440073, 1.8970833, 0.26985747], + [1.0294262, 1.4040797, 0.87554324], + [1.3766412, 0.65521795, 1.6813129], + [0.6650819, 0.71111923, 1.298853], + [0.41999173, 0.37906948, 1.7513971], + [0.59359556, 0.5912492, 0.73919016], + [0.50867593, 0.50656086, 0.30136237], + [1.1511526, 1.0546296, 0.49706793], + [0.47535285, 0.49249494, 0.5802117]]), + sampling_objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=8.0), + objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + center_loss=dict( + type='mmdet.SmoothL1Loss', + beta=0.04, + reduction='sum', + loss_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=10.0 / 9.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + test_cfg=dict( + sample_mode='kps', + nms_thr=0.25, + score_thr=0.0, + per_class_proposal=True, + prediction_stages='last_three')) + +# dataset settings +dataset_type = 'ScanNetDataset' +data_root = './data/scannet/' +class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin') + +metainfo = dict(classes=class_names) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True, + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=50000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + 
dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0]), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=50000), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_train.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth', + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +val_evaluator = dict(type='IndoorMetric') +test_evaluator = val_evaluator + +# optimizer +lr = 0.006 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.0005), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'bbox_head.decoder_layers': dict(lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_self_posembeds': dict( + lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_cross_posembeds': dict( + lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_query_proj': dict(lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_key_proj': dict(lr_mult=0.1, decay_mult=1.0) + })) + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=80, + by_epoch=True, + milestones=[56, 68], + gamma=0.1) +] + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=80, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=10)) diff --git a/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py b/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py new file mode 100755 index 0000000..e447b7f --- /dev/null +++ b/configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py @@ -0,0 +1,227 @@ +_base_ = [ + '../_base_/datasets/scannet-3d.py', '../_base_/models/groupfree3d.py', + '../_base_/schedules/schedule-3x.py', 
'../_base_/default_runtime.py' +] + +# model settings +model = dict( + bbox_head=dict( + num_classes=18, + size_cls_agnostic=False, + bbox_coder=dict( + type='GroupFree3DBBoxCoder', + num_sizes=18, + num_dir_bins=1, + with_rot=False, + size_cls_agnostic=False, + mean_sizes=[[0.76966727, 0.8116021, 0.92573744], + [1.876858, 1.8425595, 1.1931566], + [0.61328, 0.6148609, 0.7182701], + [1.3955007, 1.5121545, 0.83443564], + [0.97949594, 1.0675149, 0.6329687], + [0.531663, 0.5955577, 1.7500148], + [0.9624706, 0.72462326, 1.1481868], + [0.83221924, 1.0490936, 1.6875663], + [0.21132214, 0.4206159, 0.5372846], + [1.4440073, 1.8970833, 0.26985747], + [1.0294262, 1.4040797, 0.87554324], + [1.3766412, 0.65521795, 1.6813129], + [0.6650819, 0.71111923, 1.298853], + [0.41999173, 0.37906948, 1.7513971], + [0.59359556, 0.5912492, 0.73919016], + [0.50867593, 0.50656086, 0.30136237], + [1.1511526, 1.0546296, 0.49706793], + [0.47535285, 0.49249494, 0.5802117]]), + sampling_objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=8.0), + objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + center_loss=dict( + type='mmdet.SmoothL1Loss', + beta=0.04, + reduction='sum', + loss_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=10.0 / 9.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + test_cfg=dict( + sample_mode='kps', + nms_thr=0.25, + score_thr=0.0, + per_class_proposal=True, + prediction_stages='last_three')) + +# dataset settings +dataset_type = 'ScanNetDataset' +data_root = './data/scannet/' +class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin') + +metainfo = dict(classes=class_names) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True, + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=50000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0]), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + 
flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=50000), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_train.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth', + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +val_evaluator = dict(type='IndoorMetric') +test_evaluator = val_evaluator + +# optimizer +lr = 0.006 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.0005), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'bbox_head.decoder_layers': dict(lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_self_posembeds': dict( + lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_cross_posembeds': dict( + lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_query_proj': dict(lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_key_proj': dict(lr_mult=0.1, decay_mult=1.0) + })) + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=80, + by_epoch=True, + milestones=[56, 68], + gamma=0.1) +] + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=80, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=10)) +randomness = dict(seed=4) diff --git a/configs/groupfree3d/groupfree3d_w2x-head-L12-O256_4xb8_scannet-seg.py b/configs/groupfree3d/groupfree3d_w2x-head-L12-O256_4xb8_scannet-seg.py new file mode 100755 index 0000000..52fb7de --- /dev/null +++ b/configs/groupfree3d/groupfree3d_w2x-head-L12-O256_4xb8_scannet-seg.py @@ -0,0 +1,242 @@ +_base_ = [ + '../_base_/datasets/scannet-3d.py', '../_base_/models/groupfree3d.py', + '../_base_/schedules/schedule-3x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict( + type='PointNet2SASSG', + in_channels=3, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((128, 128, 256), (256, 256, 512), (256, 256, 512), + (256, 256, 512)), + fp_channels=((512, 512), (512, 288)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True)), + bbox_head=dict( + num_classes=18, + num_decoder_layers=12, + size_cls_agnostic=False, + bbox_coder=dict( + type='GroupFree3DBBoxCoder', + num_sizes=18, + num_dir_bins=1, + with_rot=False, + size_cls_agnostic=False, + 
mean_sizes=[[0.76966727, 0.8116021, 0.92573744], + [1.876858, 1.8425595, 1.1931566], + [0.61328, 0.6148609, 0.7182701], + [1.3955007, 1.5121545, 0.83443564], + [0.97949594, 1.0675149, 0.6329687], + [0.531663, 0.5955577, 1.7500148], + [0.9624706, 0.72462326, 1.1481868], + [0.83221924, 1.0490936, 1.6875663], + [0.21132214, 0.4206159, 0.5372846], + [1.4440073, 1.8970833, 0.26985747], + [1.0294262, 1.4040797, 0.87554324], + [1.3766412, 0.65521795, 1.6813129], + [0.6650819, 0.71111923, 1.298853], + [0.41999173, 0.37906948, 1.7513971], + [0.59359556, 0.5912492, 0.73919016], + [0.50867593, 0.50656086, 0.30136237], + [1.1511526, 1.0546296, 0.49706793], + [0.47535285, 0.49249494, 0.5802117]]), + sampling_objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=8.0), + objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + center_loss=dict( + type='mmdet.SmoothL1Loss', + beta=0.04, + reduction='sum', + loss_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=10.0 / 9.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + test_cfg=dict( + sample_mode='kps', + nms_thr=0.25, + score_thr=0.0, + per_class_proposal=True, + prediction_stages='last_three')) + +# dataset settings +dataset_type = 'ScanNetDataset' +data_root = './data/scannet/' +class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin') + +metainfo = dict(classes=class_names) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True, + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=50000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0]), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=50000), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + sampler=dict(type='DefaultSampler', shuffle=True), + 
dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_train.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth', + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +val_evaluator = dict(type='IndoorMetric') +test_evaluator = val_evaluator + +# optimizer +lr = 0.006 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.0005), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'bbox_head.decoder_layers': dict(lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_self_posembeds': dict( + lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_cross_posembeds': dict( + lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_query_proj': dict(lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_key_proj': dict(lr_mult=0.1, decay_mult=1.0) + })) + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=80, + by_epoch=True, + milestones=[56, 68], + gamma=0.1) +] + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=80, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=10)) diff --git a/configs/groupfree3d/groupfree3d_w2x-head-L12-O512_4xb8_scannet-seg.py b/configs/groupfree3d/groupfree3d_w2x-head-L12-O512_4xb8_scannet-seg.py new file mode 100755 index 0000000..c24aa1a --- /dev/null +++ b/configs/groupfree3d/groupfree3d_w2x-head-L12-O512_4xb8_scannet-seg.py @@ -0,0 +1,243 @@ +_base_ = [ + '../_base_/datasets/scannet-3d.py', '../_base_/models/groupfree3d.py', + '../_base_/schedules/schedule-3x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict( + type='PointNet2SASSG', + in_channels=3, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((128, 128, 256), (256, 256, 512), (256, 256, 512), + (256, 256, 512)), + fp_channels=((512, 512), (512, 288)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True)), + bbox_head=dict( + num_classes=18, + num_decoder_layers=12, + num_proposal=512, + size_cls_agnostic=False, + bbox_coder=dict( + type='GroupFree3DBBoxCoder', + num_sizes=18, + num_dir_bins=1, + with_rot=False, + size_cls_agnostic=False, + mean_sizes=[[0.76966727, 0.8116021, 0.92573744], + [1.876858, 1.8425595, 1.1931566], + [0.61328, 0.6148609, 0.7182701], + [1.3955007, 1.5121545, 0.83443564], + [0.97949594, 1.0675149, 0.6329687], + [0.531663, 0.5955577, 1.7500148], + [0.9624706, 0.72462326, 1.1481868], + [0.83221924, 
1.0490936, 1.6875663], + [0.21132214, 0.4206159, 0.5372846], + [1.4440073, 1.8970833, 0.26985747], + [1.0294262, 1.4040797, 0.87554324], + [1.3766412, 0.65521795, 1.6813129], + [0.6650819, 0.71111923, 1.298853], + [0.41999173, 0.37906948, 1.7513971], + [0.59359556, 0.5912492, 0.73919016], + [0.50867593, 0.50656086, 0.30136237], + [1.1511526, 1.0546296, 0.49706793], + [0.47535285, 0.49249494, 0.5802117]]), + sampling_objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=8.0), + objectness_loss=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + center_loss=dict( + type='mmdet.SmoothL1Loss', + beta=0.04, + reduction='sum', + loss_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=10.0 / 9.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + test_cfg=dict( + sample_mode='kps', + nms_thr=0.25, + score_thr=0.0, + per_class_proposal=True, + prediction_stages='last_three')) + +# dataset settings +dataset_type = 'ScanNetDataset' +data_root = './data/scannet/' +class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin') + +metainfo = dict(classes=class_names) +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True, + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=50000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0]), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=50000), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_train.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and 
box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth', + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='scannet_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='Depth', + backend_args=backend_args)) +val_evaluator = dict(type='IndoorMetric') +test_evaluator = val_evaluator + +# optimizer +lr = 0.006 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.0005), + clip_grad=dict(max_norm=0.1, norm_type=2), + paramwise_cfg=dict( + custom_keys={ + 'bbox_head.decoder_layers': dict(lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_self_posembeds': dict( + lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_cross_posembeds': dict( + lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_query_proj': dict(lr_mult=0.1, decay_mult=1.0), + 'bbox_head.decoder_key_proj': dict(lr_mult=0.1, decay_mult=1.0) + })) + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=80, + by_epoch=True, + milestones=[56, 68], + gamma=0.1) +] + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=80, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=10)) diff --git a/configs/groupfree3d/metafile.yml b/configs/groupfree3d/metafile.yml new file mode 100755 index 0000000..0b8a62e --- /dev/null +++ b/configs/groupfree3d/metafile.yml @@ -0,0 +1,72 @@ +Collections: + - Name: Group-Free-3D + Metadata: + Training Techniques: + - AdamW + Training Resources: 4x V100 GPUs + Architecture: + - PointNet++ + Paper: + URL: https://arxiv.org/abs/2104.00678 + Title: 'Group-Free 3D Object Detection via Transformers' + README: configs/groupfree3d/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/groupfree3dnet.py#L10 + Version: v0.15.0 + +Models: + - Name: groupfree3d_head-L6-O256_4xb8_scannet-seg.py + In Collection: Group-Free-3D + Config: configs/groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 6.7 + Results: + - Task: 3D Object Detection + Dataset: ScanNet + Metrics: + AP@0.25: 66.17 + AP@0.5: 48.47 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-L6-O256/groupfree3d_8x4_scannet-3d-18class-L6-O256_20210702_145347-3499eb55.pth + + - Name: groupfree3d_head-L12-O256_4xb8_scannet-seg.py + In Collection: Group-Free-3D + Config: configs/groupfree3d/groupfree3d_head-L12-O256_4xb8_scannet-seg.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 9.4 + Results: + - Task: 3D Object Detection + Dataset: ScanNet + Metrics: + AP@0.25: 66.57 + AP@0.5: 48.21 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-L12-O256/groupfree3d_8x4_scannet-3d-18class-L12-O256_20210702_150907-1c5551ad.pth + + - Name: 
groupfree3d_w2x-head-L12-O256_4xb8_scannet-seg.py + In Collection: Group-Free-3D + Config: configs/groupfree3d/groupfree3d_w2x-head-L12-O256_4xb8_scannet-seg.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 13.3 + Results: + - Task: 3D Object Detection + Dataset: ScanNet + Metrics: + AP@0.25: 68.20 + AP@0.5: 51.02 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O256/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O256_20210702_200301-944f0ac0.pth + + - Name: groupfree3d_w2x-head-L12-O512_4xb8_scannet-seg.py + In Collection: Group-Free-3D + Config: configs/groupfree3d/groupfree3d_w2x-head-L12-O512_4xb8_scannet-seg.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 18.8 + Results: + - Task: 3D Object Detection + Dataset: ScanNet + Metrics: + AP@0.25: 68.22 + AP@0.5: 52.61 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/groupfree3d/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O512/groupfree3d_8x4_scannet-3d-18class-w2x-L12-O512_20210702_220204-187b71c7.pth diff --git a/configs/h3dnet/README.md b/configs/h3dnet/README.md new file mode 100755 index 0000000..53d9115 --- /dev/null +++ b/configs/h3dnet/README.md @@ -0,0 +1,44 @@ +# H3DNet: 3D Object Detection Using Hybrid Geometric Primitives + +> [H3DNet: 3D Object Detection Using Hybrid Geometric Primitives](https://arxiv.org/abs/2006.05682) + + + +## Abstract + +We introduce H3DNet, which takes a colorless 3D point cloud as input and outputs a collection of oriented object bounding boxes (or BB) and their semantic labels. The critical idea of H3DNet is to predict a hybrid set of geometric primitives, i.e., BB centers, BB face centers, and BB edge centers. We show how to convert the predicted geometric primitives into object proposals by defining a distance function between an object and the geometric primitives. This distance function enables continuous optimization of object proposals, and its local minimums provide high-fidelity object proposals. H3DNet then utilizes a matching and refinement module to classify object proposals into detected objects and fine-tune the geometric parameters of the detected objects. The hybrid set of geometric primitives not only provides more accurate signals for object detection than using a single type of geometric primitives, but it also provides an overcomplete set of constraints on the resulting 3D layout. Therefore, H3DNet can tolerate outliers in predicted geometric primitives. Our model achieves state-of-the-art 3D detection results on two large datasets with real 3D scans, ScanNet and SUN RGB-D. + +
    + +## Introduction + +We implement H3DNet and provide the result and checkpoints on ScanNet datasets. + +## Results and models + +### ScanNet + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :-------------------------------------------: | :-----: | :------: | :------------: | :-----: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [MultiBackbone](./h3dnet_8xb3_scannet-seg.py) | 3x | 7.9 | | 66.07 | 47.68 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/h3dnet/h3dnet_3x8_scannet-3d-18class/h3dnet_3x8_scannet-3d-18class_20210824_003149-414bd304.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/h3dnet/h3dnet_scannet-3d-18class/h3dnet_3x8_scannet-3d-18class_20210824_003149.log.json) | + +**Notice**: If your current mmdetection3d version >= 0.6.0, and you are using the checkpoints downloaded from the above links or using checkpoints trained with mmdetection3d version \< 0.6.0, the checkpoints have to be first converted via [tools/model_converters/convert_h3dnet_checkpoints.py](../../tools/model_converters/convert_h3dnet_checkpoints.py): + +``` +python ./tools/model_converters/convert_h3dnet_checkpoints.py ${ORIGINAL_CHECKPOINT_PATH} --out=${NEW_CHECKPOINT_PATH} +``` + +Then you can use the converted checkpoints following [get_started.md](../../docs/en/get_started.md). + +## Citation + +```latex +@inproceedings{zhang2020h3dnet, + author = {Zhang, Zaiwei and Sun, Bo and Yang, Haitao and Huang, Qixing}, + title = {H3DNet: 3D Object Detection Using Hybrid Geometric Primitives}, + booktitle = {Proceedings of the European Conference on Computer Vision}, + year = {2020} +} +``` diff --git a/configs/h3dnet/h3dnet_8xb3_scannet-seg.py b/configs/h3dnet/h3dnet_8xb3_scannet-seg.py new file mode 100755 index 0000000..f9a6e71 --- /dev/null +++ b/configs/h3dnet/h3dnet_8xb3_scannet-seg.py @@ -0,0 +1,74 @@ +_base_ = [ + '../_base_/datasets/scannet-3d.py', '../_base_/models/h3dnet.py', + '../_base_/schedules/schedule-3x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + rpn_head=dict( + num_classes=18, + bbox_coder=dict( + type='PartialBinBasedBBoxCoder', + num_sizes=18, + num_dir_bins=24, + with_rot=False, + mean_sizes=[[0.76966727, 0.8116021, 0.92573744], + [1.876858, 1.8425595, 1.1931566], + [0.61328, 0.6148609, 0.7182701], + [1.3955007, 1.5121545, 0.83443564], + [0.97949594, 1.0675149, 0.6329687], + [0.531663, 0.5955577, 1.7500148], + [0.9624706, 0.72462326, 1.1481868], + [0.83221924, 1.0490936, 1.6875663], + [0.21132214, 0.4206159, 0.5372846], + [1.4440073, 1.8970833, 0.26985747], + [1.0294262, 1.4040797, 0.87554324], + [1.3766412, 0.65521795, 1.6813129], + [0.6650819, 0.71111923, 1.298853], + [0.41999173, 0.37906948, 1.7513971], + [0.59359556, 0.5912492, 0.73919016], + [0.50867593, 0.50656086, 0.30136237], + [1.1511526, 1.0546296, 0.49706793], + [0.47535285, 0.49249494, 0.5802117]])), + roi_head=dict( + bbox_head=dict( + num_classes=18, + bbox_coder=dict( + type='PartialBinBasedBBoxCoder', + num_sizes=18, + num_dir_bins=24, + with_rot=False, + mean_sizes=[[0.76966727, 0.8116021, 0.92573744], + [1.876858, 1.8425595, 1.1931566], + [0.61328, 0.6148609, 0.7182701], + [1.3955007, 1.5121545, 0.83443564], + [0.97949594, 
1.0675149, 0.6329687], + [0.531663, 0.5955577, 1.7500148], + [0.9624706, 0.72462326, 1.1481868], + [0.83221924, 1.0490936, 1.6875663], + [0.21132214, 0.4206159, 0.5372846], + [1.4440073, 1.8970833, 0.26985747], + [1.0294262, 1.4040797, 0.87554324], + [1.3766412, 0.65521795, 1.6813129], + [0.6650819, 0.71111923, 1.298853], + [0.41999173, 0.37906948, 1.7513971], + [0.59359556, 0.5912492, 0.73919016], + [0.50867593, 0.50656086, 0.30136237], + [1.1511526, 1.0546296, 0.49706793], + [0.47535285, 0.49249494, 0.5802117]])))) + +train_dataloader = dict( + batch_size=3, + num_workers=2, +) + +# yapf:disable +default_hooks = dict( + logger=dict(type='LoggerHook', interval=30) +) +# yapf:enable +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (3 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=24) diff --git a/configs/h3dnet/metafile.yml b/configs/h3dnet/metafile.yml new file mode 100755 index 0000000..93558bc --- /dev/null +++ b/configs/h3dnet/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: H3DNet + Metadata: + Training Data: ScanNet + Training Techniques: + - AdamW + Training Resources: 8x GeForce GTX 1080 Ti + Architecture: + Paper: + URL: https://arxiv.org/abs/2006.05682 + Title: 'H3DNet: 3D Object Detection Using Hybrid Geometric Primitives' + README: configs/h3dnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/h3dnet.py#L10 + Version: v0.6.0 + +Models: + - Name: h3dnet_3x8_scannet-3d-18class + In Collection: H3DNet + Config: configs/h3dnet/h3dnet_8xb3_scannet-seg.py + Metadata: + Training Memory (GB): 7.9 + Results: + - Task: 3D Object Detection + Dataset: ScanNet + Metrics: + AP@0.25: 66.07 + AP@0.5: 47.68 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/h3dnet/h3dnet_3x8_scannet-3d-18class/h3dnet_3x8_scannet-3d-18class_20210824_003149-414bd304.pth diff --git a/configs/imvotenet/README.md b/configs/imvotenet/README.md new file mode 100755 index 0000000..5d30f02 --- /dev/null +++ b/configs/imvotenet/README.md @@ -0,0 +1,43 @@ +# ImVoteNet: Boosting 3D Object Detection in Point Clouds with Image Votes + +> [ImVoteNet: Boosting 3D Object Detection in Point Clouds with Image Votes](https://arxiv.org/abs/2001.10692) + + + +## Abstract + +3D object detection has seen quick progress thanks to advances in deep learning on point clouds. A few recent works have even shown state-of-the-art performance with just point clouds input (e.g. VOTENET). However, point cloud data have inherent limitations. They are sparse, lack color information and often suffer from sensor noise. Images, on the other hand, have high resolution and rich texture. Thus they can complement the 3D geometry provided by point clouds. Yet how to effectively use image information to assist point cloud based detection is still an open question. In this work, we build on top of VOTENET and propose a 3D detection architecture called IMVOTENET specialized for RGB-D scenes. IMVOTENET is based on fusing 2D votes in images and 3D votes in point clouds. Compared to prior work on multi-modal detection, we explicitly extract both geometric and semantic features from the 2D images. We leverage camera parameters to lift these features to 3D. To improve the synergy of 2D-3D feature fusion, we also propose a multi-tower training scheme. 
We validate our model on the challenging SUN RGB-D dataset, advancing state-of-the-art results by 5.7 mAP. We also provide rich ablation studies to analyze the contribution of each design choice. + +
    + +## Introduction + +We implement ImVoteNet and provide the result and checkpoints on SUNRGBD. + +## Results and models + +### SUNRGBD-2D (Stage 1, image branch pre-train) + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :--------------------------------------------------------------: | :-----: | :------: | :------------: | :-----: | :----: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [PointNet++](./imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py) | | 2.1 | | | 62.70 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvotenet/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class_20210819_225618-62eba6ce.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvotenet/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class_20210819_225618.json) | + +### SUNRGBD-3D (Stage 2) + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :--------------------------------------------------: | :-----: | :------: | :------------: | :-----: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [PointNet++](./imvotenet_stage2_8xb16_sunrgbd-3d.py) | 3x | 9.4 | | 64.48 | | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvotenet/imvotenet_stage2_16x8_sunrgbd-3d-10class/imvotenet_stage2_16x8_sunrgbd-3d-10class_20210819_192851-1bcd1b97.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvotenet/imvotenet_stage2_16x8_sunrgbd-3d-10class/imvotenet_stage2_16x8_sunrgbd-3d-10class_20210819_192851.log.json) | + +## Citation + +```latex +@inproceedings{qi2020imvotenet, + title={Imvotenet: Boosting 3D object detection in point clouds with image votes}, + author={Qi, Charles R and Chen, Xinlei and Litany, Or and Guibas, Leonidas J}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition}, + pages={4404--4413}, + year={2020} +} +``` diff --git a/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py b/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py new file mode 100755 index 0000000..5846ad1 --- /dev/null +++ b/configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py @@ -0,0 +1,69 @@ +_base_ = [ + '../_base_/datasets/sunrgbd-3d.py', '../_base_/default_runtime.py', + '../_base_/models/imvotenet.py' +] + +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_bbox_3d=False, + with_label_3d=False), + dict( + type='RandomChoiceResize', + scales=[(1333, 480), (1333, 504), (1333, 528), (1333, 552), + (1333, 576), (1333, 600)], + keep_ratio=True), + 
dict(type='RandomFlip', prob=0.5), + dict( + type='Pack3DDetInputs', keys=['img', 'gt_bboxes', 'gt_bboxes_labels']), +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(1333, 600), keep_ratio=True), + dict( + type='Pack3DDetInputs', + keys=(['img']), + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', times=1, dataset=dict(pipeline=train_pipeline))) + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=8, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), + dict( + type='MultiStepLR', + begin=0, + end=8, + by_epoch=True, + milestones=[6], + gamma=0.1) +] +val_evaluator = dict(type='Indoor2DMetric') +test_evaluator = val_evaluator + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +load_from = 'http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth' # noqa diff --git a/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py b/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py new file mode 100755 index 0000000..5f2218e --- /dev/null +++ b/configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py @@ -0,0 +1,228 @@ +_base_ = [ + '../_base_/datasets/sunrgbd-3d.py', '../_base_/schedules/schedule-3x.py', + '../_base_/default_runtime.py', '../_base_/models/imvotenet.py' +] + +class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', + 'night_stand', 'bookshelf', 'bathtub') +backend_args = None + +model = dict( + pts_backbone=dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True)), + pts_bbox_heads=dict( + common=dict( + type='VoteHead', + num_classes=10, + bbox_coder=dict( + type='PartialBinBasedBBoxCoder', + num_sizes=10, + num_dir_bins=12, + with_rot=True, + mean_sizes=[[2.114256, 1.620300, 0.927272], + [0.791118, 1.279516, 0.718182], + [0.923508, 1.867419, 0.845495], + [0.591958, 0.552978, 0.827272], + [0.699104, 0.454178, 0.75625], + [0.69519, 1.346299, 0.736364], + [0.528526, 1.002642, 1.172878], + [0.500618, 0.632163, 0.683424], + [0.404671, 1.071108, 1.688889], + [0.76584, 1.398258, 0.472728]]), + pred_layer_cfg=dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True), + objectness_loss=dict( + type='mmdet.CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='mmdet.CrossEntropyLoss', + reduction='sum', + loss_weight=1.0), + dir_res_loss=dict( + type='mmdet.SmoothL1Loss', reduction='sum', 
loss_weight=10.0), + size_class_loss=dict( + type='mmdet.CrossEntropyLoss', + reduction='sum', + loss_weight=1.0), + size_res_loss=dict( + type='mmdet.SmoothL1Loss', + reduction='sum', + loss_weight=10.0 / 3.0), + semantic_loss=dict( + type='mmdet.CrossEntropyLoss', + reduction='sum', + loss_weight=1.0)), + joint=dict( + vote_module_cfg=dict( + in_channels=512, + vote_per_seed=1, + gt_per_seed=3, + conv_channels=(512, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=256, + radius=0.3, + num_sample=16, + mlp_channels=[512, 128, 128, 128], + use_xyz=True, + normalize_xyz=True)), + pts=dict( + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=3, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=256, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True)), + img=dict( + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=3, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=256, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True)), + loss_weights=[0.4, 0.3, 0.3]), + img_mlp=dict( + in_channel=18, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU')), + fusion_layer=dict( + type='VoteFusion', + num_classes=len(class_names), + max_imvote_per_pixel=3), + num_sampled_seed=1024, + freeze_img_branch=True, + + # model training and testing settings + train_cfg=dict( + pts=dict( + pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mode='vote')), + test_cfg=dict( + img_rcnn=dict(score_thr=0.1), + pts=dict( + sample_mode='seed', + nms_thr=0.25, + score_thr=0.05, + per_class_proposal=True))) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_bbox_3d=True, + with_label_3d=True), + dict(type='Resize', scale=(1333, 600), keep_ratio=True), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + shift_height=True), + dict(type='PointSample', num_points=20000), + dict( + type='Pack3DDetInputs', + keys=([ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'points', 'gt_bboxes_3d', + 'gt_labels_3d' + ])) +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict(type='Resize', scale=(1333, 600), keep_ratio=True), + dict(type='PointSample', num_points=20000), + dict(type='Pack3DDetInputs', keys=['img', 
'points']) +] + +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# may also use your own pre-trained image branch +load_from = 'https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvotenet/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class_20210819_225618-62eba6ce.pth' # noqa +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=128) +randomness = dict(seed=8) diff --git a/configs/imvotenet/metafile.yml b/configs/imvotenet/metafile.yml new file mode 100755 index 0000000..191dd13 --- /dev/null +++ b/configs/imvotenet/metafile.yml @@ -0,0 +1,43 @@ +Collections: + - Name: ImVoteNet + Metadata: + Training Data: SUNRGBD + Training Techniques: + - AdamW + Training Resources: 8x TITAN Xp + Architecture: + - Faster R-CNN + - VoteNet + - Feature Pyramid Network + Paper: + URL: https://arxiv.org/abs/2001.10692 + Title: 'ImVoteNet: Boosting 3D Object Detection in Point Clouds with Image Votes' + README: configs/imvotenet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/imvotenet.py#L56 + Version: v0.12.0 + +Models: + - Name: imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class + In Collection: ImVoteNet + Config: configs/imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py + Metadata: + Training Memory (GB): 2.1 + Results: + - Task: Object Detection + Dataset: SUNRGBD-2D + Metrics: + AP@0.5: 62.70 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvotenet/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class_20210819_225618-62eba6ce.pth + + - Name: imvotenet_stage2_16x8_sunrgbd-3d-10class + In Collection: ImVoteNet + Config: configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py + Metadata: + Training Memory (GB): 9.4 + Results: + - Task: 3D Object Detection + Dataset: SUNRGBD-3D + Metrics: + AP@0.25: 64.48 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvotenet/imvotenet_stage2_16x8_sunrgbd-3d-10class/imvotenet_stage2_16x8_sunrgbd-3d-10class_20210819_192851-1bcd1b97.pth diff --git a/configs/imvoxelnet/README.md b/configs/imvoxelnet/README.md new file mode 100755 index 0000000..56c2b55 --- /dev/null +++ b/configs/imvoxelnet/README.md @@ -0,0 +1,44 @@ +# ImVoxelNet: Image to Voxels Projection for Monocular and Multi-View General-Purpose 3D Object Detection + +> [ImVoxelNet: Image to Voxels Projection for Monocular and Multi-View General-Purpose 3D Object Detection](https://arxiv.org/abs/2106.01178) + + + +## Abstract + +In this paper, we introduce the task of multi-view RGB-based 3D object detection as an end-to-end optimization problem. To address this problem, we propose ImVoxelNet, a novel fully convolutional method of 3D object detection based on posed monocular or multi-view RGB images. The number of monocular images in each multiview input can variate during training and inference; actually, this number might be unique for each multi-view input. ImVoxelNet successfully handles both indoor and outdoor scenes, which makes it general-purpose. 
Specifically, it achieves state-of-the-art results in car detection on KITTI (monocular) and nuScenes (multi-view) benchmarks among all methods that accept RGB images. Moreover, it surpasses existing RGB-based 3D object detection methods on the SUN RGB-D dataset. On ScanNet, ImVoxelNet sets a new benchmark for multi-view 3D object detection. + +
    + +## Introduction + +We implement a monocular 3D detector ImVoxelNet and provide its results and checkpoints on KITTI dataset. +Results for SUN RGB-D, ScanNet and nuScenes are currently available in ImVoxelNet authors +[repo](https://github.com/saic-vul/imvoxelnet) (based on mmdetection3d). + +## Results and models + +### KITTI + +| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :--------------------------------------------: | :---: | :-----: | :------: | :------------: | :---: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [ResNet-50](./imvoxelnet_8xb4_kitti-3d-car.py) | Car | 3x | | | 17.26 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvoxelnet/imvoxelnet_4x8_kitti-3d-car/imvoxelnet_4x8_kitti-3d-car_20210830_003014-3d0ffdf4.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvoxelnet/imvoxelnet_4x8_kitti-3d-car/imvoxelnet_4x8_kitti-3d-car_20210830_003014.log.json) | + +### SUN RGB-D + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP@0.25 | mAP@0.5 | Download | +| :-------------------------------------------------: | :-----: | :------: | :------------: | :------: | :-----: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [ResNet-50](./imvoxelnet_4x2_sunrgbd-3d-10class.py) | 2x | 7.2 | 22.5 | 40.96 | 13.50 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvoxelnet/imvoxelnet_4x2_sunrgbd-3d-10class/imvoxelnet_4x2_sunrgbd-3d-10class_20220809_184416-29ca7d2e.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvoxelnet/imvoxelnet_4x2_sunrgbd-3d-10class/imvoxelnet_4x2_sunrgbd-3d-10class_20220809_184416.log.json) | + +## Citation + +```latex +@article{rukhovich2021imvoxelnet, + title={ImVoxelNet: Image to Voxels Projection for Monocular and Multi-View General-Purpose 3D Object Detection}, + author={Danila Rukhovich, Anna Vorontsova, Anton Konushin}, + journal={arXiv preprint arXiv:2106.01178}, + year={2021} +} +``` diff --git a/configs/imvoxelnet/imvoxelnet_2xb4_sunrgbd-3d-10class.py b/configs/imvoxelnet/imvoxelnet_2xb4_sunrgbd-3d-10class.py new file mode 100755 index 0000000..2884f92 --- /dev/null +++ b/configs/imvoxelnet/imvoxelnet_2xb4_sunrgbd-3d-10class.py @@ -0,0 +1,137 @@ +_base_ = [ + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +prior_generator = dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-3.2, -0.2, -2.28, 3.2, 6.2, 0.28]], + rotations=[.0]) +model = dict( + type='ImVoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='mmdet.ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + 
type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=4), + neck_3d=dict( + type='IndoorImVoxelNeck', + in_channels=256, + out_channels=128, + n_blocks=[1, 1, 1]), + bbox_head=dict( + type='ImVoxelHead', + n_classes=10, + n_levels=3, + n_channels=128, + n_reg_outs=7, + pts_assign_threshold=27, + pts_center_threshold=18, + prior_generator=prior_generator), + prior_generator=prior_generator, + n_voxels=[40, 40, 16], + coord_type='DEPTH', + train_cfg=dict(), + test_cfg=dict(nms_pre=1000, iou_thr=.25, score_thr=.01)) + +dataset_type = 'SUNRGBDDataset' +data_root = 'data/sunrgbd/' +class_names = [ + 'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', + 'night_stand', 'bookshelf', 'bathtub' +] +metainfo = dict(CLASSES=class_names) + +backend_args = None + +train_pipeline = [ + dict(type='LoadAnnotations3D', backend_args=backend_args), + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='RandomResize', scale=[(512, 384), (768, 576)], keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='Pack3DDetInputs', keys=['img', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='Resize', scale=(640, 480), keep_ratio=True), + dict(type='Pack3DDetInputs', keys=['img']) +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='sunrgbd_infos_train.pkl', + pipeline=train_pipeline, + test_mode=False, + filter_empty_gt=True, + box_type_3d='Depth', + metainfo=metainfo, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='sunrgbd_infos_val.pkl', + pipeline=test_pipeline, + test_mode=True, + box_type_3d='Depth', + metainfo=metainfo, + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='IndoorMetric', + ann_file=data_root + 'sunrgbd_infos_val.pkl', + metric='bbox') +test_evaluator = val_evaluator + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001), + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}), + clip_grad=dict(max_norm=35., norm_type=2)) +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# hooks +default_hooks = dict(checkpoint=dict(type='CheckpointHook', max_keep_ckpts=1)) + +# runtime +find_unused_parameters = True # only 1 of 4 FPN outputs is used diff --git a/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py b/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py new file mode 100755 index 0000000..df1e9d6 --- /dev/null +++ b/configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py @@ -0,0 +1,176 @@ +_base_ = [ + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='ImVoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='mmdet.ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + 
frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=64, + num_outs=4), + neck_3d=dict(type='OutdoorImVoxelNeck', in_channels=64, out_channels=256), + bbox_head=dict( + type='Anchor3DHead', + num_classes=1, + in_channels=256, + feat_channels=256, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-0.16, -39.68, -1.78, 68.96, 39.68, -1.78]], + sizes=[[3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=True), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + n_voxels=[216, 248, 12], + coord_type='LIDAR', + prior_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-0.16, -39.68, -3.08, 68.96, 39.68, 0.76]], + rotations=[.0]), + train_cfg=dict( + assigner=dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='mmdet3d.BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) + +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Car'] +input_modality = dict(use_lidar=False, use_camera=True) +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] +metainfo = dict(classes=class_names) + +backend_args = None + +train_pipeline = [ + dict(type='LoadAnnotations3D', backend_args=backend_args), + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='RandomResize', scale=[(1173, 352), (1387, 416)], + keep_ratio=True), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='Pack3DDetInputs', keys=['img', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='Resize', scale=(1280, 384), keep_ratio=True), + dict(type='Pack3DDetInputs', keys=['img']) +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(img='training/image_2'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_val.pkl', + data_prefix=dict(img='training/image_2'), + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 
'kitti_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001), + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}), + clip_grad=dict(max_norm=35., norm_type=2)) +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +# hooks +default_hooks = dict(checkpoint=dict(type='CheckpointHook', max_keep_ckpts=1)) + +# runtime +find_unused_parameters = True # only 1 of 4 FPN outputs is used + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/configs/imvoxelnet/metafile.yml b/configs/imvoxelnet/metafile.yml new file mode 100755 index 0000000..a15c4ca --- /dev/null +++ b/configs/imvoxelnet/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: ImVoxelNet + Metadata: + Training Data: KITTI + Training Techniques: + - AdamW + Training Resources: 8x Tesla P40 + Architecture: + - Anchor3DHead + Paper: + URL: https://arxiv.org/abs/2106.01178 + Title: 'ImVoxelNet: Image to Voxels Projection for Monocular and Multi-View General-Purpose 3D Object Detection' + README: configs/imvoxelnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/imvoxelnet.py#L11 + Version: v0.15.0 + +Models: + - Name: imvoxelnet_kitti-3d-car + In Collection: ImVoxelNet + Config: configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py + Metadata: + Training Memory (GB): 15.0 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 17.26 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvoxelnet/imvoxelnet_4x8_kitti-3d-car/imvoxelnet_4x8_kitti-3d-car_20210830_003014-3d0ffdf4.pth diff --git a/configs/minkunet/README.md b/configs/minkunet/README.md new file mode 100755 index 0000000..011fc04 --- /dev/null +++ b/configs/minkunet/README.md @@ -0,0 +1,43 @@ +# 4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks + +> [4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks](https://arxiv.org/abs/1904.08755) + + + +## Abstract + +In many robotics and VR/AR applications, 3D-videos are readily-available sources of input (a continuous sequence of depth images, or LIDAR scans). However, those 3D-videos are processed frame-by-frame either through 2D convnets or 3D perception algorithms. In this work, we propose 4-dimensional convolutional neural networks for spatio-temporal perception that can directly process such 3D-videos using high-dimensional convolutions. For this, we adopt sparse tensors and propose the generalized sparse convolution that encompasses all discrete convolutions. To implement the generalized sparse convolution, we create an open-source auto-differentiation library for sparse tensors that provides extensive functions for high-dimensional convolutional neural networks. We create 4D spatio-temporal convolutional neural networks using the library and validate them on various 3D semantic segmentation benchmarks and proposed 4D datasets for 3D-video perception. To overcome challenges in the 4D space, we propose the hybrid kernel, a special case of the generalized sparse convolution, and the trilateral-stationary conditional random field that enforces spatio-temporal consistency in the 7D space-time-chroma space. 
Experimentally, we show that convolutional neural networks with only generalized 3D sparse convolutions can outperform 2D or 2D-3D hybrid methods by a large margin. Also, we show that on 3D-videos, 4D spatio-temporal convolutional neural networks are robust to noise, outperform 3D convolutional neural networks and are faster than the 3D counterpart in some cases. + +
    + +## Introduction + +We implement MinkUNet with [TorchSparse](https://github.com/mit-han-lab/torchsparse) backend and provide the result and checkpoints on SemanticKITTI datasets. + +## Results and models + +### SemanticKITTI + +| Method | Lr schd | Mem (GB) | mIoU | Download | +| :----------: | :-----: | :------: | :--: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| MinkUNet-W16 | 15e | 3.4 | 60.3 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w16_8xb2-15e_semantickitti/minkunet_w16_8xb2-15e_semantickitti_20230309_160737-0d8ec25b.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w16_8xb2-15e_semantickitti/minkunet_w16_8xb2-15e_semantickitti_20230309_160737.log) | +| MinkUNet-W20 | 15e | 3.7 | 61.6 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w20_8xb2-15e_semantickitti/minkunet_w20_8xb2-15e_semantickitti_20230309_160718-c3b92e6e.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w20_8xb2-15e_semantickitti/minkunet_w20_8xb2-15e_semantickitti_20230309_160718.log) | +| MinkUNet-W32 | 15e | 4.9 | 63.1 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w32_8xb2-15e_semantickitti/minkunet_w32_8xb2-15e_semantickitti_20230309_160710-7fa0a6f1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w32_8xb2-15e_semantickitti/minkunet_w32_8xb2-15e_semantickitti_20230309_160710.log) | + +**Note:** We follow the implementation in SPVNAS original [repo](https://github.com/mit-han-lab/spvnas) and W16\\W20\\W32 indicates different number of channels. + +**Note:** Due to TorchSparse backend, the model performance is unstable with TorchSparse backend and may fluctuate by about 1.5 mIoU for different random seeds. 
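+
+For reproducibility, the configs in this PR pin the random seed through the `randomness` field. A derived user config can do the same; below is a minimal sketch (the file name is illustrative and not part of this PR):
+
+```python
+# my_minkunet_w32_seeded.py -- illustrative name; place it next to the base config.
+_base_ = ['./minkunet_w32_8xb2-15e_semantickitti.py']
+
+# Pin the seed so repeated runs are comparable; scores may still differ by
+# roughly 1.5 mIoU when a different seed is chosen.
+randomness = dict(seed=0, deterministic=False)
+```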
+ +## Citation + +```latex +@inproceedings{choy20194d, + title={4d spatio-temporal convnets: Minkowski convolutional neural networks}, + author={Choy, Christopher and Gwak, JunYoung and Savarese, Silvio}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition}, + pages={3075--3084}, + year={2019} +} +``` diff --git a/configs/minkunet/metafile.yml b/configs/minkunet/metafile.yml new file mode 100755 index 0000000..60d4e63 --- /dev/null +++ b/configs/minkunet/metafile.yml @@ -0,0 +1,57 @@ +Collections: + - Name: MinkUNet + Metadata: + Training Techniques: + - AdamW + Architecture: + - MinkUNet + Paper: + URL: https://arxiv.org/abs/1904.08755 + Title: '4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks' + README: configs/minkunet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/1.1/mmdet3d/models/segmentors/minkunet.py#L13 + Version: v1.1.0 + +Models: + - Name: minkunet_w16_8xb2-15e_semantickitti + In Collection: MinkUNet + Config: configs/minkunet/minkunet_w16_8xb2-15e_semantickitti.py + Metadata: + Training Data: SemanticKITTI + Training Memory (GB): 3.4 + Training Resources: 8x A100 GPUs + Results: + - Task: 3D Semantic Segmentation + Dataset: SemanticKITTI + Metrics: + mIoU: 60.3 + Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w16_8xb2-15e_semantickitti/minkunet_w16_8xb2-15e_semantickitti_20230309_160737-0d8ec25b.pth + + - Name: minkunet_w20_8xb2-15e_semantickitti + In Collection: MinkUNet + Config: configs/minkunet/minkunet_w20_8xb2-15e_semantickitti.py + Metadata: + Training Data: SemanticKITTI + Training Memory (GB): 3.7 + Training Resources: 8x A100 GPUs + Results: + - Task: 3D Semantic Segmentation + Dataset: SemanticKITTI + Metrics: + mIoU: 61.6 + Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w20_8xb2-15e_semantickitti/minkunet_w20_8xb2-15e_semantickitti_20230309_160718-c3b92e6e.pth + + - Name: minkunet_w32_8xb2-15e_semantickitti + In Collection: MinkUNet + Config: configs/minkunet/minkunet_w32_8xb2-15e_semantickitti.py + Metadata: + Training Data: SemanticKITTI + Training Memory (GB): 4.9 + Training Resources: 8x A100 GPUs + Results: + - Task: 3D Semantic Segmentation + Dataset: SemanticKITTI + Metrics: + mIoU: 63.1 + Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/minkunet/minkunet_w32_8xb2-15e_semantickitti/minkunet_w32_8xb2-15e_semantickitti_20230309_160710-7fa0a6f1.pth diff --git a/configs/minkunet/minkunet_w16_8xb2-15e_semantickitti.py b/configs/minkunet/minkunet_w16_8xb2-15e_semantickitti.py new file mode 100755 index 0000000..ac450bf --- /dev/null +++ b/configs/minkunet/minkunet_w16_8xb2-15e_semantickitti.py @@ -0,0 +1,13 @@ +_base_ = ['./minkunet_w32_8xb2-15e_semantickitti.py'] + +model = dict( + backbone=dict( + base_channels=16, + encoder_channels=[16, 32, 64, 128], + decoder_channels=[128, 64, 48, 48]), + decode_head=dict(channels=48)) + +# NOTE: Due to TorchSparse backend, the model performance is relatively +# dependent on random seeds, and if random seeds are not specified the +# model performance will be different (± 1.5 mIoU). 
+randomness = dict(seed=1588147245) diff --git a/configs/minkunet/minkunet_w20_8xb2-15e_semantickitti.py b/configs/minkunet/minkunet_w20_8xb2-15e_semantickitti.py new file mode 100755 index 0000000..34c501f --- /dev/null +++ b/configs/minkunet/minkunet_w20_8xb2-15e_semantickitti.py @@ -0,0 +1,8 @@ +_base_ = ['./minkunet_w32_8xb2-15e_semantickitti.py'] + +model = dict( + backbone=dict( + base_channels=20, + encoder_channels=[20, 40, 81, 163], + decoder_channels=[163, 81, 61, 61]), + decode_head=dict(channels=61)) diff --git a/configs/minkunet/minkunet_w32_8xb2-15e_semantickitti.py b/configs/minkunet/minkunet_w32_8xb2-15e_semantickitti.py new file mode 100755 index 0000000..80f5283 --- /dev/null +++ b/configs/minkunet/minkunet_w32_8xb2-15e_semantickitti.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/datasets/semantickitti.py', '../_base_/models/minkunet.py', + '../_base_/default_runtime.py' +] + +train_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_seg_3d=True, + seg_3d_dtype='np.int32', + seg_offset=2**16, + dataset_type='semantickitti'), + dict(type='PointSegClassMapping'), + dict( + type='GlobalRotScaleTrans', + rot_range=[0., 6.28318531], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + ), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] + +train_dataloader = dict( + sampler=dict(seed=0), dataset=dict(dataset=dict(pipeline=train_pipeline))) + +lr = 0.24 +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='SGD', lr=lr, weight_decay=0.0001, momentum=0.9, nesterov=True)) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.008, by_epoch=False, begin=0, end=125), + dict( + type='CosineAnnealingLR', + begin=0, + T_max=15, + by_epoch=True, + eta_min=1e-5, + convert_to_iter_based=True) +] + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=15, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=1)) +randomness = dict(seed=0, deterministic=False, diff_rank_seed=True) +env_cfg = dict(cudnn_benchmark=True) diff --git a/configs/monoflex/README.md b/configs/monoflex/README.md new file mode 100755 index 0000000..0f402be --- /dev/null +++ b/configs/monoflex/README.md @@ -0,0 +1,48 @@ +# Objects are Different: Flexible Monocular 3D Object Detection + +> [Objects are Different: Flexible Monocular 3D Object Detection](https://arxiv.org/abs/2104.02323) + + + +## Abstract + +The precise localization of 3D objects from a single image without depth information is a highly challenging problem. Most existing methods adopt the same approach for all objects regardless of their diverse distributions, leading to limited performance for truncated objects. In this paper, we propose a flexible framework for monocular 3D object detection which explicitly decouples the truncated objects and adaptively combines multiple approaches for object depth estimation. Specifically, we decouple the edge of the feature map for predicting long-tail truncated objects so that the optimization of normal objects is not influenced. Furthermore, we formulate the object depth estimation as an uncertainty-guided ensemble of directly regressed object depth and solved depths from different groups of keypoints. 
Experiments demonstrate that our method outperforms the state-of-the-art method by relatively 27% for the moderate level and 30% for the hard level in the test set of KITTI benchmark while maintaining real-time efficiency. + +
    + +## Introduction + +We implement MonoFlex and provide the results and checkpoints on KITTI dataset. + +## Results and models + +### KITTI + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :---------------------------------------------------------------------: | :-----: | :------: | :------------: | :---: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [DLA34](./monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d.py) | 6x | 9.64 | | 21.86 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/monoflex/monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d_20211228_027553-d46d9bb0.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/monoflex/monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d_20211228_027553.log.json) | + +Note: mAP represents Car moderate 3D strict AP11 results. +Detailed performance on KITTI 3D detection (3D/BEV) is as follows, evaluated by AP11 and AP40 metric: + +| | Easy | Moderate | Hard | +| ---------- | :-----------: | :-----------: | :-----------: | +| Car (AP11) | 28.02 / 36.11 | 21.86 / 29.46 | 19.01 / 24.83 | +| Car (AP40) | 23.22 / 32.74 | 17.18 / 24.02 | 15.13 / 20.67 | + +Note: mAP represents Car moderate 3D strict AP11 / AP40 results. Because of the limited data for pedestrians and cyclists, the detection performance for these two classes is usually unstable. Therefore, we only list car detection results here. In addition, the AP11 result may fluctuate in a larger range (~1 AP), so AP40 is a more recommended metric for reference due to its much better stability. 
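+
+To reproduce the reported numbers with the released checkpoint, one option is to run the test loop through MMEngine's `Runner`. The snippet below is only a minimal sketch; the checkpoint path and work directory are placeholders:
+
+```python
+# Minimal evaluation sketch; paths below are placeholders, not part of this PR.
+from mmengine.config import Config
+from mmengine.runner import Runner
+
+from mmdet3d.utils import register_all_modules
+
+register_all_modules()  # register mmdet3d models, datasets and metrics
+
+cfg = Config.fromfile(
+    'configs/monoflex/monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d.py')
+cfg.work_dir = './work_dirs/monoflex_eval'        # where logs and results are written
+cfg.load_from = 'checkpoints/monoflex_kitti.pth'  # placeholder checkpoint path
+
+runner = Runner.from_cfg(cfg)
+runner.test()  # runs the KITTI evaluation with the loaded checkpoint
+```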
+ +## Citation + +```latex +@InProceedings{MonoFlex, + author = {Zhang, Yunpeng and Lu, Jiwen and Zhou, Jie}, + title = {Objects Are Different: Flexible Monocular 3D Object Detection}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2021}, + pages = {3289-3298} +} +``` diff --git a/configs/monoflex/metafile.yml b/configs/monoflex/metafile.yml new file mode 100755 index 0000000..36fe1f0 --- /dev/null +++ b/configs/monoflex/metafile.yml @@ -0,0 +1,30 @@ +Collections: + - Name: MonoFlex + Metadata: + Training Data: KITTI + Training Techniques: + - Adam + Training Resources: 2x V100 GPUS + Architecture: + - MonoFlexHead + - DLA + Paper: + URL: https://arxiv.org/abs/2104.02323 + Title: 'Objects are Different: Flexible Monocular 3D Object Detection' + README: configs/monoflex/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/mmdet3d/models/detectors/monoflex.py#L7 + Version: v1.0.0 + +Models: + - Name: monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d + In Collection: MonoFlex + Config: configs/monoflex/monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d.py + Metadata: + Training Memory (GB): 9.64 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 21.86 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/monoflex/monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d_20211228_027553-d46d9bb0.pth diff --git a/configs/mvxnet/README.md b/configs/mvxnet/README.md new file mode 100755 index 0000000..77a820d --- /dev/null +++ b/configs/mvxnet/README.md @@ -0,0 +1,38 @@ +# MVX-Net: Multimodal VoxelNet for 3D Object Detection + +> [MVX-Net: Multimodal VoxelNet for 3D Object Detection](https://arxiv.org/abs/1904.01649) + + + +## Abstract + +Many recent works on 3D object detection have focused on designing neural network architectures that can consume point cloud data. While these approaches demonstrate encouraging performance, they are typically based on a single modality and are unable to leverage information from other modalities, such as a camera. Although a few approaches fuse data from different modalities, these methods either use a complicated pipeline to process the modalities sequentially, or perform late-fusion and are unable to learn interaction between different modalities at early stages. In this work, we present PointFusion and VoxelFusion: two simple yet effective early-fusion approaches to combine the RGB and point cloud modalities, by leveraging the recently introduced VoxelNet architecture. Evaluation on the KITTI dataset demonstrates significant improvements in performance over approaches which only use point cloud data. Furthermore, the proposed method provides results competitive with the state-of-the-art multimodal algorithms, achieving top-2 ranking in five of the six bird's eye view and 3D detection categories on the KITTI benchmark, by using a simple single stage network. + +
    + +## Introduction + +We implement MVX-Net and provide its results and models on KITTI dataset. + +## Results and models + +### KITTI + +| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :-----------------------------------------------------------------: | :-----: | :--------: | :------: | :------------: | :---: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py) | 3 Class | cosine 80e | 6.7 | | 63.22 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class_20210831_060805-83442923.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class_20210831_060805.log.json) | + +## Citation + +```latex +@inproceedings{sindagi2019mvx, + title={MVX-Net: Multimodal voxelnet for 3D object detection}, + author={Sindagi, Vishwanath A and Zhou, Yin and Tuzel, Oncel}, + booktitle={2019 International Conference on Robotics and Automation (ICRA)}, + pages={7276--7282}, + year={2019}, + organization={IEEE} +} +``` diff --git a/configs/mvxnet/metafile.yml b/configs/mvxnet/metafile.yml new file mode 100755 index 0000000..6eb341a --- /dev/null +++ b/configs/mvxnet/metafile.yml @@ -0,0 +1,31 @@ +Collections: + - Name: MVX-Net + Metadata: + Training Data: KITTI + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Feature Pyramid Network + - Dynamic Voxelization + Paper: + URL: https://arxiv.org/abs/1904.01649 + Title: 'MVX-Net: Multimodal VoxelNet for 3D Object Detection' + README: configs/mvxnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/mvx_two_stage.py#L20 + Version: v0.5.0 + +Models: + - Name: dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class + Alias: mvxnet_kitti-3class + In Collection: MVX-Net + Config: configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py + Metadata: + Training Memory (GB): 6.7 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 63.22 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class_20210831_060805-83442923.pth diff --git a/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py b/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py new file mode 100755 index 0000000..feceb17 --- /dev/null +++ b/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py @@ -0,0 +1,271 @@ +_base_ = ['../_base_/schedules/cosine.py', '../_base_/default_runtime.py'] + +# model settings +voxel_size = [0.05, 0.05, 0.1] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] + +model = dict( + type='DynamicMVXFasterRCNN', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_type='dynamic', + voxel_layer=dict( + max_num_points=-1, + 
point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(-1, -1)), + mean=[102.9801, 115.9465, 122.7717], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + img_backbone=dict( + type='mmdet.ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe'), + img_neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + pts_voxel_encoder=dict( + type='DynamicVFE', + in_channels=4, + feat_channels=[64, 64], + with_distance=False, + voxel_size=voxel_size, + with_cluster_center=True, + with_voxel_center=True, + point_cloud_range=point_cloud_range, + fusion_layer=dict( + type='PointFusion', + img_channels=256, + pts_channels=64, + mid_channels=128, + out_channels=128, + img_levels=[0, 1, 2, 3, 4], + align_corners=False, + activate_out=True, + fuse_out=False)), + pts_middle_encoder=dict( + type='SparseEncoder', + in_channels=128, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + pts_backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + pts_neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + assigner_per_size=True, + diff_rad_by_sin=True, + assign_per_class=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False)), + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50))) + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +metainfo = dict(classes=class_names) +input_modality = dict(use_lidar=True, use_camera=True) +backend_args = None +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + 
type='RandomResize', scale=[(640, 192), (2560, 768)], keep_ratio=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05], + translation_std=[0.2, 0.2, 0.2]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'img', 'gt_bboxes_3d', 'gt_labels_3d', 'gt_bboxes', + 'gt_labels' + ]) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1280, 384), + pts_scale_ratio=1, + flip=False, + transforms=[ + # Temporary solution, fix this after refactor the augtest + dict(type='Resize', scale=0, keep_ratio=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + ]), + dict(type='Pack3DDetInputs', keys=['points', 'img']) +] +modality = dict(use_lidar=True, use_camera=True) +train_dataloader = dict( + batch_size=2, + num_workers=2, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + modality=modality, + ann_file='kitti_infos_train.pkl', + data_prefix=dict( + pts='training/velodyne_reduced', img='training/image_2'), + pipeline=train_pipeline, + filter_empty_gt=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR', + backend_args=backend_args))) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + modality=modality, + ann_file='kitti_infos_val.pkl', + data_prefix=dict( + pts='training/velodyne_reduced', img='training/image_2'), + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_val.pkl', + modality=modality, + data_prefix=dict( + pts='training/velodyne_reduced', img='training/image_2'), + pipeline=test_pipeline, + metainfo=metainfo, + test_mode=True, + box_type_3d='LiDAR', + backend_args=backend_args)) + +optim_wrapper = dict( + optimizer=dict(weight_decay=0.01), + clip_grad=dict(max_norm=35, norm_type=2), +) +val_evaluator = dict( + type='KittiMetric', ann_file='data/kitti/kitti_infos_val.pkl') +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# You may need to download the model first is the network is unstable +load_from = 'https://download.openmmlab.com/mmdetection3d/pretrain_models/mvx_faster_rcnn_detectron2-caffe_20e_coco-pretrain_gt-sample_kitti-3-class_moderate-79.3_20200207-a4a6a3c7.pth' # noqa diff --git a/configs/nuimages/README.md b/configs/nuimages/README.md new file mode 100755 index 0000000..38612f1 --- /dev/null +++ b/configs/nuimages/README.md @@ -0,0 +1,69 @@ +# Mask R-CNN + +> [Mask R-CNN](https://arxiv.org/abs/1703.06870) + + + +## Abstract + +We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. + +
    + +
+ +## Introduction + +We support and provide some baseline results on the [nuImages dataset](https://www.nuscenes.org/nuimages). +We follow the class mapping in the nuScenes dataset, which maps the original categories into 10 foreground categories. +The conversion script can be found [here](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/dataset_converters/nuimage_converter.py). +The baseline results include instance segmentation models, e.g., Mask R-CNN, Cascade Mask R-CNN, and HTC. +We will support panoptic segmentation models in the future. + +![demo image](../../resources/nuimages_demo.gif) + +The dataset converted by the script of v0.6.0 only supports instance segmentation. Since v0.7.0, we also support producing a semantic segmentation mask for each image; thus, we can train HTC or semantic segmentation models using the dataset. To convert the nuImages dataset into COCO format, please use the command below: + +```shell +python -u tools/dataset_converters/nuimage_converter.py --data-root ${DATA_ROOT} --version ${VERSIONS} \ + --out-dir ${OUT_DIR} --nproc ${NUM_WORKERS} --extra-tag ${TAG} +``` + +- `--data-root`: the root of the dataset, defaults to `./data/nuimages`. +- `--version`: the version of the dataset, defaults to `v1.0-mini`. To get the full dataset, please use `--version v1.0-train v1.0-val v1.0-mini`. +- `--out-dir`: the output directory of annotations and semantic masks, defaults to `./data/nuimages/annotations/`. +- `--nproc`: number of workers for data preparation, defaults to `4`. A larger number can reduce the preparation time, as images are processed in parallel. +- `--extra-tag`: extra tag of the annotations, defaults to `nuimages`. This can be used to distinguish annotations processed at different times. + +## Results and models + +### Instance Segmentation + +We report Mask R-CNN, Cascade Mask R-CNN, and HTC results on nuImages.
+ +| Method | Backbone | Pretraining | Lr schd | Mem (GB) | Box AP | Mask AP | Download | +| :----------------: | :-----------------------------------------------------------------------------------: | :---------: | :-----: | :------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask R-CNN | [R-50](./mask_rcnn_r50_fpn_1x_nuim.py) | IN | 1x | 7.4 | 47.8 | 38.4 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_fpn_1x_nuim/mask_rcnn_r50_fpn_1x_nuim_20201008_195238-e99f5182.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_fpn_1x_nuim/mask_rcnn_r50_fpn_1x_nuim_20201008_195238.log.json) | +| Mask R-CNN | [R-50](./mask_rcnn_r50_fpn_coco-2x_1x_nuim.py) | IN+COCO-2x | 1x | 7.4 | 49.7 | 40.5 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_fpn_coco-2x_1x_nuim/mask_rcnn_r50_fpn_coco-2x_1x_nuim_20201008_195238-b1742a60.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_fpn_coco-2x_1x_nuim/mask_rcnn_r50_fpn_coco-2x_1x_nuim_20201008_195238.log.json) | +| Mask R-CNN | [R-50-CAFFE](./mask_rcnn_r50_caffe_fpn_1x_nuim.py) | IN | 1x | 7.0 | 47.7 | 38.2 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_1x_nuim/) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_1x_nuim/) | +| Mask R-CNN | [R-50-CAFFE](./mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py) | IN+COCO-3x | 1x | 7.0 | 49.9 | 40.8 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim/mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim_20201008_195305-661a992e.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim/mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim_20201008_195305.log.json) | +| Mask R-CNN | [R-50-CAFFE](./mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py) | IN+COCO-3x | 20e | 7.0 | 50.6 | 41.3 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim/mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim_20201009_125002-5529442c.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim/mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim_20201009_125002.log.json) | +| Mask R-CNN | [R-101](./mask-rcnn_r101_fpn_1x_nuim.py) | IN | 1x | 10.9 | 48.9 | 39.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r101_fpn_1x_nuim/mask_rcnn_r101_fpn_1x_nuim_20201024_134803-65c7623a.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r101_fpn_1x_nuim/mask_rcnn_r101_fpn_1x_nuim_20201024_134803.log.json) | +| Mask R-CNN | 
[X-101_32x4d](./mask-rcnn_x101_32x4d_fpn_1x_nuim.py) | IN | 1x | 13.3 | 50.4 | 40.5 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_x101_32x4d_fpn_1x_nuim/mask_rcnn_x101_32x4d_fpn_1x_nuim_20201024_135741-b699ab37.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_x101_32x4d_fpn_1x_nuim/mask_rcnn_x101_32x4d_fpn_1x_nuim_20201024_135741.log.json) | +| Cascade Mask R-CNN | [R-50](./cascade_mask_rcnn_r50_fpn_1x_nuim.py) | IN | 1x | 8.9 | 50.8 | 40.4 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_1x_nuim/cascade_mask_rcnn_r50_fpn_1x_nuim_20201008_195342-1147c036.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_1x_nuim/cascade_mask_rcnn_r50_fpn_1x_nuim_20201008_195342.log.json) | +| Cascade Mask R-CNN | [R-50](./cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim.py) | IN+COCO-20e | 1x | 8.9 | 52.8 | 42.2 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim_20201009_124158-ad0540e3.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim_20201009_124158.log.json) | +| Cascade Mask R-CNN | [R-50](./cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim.py) | IN+COCO-20e | 20e | 8.9 | 52.8 | 42.2 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951.log.json) | +| Cascade Mask R-CNN | [R-101](./cascade_mask_rcnn_r101_fpn_1x_nuim.py) | IN | 1x | 12.5 | 51.5 | 40.7 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r101_fpn_1x_nuim/cascade_mask_rcnn_r101_fpn_1x_nuim_20201024_134804-45215b1e.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r101_fpn_1x_nuim/cascade_mask_rcnn_r101_fpn_1x_nuim_20201024_134804.log.json) | +| Cascade Mask R-CNN | [X-101_32x4d](./cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim.py) | IN | 1x | 14.9 | 52.8 | 41.6 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim/cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim_20201024_135753-e0e49778.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim/cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim_20201024_135753.log.json) | +| HTC w/o semantic | [R-50](./htc_without_semantic_r50_fpn_1x_nuim.py) | IN | 1x | | [model](<>) \| [log](<>) | | | +| HTC | [R-50](./htc_r50_fpn_1x_nuim.py) | IN | 1x | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/) | | | +| HTC | [R-50](./htc_r50_fpn_coco-20e_1x_nuim.py) | IN+COCO-20e | 1x | 11.6 | 53.8 | 43.8 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_r50_fpn_coco-20e_1x_nuim/htc_r50_fpn_coco-20e_1x_nuim_20201010_070203-0b53a65e.pth) \| 
[log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_r50_fpn_coco-20e_1x_nuim/htc_r50_fpn_coco-20e_1x_nuim_20201010_070203.log.json) | +| HTC | [R-50](./htc_r50_fpn_coco-20e_20e_nuim.py) | IN+COCO-20e | 20e | 11.6 | 54.8 | 44.4 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_r50_fpn_coco-20e_20e_nuim/htc_r50_fpn_coco-20e_20e_nuim_20201008_211415-d6c60a2c.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_r50_fpn_coco-20e_20e_nuim/htc_r50_fpn_coco-20e_20e_nuim_20201008_211415.log.json) | +| HTC | [X-101_64x4d + DCN_c3-c5](./htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim.py) | IN+COCO-20e | 20e | 13.3 | 57.3 | 46.4 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim_20201008_211222-0b16ac4b.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim_20201008_211222.log.json) | + +**Note**: + +1. `IN` means only using an ImageNet pre-trained backbone. `IN+COCO-Nx` and `IN+COCO-Ne` mean the backbone is first pre-trained on ImageNet, and then the detector is pre-trained on the COCO train2017 dataset with `Nx` and `N`-epoch schedules, respectively. +2. All the training hyper-parameters follow the standard schedules on the COCO dataset, except that the images are randomly resized within the range 1280 x 720 to 1920 x 1080 (relative ratio 0.8 to 1.2), since the original images are 1600 x 900. +3. The class order in the detectors released in v0.6.0 is different from the order in the configs because of a bug in the conversion script. This bug has been fixed since v0.7.0, and models trained with the correct class order have also been released. If you have used nuImages since v0.6.0, please re-convert the data with the conversion script using the above-mentioned command.
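+
+If you are unsure whether your local annotations were produced with the fixed class order, a quick sanity check such as the sketch below can help. This snippet is not part of the codebase; the annotation path is an assumption based on the default `--out-dir` and `--extra-tag`, so adjust it to your own conversion output.
+
+```python
+# Hypothetical sanity check: list the categories of a converted annotation file
+# in id order. The 10 foreground classes should follow the nuScenes class mapping.
+import json
+
+ann_file = 'data/nuimages/annotations/nuimages_v1.0-mini.json'  # assumed default output
+with open(ann_file) as f:
+    categories = json.load(f)['categories']
+for cat in sorted(categories, key=lambda c: c['id']):
+    print(cat['id'], cat['name'])
+```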
diff --git a/configs/nuimages/cascade-mask-rcnn-r50-fpn_coco-20e_nuim.py b/configs/nuimages/cascade-mask-rcnn-r50-fpn_coco-20e_nuim.py new file mode 100755 index 0000000..5d69466 --- /dev/null +++ b/configs/nuimages/cascade-mask-rcnn-r50-fpn_coco-20e_nuim.py @@ -0,0 +1,7 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_1x_nuim.py' + +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(max_epochs=20) + +load_from = 'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth' # noqa diff --git a/configs/nuimages/cascade-mask-rcnn_r101_fpn_1x_nuim.py b/configs/nuimages/cascade-mask-rcnn_r101_fpn_1x_nuim.py new file mode 100755 index 0000000..28a54f7 --- /dev/null +++ b/configs/nuimages/cascade-mask-rcnn_r101_fpn_1x_nuim.py @@ -0,0 +1,2 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_1x_nuim.py' +model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/configs/nuimages/cascade-mask-rcnn_r50_fpn_1x_nuim.py b/configs/nuimages/cascade-mask-rcnn_r50_fpn_1x_nuim.py new file mode 100755 index 0000000..c6ce25e --- /dev/null +++ b/configs/nuimages/cascade-mask-rcnn_r50_fpn_1x_nuim.py @@ -0,0 +1,60 @@ +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/nuim_instance.py', + '../_base_/schedules/mmdet_schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict(num_classes=10))) diff --git a/configs/nuimages/cascade-mask-rcnn_r50_fpn_coco-20e-1x_nuim.py b/configs/nuimages/cascade-mask-rcnn_r50_fpn_coco-20e-1x_nuim.py new file mode 100755 index 0000000..bf3ffed --- /dev/null +++ b/configs/nuimages/cascade-mask-rcnn_r50_fpn_coco-20e-1x_nuim.py @@ -0,0 +1,3 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_1x_nuim.py' + +load_from = 'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth' # noqa diff --git a/configs/nuimages/cascade-mask-rcnn_x101_32x4d_fpn_1x_nuim.py b/configs/nuimages/cascade-mask-rcnn_x101_32x4d_fpn_1x_nuim.py new file mode 100755 index 0000000..19f35ae --- /dev/null +++ 
b/configs/nuimages/cascade-mask-rcnn_x101_32x4d_fpn_1x_nuim.py @@ -0,0 +1,13 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_1x_nuim.py' +model = dict( + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch')) diff --git a/configs/nuimages/htc_r50_fpn_1x_nuim.py b/configs/nuimages/htc_r50_fpn_1x_nuim.py new file mode 100755 index 0000000..4b1bcc2 --- /dev/null +++ b/configs/nuimages/htc_r50_fpn_1x_nuim.py @@ -0,0 +1,38 @@ +_base_ = './htc_without_semantic_r50_fpn_1x_nuim.py' +model = dict( + roi_head=dict( + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=32, + ignore_label=0, + loss_weight=0.2))) + +data_root = 'data/nuimages/' +backend_args = None +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict( + type='Resize', + img_scale=[(1280, 720), (1920, 1080)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='SegRescale', scale_factor=1 / 8), + dict(type='PackDetInputs') +] +data = dict( + train=dict( + seg_prefix=data_root + 'annotations/semantic_masks/', + pipeline=train_pipeline)) diff --git a/configs/nuimages/htc_r50_fpn_coco-20e-1x_nuim.py b/configs/nuimages/htc_r50_fpn_coco-20e-1x_nuim.py new file mode 100755 index 0000000..e5f6052 --- /dev/null +++ b/configs/nuimages/htc_r50_fpn_coco-20e-1x_nuim.py @@ -0,0 +1,3 @@ +_base_ = './htc_r50_fpn_1x_nuim.py' + +load_from = 'http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth' # noqa diff --git a/configs/nuimages/htc_r50_fpn_coco-20e_nuim.py b/configs/nuimages/htc_r50_fpn_coco-20e_nuim.py new file mode 100755 index 0000000..e08d939 --- /dev/null +++ b/configs/nuimages/htc_r50_fpn_coco-20e_nuim.py @@ -0,0 +1,4 @@ +_base_ = './htc_r50_fpn_coco-20e-1x_nuim.py' +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(max_epochs=20) diff --git a/configs/nuimages/htc_r50_fpn_head-without-semantic_1x_nuim.py b/configs/nuimages/htc_r50_fpn_head-without-semantic_1x_nuim.py new file mode 100755 index 0000000..7a9ba88 --- /dev/null +++ b/configs/nuimages/htc_r50_fpn_head-without-semantic_1x_nuim.py @@ -0,0 +1,222 @@ +_base_ = [ + '../_base_/datasets/nuim-instance.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='HybridTaskCascade', + pretrained='torchvision://resnet50', + _scope_='mmdet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 
1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='HybridTaskCascadeRoIHead', + interleaved=True, + mask_info_flow=True, + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=[ + dict( + type='HTCMaskHead', + with_conv_res=False, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=10, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=10, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=10, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + 
pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/configs/nuimages/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e-1xb16_nuim.py b/configs/nuimages/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e-1xb16_nuim.py new file mode 100755 index 0000000..4ab095a --- /dev/null +++ b/configs/nuimages/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e-1xb16_nuim.py @@ -0,0 +1,23 @@ +_base_ = './htc_r50_fpn_1x_nuim.py' +model = dict( + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) + +data = dict(samples_per_gpu=1, workers_per_gpu=1) +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(max_epochs=20) + +load_from = 'http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth' # noqa diff --git a/configs/nuimages/mask-rcnn_r101_fpn_1x_nuim.py b/configs/nuimages/mask-rcnn_r101_fpn_1x_nuim.py new file mode 100755 index 0000000..6245194 --- /dev/null +++ b/configs/nuimages/mask-rcnn_r101_fpn_1x_nuim.py @@ -0,0 +1,2 @@ +_base_ = './mask_rcnn_r50_fpn_1x_nuim.py' +model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/configs/nuimages/mask-rcnn_r50_caffe_fpn_1x_nuim.py b/configs/nuimages/mask-rcnn_r50_caffe_fpn_1x_nuim.py new file mode 100755 index 0000000..9d67f14 --- /dev/null +++ b/configs/nuimages/mask-rcnn_r50_caffe_fpn_1x_nuim.py @@ -0,0 +1,41 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/nuim-instance.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +model = dict( + pretrained='open-mmlab://detectron2/resnet50_caffe', + backbone=dict(norm_cfg=dict(requires_grad=False), style='caffe'), + roi_head=dict( + bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) +backend_args = None +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1280, 720), (1920, 1080)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PackDetInputs'), +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1600, 900), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')), +] +data = dict( + 
train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/nuimages/mask-rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py b/configs/nuimages/mask-rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py new file mode 100755 index 0000000..1be657c --- /dev/null +++ b/configs/nuimages/mask-rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/nuim-instance.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +model = dict( + pretrained='open-mmlab://detectron2/resnet50_caffe', + backbone=dict(norm_cfg=dict(requires_grad=False), style='caffe'), + roi_head=dict( + bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) +backend_args = None +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1280, 720), (1920, 1080)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PackDetInputs'), +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1600, 900), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth' # noqa diff --git a/configs/nuimages/mask-rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py b/configs/nuimages/mask-rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py new file mode 100755 index 0000000..c41d698 --- /dev/null +++ b/configs/nuimages/mask-rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py @@ -0,0 +1,47 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/nuim-instance.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +model = dict( + pretrained='open-mmlab://detectron2/resnet50_caffe', + backbone=dict(norm_cfg=dict(requires_grad=False), style='caffe'), + roi_head=dict( + bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) +backend_args = None +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1280, 720), (1920, 1080)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PackDetInputs'), +] +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1600, 900), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(max_epochs=20) + +load_from = 
'http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth' # noqa diff --git a/configs/nuimages/mask-rcnn_r50_fpn_1x_nuim.py b/configs/nuimages/mask-rcnn_r50_fpn_1x_nuim.py new file mode 100755 index 0000000..1fc8925 --- /dev/null +++ b/configs/nuimages/mask-rcnn_r50_fpn_1x_nuim.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/nuim-instance.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) diff --git a/configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nuim.py b/configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nuim.py new file mode 100755 index 0000000..701101e --- /dev/null +++ b/configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nuim.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/mask-rcnn_r50_fpn.py', + '../_base_/datasets/nuim-instance.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth' # noqa diff --git a/configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nus-2d.py b/configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nus-2d.py new file mode 100755 index 0000000..c2279e4 --- /dev/null +++ b/configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nus-2d.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/nuim_instance.py', + '../_base_/schedules/mmdet_schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) + +backend_args = None + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1600, 900), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + ]), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')), +] +data_root = 'data/nuimages/' +# data = dict( +# val=dict( +# ann_file=data_root + 'annotations/nuimages_v1.0-mini.json'), +# test=dict( +# ann_file=data_root + 'annotations/nuimages_v1.0-mini.json')) diff --git a/configs/nuimages/mask-rcnn_x101_32x4d_fpn_1x_nuim.py b/configs/nuimages/mask-rcnn_x101_32x4d_fpn_1x_nuim.py new file mode 100755 index 0000000..eb3e81b --- /dev/null +++ b/configs/nuimages/mask-rcnn_x101_32x4d_fpn_1x_nuim.py @@ -0,0 +1,13 @@ +_base_ = './mask_rcnn_r50_fpn_1x_nuim.py' +model = dict( + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch')) diff --git a/configs/nuimages/metafile.yml b/configs/nuimages/metafile.yml new file mode 100755 index 0000000..49ae8d7 --- /dev/null +++ b/configs/nuimages/metafile.yml @@ -0,0 +1,279 @@ +Collections: + - Name: Mask R-CNN + Metadata: + Training Data: nuImages + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x TITAN Xp + Architecture: + - Softmax + - RPN + - Convolution + - 
Dense Connections + - FPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1703.06870v3 + Title: "Mask R-CNN" + README: configs/nuimages/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: mask_rcnn_r50_fpn_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/mask_rcnn_r50_fpn_1x_nuim.py + Metadata: + Training Memory (GB): 7.4 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 47.8 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 38.4 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_fpn_1x_nuim/mask_rcnn_r50_fpn_1x_nuim_20201008_195238-e99f5182.pth + + - Name: mask_rcnn_r50_fpn_coco-2x_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/mask_rcnn_r50_fpn_coco-2x_1x_nuim.py + Metadata: + Training Memory (GB): 7.4 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 49.7 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_fpn_coco-2x_1x_nuim/mask_rcnn_r50_fpn_coco-2x_1x_nuim_20201008_195238-b1742a60.pth + + - Name: mask_rcnn_r50_caffe_fpn_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/mask_rcnn_r50_caffe_fpn_1x_nuim.py + Metadata: + Training Memory (GB): 7.0 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 47.7 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 38.2 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_1x_nuim/ + + - Name: mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py + Metadata: + Training Memory (GB): 7.0 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 49.9 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim/mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim_20201008_195305-661a992e.pth + + - Name: mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py + Metadata: + Training Memory (GB): 7.0 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 50.6 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 41.3 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim/mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim_20201009_125002-5529442c.pth + + - Name: mask_rcnn_r101_fpn_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/mask-rcnn_r101_fpn_1x_nuim.py + Metadata: + Training Memory (GB): 10.9 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 48.9 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 39.1 + Weights: 
https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_r101_fpn_1x_nuim/mask_rcnn_r101_fpn_1x_nuim_20201024_134803-65c7623a.pth + + - Name: mask_rcnn_x101_32x4d_fpn_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/mask-rcnn_x101_32x4d_fpn_1x_nuim.py + Metadata: + Training Memory (GB): 13.3 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 50.4 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/mask_rcnn_x101_32x4d_fpn_1x_nuim/mask_rcnn_x101_32x4d_fpn_1x_nuim_20201024_135741-b699ab37.pth + + - Name: cascade_mask_rcnn_r50_fpn_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/cascade_mask_rcnn_r50_fpn_1x_nuim.py + Metadata: + Training Memory (GB): 8.9 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 50.8 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_1x_nuim/cascade_mask_rcnn_r50_fpn_1x_nuim_20201008_195342-1147c036.pth + + - Name: cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim.py + Metadata: + Training Memory (GB): 8.9 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 52.8 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 42.2 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim_20201009_124158-ad0540e3.pth + + - Name: cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim.py + Metadata: + Training Memory (GB): 8.9 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 52.8 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 42.2 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth + + - Name: cascade_mask_rcnn_r101_fpn_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/cascade_mask_rcnn_r101_fpn_1x_nuim.py + Metadata: + Training Memory (GB): 12.5 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 51.5 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r101_fpn_1x_nuim/cascade_mask_rcnn_r101_fpn_1x_nuim_20201024_134804-45215b1e.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim.py + Metadata: + Training Memory (GB): 14.9 + Training Resources: 8x TITAN Xp + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 52.8 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 41.6 + Weights: 
https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim/cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim_20201024_135753-e0e49778.pth + + - Name: htc_r50_fpn_coco-20e_1x_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/htc_r50_fpn_coco-20e_1x_nuim.py + Metadata: + Training Memory (GB): 11.6 + Training Resources: 8x V100 GPUs + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 53.8 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 43.8 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_r50_fpn_coco-20e_1x_nuim/htc_r50_fpn_coco-20e_1x_nuim_20201010_070203-0b53a65e.pth + + - Name: htc_r50_fpn_coco-20e_20e_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/htc_r50_fpn_coco-20e_20e_nuim.py + Metadata: + Training Memory (GB): 11.6 + Training Resources: 8x V100 GPUs + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 54.8 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 44.4 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_r50_fpn_coco-20e_20e_nuim/htc_r50_fpn_coco-20e_20e_nuim_20201008_211415-d6c60a2c.pth + + - Name: htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim + In Collection: Mask R-CNN + Config: configs/nuimages/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim.py + Metadata: + Training Memory (GB): 13.3 + Training Resources: 8x V100 GPUs + Results: + - Task: Object Detection + Dataset: nuImages + Metrics: + Box AP: 57.3 + - Task: Instance Segmentation + Dataset: nuImages + Metrics: + Mask AP: 46.4 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim_20201008_211222-0b16ac4b.pth diff --git a/configs/paconv/README.md b/configs/paconv/README.md new file mode 100755 index 0000000..96063a9 --- /dev/null +++ b/configs/paconv/README.md @@ -0,0 +1,51 @@ +# PAConv: Position Adaptive Convolution with Dynamic Kernel Assembling on Point Clouds + +> [PAConv: Position Adaptive Convolution with Dynamic Kernel Assembling on Point Clouds](https://arxiv.org/abs/2103.14635) + + + +## Abstract + +We introduce Position Adaptive Convolution (PAConv), a generic convolution operation for 3D point cloud processing. The key of PAConv is to construct the convolution kernel by dynamically assembling basic weight matrices stored in Weight Bank, where the coefficients of these weight matrices are self-adaptively learned from point positions through ScoreNet. In this way, the kernel is built in a data-driven manner, endowing PAConv with more flexibility than 2D convolutions to better handle the irregular and unordered point cloud data. Besides, the complexity of the learning process is reduced by combining weight matrices instead of brutally predicting kernels from point positions. +Furthermore, different from the existing point convolution operators whose network architectures are often heavily engineered, we integrate our PAConv into classical MLP-based point cloud pipelines without changing network configurations. Even built on simple networks, our method still approaches or even surpasses the state-of-the-art models, and significantly improves baseline performance on both classification and segmentation tasks, yet with decent efficiency. Thorough ablation studies and visualizations are provided to understand PAConv. 
+ +
    + +
+ +## Introduction + +We implement PAConv and provide the results and checkpoints on the S3DIS dataset. + +**Notice**: The original PAConv paper used a step learning rate schedule. We found that a cosine schedule achieves slightly better results and adopt it in our implementation. + +## Results and models + +### S3DIS + +| Method | Split | Lr schd | Mem (GB) | Inf time (fps) | mIoU (Val set) | Download | +| :---------------------------------------------------------------: | :----: | :---------: | :------: | :------------: | :------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [PAConv (SSG)](./paconv_ssg_8xb8-cosine-150e_s3dis-seg.py) | Area_5 | cosine 150e | 5.8 | | 66.65 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/paconv/paconv_ssg_8x8_cosine_150e_s3dis_seg-3d-13class/paconv_ssg_8x8_cosine_150e_s3dis_seg-3d-13class_20210729_200615-2147b2d1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/paconv/paconv_ssg_8x8_cosine_150e_s3dis_seg-3d-13class/paconv_ssg_8x8_cosine_150e_s3dis_seg-3d-13class_20210729_200615.log.json) | +| [PAConv\* (SSG)](./paconv_ssg-cuda_8xb8-cosine-200e_s3dis-seg.py) | Area_5 | cosine 200e | 3.8 | | 65.33 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/paconv/paconv_cuda_ssg_8x8_cosine_200e_s3dis_seg-3d-13class/paconv_cuda_ssg_8x8_cosine_200e_s3dis_seg-3d-13class_20210802_171802-e5ea9bb9.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/paconv/paconv_cuda_ssg_8x8_cosine_200e_s3dis_seg-3d-13class/paconv_cuda_ssg_8x8_cosine_200e_s3dis_seg-3d-13class_20210802_171802.log.json) | + +**Notes:** + +- We use XYZ+Color+Normalized_XYZ as input in all the experiments on the S3DIS dataset. +- `Area_5` split means training the model on Area_1, 2, 3, 4, 6 and testing on Area_5. +- PAConv\* stands for the CUDA implementation of PAConv operations. See appendix section D of the [paper](https://arxiv.org/pdf/2103.14635.pdf) for more details. In our experiments, the training of PAConv\* was found to be very unstable. We achieved a slightly lower mIoU than the result in the paper, but it is consistent with the result obtained by running their [official code](https://github.com/CVMI-Lab/PAConv/tree/main/scene_seg). Besides, although the GPU memory consumption of PAConv\* is significantly lower than that of PAConv, its training and inference speeds are actually slower (by ~10%). + +## Indeterminism + +PAConv testing adopts sliding-patch inference, which involves random point sampling. Since the test script uses fixed random seeds while the random seeds of validation during training are not fixed, the test results may be slightly different from the results reported above.
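+
+The snippet below is a small, self-contained illustration (not part of the test script) of the effect described above: with a fixed seed the random patch sampling is reproducible, while an unfixed seed draws a different set of points each run, which slightly changes the evaluated predictions.
+
+```python
+# Illustration only: random point sampling with and without a fixed seed.
+import numpy as np
+
+rng_fixed_a = np.random.default_rng(seed=0)
+rng_fixed_b = np.random.default_rng(seed=0)
+rng_free = np.random.default_rng()
+
+# Sample 4096 point indices out of 8192, as a sliding-patch sampler might.
+idx_a = rng_fixed_a.choice(8192, 4096, replace=False)
+idx_b = rng_fixed_b.choice(8192, 4096, replace=False)
+idx_c = rng_free.choice(8192, 4096, replace=False)
+
+assert (idx_a == idx_b).all()   # fixed seed: identical samples every run
+print((idx_a == idx_c).mean())  # unfixed seed: generally a different sample
+```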
+ +## Citation + +```latex +@inproceedings{xu2021paconv, + title={PAConv: Position Adaptive Convolution with Dynamic Kernel Assembling on Point Clouds}, + author={Xu, Mutian and Ding, Runyu and Zhao, Hengshuang and Qi, Xiaojuan}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={3173--3182}, + year={2021} +} +``` diff --git a/configs/paconv/metafile.yml b/configs/paconv/metafile.yml new file mode 100755 index 0000000..558ab86 --- /dev/null +++ b/configs/paconv/metafile.yml @@ -0,0 +1,42 @@ +Collections: + - Name: PAConv + Metadata: + Training Techniques: + - SGD + Training Resources: 8x Titan XP GPUs + Architecture: + - PAConv + Paper: + URL: https://arxiv.org/abs/2103.14635 + Title: 'PAConv: Position Adaptive Convolution with Dynamic Kernel Assembling on Point Clouds' + README: configs/paconv/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/paconv/paconv.py#L106 + Version: v0.16.0 + +Models: + - Name: paconv_ssg_8xb8-cosine-150e_s3dis-seg + In Collection: PAConv + Config: configs/paconv/paconv_ssg_8xb8-cosine-150e_s3dis-seg.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 5.8 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS + Metrics: + mIoU: 66.65 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/paconv/paconv_ssg_8x8_cosine_150e_s3dis_seg-3d-13class/paconv_ssg_8x8_cosine_150e_s3dis_seg-3d-13class_20210729_200615-2147b2d1.pth + + - Name: paconv_ssg-cuda_8xb8-cosine-200e_s3dis-seg + In Collection: PAConv + Config: configs/paconv/paconv_ssg-cuda_8xb8-cosine-200e_s3dis-seg.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 3.8 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS + Metrics: + mIoU: 65.33 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/paconv/paconv_cuda_ssg_8x8_cosine_200e_s3dis_seg-3d-13class/paconv_cuda_ssg_8x8_cosine_200e_s3dis_seg-3d-13class_20210802_171802-e5ea9bb9.pth diff --git a/configs/paconv/paconv_ssg-cuda_8xb8-cosine-200e_s3dis-seg.py b/configs/paconv/paconv_ssg-cuda_8xb8-cosine-200e_s3dis-seg.py new file mode 100755 index 0000000..e9b8cdc --- /dev/null +++ b/configs/paconv/paconv_ssg-cuda_8xb8-cosine-200e_s3dis-seg.py @@ -0,0 +1,64 @@ +_base_ = [ + '../_base_/datasets/s3dis-seg.py', '../_base_/models/paconv_ssg-cuda.py', + '../_base_/schedules/seg-cosine-150e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + decode_head=dict( + num_classes=13, ignore_index=13, + loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight + test_cfg=dict( + num_points=4096, + block_size=1.0, + sample_rate=0.5, + use_normalized_coord=True, + batch_size=12)) + +# data settings +num_points = 4096 +backend_args = None +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict(type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.0, + use_normalized_coord=True, + num_try=10000, + enlarge_size=None, + min_unique_num=num_points // 4, + eps=0.0), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='GlobalRotScaleTrans', + rot_range=[0.0, 6.283185307179586], # [0, 2 * pi] + scale_ratio_range=[0.8, 
1.2], + translation_std=[0, 0, 0]), + dict( + type='RandomJitterPoints', + jitter_std=[0.01, 0.01, 0.01], + clip_range=[-0.05, 0.05]), + dict(type='RandomDropPointsColor', drop_ratio=0.2), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] + +train_dataloader = dict(batch_size=8, dataset=dict(pipeline=train_pipeline)) + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=200, val_interval=1) diff --git a/configs/paconv/paconv_ssg_8xb8-cosine-150e_s3dis-seg.py b/configs/paconv/paconv_ssg_8xb8-cosine-150e_s3dis-seg.py new file mode 100755 index 0000000..da29b89 --- /dev/null +++ b/configs/paconv/paconv_ssg_8xb8-cosine-150e_s3dis-seg.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/datasets/s3dis-seg.py', '../_base_/models/paconv_ssg.py', + '../_base_/schedules/seg-cosine-150e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + decode_head=dict( + num_classes=13, ignore_index=13, + loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight + test_cfg=dict( + num_points=4096, + block_size=1.0, + sample_rate=0.5, + use_normalized_coord=True, + batch_size=12)) + +# data settings +num_points = 4096 +backend_args = None +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict(type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.0, + use_normalized_coord=True, + num_try=10000, + enlarge_size=None, + min_unique_num=num_points // 4, + eps=0.0), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='GlobalRotScaleTrans', + rot_range=[0.0, 6.283185307179586], # [0, 2 * pi] + scale_ratio_range=[0.8, 1.2], + translation_std=[0, 0, 0]), + dict( + type='RandomJitterPoints', + jitter_std=[0.01, 0.01, 0.01], + clip_range=[-0.05, 0.05]), + dict(type='RandomDropPointsColor', drop_ratio=0.2), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] + +train_dataloader = dict(batch_size=8, dataset=dict(pipeline=train_pipeline)) diff --git a/configs/parta2/README.md b/configs/parta2/README.md new file mode 100755 index 0000000..716c73e --- /dev/null +++ b/configs/parta2/README.md @@ -0,0 +1,38 @@ +# From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network + +> [From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network](https://arxiv.org/abs/1907.03670) + + + +## Abstract + +3D object detection from LiDAR point cloud is a challenging problem in 3D scene understanding and has many practical applications. In this paper, we extend our preliminary work PointRCNN to a novel and strong point-cloud-based 3D object detection framework, the part-aware and aggregation neural network (Part-A2 net). The whole framework consists of the part-aware stage and the part-aggregation stage. Firstly, the part-aware stage for the first time fully utilizes free-of-charge part supervisions derived from 3D ground-truth boxes to simultaneously predict high quality 3D proposals and accurate intra-object part locations. 
The predicted intra-object part locations within the same proposal are grouped by our new-designed RoI-aware point cloud pooling module, which results in an effective representation to encode the geometry-specific features of each 3D proposal. Then the part-aggregation stage learns to re-score the box and refine the box location by exploring the spatial relationship of the pooled intra-object part locations. Extensive experiments are conducted to demonstrate the performance improvements from each component of our proposed framework. Our Part-A2 net outperforms all existing 3D detection methods and achieves new state-of-the-art on KITTI 3D object detection dataset by utilizing only the LiDAR point cloud data. + +
    + +
    + +## Introduction + +We implement Part-A^2 and provide its results and checkpoints on KITTI dataset. + +## Results and models + +### KITTI + +| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :-------------------------------------------------------------: | :-----: | :--------: | :------: | :------------: | :---: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py) | 3 Class | cyclic 80e | 4.1 | | 68.33 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class_20210831_022017-454a5344.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class_20210831_022017.log.json) | +| [SECFPN](./parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-car.py) | Car | cyclic 80e | 4.0 | | 79.08 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car_20210831_022017-cb7ff621.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car_20210831_022017.log.json) | + +## Citation + +```latex +@article{shi2020points, + title={From points to parts: 3d object detection from point cloud with part-aware and part-aggregation network}, + author={Shi, Shaoshuai and Wang, Zhe and Shi, Jianping and Wang, Xiaogang and Li, Hongsheng}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + year={2020}, + publisher={IEEE} +} +``` diff --git a/configs/parta2/metafile.yml b/configs/parta2/metafile.yml new file mode 100755 index 0000000..ac68c62 --- /dev/null +++ b/configs/parta2/metafile.yml @@ -0,0 +1,41 @@ +Collections: + - Name: Part-A^2 + Metadata: + Training Data: KITTI + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Sparse U-Net + Paper: + URL: https://arxiv.org/abs/1907.03670 + Title: 'From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network' + README: configs/parta2/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/parta2.py#L12 + Version: v0.5.0 + +Models: + - Name: parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class + In Collection: Part-A^2 + Config: configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py + Metadata: + Training Memory (GB): 4.1 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 68.33 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class_20210831_022017-454a5344.pth + + - Name: parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-car + In Collection: Part-A^2 + Config: configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-car.py + Metadata: + Training Memory (GB): 4.0 + Results: + - Task: 3D Object Detection + Dataset: KITTI 
+ Metrics: + mAP: 79.08 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car_20210831_022017-cb7ff621.pth diff --git a/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py b/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py new file mode 100755 index 0000000..f22e133 --- /dev/null +++ b/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py @@ -0,0 +1,160 @@ +_base_ = [ + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py', + '../_base_/models/parta2.py' +] + +point_cloud_range = [0, -40, -3, 70.4, 40, 1] + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +input_modality = dict(use_lidar=True, use_camera=False) +backend_args = None +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=12, Pedestrian=6, Cyclist=6), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + metainfo=dict(classes=class_names), + box_type_3d='LiDAR', + test_mode=False, + backend_args=backend_args))) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_val.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=test_pipeline, + modality=input_modality, + metainfo=dict(classes=class_names), + box_type_3d='LiDAR', + test_mode=True, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_val.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=eval_pipeline, + modality=input_modality, + metainfo=dict(classes=class_names), + box_type_3d='LiDAR', + test_mode=True, + backend_args=backend_args)) +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator +# Part-A2 uses a different learning rate from what SECOND uses. +optim_wrapper = dict(optimizer=dict(lr=0.001)) +find_unused_parameters = True + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-car.py b/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-car.py new file mode 100755 index 0000000..239cd0c --- /dev/null +++ b/configs/parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-car.py @@ -0,0 +1,154 @@ +_base_ = './parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py' + +point_cloud_range = [0, -40, -3, 70.4, 40, 1] # velodyne coordinates, x, y, z + +model = dict( + rpn_head=dict( + type='PartA2RPNHead', + num_classes=1, + anchor_generator=dict( + _delete_=True, + type='Anchor3DRangeGenerator', + ranges=[[0, -40.0, -1.78, 70.4, 40.0, -1.78]], + sizes=[[3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False)), + roi_head=dict( + num_classes=1, + semantic_head=dict(num_classes=1), + bbox_head=dict(num_classes=1)), + # model training and testing settings + train_cfg=dict( + _delete_=True, + rpn=dict( + assigner=dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=9000, + nms_post=512, + max_num=512, + nms_thr=0.8, + score_thr=0, + use_rotate_nms=False), + rcnn=dict( + assigner=dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + sampler=dict( + type='IoUNegPiecewiseSampler', + num=128, + pos_fraction=0.55, + neg_piece_fractions=[0.8, 0.2], + neg_iou_piece_thrs=[0.55, 0.1], + neg_pos_ub=-1, + add_gt_as_proposals=False, + return_iou=True), + cls_pos_thr=0.75, + cls_neg_thr=0.25)), + test_cfg=dict( + rpn=dict( + nms_pre=1024, + nms_post=100, + max_num=100, + nms_thr=0.7, + score_thr=0, + use_rotate_nms=True), + rcnn=dict( + use_rotate_nms=True, + use_raw_score=True, + nms_thr=0.01, + score_thr=0.1))) + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Car'] +input_modality = dict(use_lidar=True, use_camera=False) +backend_args = None +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + classes=class_names, + sample_groups=dict(Car=15), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + 
coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + dataset=dict( + dataset=dict( + pipeline=train_pipeline, metainfo=dict(classes=class_names)))) +test_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) +val_dataloader = dict(dataset=dict(metainfo=dict(classes=class_names))) +find_unused_parameters = True diff --git a/configs/pgd/README.md b/configs/pgd/README.md new file mode 100755 index 0000000..2237a0f --- /dev/null +++ b/configs/pgd/README.md @@ -0,0 +1,69 @@ +# Probabilistic and Geometric Depth: Detecting Objects in Perspective + +> [Probabilistic and Geometric Depth: Detecting Objects in Perspective](https://arxiv.org/abs/2107.14160) + + + +## Abstract + +3D object detection is an important capability needed in various practical applications such as driver assistance systems. Monocular 3D detection, as a representative general setting among image-based approaches, provides a more economical solution than conventional settings relying on LiDARs but still yields unsatisfactory results. This paper first presents a systematic study on this problem. We observe that the current monocular 3D detection can be simplified as an instance depth estimation problem: The inaccurate instance depth blocks all the other 3D attribute predictions from improving the overall detection performance. Moreover, recent methods directly estimate the depth based on isolated instances or pixels while ignoring the geometric relations across different objects. To this end, we construct geometric relation graphs across predicted objects and use the graph to facilitate depth estimation. As the preliminary depth estimation of each instance is usually inaccurate in this ill-posed setting, we incorporate a probabilistic representation to capture the uncertainty. It provides an important indicator to identify confident predictions and further guide the depth propagation. Despite the simplicity of the basic idea, our method, PGD, obtains significant improvements on KITTI and nuScenes benchmarks, achieving 1st place out of all monocular vision-only methods while still maintaining real-time efficiency. Code and models will be released at [this https URL](https://github.com/open-mmlab/mmdetection3d). + +
    + +
+ +## Introduction + +PGD, which can also be regarded as FCOS3D++, is a simple yet effective monocular 3D detector. It enhances the FCOS3D baseline by incorporating local geometric constraints and improving instance depth estimation. + +We release code and models for both the KITTI and nuScenes benchmarks, which is a good supplement to the original FCOS3D baseline (supported only on nuScenes). + +To keep the implementation clean, our preliminary release supports base models with the proposed local geometric constraints and the probabilistic depth representation. The geometric graph part will be added in the future. + +A more extensive study based on FCOS3D and PGD is ongoing. Please stay tuned. + +## Results and models + +### KITTI + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP_11 / mAP_40 | Download | +| :---------------------------------------------------------------: | :-----: | :------: | :------------: | :-------------: | :------------------------------------------------------------------: | +| [ResNet101](./pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py) | 4x | 9.07 | | 18.33 / 13.23 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d_20211022_102608-8a97533b.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d_20211022_102608.log.json) | + +Detailed performance on KITTI 3D detection (3D/BEV) is as follows, evaluated by the AP11 and AP40 metrics: + +| | Easy | Moderate | Hard | +| ---------- | :-----------: | :-----------: | :-----------: | +| Car (AP11) | 24.09 / 30.11 | 18.33 / 23.46 | 16.90 / 19.33 | +| Car (AP40) | 19.27 / 26.60 | 13.23 / 18.23 | 10.65 / 15.00 | + +Note: mAP represents the Car moderate 3D strict AP11 / AP40 results. Because of the limited data for pedestrians and cyclists, the detection performance for these two classes is usually unstable. Therefore, we only list car detection results here. In addition, AP40 is the recommended reference metric due to its much better stability.
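For context, the AP11 / AP40 numbers above come from the standard KITTI evaluator. The PGD KITTI config in this patch inherits its evaluator from the `kitti-mono3d` dataset base, so the snippet below is only an illustrative sketch; its field names mirror the `KittiMetric` setup used by other KITTI configs in this patch (e.g. Part-A2) rather than the exact PGD setting.

```python
# Minimal sketch of a KITTI evaluator config (illustrative only).
# KittiMetric reports AP under the easy/moderate/hard settings referenced above.
data_root = 'data/kitti/'
val_evaluator = dict(
    type='KittiMetric',
    ann_file=data_root + 'kitti_infos_val.pkl',  # validation-split info file
    metric='bbox')
test_evaluator = val_evaluator
```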
+ +### NuScenes + +| Backbone | Lr schd | Mem (GB) | mAP | NDS | Download | +| :-------------------------------------------------------------------------------: | :-----: | :------: | :--: | :--: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [ResNet101 w/ DCN](./pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py) | 1x | 9.20 | 31.7 | 39.3 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_20211116_195350-f4b5eec2.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_20211116_195350.log.json) | +| [above w/ finetune](./pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune.py) | 1x | 9.20 | 34.6 | 41.1 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_finetune/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_finetune_20211118_093245-fd419681.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_finetune/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_finetune_20211118_093245.log.json) | +| above w/ tta | 1x | 9.20 | 35.5 | 41.8 | | +| [ResNet101 w/ DCN](./pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py) | 2x | 9.20 | 33.6 | 40.9 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_20211112_125314-cb677266.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_20211112_125314.log.json) | +| [above w/ finetune](./pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune.py) | 2x | 9.20 | 35.8 | 42.5 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_finetune/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_finetune_20211114_162135-5ec7c1cd.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_finetune/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_finetune_20211114_162135.log.json) | +| above w/ tta | 2x | 9.20 | 36.8 | 43.1 | | + +## Citation + +```latex +@inproceedings{wang2021pgd, + title={{Probabilistic and Geometric Depth: Detecting} Objects in Perspective}, + author={Wang, Tai and Zhu, Xinge and Pang, Jiangmiao and Lin, Dahua}, + booktitle={Conference on Robot Learning (CoRL) 2021}, + year={2021} +} +# For the baseline version +@inproceedings{wang2021fcos3d, + title={{FCOS3D: Fully} Convolutional One-Stage Monocular 3D Object Detection}, + author={Wang, Tai and Zhu, Xinge and Pang, Jiangmiao and Lin, Dahua}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops}, + year={2021} +} +``` diff --git a/configs/pgd/metafile.yml b/configs/pgd/metafile.yml new file mode 100755 index 0000000..676f58e --- /dev/null +++ b/configs/pgd/metafile.yml @@ -0,0 +1,83 @@ 
+Collections: + - Name: PGD + Metadata: + Training Data: KITTI + Training Techniques: + - SGD + Training Resources: 4x TITAN XP + Architecture: + - PGDHead + Paper: + URL: https://arxiv.org/abs/2107.14160 + Title: 'Probabilistic and Geometric Depth: Detecting Objects in Perspective' + README: configs/pgd/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/mmdet3d/models/dense_heads/pgd_head.py#17 + Version: v1.0.0 + +Models: + - Name: pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d + Alias: + - pgd_kitti + In Collection: PGD + Config: configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py + Metadata: + Training Memory (GB): 9.1 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 18.33 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d/pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d_20211022_102608-8a97533b.pth + + - Name: pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d + In Collection: PGD + Config: configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py + Metadata: + Training Memory (GB): 9.2 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 31.7 + NDS: 39.3 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_20211116_195350-f4b5eec2.pth + + - Name: pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune + In Collection: PGD + Config: configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune.py + Metadata: + Training Memory (GB): 9.2 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 34.6 + NDS: 41.1 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_finetune/pgd_r101_caffe_fpn_gn-head_2x16_1x_nus-mono3d_finetune_20211118_093245-fd419681.pth + + - Name: pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d + In Collection: PGD + Config: configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py + Metadata: + Training Memory (GB): 9.2 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 33.6 + NDS: 40.9 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_20211112_125314-cb677266.pth + + - Name: pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune + In Collection: PGD + Config: configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune.py + Metadata: + Training Memory (GB): 9.2 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 35.8 + NDS: 42.5 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_finetune/pgd_r101_caffe_fpn_gn-head_2x16_2x_nus-mono3d_finetune_20211114_162135-5ec7c1cd.pth diff --git a/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py b/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py new file mode 100755 index 0000000..c7709d3 --- /dev/null +++ b/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py @@ -0,0 +1,104 @@ +_base_ = [ + '../_base_/datasets/nus-mono3d.py', '../_base_/models/pgd.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + 
bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + bbox_head=dict( + pred_bbox2d=True, + group_reg_dims=(2, 1, 3, 1, 2, + 4), # offset, depth, size, rot, velo, bbox2d + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + (), # velo + (256, ) # bbox2d + ), + loss_depth=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + bbox_coder=dict( + type='PGDBBoxCoder', + base_depths=((31.99, 21.12), (37.15, 24.63), (39.69, 23.97), + (40.91, 26.34), (34.16, 20.11), (22.35, 13.70), + (24.28, 16.05), (27.26, 15.50), (20.61, 13.68), + (22.74, 15.01)), + base_dims=((4.62, 1.73, 1.96), (6.93, 2.83, 2.51), + (12.56, 3.89, 2.94), (11.22, 3.50, 2.95), + (6.68, 3.21, 2.85), (6.68, 3.21, 2.85), + (2.11, 1.46, 0.78), (0.73, 1.77, 0.67), + (0.41, 1.08, 0.41), (0.50, 0.99, 2.52)), + code_size=9)), + # set weight 1.0 for base 7 dims (offset, depth, size, rot) + # 0.05 for 2-dim velocity and 0.2 for 4-dim 2D distance targets + train_cfg=dict(code_weight=[ + 1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05, 0.2, 0.2, 0.2, 0.2 + ]), + test_cfg=dict(nms_pre=1000, nms_thr=0.8, score_thr=0.01, max_per_img=200)) + +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=True, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='mmdet.Resize', scale=(1600, 900), keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'attr_labels', + 'gt_bboxes_3d', 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='mmdet.Resize', scale_factor=1.0), + dict(type='Pack3DDetInputs', keys=['img']), +] +train_dataloader = dict( + batch_size=2, num_workers=2, dataset=dict(pipeline=train_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.004), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) +] + +train_cfg = dict(max_epochs=12, val_interval=4) +auto_scale_lr = dict(base_batch_size=32) diff --git a/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune.py b/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune.py new file mode 100755 index 0000000..3c84f82 --- /dev/null +++ b/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune.py @@ -0,0 +1,9 @@ +_base_ = './pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py' +# model settings +model = dict( + train_cfg=dict(code_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05, 0.2, 0.2, 0.2, 0.2 + ])) +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.002)) +load_from = 'work_dirs/pgd_nus_benchmark_1x/latest.pth' diff --git a/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py b/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py new file mode 100755 index 
0000000..b95b0ec --- /dev/null +++ b/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py @@ -0,0 +1,20 @@ +_base_ = './pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py' + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +train_cfg = dict(max_epochs=24) diff --git a/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune.py b/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune.py new file mode 100755 index 0000000..a733bc4 --- /dev/null +++ b/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune.py @@ -0,0 +1,9 @@ +_base_ = './pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py' +# model settings +model = dict( + train_cfg=dict(code_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05, 0.2, 0.2, 0.2, 0.2 + ])) +# optimizer +optim_wrapper = dict(optimizer=dict(lr=0.002)) +load_from = 'work_dirs/pgd_nus_benchmark_2x/latest.pth' diff --git a/configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py b/configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py new file mode 100755 index 0000000..2f83134 --- /dev/null +++ b/configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py @@ -0,0 +1,127 @@ +_base_ = [ + '../_base_/datasets/kitti-mono3d.py', '../_base_/models/pgd.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[1.0, 1.0, 1.0], + bgr_to_rgb=False, + pad_size_divisor=32), + backbone=dict(frozen_stages=0), + neck=dict(start_level=0, num_outs=4), + bbox_head=dict( + num_classes=3, + bbox_code_size=7, + pred_attrs=False, + pred_velo=False, + pred_bbox2d=True, + use_onlyreg_proj=True, + strides=(4, 8, 16, 32), + regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 1e8)), + group_reg_dims=(2, 1, 3, 1, 16, + 4), # offset, depth, size, rot, kpts, bbox2d + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + (256, ), # kpts + (256, ) # bbox2d + ), + centerness_branch=(256, ), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_centerness=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + use_depth_classifier=True, + depth_branch=(256, ), + depth_range=(0, 70), + depth_unit=10, + division='uniform', + depth_bins=8, + pred_keypoints=True, + weight_dim=1, + loss_depth=dict( + type='UncertainSmoothL1Loss', alpha=1.0, beta=3.0, + loss_weight=1.0), + bbox_coder=dict( + type='PGDBBoxCoder', + base_depths=((28.01, 16.32), ), + base_dims=((0.8, 1.73, 0.6), (1.76, 1.73, 0.6), (3.9, 1.56, 1.6)), + code_size=7)), + # set weight 1.0 for base 7 dims (offset, depth, size, rot) + # 0.2 for 16-dim keypoint offsets and 1.0 for 4-dim 2D distance targets + train_cfg=dict(code_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, + 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0 + ]), + test_cfg=dict(nms_pre=100, nms_thr=0.05, score_thr=0.001, max_per_img=20)) + +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFileMono3D', 
backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=False, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='mmdet.Resize', scale=(1242, 375), keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_bboxes_3d', + 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='mmdet.Resize', scale_factor=1.0), + dict(type='Pack3DDetInputs', keys=['img']) +] + +train_dataloader = dict( + batch_size=3, num_workers=3, dataset=dict(pipeline=train_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# optimizer +optim_wrapper = dict( + optimizer=dict(lr=0.001), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=dict(max_norm=35, norm_type=2)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=48, + by_epoch=True, + milestones=[32, 44], + gamma=0.1) +] + +train_cfg = dict(max_epochs=48, val_interval=2) +auto_scale_lr = dict(base_batch_size=12) diff --git a/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py new file mode 100755 index 0000000..856fd8f --- /dev/null +++ b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py @@ -0,0 +1,112 @@ +_base_ = [ + '../_base_/datasets/waymoD5-fov-mono3d-3class.py', + '../_base_/models/pgd.py', '../_base_/schedules/mmdet-schedule-1x.py', + '../_base_/default_runtime.py' +] +# model settings +model = dict( + backbone=dict( + type='mmdet.ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + neck=dict(num_outs=3), + bbox_head=dict( + num_classes=3, + bbox_code_size=7, + pred_attrs=False, + pred_velo=False, + pred_bbox2d=True, + use_onlyreg_proj=True, + strides=(8, 16, 32), + regress_ranges=((-1, 128), (128, 256), (256, 1e8)), + group_reg_dims=(2, 1, 3, 1, 16, + 4), # offset, depth, size, rot, kpts, bbox2d + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + (256, ), # kpts + (256, ) # bbox2d + ), + centerness_branch=(256, ), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_centerness=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + use_depth_classifier=True, + depth_branch=(256, ), + depth_range=(0, 50), + depth_unit=10, + division='uniform', + depth_bins=6, + pred_keypoints=True, + weight_dim=1, + loss_depth=dict( + type='UncertainSmoothL1Loss', alpha=1.0, beta=3.0, + loss_weight=1.0), + loss_bbox2d=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.0), + loss_consistency=dict(type='mmdet.GIoULoss', loss_weight=0.0), + bbox_coder=dict( + 
type='PGDBBoxCoder', + base_depths=((41.01, 18.44), ), + base_dims=( + (4.73, 1.77, 2.08), + (0.91, 1.74, 0.84), + (1.81, 1.77, 0.84), + ), + code_size=7)), + # set weight 1.0 for base 7 dims (offset, depth, size, rot) + # 0.2 for 16-dim keypoint offsets and 1.0 for 4-dim 2D distance targets + train_cfg=dict(code_weight=[ + 1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, + 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0 + ]), + test_cfg=dict(nms_pre=100, nms_thr=0.05, score_thr=0.001, max_per_img=20)) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='SGD', + lr=0.008, + ), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=dict(max_norm=35, norm_type=2)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=24) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +auto_scale_lr = dict(base_batch_size=48) diff --git a/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mono3d.py b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mono3d.py new file mode 100755 index 0000000..c812c70 --- /dev/null +++ b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mono3d.py @@ -0,0 +1,111 @@ +_base_ = [ + '../_base_/datasets/waymoD5-mono3d-3class.py', '../_base_/models/pgd.py', + '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + backbone=dict( + type='mmdet.ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + neck=dict(num_outs=3), + bbox_head=dict( + num_classes=3, + bbox_code_size=7, + pred_attrs=False, + pred_velo=False, + pred_bbox2d=True, + use_onlyreg_proj=True, + strides=(8, 16, 32), + regress_ranges=((-1, 128), (128, 256), (256, 1e8)), + group_reg_dims=(2, 1, 3, 1, 16, + 4), # offset, depth, size, rot, kpts, bbox2d + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + (256, ), # kpts + (256, ) # bbox2d + ), + centerness_branch=(256, ), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_centerness=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + use_depth_classifier=True, + depth_branch=(256, ), + depth_range=(0, 50), + depth_unit=10, + division='uniform', + depth_bins=6, + pred_keypoints=True, + weight_dim=1, + loss_depth=dict( + type='UncertainSmoothL1Loss', alpha=1.0, beta=3.0, + loss_weight=1.0), + loss_bbox2d=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.0), + loss_consistency=dict(type='mmdet.GIoULoss', loss_weight=0.0), + bbox_coder=dict( + type='PGDBBoxCoder', + base_depths=((41.01, 18.44), ), + base_dims=( + (4.73, 1.77, 2.08), + (0.91, 1.74, 0.84), + (1.81, 1.77, 0.84), + ), + code_size=7)), + # set weight 1.0 for base 7 dims (offset, depth, size, rot) + # 0.2 for 16-dim keypoint offsets and 1.0 
for 4-dim 2D distance targets + train_cfg=dict(code_weight=[ + 1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, + 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0 + ]), + test_cfg=dict(nms_pre=100, nms_thr=0.05, score_thr=0.001, max_per_img=20)) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='SGD', + lr=0.008, + ), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=dict(max_norm=35, norm_type=2)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=24) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +auto_scale_lr = dict(base_batch_size=48) diff --git a/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py new file mode 100755 index 0000000..034f866 --- /dev/null +++ b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py @@ -0,0 +1,112 @@ +_base_ = [ + '../_base_/datasets/waymoD5-mv-mono3d-3class.py', + '../_base_/models/pgd.py', '../_base_/schedules/mmdet-schedule-1x.py', + '../_base_/default_runtime.py' +] +# model settings +model = dict( + backbone=dict( + type='mmdet.ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'), + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + neck=dict(num_outs=3), + bbox_head=dict( + num_classes=3, + bbox_code_size=7, + pred_attrs=False, + pred_velo=False, + pred_bbox2d=True, + use_onlyreg_proj=True, + strides=(8, 16, 32), + regress_ranges=((-1, 128), (128, 256), (256, 1e8)), + group_reg_dims=(2, 1, 3, 1, 16, + 4), # offset, depth, size, rot, kpts, bbox2d + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + (256, ), # kpts + (256, ) # bbox2d + ), + centerness_branch=(256, ), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_centerness=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + use_depth_classifier=True, + depth_branch=(256, ), + depth_range=(0, 50), + depth_unit=10, + division='uniform', + depth_bins=6, + pred_keypoints=True, + weight_dim=1, + loss_depth=dict( + type='UncertainSmoothL1Loss', alpha=1.0, beta=3.0, + loss_weight=1.0), + loss_bbox2d=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.0), + loss_consistency=dict(type='mmdet.GIoULoss', loss_weight=0.0), + bbox_coder=dict( + type='PGDBBoxCoder', + base_depths=((41.01, 18.44), ), + base_dims=( + (4.73, 1.77, 2.08), + (0.91, 1.74, 0.84), + (1.81, 1.77, 0.84), + ), + code_size=7)), + # set weight 1.0 for base 7 dims (offset, depth, size, rot) + # 0.2 for 16-dim keypoint offsets and 1.0 for 4-dim 2D distance targets + train_cfg=dict(code_weight=[ + 1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, + 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0 + ]), + test_cfg=dict(nms_pre=100, nms_thr=0.05, 
score_thr=0.001, max_per_img=20)) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='SGD', + lr=0.008, + ), + paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.), + clip_grad=dict(max_norm=35, norm_type=2)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='MultiStepLR', + begin=0, + end=24, + by_epoch=True, + milestones=[16, 22], + gamma=0.1) +] + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=24) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +auto_scale_lr = dict(base_batch_size=48) diff --git a/configs/point_rcnn/README.md b/configs/point_rcnn/README.md new file mode 100755 index 0000000..03be3ca --- /dev/null +++ b/configs/point_rcnn/README.md @@ -0,0 +1,47 @@ +# PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud + +> [PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud](https://arxiv.org/abs/1812.04244) + + + +## Abstract + +In this paper, we propose PointRCNN for 3D object detection from raw point cloud. The whole framework is composed of two stages: stage-1 for the bottom-up 3D proposal generation and stage-2 for refining proposals in the canonical coordinates to obtain the final detection results. Instead of generating proposals from RGB image or projecting point cloud to bird's view or voxels as previous methods do, our stage-1 sub-network directly generates a small number of high-quality 3D proposals from point cloud in a bottom-up manner via segmenting the point cloud of the whole scene into foreground points and background. The stage-2 sub-network transforms the pooled points of each proposal to canonical coordinates to learn better local spatial features, which is combined with global semantic features of each point learned in stage-1 for accurate box refinement and confidence prediction. Extensive experiments on the 3D detection benchmark of KITTI dataset show that our proposed architecture outperforms state-of-the-art methods with remarkable margins by using only point cloud as input. + +
    + +
+ +## Introduction + +We implement PointRCNN and provide the results and checkpoints on the KITTI dataset. + +## Results and models + +### KITTI + +| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :------------------------------------------------: | :-----: | :--------: | :------: | :------------: | :---: | :------------------------------------------------------------------: | +| [PointNet++](./point-rcnn_8xb2_kitti-3d-3class.py) | 3 Class | cyclic 40e | 4.6 | | 70.83 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/point_rcnn/point_rcnn_2x8_kitti-3d-3classes_20211208_151344.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/point_rcnn/point_rcnn_2x8_kitti-3d-3classes_20211208_151344.log.json) | + +Note: mAP represents the 3-class AP11 results under the moderate setting. + +Detailed performance on KITTI 3D detection (3D) is as follows, evaluated by the AP11 metric: + +| | Easy | Moderate | Hard | +| ---------- | :---: | :------: | :---: | +| Car | 89.13 | 78.72 | 78.24 | +| Pedestrian | 65.81 | 59.57 | 52.75 | +| Cyclist | 93.51 | 74.19 | 70.73 | + +## Citation + +```latex +@inproceedings{Shi_2019_CVPR, + title = {PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud}, + author = {Shi, Shaoshuai and Wang, Xiaogang and Li, Hongsheng}, + booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2019} +} +``` diff --git a/configs/point_rcnn/metafile.yml b/configs/point_rcnn/metafile.yml new file mode 100755 index 0000000..2dcdc3a --- /dev/null +++ b/configs/point_rcnn/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: PointRCNN + Metadata: + Training Data: KITTI + Training Techniques: + - AdamW + Training Resources: 8x Titan XP GPUs + Architecture: + - PointNet++ + Paper: + URL: https://arxiv.org/abs/1812.04244 + Title: 'PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud' + README: configs/point_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/mmdet3d/models/detectors/point_rcnn.py#L8 + Version: v1.0.0 + +Models: + - Name: point-rcnn_8xb2_kitti-3d-3class + In Collection: PointRCNN + Config: configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py + Metadata: + Training Memory (GB): 4.6 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 70.83 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/point_rcnn/point_rcnn_2x8_kitti-3d-3classes_20211208_151344.pth diff --git a/configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py b/configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py new file mode 100755 index 0000000..1826198 --- /dev/null +++ b/configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py @@ -0,0 +1,145 @@ +_base_ = [ + '../_base_/datasets/kitti-3d-car.py', '../_base_/models/point_rcnn.py', + '../_base_/default_runtime.py', '../_base_/schedules/cyclic-40e.py' +] + +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +metainfo = dict(classes=class_names) +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +input_modality = dict(use_lidar=True, use_camera=False) +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', +
rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + sample_groups=dict(Car=20, Pedestrian=15, Cyclist=15), + classes=class_names, + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectSample', db_sampler=db_sampler), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointSample', num_points=16384, sample_range=40.0), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointSample', num_points=16384, sample_range=40.0) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict(pipeline=train_pipeline, metainfo=metainfo))) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo)) + +lr = 0.001 # max learning rate +optim_wrapper = dict(optimizer=dict(lr=lr, betas=(0.95, 0.85))) +train_cfg = dict(by_epoch=True, max_epochs=80, val_interval=2) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) +param_scheduler = [ + # learning rate scheduler + # During the first 35 epochs, learning rate increases from 0 to lr * 10 + # during the next 45 epochs, learning rate decreases from lr * 10 to + # lr * 1e-4 + dict( + type='CosineAnnealingLR', + T_max=35, + eta_min=lr * 10, + begin=0, + end=35, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=45, + eta_min=lr * 1e-4, + begin=35, + end=80, + by_epoch=True, + convert_to_iter_based=True), + # momentum scheduler + # During the first 35 epochs, momentum increases from 0 to 0.85 / 0.95 + # during the next 45 epochs, momentum increases from 0.85 / 0.95 to 1 + dict( + type='CosineAnnealingMomentum', + T_max=35, + eta_min=0.85 / 0.95, + begin=0, + end=35, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=45, + eta_min=1, + begin=35, + end=80, + by_epoch=True, + convert_to_iter_based=True) +] diff --git a/configs/pointnet2/README.md b/configs/pointnet2/README.md new file mode 100755 index 0000000..fc70a06 --- /dev/null +++ b/configs/pointnet2/README.md @@ -0,0 +1,72 @@ +# PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space + +> [PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space](https://arxiv.org/abs/1706.02413) + + + +## Abstract + +Few prior works study deep learning on point sets. PointNet by Qi et al. is a pioneer in this direction. However, by design PointNet does not capture local structures induced by the metric space points live in, limiting its ability to recognize fine-grained patterns and generalizability to complex scenes. In this work, we introduce a hierarchical neural network that applies PointNet recursively on a nested partitioning of the input point set. By exploiting metric space distances, our network is able to learn local features with increasing contextual scales. With further observation that point sets are usually sampled with varying densities, which results in greatly decreased performance for networks trained on uniform densities, we propose novel set learning layers to adaptively combine features from multiple scales. Experiments show that our network called PointNet++ is able to learn deep point set features efficiently and robustly. In particular, results significantly better than state-of-the-art have been obtained on challenging benchmarks of 3D point clouds. + +
    + +## Introduction + +We implement PointNet++ and provide the result and checkpoints on ScanNet and S3DIS datasets. + +**Notice**: The original PointNet++ paper used step learning rate schedule. We discovered that cosine schedule achieves much better results and adopt it in our implementations. We also use a larger `weight_decay` factor because we find it consistently improves the performance. + +## Results and models + +### ScanNet + +| Method | Input | Lr schd | Mem (GB) | Inf time (fps) | mIoU (Val set) | mIoU (Test set) | Download | +| :---------------------------------------------------------------------------: | :-------: | :---------: | :------: | :------------: | :------------: | :-------------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [PointNet++ (SSG)](./pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py) | XYZ | cosine 200e | 1.9 | | 53.91 | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143628-4e341a48.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143628.log.json) | +| [PointNet++ (SSG)](./pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py) | XYZ+Color | cosine 200e | 1.9 | | 54.44 | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143644-ee73704a.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143644.log.json) | +| [PointNet++ (MSG)](./pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py) | XYZ | cosine 250e | 2.4 | | 54.26 | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class_20210514_143838-b4a3cf89.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class_20210514_143838.log.json) | +| [PointNet++ (MSG)](./pointnet2_msg_2xb16-cosine-250e_scannet-seg.py) | XYZ+Color | cosine 250e | 2.4 | | 55.05 | | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class_20210514_144009-24477ab1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class_20210514_144009.log.json) | + +**Notes:** + +- The original PointNet++ paper conducted experiments on the ScanNet V1 dataset, while 
later point cloud segmentor papers often used ScanNet V2. Following common practice, we report results on the ScanNet V2 dataset. + +- Since ScanNet dataset doesn't provide ground-truth labels for the test set, users can only evaluate test set performance by submitting to its online benchmark [website](http://kaldir.vc.in.tum.de/scannet_benchmark/). However, users are only allowed to submit once every two weeks. Therefore, we currently report val set mIoU. Test set performance may be added in the future. + +- To generate submission file for ScanNet online benchmark, you need to modify the ScanNet dataset's [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/datasets/scannet_seg-3d-20class.py#L126). Change `ann_file=data_root + 'scannet_infos_val.pkl'` to `ann_file=data_root + 'scannet_infos_test.pkl'`, and then simply run: + + ```shell + python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} --format-only --options 'txt_prefix=exps/pointnet2_scannet_results' + ``` + + This will save the prediction results as `txt` files in `exps/pointnet2_scannet_results/`. Then, go to this folder and zip all files into `pn2_scannet.zip`. Now you can submit it to the online benchmark and wait for the test set result. More instructions can be found at their official [website](http://kaldir.vc.in.tum.de/scannet_benchmark/documentation#submission-policy). + +### S3DIS + +| Method | Split | Lr schd | Mem (GB) | Inf time (fps) | mIoU (Val set) | Download | +| :---------------------------------------------------------------: | :----: | :--------: | :------: | :------------: | :------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [PointNet++ (SSG)](./pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py) | Area_5 | cosine 50e | 3.6 | | 56.93 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class_20210514_144205-995d0119.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class_20210514_144205.log.json) | +| [PointNet++ (MSG)](./pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py) | Area_5 | cosine 80e | 3.6 | | 58.04 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class_20210514_144307-b2059817.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class_20210514_144307.log.json) | + +**Notes:** + +- We use XYZ+Color+Normalized_XYZ as input in all the experiments on S3DIS datasets. +- `Area_5` Split means training the model on Area_1, 2, 3, 4, 6 and testing on Area_5. 
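For reference, a minimal config-style sketch of how the 9-channel S3DIS input mentioned in the note above is wired up; it simply mirrors the `in_channels=9` override used by the S3DIS configs later in this folder, with the per-channel breakdown spelled out:

```python
# 9 input channels = XYZ (3) + RGB color (3) + normalized XYZ (3).
model = dict(
    backbone=dict(in_channels=9),  # [xyz, rgb, normalized_xyz]
    decode_head=dict(num_classes=13, ignore_index=13))
```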
+ +## Indeterminism + +Since PointNet++ testing adopts sliding patch inference which involves random point sampling, and the test script uses fixed random seeds while the random seeds of validation in training are not fixed, the test results may be slightly different from the results reported above. + +## Citation + +```latex +@inproceedings{qi2017pointnet++, + title={PointNet++ deep hierarchical feature learning on point sets in a metric space}, + author={Qi, Charles R and Yi, Li and Su, Hao and Guibas, Leonidas J}, + booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems}, + pages={5105--5114}, + year={2017} +} +``` diff --git a/configs/pointnet2/metafile.yml b/configs/pointnet2/metafile.yml new file mode 100755 index 0000000..cdceb04 --- /dev/null +++ b/configs/pointnet2/metafile.yml @@ -0,0 +1,95 @@ +Collections: + - Name: PointNet++ + Metadata: + Training Techniques: + - Adam + Training Resources: 2x Titan XP GPUs + Architecture: + - PointNet++ + Paper: + URL: https://arxiv.org/abs/1706.02413 + Title: 'PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space' + README: configs/pointnet2/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/backbones/pointnet2_sa_ssg.py#L12 + Version: v0.14.0 + +Models: + - Name: pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only + In Collection: PointNet++ + Config: configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 1.9 + Results: + - Task: 3D Semantic Segmentation + Dataset: ScanNet + Metrics: + mIoU: 53.91 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143628-4e341a48.pth + + - Name: pointnet2_ssg_2xb16-cosine-200e_scannet-seg + In Collection: PointNet++ + Config: configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 1.9 + Results: + - Task: 3D Semantic Segmentation + Dataset: ScanNet + Metrics: + mIoU: 54.44 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class_20210514_143644-ee73704a.pth + + - Name: pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only + In Collection: PointNet++ + Config: configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 2.4 + Results: + - Task: 3D Semantic Segmentation + Dataset: ScanNet + Metrics: + mIoU: 54.26 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class_20210514_143838-b4a3cf89.pth + + - Name: pointnet2_msg_2xb16-cosine-250e_scannet-seg + In Collection: PointNet++ + Config: configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 2.4 + Results: + - Task: 3D Semantic Segmentation + Dataset: ScanNet + Metrics: + mIoU: 55.05 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class_20210514_144009-24477ab1.pth + + - Name: 
pointnet2_ssg_2xb16-cosine-50e_s3dis-seg + Alias: pointnet2-ssg_s3dis-seg + In Collection: PointNet++ + Config: configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 3.6 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS + Metrics: + mIoU: 56.93 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class_20210514_144205-995d0119.pth + + - Name: pointnet2_msg_2xb16-cosine-80e_s3dis-seg + In Collection: PointNet++ + Config: configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py + Metadata: + Training Data: S3DIS + Training Memory (GB): 3.6 + Results: + - Task: 3D Semantic Segmentation + Dataset: S3DIS + Metrics: + mIoU: 58.04 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class_20210514_144307-b2059817.pth diff --git a/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py b/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py new file mode 100755 index 0000000..b0b793f --- /dev/null +++ b/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py @@ -0,0 +1,111 @@ +_base_ = [ + '../_base_/datasets/scannet-seg.py', '../_base_/models/pointnet2_msg.py', + '../_base_/schedules/seg-cosine-200e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict(in_channels=3), # only [xyz] + decode_head=dict( + num_classes=20, + ignore_index=20, + # `class_weight` is generated in data pre-processing, saved in + # `data/scannet/seg_info/train_label_weight.npy` + # you can copy paste the values here, or input the file path as + # `class_weight=data/scannet/seg_info/train_label_weight.npy` + loss_decode=dict(class_weight=[ + 2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941, + 4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071, + 5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416, + 5.3954206, 4.6971426 + ])), + test_cfg=dict( + num_points=8192, + block_size=1.5, + sample_rate=0.5, + use_normalized_coord=False, + batch_size=24)) + +# dataset settings +# in this setting, we only use xyz as network input +# so we need to re-write all the data pipeline +class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', + 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', + 'bathtub', 'otherfurniture') +num_points = 8192 +backend_args = None +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=False, + load_dim=6, + use_dim=[0, 1, 2], # only load xyz coordinates + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict(type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.5, + ignore_index=len(class_names), + use_normalized_coord=False, + enlarge_size=0.2, + min_unique_num=None), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=False, + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict( 
+ type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict( + # a wrapper in order to successfully call test function + # actually we don't perform test-time-aug + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict(batch_size=16, dataset=dict(pipeline=train_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +val_dataloader = test_dataloader + +# runtime settings +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5)) + +# PointNet2-MSG needs longer training time than PointNet2-SSG +train_cfg = dict(by_epoch=True, max_epochs=250, val_interval=5) diff --git a/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py b/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py new file mode 100755 index 0000000..83c003b --- /dev/null +++ b/configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/datasets/scannet-seg.py', '../_base_/models/pointnet2_msg.py', + '../_base_/schedules/seg-cosine-200e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + decode_head=dict( + num_classes=20, + ignore_index=20, + # `class_weight` is generated in data pre-processing, saved in + # `data/scannet/seg_info/train_label_weight.npy` + # you can copy paste the values here, or input the file path as + # `class_weight=data/scannet/seg_info/train_label_weight.npy` + loss_decode=dict(class_weight=[ + 2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941, + 4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071, + 5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416, + 5.3954206, 4.6971426 + ])), + test_cfg=dict( + num_points=8192, + block_size=1.5, + sample_rate=0.5, + use_normalized_coord=False, + batch_size=24)) + +# data settings +train_dataloader = dict(batch_size=16) + +# runtime settings +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5)) + +# PointNet2-MSG needs longer training time than PointNet2-SSG +train_cfg = dict(by_epoch=True, max_epochs=250, val_interval=5) diff --git a/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py b/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py new file mode 100755 index 0000000..0913df3 --- /dev/null +++ b/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py @@ -0,0 +1,26 @@ +_base_ = [ + '../_base_/datasets/s3dis-seg.py', '../_base_/models/pointnet2_msg.py', + '../_base_/schedules/seg-cosine-50e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz] + decode_head=dict( + num_classes=13, ignore_index=13, + loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight + test_cfg=dict( + num_points=4096, + block_size=1.0, + sample_rate=0.5, + use_normalized_coord=True, + batch_size=24)) + +# data settings +train_dataloader = dict(batch_size=16) + +# runtime settings +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2)) + +# PointNet2-MSG needs longer training time than PointNet2-SSG +train_cfg = dict(by_epoch=True, 
max_epochs=80, val_interval=2) diff --git a/configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py b/configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py new file mode 100755 index 0000000..a8d4421 --- /dev/null +++ b/configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py @@ -0,0 +1,109 @@ +_base_ = [ + '../_base_/datasets/scannet-seg.py', '../_base_/models/pointnet2_ssg.py', + '../_base_/schedules/seg-cosine-200e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict(in_channels=3), # only [xyz] + decode_head=dict( + num_classes=20, + ignore_index=20, + # `class_weight` is generated in data pre-processing, saved in + # `data/scannet/seg_info/train_label_weight.npy` + # you can copy paste the values here, or input the file path as + # `class_weight=data/scannet/seg_info/train_label_weight.npy` + loss_decode=dict(class_weight=[ + 2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941, + 4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071, + 5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416, + 5.3954206, 4.6971426 + ])), + test_cfg=dict( + num_points=8192, + block_size=1.5, + sample_rate=0.5, + use_normalized_coord=False, + batch_size=24)) + +# dataset settings +# in this setting, we only use xyz as network input +# so we need to re-write all the data pipeline +class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', + 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', + 'bathtub', 'otherfurniture') +num_points = 8192 +backend_args = None +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=False, + load_dim=6, + use_dim=[0, 1, 2], # only load xyz coordinates + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict(type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.5, + ignore_index=len(class_names), + use_normalized_coord=False, + enlarge_size=0.2, + min_unique_num=None), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=False, + load_dim=6, + use_dim=[0, 1, 2], + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + backend_args=backend_args), + dict( + # a wrapper in order to successfully call test function + # actually we don't perform test-time-aug + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict(batch_size=16, dataset=dict(pipeline=train_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +val_dataloader = test_dataloader + +# runtime settings +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5)) +train_cfg = dict(val_interval=5) diff --git 
a/configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py b/configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py new file mode 100755 index 0000000..3900a87 --- /dev/null +++ b/configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/datasets/scannet-seg.py', '../_base_/models/pointnet2_ssg.py', + '../_base_/schedules/seg-cosine-200e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + decode_head=dict( + num_classes=20, + ignore_index=20, + # `class_weight` is generated in data pre-processing, saved in + # `data/scannet/seg_info/train_label_weight.npy` + # you can copy paste the values here, or input the file path as + # `class_weight=data/scannet/seg_info/train_label_weight.npy` + loss_decode=dict(class_weight=[ + 2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941, + 4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071, + 5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416, + 5.3954206, 4.6971426 + ])), + test_cfg=dict( + num_points=8192, + block_size=1.5, + sample_rate=0.5, + use_normalized_coord=False, + batch_size=24)) + +# data settings +train_dataloader = dict(batch_size=16) + +# runtime settings +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5)) +train_cfg = dict(val_interval=5) diff --git a/configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py b/configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py new file mode 100755 index 0000000..5d4490d --- /dev/null +++ b/configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/datasets/s3dis-seg.py', '../_base_/models/pointnet2_ssg.py', + '../_base_/schedules/seg-cosine-50e.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz] + decode_head=dict( + num_classes=13, ignore_index=13, + loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight + test_cfg=dict( + num_points=4096, + block_size=1.0, + sample_rate=0.5, + use_normalized_coord=True, + batch_size=24)) + +# data settings +train_dataloader = dict(batch_size=16) + +# runtime settings +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2)) +train_cfg = dict(val_interval=2) diff --git a/configs/pointpillars/README.md b/configs/pointpillars/README.md new file mode 100755 index 0000000..a2de0d4 --- /dev/null +++ b/configs/pointpillars/README.md @@ -0,0 +1,78 @@ +# PointPillars: Fast Encoders for Object Detection from Point Clouds + +> [PointPillars: Fast Encoders for Object Detection from Point Clouds](https://arxiv.org/abs/1812.05784) + + + +## Abstract + +Object detection in point clouds is an important aspect of many robotics applications such as autonomous driving. In this paper we consider the problem of encoding a point cloud into a format appropriate for a downstream detection pipeline. Recent literature suggests two types of encoders; fixed encoders tend to be fast but sacrifice accuracy, while encoders that are learned from data are more accurate, but slower. In this work we propose PointPillars, a novel encoder which utilizes PointNets to learn a representation of point clouds organized in vertical columns (pillars). While the encoded features can be used with any standard 2D convolutional detection architecture, we further propose a lean downstream network. 
Extensive experimentation shows that PointPillars outperforms previous encoders with respect to both speed and accuracy by a large margin. Despite only using lidar, our full detection pipeline significantly outperforms the state of the art, even among fusion methods, with respect to both the 3D and bird's eye view KITTI benchmarks. This detection performance is achieved while running at 62 Hz: a 2 - 4 fold runtime improvement. A faster version of our method matches the state of the art at 105 Hz. These benchmarks suggest that PointPillars is an appropriate encoding for object detection in point clouds. + +
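As a rough illustration of the pillar encoding described above, the sketch below scatters a toy per-pillar feature into a BEV pseudo-image that a 2D detection backbone could consume. It is conceptual only, not the mmdet3d implementation, and the ranges and pillar size are placeholder values:

```python
import numpy as np

def pillar_pseudo_image(points, x_range=(0.0, 69.12), y_range=(-39.68, 39.68),
                        pillar_size=(0.16, 0.16)):
    """Scatter a toy per-pillar feature (max point height) onto a BEV canvas."""
    nx = int((x_range[1] - x_range[0]) / pillar_size[0])
    ny = int((y_range[1] - y_range[0]) / pillar_size[1])
    canvas = np.full((ny, nx), -np.inf, dtype=np.float32)
    ix = ((points[:, 0] - x_range[0]) / pillar_size[0]).astype(int)
    iy = ((points[:, 1] - y_range[0]) / pillar_size[1]).astype(int)
    keep = (ix >= 0) & (ix < nx) & (iy >= 0) & (iy < ny)
    # PointPillars learns pillar features with a tiny PointNet (linear + BN +
    # ReLU + max over the pillar's points); max height is used here as a
    # stand-in so the scatter step stays visible.
    np.maximum.at(canvas, (iy[keep], ix[keep]), points[keep, 2])
    canvas[np.isneginf(canvas)] = 0.0  # empty pillars
    return canvas

points = np.random.rand(1000, 4) * [69.12, 79.36, 4.0, 1.0] + [0.0, -39.68, -3.0, 0.0]
bev = pillar_pseudo_image(points)  # shape (ny, nx), ready for a 2D CNN
print(bev.shape)
```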
    + +## Introduction + +We implement PointPillars and provide the results and checkpoints on KITTI, nuScenes, Lyft and Waymo datasets. + +## Results and models + +### KITTI + +| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | AP | Download | +| :-------------------------------------------------------------: | :-----: | :---------: | :------: | :------------: | :---: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py) | Car | cyclic 160e | 5.4 | | 77.6 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606.log.json) | +| [SECFPN](./pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py) | 3 Class | cyclic 160e | 5.5 | | 64.07 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306-37dc2420.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306.log.json) | + +### nuScenes + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | NDS | Download | +| :---------------------------------------------------------------------: | :-----: | :------: | :------------: | :---: | :---: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 16.4 | | 34.33 | 49.1 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20210826_225857-f19d00a3.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20210826_225857.log.json) | +| [SECFPN (FP16)](./pointpillars_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py) | 2x | 8.37 | | 35.19 | 50.27 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d_20201020_222626-c3f0483e.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d_20201020_222626.log.json) | +| [FPN](./pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 16.3 | | 39.7 | 53.2 | 
[model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20210826_104936-fca299c1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20210826_104936.log.json) | +| [FPN (FP16)](./pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py) | 2x | 8.40 | | 39.26 | 53.26 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d_20201021_120719-269f9dd6.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d_20201021_120719.log.json) | + +### Lyft + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | Private Score | Public Score | Download | +| :-----------------------------------------------------------: | :-----: | :------: | :------------: | :-----------: | :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py) | 2x | 12.2 | | 13.8 | 14.1 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210829_100455-82b81c39.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210829_100455.log.json) | +| [FPN](./pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py) | 2x | 9.2 | | 14.8 | 15.0 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210822_095429-0b3d6196.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210822_095429.log.json) | + +### Waymo + +| Backbone | Load Interval | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP@L1 | mAPH@L1 | mAP@L2 | **mAPH@L2** | Download | +| :----------------------------------------------------------------------: | :-----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :---------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-car.py) | 5 | Car | 2x | 7.76 | | 70.2 | 69.6 | 62.6 | 62.1 | 
[model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car_20200901_204315-302fc3e7.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car_20200901_204315.log.json) | +| [SECFPN](./pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py) | 5 | 3 Class | 2x | 8.12 | | 64.7 | 57.6 | 58.4 | 52.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class_20200831_204144-d1a706b1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class_20200831_204144.log.json) | +| above @ Car | | | 2x | 8.12 | | 68.5 | 67.9 | 60.1 | 59.6 | | +| above @ Pedestrian | | | 2x | 8.12 | | 67.8 | 50.6 | 59.6 | 44.3 | | +| above @ Cyclist | | | 2x | 8.12 | | 57.7 | 54.4 | 55.5 | 52.4 | | +| [SECFPN](./pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py) | 1 | Car | 2x | 7.76 | | 72.1 | 71.5 | 63.6 | 63.1 | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-car/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-car.log.json) | +| [SECFPN](./pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py) | 1 | 3 Class | 2x | 8.12 | | 68.8 | 63.3 | 62.6 | 57.6 | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-3class/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-3class.log.json) | +| above @ Car | | | 2x | 8.12 | | 71.6 | 71.0 | 63.1 | 62.5 | | +| above @ Pedestrian | | | 2x | 8.12 | | 70.6 | 56.7 | 62.9 | 50.2 | | +| above @ Cyclist | | | 2x | 8.12 | | 64.4 | 62.3 | 61.9 | 59.9 | | + +#### Note: + +- **Metric**: For models trained with 3 classes, the average APH@L2 (mAPH@L2) over all categories is reported and used to rank the model. For models trained with only 1 class, the APH@L2 is reported and used to rank the model. +- **Data Split**: Here we provide several baselines for the Waymo dataset, among which D5 means that we divide the dataset into 5 folds and only use one fold for efficient experiments. Using the complete dataset can boost the performance a lot, especially for the detection of cyclists and pedestrians, where an improvement of more than 5 mAP or mAPH can be expected. +- **Implementation Details**: We largely follow the implementation in the [paper](https://arxiv.org/pdf/1912.04838.pdf) in terms of the network architecture (having a stride of 1 for the first convolutional block). Different settings of voxelization, data augmentation and hyper-parameters make these baselines outperform those in the paper by about 7 mAP for car and 4 mAP for pedestrian with only a subset of the whole dataset. All of these results are achieved without bells and whistles, e.g. ensembling, multi-scale training and test augmentation. +- **License Agreement**: To comply with the [license agreement of the Waymo dataset](https://waymo.com/open/terms/), the pre-trained models on the Waymo dataset are not released. We still release the training logs as a reference to ease future research. +- `FP16` means Mixed Precision (FP16) is adopted in training. 
With mixed precision training, we can train PointPillars on the nuScenes dataset on 8 Titan XP GPUs with a batch size of 2, which would cause an OOM error without mixed precision training. The loss scale for PointPillars on the nuScenes dataset is specifically tuned to prevent the loss from becoming NaN. We find a loss scale of 32 more stable than 512, though 32 still occasionally causes NaN. + +## Citation + +```latex +@inproceedings{lang2019pointpillars, + title={Pointpillars: Fast encoders for object detection from point clouds}, + author={Lang, Alex H and Vora, Sourabh and Caesar, Holger and Zhou, Lubing and Yang, Jiong and Beijbom, Oscar}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={12697--12705}, + year={2019} +} +``` diff --git a/configs/pointpillars/metafile.yml b/configs/pointpillars/metafile.yml new file mode 100755 index 0000000..77f5692 --- /dev/null +++ b/configs/pointpillars/metafile.yml @@ -0,0 +1,215 @@ +Collections: + - Name: PointPillars + Metadata: + Training Techniques: + - AdamW + Architecture: + - Feature Pyramid Network + Paper: + URL: https://arxiv.org/abs/1812.05784 + Title: 'PointPillars: Fast Encoders for Object Detection from Point Clouds' + README: configs/pointpillars/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/voxel_encoders/pillar_encoder.py#L13 + Version: v0.6.0 + +Models: + - Name: pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py + Metadata: + Training Data: KITTI + Training Memory (GB): 5.4 + Training Resources: 8x V100 GPUs + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + AP: 77.6 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth + + - Name: pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class + Alias: pointpillars_kitti-3class + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py + Metadata: + Training Data: KITTI + Training Memory (GB): 5.5 + Training Resources: 8x V100 GPUs + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + AP: 64.07 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306-37dc2420.pth + + - Name: pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Data: nuScenes + Training Memory (GB): 16.4 + Training Resources: 8x V100 GPUs + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 34.33 + NDS: 49.1 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20210826_225857-f19d00a3.pth + + - Name: pointpillars_hv_secfpn_sbn-all_8xb4-amp-2x_nus-3d + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-amp-2x_nus-3d.py + Metadata: + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x TITAN Xp + Architecture: + - Hard Voxelization + Training Data: nuScenes + Training Memory (GB): 8.37 + Results: + - Task: 3D Object Detection + Dataset: 
nuScenes + Metrics: + mAP: 35.19 + NDS: 50.27 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d_20201020_222626-c3f0483e.pth + Code: + Version: v0.7.0 + + - Name: pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Data: nuScenes + Training Memory (GB): 16.3 + Training Resources: 8x V100 GPUs + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 39.71 + NDS: 53.15 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20210826_104936-fca299c1.pth + + - Name: pointpillars_hv_fpn_sbn-all_8xb4-amp-2x_nus-3d + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-amp-2x_nus-3d.py + Metadata: + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x TITAN Xp + Architecture: + - Hard Voxelization + Training Data: nuScenes + Training Memory (GB): 8.40 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 39.26 + NDS: 53.26 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d_20201021_120719-269f9dd6.pth + Code: + Version: v0.7.0 + + - Name: pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py + Metadata: + Training Data: Lyft + Training Memory (GB): 12.2 + Training Resources: 8x V100 GPUs + Results: + - Task: 3D Object Detection + Dataset: Lyft + Metrics: + Private Score: 13.8 + Public Score: 14.1 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210829_100455-82b81c39.pth + + - Name: pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py + Metadata: + Training Data: Lyft + Training Memory (GB): 9.2 + Training Resources: 8x V100 GPUs + Results: + - Task: 3D Object Detection + Dataset: Lyft + Metrics: + Private Score: 14.0 + Public Score: 15.0 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210822_095429-0b3d6196.pth + + - Name: pointpillars_hv_secfpn_sbn_2x16_2x_waymoD5-3d-car + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_sbn_2x16_2x_waymoD5-3d-car.py + Metadata: + Training Data: Waymo + Training Memory (GB): 7.76 + Training Resources: 8x GeForce GTX 1080 Ti + Results: + - Task: 3D Object Detection + Dataset: Waymo + Metrics: + mAP@L1: 70.2 + mAPH@L1: 69.6 + mAP@L2: 62.6 + mAPH@L2: 62.1 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car_20200901_204315-302fc3e7.pth + + - Name: pointpillars_hv_secfpn_sbn_2x16_2x_waymoD5-3d-3class + Alias: pointpillars_waymod5-3class + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py + Metadata: + Training 
Data: Waymo + Training Memory (GB): 8.12 + Training Resources: 8x GeForce GTX 1080 Ti + Results: + - Task: 3D Object Detection + Dataset: Waymo + Metrics: + mAP@L1: 64.7 + mAPH@L1: 57.6 + mAP@L2: 58.4 + mAPH@L2: 52.1 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class_20200831_204144-d1a706b1.pth + + - Name: pointpillars_hv_secfpn_sbn_2x16_2x_waymo-3d-car + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py + Metadata: + Training Data: Waymo + Training Memory (GB): 7.76 + Training Resources: 8x GeForce GTX 1080 Ti + Results: + - Task: 3D Object Detection + Dataset: Waymo + Metrics: + mAP@L1: 72.1 + mAPH@L1: 71.5 + mAP@L2: 63.6 + mAPH@L2: 63.1 + + - Name: pointpillars_hv_secfpn_sbn_2x16_2x_waymo-3d-3class + In Collection: PointPillars + Config: configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py + Metadata: + Training Data: Waymo + Training Memory (GB): 8.12 + Training Resources: 8x GeForce GTX 1080 Ti + Results: + - Task: 3D Object Detection + Dataset: Waymo + Metrics: + mAP@L1: 68.8 + mAPH@L1: 63.3 + mAP@L2: 62.6 + mAPH@L2: 57.6 diff --git a/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d-range100.py b/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d-range100.py new file mode 100755 index 0000000..d912bf5 --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d-range100.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_range100_lyft.py', + '../_base_/datasets/lyft-3d-range100.py', + '../_base_/schedules/schedule-2x.py', '../_base_/default_runtime.py' +] +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py b/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py new file mode 100755 index 0000000..8491dea --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_lyft.py', + '../_base_/datasets/lyft-3d.py', '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py' +] +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py b/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py new file mode 100755 index 0000000..a268c7e --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py @@ -0,0 +1,4 @@ +_base_ = './pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py' +train_dataloader = dict(batch_size=2, num_workers=2) +# schedule settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=512.) 
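The README above notes that the loss scale for PointPillars on nuScenes is sensitive (32 was found more stable than 512, though neither is fully NaN-proof). If NaN losses show up, a derived config can lower the loss scale; the following is a hypothetical override on top of the AMP config above, not a config shipped in this PR:

```python
# Hypothetical variant: reuse the AMP setup above with a smaller loss scale.
_base_ = './pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py'
optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=32.)
```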
diff --git a/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py b/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..6000f88 --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_nus.py', + '../_base_/datasets/nus-3d.py', '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py' +] + +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. +train_cfg = dict(val_interval=24) diff --git a/configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py b/configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py new file mode 100755 index 0000000..65786ab --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py @@ -0,0 +1,130 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_kitti.py', + '../_base_/datasets/kitti-3d-3class.py', + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py' +] + +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] +# dataset settings +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +metainfo = dict(classes=class_names) +backend_args = None + +# PointPillars adopted a different sampling strategies among classes +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=15, Cyclist=15), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) + +# PointPillars uses different augmentation hyper parameters +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler, use_ground_plane=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + dataset=dict(dataset=dict(pipeline=train_pipeline, metainfo=metainfo))) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline, 
metainfo=metainfo)) +# In practice PointPillars also uses a different schedule +# optimizer +lr = 0.001 +epoch_num = 80 +optim_wrapper = dict( + optimizer=dict(lr=lr), clip_grad=dict(max_norm=35, norm_type=2)) +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.4, + eta_min=lr * 10, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=epoch_num * 0.6, + eta_min=lr * 1e-4, + begin=epoch_num * 0.4, + end=epoch_num * 1, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.4, + eta_min=0.85 / 0.95, + begin=0, + end=epoch_num * 0.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=epoch_num * 0.6, + eta_min=1, + begin=epoch_num * 0.4, + end=epoch_num * 1, + convert_to_iter_based=True) +] +# max_norm=35 is slightly better than 10 for PointPillars in the earlier +# development of the codebase thus we keep the setting. But we does not +# specifically tune this parameter. +# PointPillars usually need longer schedule than second, we simply double +# the training schedule. Do remind that since we use RepeatDataset and +# repeat factor is 2, so we actually train 160 epochs. +train_cfg = dict(by_epoch=True, max_epochs=epoch_num, val_interval=2) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py b/configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py new file mode 100755 index 0000000..0ffd46c --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py @@ -0,0 +1,101 @@ +# model settings +_base_ = './pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py' +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Car'] +metainfo = dict(classes=class_names) +backend_args = None + +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] + +model = dict( + bbox_head=dict( + type='Anchor3DHead', + num_classes=1, + anchor_generator=dict( + _delete_=True, + type='AlignedAnchor3DRangeGenerator', + ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]], + sizes=[[3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=True)), + # model training and testing settings + train_cfg=dict( + _delete_=True, + assigner=dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + allowed_border=0, + pos_weight=-1, + debug=False)) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + classes=class_names, + sample_groups=dict(Car=15), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler, use_ground_plane=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', 
point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +train_dataloader = dict( + dataset=dict(dataset=dict(pipeline=train_pipeline, metainfo=metainfo))) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo)) diff --git a/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py new file mode 100755 index 0000000..61f8fba --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_waymo.py', + '../_base_/datasets/waymoD5-3d-3class.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] + +# data settings +train_dataloader = dict(dataset=dict(dataset=dict(load_interval=1))) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py new file mode 100755 index 0000000..38bd95b --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py @@ -0,0 +1,42 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_waymo.py', + '../_base_/datasets/waymoD5-3d-car.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] + +# data settings +train_dataloader = dict(dataset=dict(dataset=dict(load_interval=1))) + +# model settings +model = dict( + type='MVXFasterRCNN', + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=1, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345]], + sizes=[[4.73, 2.08, 1.77]], + rotations=[0, 1.57], + reshape_out=True)), + # model training and testing settings + train_cfg=dict( + _delete_=True, + pts=dict( + assigner=dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False))) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py new file mode 100755 index 0000000..f39d9ea --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_waymo.py', + '../_base_/datasets/waymoD5-3d-3class.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] diff --git a/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-car.py b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-car.py new file mode 100755 index 0000000..e2e9833 --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-car.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_waymo.py', + '../_base_/datasets/waymoD5-3d-car.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='MVXFasterRCNN', + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=1, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345]], + sizes=[[4.73, 2.08, 1.77]], + rotations=[0, 1.57], + reshape_out=True)), + # model training and testing settings + train_cfg=dict( + _delete_=True, + pts=dict( + assigner=dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False))) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d-range100.py b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d-range100.py new file mode 100755 index 0000000..90c2071 --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d-range100.py @@ -0,0 +1,47 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_range100_lyft.py', + '../_base_/datasets/lyft-3d-range100.py', + '../_base_/schedules/schedule-2x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + pts_neck=dict( + _delete_=True, + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + in_channels=384, + feat_channels=384, + anchor_generator=dict( + _delete_=True, + type='AlignedAnchor3DRangeGenerator', + ranges=[[-100, -100, -1.0715024, 100, 100, -1.0715024], + [-100, -100, -0.3033737, 100, 100, -0.3033737], + [-100, -100, -0.3519405, 100, 100, -0.3519405], + [-100, -100, -0.8871424, 100, 100, -0.8871424], + [-100, -100, -0.6276341, 100, 100, -0.6276341], + [-100, -100, -1.3220503, 100, 100, -1.3220503], + [-100, -100, -1.0709302, 100, 100, -1.0709302], + [-100, -100, -0.9122268, 100, 100, -0.9122268], + [-100, -100, -1.8012227, 100, 100, -1.8012227]], + sizes=[ + [4.75, 1.92, 1.71], # car + [10.24, 2.84, 3.44], # truck + [12.70, 2.92, 3.42], # bus + [6.52, 2.42, 2.34], # emergency vehicle + [8.17, 2.75, 3.20], # other vehicle + [2.35, 0.96, 1.59], # motorcycle + [1.76, 0.63, 1.44], # bicycle + [0.80, 0.76, 1.76], # pedestrian + [0.73, 0.35, 0.50] # animal + ], + rotations=[0, 1.57], + reshape_out=True))) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py new file mode 100755 index 0000000..61c5f70 --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py @@ -0,0 +1,48 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_lyft.py', + '../_base_/datasets/lyft-3d.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] +# model settings +model = dict( + pts_neck=dict( + _delete_=True, + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + in_channels=384, + feat_channels=384, + anchor_generator=dict( + _delete_=True, + type='AlignedAnchor3DRangeGenerator', + ranges=[[-80, -80, -1.0715024, 80, 80, -1.0715024], + [-80, -80, -0.3033737, 80, 80, -0.3033737], + [-80, -80, -0.3519405, 80, 80, -0.3519405], + [-80, -80, -0.8871424, 80, 80, -0.8871424], + [-80, -80, -0.6276341, 80, 80, -0.6276341], + [-80, -80, -1.3220503, 80, 80, -1.3220503], + [-80, -80, -1.0709302, 80, 80, -1.0709302], + [-80, -80, -0.9122268, 80, 80, -0.9122268], + [-80, -80, -1.8012227, 80, 80, -1.8012227]], + sizes=[ + [4.75, 1.92, 1.71], # car + [10.24, 2.84, 3.44], # truck + [12.70, 2.92, 3.42], # bus + [6.52, 2.42, 2.34], # emergency vehicle + [8.17, 2.75, 3.20], # other vehicle + [2.35, 0.96, 1.59], # motorcycle + [1.76, 0.63, 1.44], # bicycle + [0.80, 0.76, 1.76], # pedestrian + [0.73, 0.35, 0.50] # animal + ], + rotations=[0, 1.57], + reshape_out=True))) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py new file mode 100755 index 0000000..bfa5fc1 --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py @@ -0,0 +1,4 @@ +_base_ = './pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py' +train_dataloader = dict(batch_size=2, num_workers=2) +# schedule settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=512.) 
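For reference, the AMP override above does not replace the base optimizer; it only wraps it. The sketch below shows roughly what the merged `optim_wrapper` looks like after `_base_` inheritance; the AdamW and gradient-clipping values are assumptions about a typical `schedule-2x.py`, not values taken from this patch.

```python
# Hedged sketch of the resolved optimizer wrapper after config inheritance.
# The AdamW and clip_grad values are assumed, not read from this patch.
optim_wrapper = dict(
    type='AmpOptimWrapper',  # substituted by the AMP override above
    loss_scale=512.,         # static loss scale applied to FP16 gradients
    optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.01),  # assumed base values
    clip_grad=dict(max_norm=35, norm_type=2))  # assumed base clipping
```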
diff --git a/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..51b5ae2 --- /dev/null +++ b/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,48 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_nus.py', + '../_base_/datasets/nus-3d.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] +# model settings +model = dict( + pts_neck=dict( + _delete_=True, + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + in_channels=384, + feat_channels=384, + anchor_generator=dict( + _delete_=True, + type='AlignedAnchor3DRangeGenerator', + ranges=[ + [-49.6, -49.6, -1.80032795, 49.6, 49.6, -1.80032795], + [-49.6, -49.6, -1.74440365, 49.6, 49.6, -1.74440365], + [-49.6, -49.6, -1.68526504, 49.6, 49.6, -1.68526504], + [-49.6, -49.6, -1.67339111, 49.6, 49.6, -1.67339111], + [-49.6, -49.6, -1.61785072, 49.6, 49.6, -1.61785072], + [-49.6, -49.6, -1.80984986, 49.6, 49.6, -1.80984986], + [-49.6, -49.6, -1.763965, 49.6, 49.6, -1.763965], + ], + sizes=[ + [4.60718145, 1.95017717, 1.72270761], # car + [6.73778078, 2.4560939, 2.73004906], # truck + [12.01320693, 2.87427237, 3.81509561], # trailer + [1.68452161, 0.60058911, 1.27192197], # bicycle + [0.7256437, 0.66344886, 1.75748069], # pedestrian + [0.40359262, 0.39694519, 1.06232151], # traffic_cone + [0.48578221, 2.49008838, 0.98297065], # barrier + ], + custom_values=[0, 0], + rotations=[0, 1.57], + reshape_out=True))) + +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. +train_cfg = dict(val_interval=24) diff --git a/configs/pv_rcnn/README.md b/configs/pv_rcnn/README.md new file mode 100755 index 0000000..5af3190 --- /dev/null +++ b/configs/pv_rcnn/README.md @@ -0,0 +1,42 @@ +# PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection + +> [PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection](https://arxiv.org/abs/1912.13192) + + + +## Abstract + +3D object detection has been receiving increasing attention from both industry and academia thanks to its wide applications in various fields such as autonomous driving and robotics. LiDAR sensors are widely adopted in autonomous driving vehicles and robots for capturing 3D scene information as sparse and irregular point clouds, which provide vital cues for 3D scene perception and understanding. In this paper, we propose to achieve high performance 3D object detection by designing novel point-voxel integrated networks to learn better 3D features from irregular point clouds. + +
    + +## Results and models + +### KITTI + +| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :---------------------------------------------: | :-----: | :--------: | :------: | :------------: | :---: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./pv_rcnn_8xb2-80e_kitti-3d-3class.py) | 3 Class | cyclic 80e | 5.4 | | 72.28 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class/pv_rcnn_8xb2-80e_kitti-3d-3class_20221117_234428-b384d22f.pth) \\ [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class/pv_rcnn_8xb2-80e_kitti-3d-3class_20221117_234428.json) | + +Note: mAP represents AP11 results on 3 Class under the moderate setting. + +Detailed performance on KITTI 3D detection (3D) is as follows, evaluated by AP11 metric: + +| | Easy | Moderate | Hard | +| ---------- | :---: | :------: | :---: | +| Car | 89.20 | 83.72 | 78.79 | +| Pedestrian | 66.64 | 59.84 | 55.33 | +| Cyclist | 87.25 | 73.27 | 69.61 | + +## Citation + +```latex +@article{ShaoshuaiShi2020PVRCNNPF, + title={PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection}, + author={Shaoshuai Shi and Chaoxu Guo and Li Jiang and Zhe Wang and Jianping Shi and Xiaogang Wang and Hongsheng Li}, + journal={computer vision and pattern recognition}, + year={2020} +} +``` diff --git a/configs/pv_rcnn/metafile.yml b/configs/pv_rcnn/metafile.yml new file mode 100755 index 0000000..ddef749 --- /dev/null +++ b/configs/pv_rcnn/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: PV-RCNN + Metadata: + Training Data: KITTI + Training Techniques: + - AdamW + Training Resources: 8x A100 GPUs + Architecture: + - Feature Pyramid Network + Paper: + URL: https://arxiv.org/abs/1912.13192 + Title: 'PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection' + README: configs/pv_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/models/detectors/pv_rcnn.py#L12 + Version: v1.1.0rc2 + +Models: + - Name: pv_rcnn_8xb2-80e_kitti-3d-3class + In Collection: PV-RCNN + Config: configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py + Metadata: + Training Memory (GB): 5.4 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 72.28 + Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class/pv_rcnn_8xb2-80e_kitti-3d-3class_20221117_234428-b384d22f.pth diff --git a/configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py b/configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py new file mode 100755 index 0000000..f894c9a --- /dev/null +++ b/configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py @@ -0,0 +1,369 @@ +_base_ = [ + '../_base_/datasets/kitti-3d-3class.py', + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py' +] + +voxel_size = [0.05, 0.05, 0.1] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] + +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +metainfo = dict(CLASSES=class_names) +backend_args = None +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + 
filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler, use_ground_plane=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +model = dict( + type='PointVoxelRCNN', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=5, # max_points_per_voxel + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseEncoder', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act'), + encoder_paddings=((0, 0, 0), ((1, 1, 1), 0, 0), ((1, 1, 1), 0, 0), + ((0, 1, 1), 0, 0)), + return_middle_feats=True), + points_encoder=dict( + type='VoxelSetAbstraction', + num_keypoints=2048, + fused_out_channel=128, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + voxel_sa_cfgs_list=[ + dict( + type='StackedSAModuleMSG', + in_channels=16, + scale_factor=1, + radius=(0.4, 0.8), + sample_nums=(16, 16), + mlp_channels=((16, 16), (16, 16)), + use_xyz=True), + dict( + type='StackedSAModuleMSG', + in_channels=32, + scale_factor=2, + radius=(0.8, 1.2), + sample_nums=(16, 32), + mlp_channels=((32, 32), (32, 32)), + use_xyz=True), + dict( + type='StackedSAModuleMSG', + in_channels=64, + scale_factor=4, + radius=(1.2, 2.4), + sample_nums=(16, 32), + mlp_channels=((64, 64), (64, 64)), + use_xyz=True), + dict( + type='StackedSAModuleMSG', + in_channels=64, + scale_factor=8, + radius=(2.4, 4.8), + sample_nums=(16, 32), + mlp_channels=((64, 64), (64, 64)), + use_xyz=True) + ], + rawpoints_sa_cfgs=dict( + type='StackedSAModuleMSG', + in_channels=1, + radius=(0.4, 0.8), + sample_nums=(16, 16), + mlp_channels=((16, 16), (16, 16)), + use_xyz=True), + bev_feat_channel=256, + bev_scale_factor=8), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + rpn_head=dict( + type='PartA2RPNHead', + num_classes=3, + in_channels=512, + feat_channels=512, + 
use_direction_classifier=True, + dir_offset=0.78539, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[[0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78]], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + assigner_per_size=True, + assign_per_class=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + roi_head=dict( + type='PVRCNNRoiHead', + num_classes=3, + semantic_head=dict( + type='ForegroundSegmentationHead', + in_channels=640, + extra_width=0.1, + loss_seg=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + activated=True, + loss_weight=1.0)), + bbox_roi_extractor=dict( + type='Batch3DRoIGridExtractor', + grid_size=6, + roi_layer=dict( + type='StackedSAModuleMSG', + in_channels=128, + radius=(0.8, 1.6), + sample_nums=(16, 16), + mlp_channels=((64, 64), (64, 64)), + use_xyz=True, + pool_mod='max'), + ), + bbox_head=dict( + type='PVRCNNBBoxHead', + in_channels=128, + grid_size=6, + num_classes=3, + class_agnostic=True, + shared_fc_channels=(256, 256), + reg_channels=(256, 256), + cls_channels=(256, 256), + dropout_ratio=0.3, + with_corner_loss=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1) + ], + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=9000, + nms_post=512, + max_num=512, + nms_thr=0.8, + score_thr=0, + use_rotate_nms=True), + rcnn=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1) + ], + sampler=dict( + type='IoUNegPiecewiseSampler', + num=128, + pos_fraction=0.5, + neg_piece_fractions=[0.8, 0.2], + neg_iou_piece_thrs=[0.55, 0.1], + neg_pos_ub=-1, + add_gt_as_proposals=False, + return_iou=True), + cls_pos_thr=0.75, + cls_neg_thr=0.25)), + test_cfg=dict( + 
rpn=dict( + nms_pre=1024, + nms_post=100, + max_num=100, + nms_thr=0.7, + score_thr=0, + use_rotate_nms=True), + rcnn=dict( + use_rotate_nms=True, + use_raw_score=True, + nms_thr=0.1, + score_thr=0.1))) +train_dataloader = dict( + batch_size=2, + num_workers=2, + dataset=dict(dataset=dict(pipeline=train_pipeline, metainfo=metainfo))) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo)) +eval_dataloader = dict(dataset=dict(pipeline=test_pipeline, metainfo=metainfo)) +lr = 0.001 +optim_wrapper = dict(optimizer=dict(lr=lr)) +param_scheduler = [ + # learning rate scheduler + # During the first 16 epochs, learning rate increases from 0 to lr * 10 + # during the next 24 epochs, learning rate decreases from lr * 10 to + # lr * 1e-4 + dict( + type='CosineAnnealingLR', + T_max=15, + eta_min=lr * 10, + begin=0, + end=15, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=25, + eta_min=lr * 1e-4, + begin=15, + end=40, + by_epoch=True, + convert_to_iter_based=True), + # momentum scheduler + # During the first 16 epochs, momentum increases from 0 to 0.85 / 0.95 + # during the next 24 epochs, momentum increases from 0.85 / 0.95 to 1 + dict( + type='CosineAnnealingMomentum', + T_max=15, + eta_min=0.85 / 0.95, + begin=0, + end=15, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=25, + eta_min=1, + begin=15, + end=40, + by_epoch=True, + convert_to_iter_based=True) +] diff --git a/configs/regnet/README.md b/configs/regnet/README.md new file mode 100755 index 0000000..4c992ed --- /dev/null +++ b/configs/regnet/README.md @@ -0,0 +1,82 @@ +# Designing Network Design Spaces + +> [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) + + + +## Abstract + +In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs. + +
+
+## Introduction
+
+We implement RegNetX models in 3D detection systems and provide their first results with PointPillars on the nuScenes and Lyft datasets.
+
+The pre-trained models are converted from the [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md) and maintained in [mmcv](https://github.com/open-mmlab/mmcv).
+
+## Usage
+
+To use a RegNet model, there are two steps:
+
+1. Convert the model to the ResNet style supported by MMDetection
+2. Modify the backbone and neck in the config accordingly
+
+### Convert model
+
+We already provide models with FLOPs ranging from 800M to 12G in our model zoo.
+
+For more general usage, we also provide the script `regnet2mmdet.py` in the tools directory to convert the keys of models pretrained by [pycls](https://github.com/facebookresearch/pycls/) to ResNet-style checkpoints used in MMDetection.
+
+```bash
+python -u tools/model_converters/regnet2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH}
+```
+
+This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
+
+### Modify config
+
+Users can modify the backbone's `depth` and the corresponding keys in `arch` according to the entries in the [pycls model zoo](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md).
+The parameter `in_channels` of the FPN can be found in Figures 15 & 16 of the paper (`wi` in the legend).
+This directory already provides some configs with their performance, covering RegNetX models from the 800MF to 12GF level.
+For other pre-trained or self-implemented RegNet models, users are responsible for checking these parameters themselves.
+
+**Note**: Although Fig. 15 & 16 also provide `w0`, `wa`, `wm`, `group_w`, and `bot_mul` for `arch`, they are quantized and thus inaccurate; using them sometimes produces a backbone whose keys do not match those of the pre-trained model.
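To make the two steps above concrete, the following sketch condenses the RegNetX-400MF nuScenes config from this directory: the backbone override supplies `arch` (a named string such as `'regnetx_1.6gf'` also works), and the neck's `in_channels` are set to the backbone's stage widths at the selected `out_indices`. For other RegNet models, replace these values after checking them against the pycls model zoo.

```python
# Condensed restatement of the RegNetX-400MF nuScenes config in this directory,
# shown to illustrate the two-step recipe (swap the backbone, then set the FPN
# `in_channels` to the matching stage widths, i.e. `wi` in Fig. 15 & 16).
_base_ = [
    '../_base_/models/pointpillars_hv_fpn_nus.py',
    '../_base_/datasets/nus-3d.py',
    '../_base_/schedules/schedule-2x.py',
    '../_base_/default_runtime.py',
]
model = dict(
    type='MVXFasterRCNN',
    pts_backbone=dict(
        _delete_=True,  # drop the SECOND backbone defined by the base model file
        type='NoStemRegNet',
        arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'),
        out_indices=(1, 2, 3),
        frozen_stages=-1,
        strides=(1, 2, 2, 2),
        base_channels=64,
        stem_channels=64,
        norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
        norm_eval=False,
        style='pytorch'),
    # stage widths of RegNetX-400MF at out_indices (1, 2, 3)
    pts_neck=dict(in_channels=[64, 160, 384]))
```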
+ +## Results and models + +### nuScenes + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | NDS | Download | +| :-------------------------------------------------------------------------------------: | :-----: | :------: | :------------: | :---: | :--: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](../pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 16.4 | | 35.17 | 49.7 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230725-0817d270.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230725.log.json) | +| [RegNetX-400MF-SECFPN](./pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 16.4 | | 41.2 | 55.2 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230334-53044f32.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230334.log.json) | +| [FPN](../pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 17.1 | | 40.0 | 53.3 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405-2fa62f3d.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405.log.json) | +| [RegNetX-400MF-FPN](./pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 17.3 | | 44.8 | 56.4 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d_20200620_230239-c694dce7.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d_20200620_230239.log.json) | +| [RegNetX-1.6gF-FPN](./pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 24.0 | | 48.2 | 59.3 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d_20200629_050311-dcd4e090.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d_20200629_050311.log.json) | + +### Lyft + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | Private Score | Public Score | Download | +| :-------------------------------------------------------------------------------------: | :-----: | 
:------: | :------------: | :-----------: | :----------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](../pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py) | 2x | 12.2 | | 13.9 | 14.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210517_204807-2518e3de.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210517_204807.log.json) | +| [RegNetX-400MF-SECFPN](./hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_lyft-3d.py) | 2x | 15.9 | | 14.9 | 15.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d_20210524_092151-42513826.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d_20210524_092151.log.json) | +| [FPN](../pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py) | 2x | 9.2 | | 14.9 | 15.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210517_202818-fc6904c3.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210517_202818.log.json) | +| [RegNetX-400MF-FPN](./hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_lyft-3d.py) | 2x | 13.0 | | 16.0 | 16.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d_20210521_115618-823dcf18.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d_20210521_115618.log.json) | + +## Citation + +```latex +@article{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/regnet/metafile.yml b/configs/regnet/metafile.yml new file mode 100755 index 0000000..c2caa29 --- /dev/null +++ b/configs/regnet/metafile.yml @@ -0,0 +1,85 @@ +Models: + - Name: pointpillars_hv_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d + In Collection: PointPillars + Config: configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Data: nuScenes + Training Memory (GB): 16.4 + Architecture: + - RegNetX + - Hard Voxelization + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 41.2 + NDS: 55.2 + Weights: 
https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230334-53044f32.pth + + - Name: pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d + In Collection: PointPillars + Config: configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Data: nuScenes + Training Memory (GB): 17.3 + Architecture: + - RegNetX + - Hard Voxelization + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 44.8 + NDS: 56.4 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d_20200620_230239-c694dce7.pth + + - Name: pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d + In Collection: PointPillars + Config: configs/regnet/pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py + Metadata: + Training Data: nuScenes + Training Memory (GB): 24.0 + Architecture: + - RegNetX + - Hard Voxelization + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 48.2 + NDS: 59.3 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d_20200629_050311-dcd4e090.pth + + - Name: pointpillars_hv_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d + In Collection: PointPillars + Config: configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d.py + Metadata: + Training Data: Lyft + Training Memory (GB): 15.9 + Architecture: + - RegNetX + - Hard Voxelization + Results: + - Task: 3D Object Detection + Dataset: Lyft + Metrics: + Private Score: 14.9 + Public Score: 15.1 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d_20210524_092151-42513826.pth + + - Name: pointpillars_hv_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d + In Collection: PointPillars + Config: configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d.py + Metadata: + Training Data: Lyft + Training Memory (GB): 13.0 + Architecture: + - RegNetX + - Hard Voxelization + Results: + - Task: 3D Object Detection + Dataset: Lyft + Metrics: + Private Score: 16.0 + Public Score: 16.1 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d_20210521_115618-823dcf18.pth diff --git a/configs/regnet/pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py b/configs/regnet/pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..97fe8a3 --- /dev/null +++ b/configs/regnet/pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_nus.py', + '../_base_/datasets/nus-3d.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] +# model settings +model = dict( + type='MVXFasterRCNN', + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch='regnetx_1.6gf', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', 
eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[168, 408, 912])) diff --git a/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb2-2x_lyft-3d.py b/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb2-2x_lyft-3d.py new file mode 100755 index 0000000..90df3f5 --- /dev/null +++ b/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb2-2x_lyft-3d.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_lyft.py', + '../_base_/datasets/lyft-3d.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] +# model settings +model = dict( + type='MVXFasterRCNN', + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[64, 160, 384])) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py b/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..e2118b5 --- /dev/null +++ b/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_nus.py', + '../_base_/datasets/nus-3d.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] +# model settings +model = dict( + type='MVXFasterRCNN', + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[64, 160, 384])) diff --git a/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_range100_8xb2-2x_lyft-3d.py b/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_range100_8xb2-2x_lyft-3d.py new file mode 100755 index 0000000..e97e24e --- /dev/null +++ b/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_range100_8xb2-2x_lyft-3d.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_range100_lyft.py', + '../_base_/datasets/lyft-3d-range100.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] +# model settings +model = dict( + type='MVXFasterRCNN', + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[64, 160, 384])) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR 
automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb2-2x_lyft-3d.py b/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb2-2x_lyft-3d.py new file mode 100755 index 0000000..fb330d7 --- /dev/null +++ b/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb2-2x_lyft-3d.py @@ -0,0 +1,39 @@ +_base_ = './hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d.py' +# model settings +model = dict( + pts_neck=dict( + type='SECONDFPN', + _delete_=True, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 160, 384], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + type='Anchor3DHead', + in_channels=384, + feat_channels=384, + anchor_generator=dict( + _delete_=True, + type='AlignedAnchor3DRangeGenerator', + ranges=[[-80, -80, -1.0715024, 80, 80, -1.0715024], + [-80, -80, -0.3033737, 80, 80, -0.3033737], + [-80, -80, -0.3519405, 80, 80, -0.3519405], + [-80, -80, -0.8871424, 80, 80, -0.8871424], + [-80, -80, -0.6276341, 80, 80, -0.6276341], + [-80, -80, -1.3220503, 80, 80, -1.3220503], + [-80, -80, -1.0709302, 80, 80, -1.0709302], + [-80, -80, -0.9122268, 80, 80, -0.9122268], + [-80, -80, -1.8012227, 80, 80, -1.8012227]], + sizes=[ + [4.75, 1.92, 1.71], # car + [10.24, 2.84, 3.44], # truck + [12.70, 2.92, 3.42], # bus + [6.52, 2.42, 2.34], # emergency vehicle + [8.17, 2.75, 3.20], # other vehicle + [2.35, 0.96, 1.59], # motorcycle + [1.76, 0.63, 1.44], # bicycle + [0.80, 0.76, 1.76], # pedestrian + [0.73, 0.35, 0.50] # animal + ], + rotations=[0, 1.57], + reshape_out=True))) diff --git a/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py b/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py new file mode 100755 index 0000000..ef8996a --- /dev/null +++ b/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py @@ -0,0 +1,38 @@ +_base_ = './hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d.py' +# model settings +model = dict( + pts_neck=dict( + type='SECONDFPN', + _delete_=True, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 160, 384], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + type='Anchor3DHead', + in_channels=384, + feat_channels=384, + anchor_generator=dict( + _delete_=True, + type='AlignedAnchor3DRangeGenerator', + ranges=[ + [-49.6, -49.6, -1.80032795, 49.6, 49.6, -1.80032795], + [-49.6, -49.6, -1.74440365, 49.6, 49.6, -1.74440365], + [-49.6, -49.6, -1.68526504, 49.6, 49.6, -1.68526504], + [-49.6, -49.6, -1.67339111, 49.6, 49.6, -1.67339111], + [-49.6, -49.6, -1.61785072, 49.6, 49.6, -1.61785072], + [-49.6, -49.6, -1.80984986, 49.6, 49.6, -1.80984986], + [-49.6, -49.6, -1.763965, 49.6, 49.6, -1.763965], + ], + sizes=[ + [4.60718145, 1.95017717, 1.72270761], # car + [6.73778078, 2.4560939, 2.73004906], # truck + [12.01320693, 2.87427237, 3.81509561], # trailer + [1.68452161, 0.60058911, 1.27192197], # bicycle + [0.7256437, 0.66344886, 1.75748069], # pedestrian + [0.40359262, 0.39694519, 1.06232151], # traffic_cone + [0.48578221, 2.49008838, 0.98297065], # barrier + ], + custom_values=[0, 0], + rotations=[0, 1.57], + reshape_out=True))) diff --git a/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_range100_8xb2-2x_lyft-3d.py 
b/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_range100_8xb2-2x_lyft-3d.py new file mode 100755 index 0000000..2af3719 --- /dev/null +++ b/configs/regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_range100_8xb2-2x_lyft-3d.py @@ -0,0 +1,40 @@ +_base_ = \ + './hv_pointpillars_regnet-400mf_fpn_sbn-all_range100_2x8_2x_lyft-3d.py' +# model settings +model = dict( + pts_neck=dict( + type='SECONDFPN', + _delete_=True, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 160, 384], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + type='Anchor3DHead', + in_channels=384, + feat_channels=384, + anchor_generator=dict( + _delete_=True, + type='AlignedAnchor3DRangeGenerator', + ranges=[[-100, -100, -1.0715024, 100, 100, -1.0715024], + [-100, -100, -0.3033737, 100, 100, -0.3033737], + [-100, -100, -0.3519405, 100, 100, -0.3519405], + [-100, -100, -0.8871424, 100, 100, -0.8871424], + [-100, -100, -0.6276341, 100, 100, -0.6276341], + [-100, -100, -1.3220503, 100, 100, -1.3220503], + [-100, -100, -1.0709302, 100, 100, -1.0709302], + [-100, -100, -0.9122268, 100, 100, -0.9122268], + [-100, -100, -1.8012227, 100, 100, -1.8012227]], + sizes=[ + [4.75, 1.92, 1.71], # car + [10.24, 2.84, 3.44], # truck + [12.70, 2.92, 3.42], # bus + [6.52, 2.42, 2.34], # emergency vehicle + [8.17, 2.75, 3.20], # other vehicle + [2.35, 0.96, 1.59], # motorcycle + [1.76, 0.63, 1.44], # bicycle + [0.80, 0.76, 1.76], # pedestrian + [0.73, 0.35, 0.50] # animal + ], + rotations=[0, 1.57], + reshape_out=True))) diff --git a/configs/sassd/README.md b/configs/sassd/README.md new file mode 100755 index 0000000..d1eb771 --- /dev/null +++ b/configs/sassd/README.md @@ -0,0 +1,28 @@ +# Structure Aware Single-stage 3D Object Detection from Point Cloud + +> [Structure Aware Single-stage 3D Object Detection from Point Cloud](<%5Bhttps://arxiv.org/abs/2104.02323%5D(https://openaccess.thecvf.com/content_CVPR_2020/papers/He_Structure_Aware_Single-Stage_3D_Object_Detection_From_Point_Cloud_CVPR_2020_paper.pdf)>) + + + +## Abstract + +3D object detection from point cloud data plays an essential role in autonomous driving. Current single-stage detectors are efficient by progressively downscaling the 3D point clouds in a fully convolutional manner. However, the downscaled features inevitably lose spatial information and cannot make full use of the structure information of 3D point cloud, degrading their localization precision. In this work, we propose to improve the localization precision of single-stage detectors by explicitly leveraging the structure information of 3D point cloud. Specifically, we design an auxiliary network which converts the convolutional features in the backbone network back to point-level representations. The auxiliary network is jointly optimized, by two point-level supervisions, to guide the convolutional features in the backbone network to be aware of the object structure. The auxiliary network can be detached after training and therefore introduces no extra computation in the inference stage. Besides, considering that single-stage detectors suffer from the discordance between the predicted bounding boxes and corresponding classification confidences, we develop an efficient part-sensitive warping operation to align the confidences to the predicted bounding boxes. Our proposed detector ranks at the top of KITTI 3D/BEV detection leaderboards and runs at 25 FPS for inference. + +
    + +## Introduction + +We implement SA-SSD and provide the results and checkpoints on KITTI dataset. + +## Citation + +```latex +@InProceedings{he2020sassd, + title={Structure Aware Single-stage 3D Object Detection from Point Cloud}, + author={He, Chenhang and Zeng, Hui and Huang, Jianqiang and Hua, Xian-Sheng and Zhang, Lei}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + year={2020} +} +``` diff --git a/configs/sassd/sassd_8xb6-80e_kitti-3d-3class.py b/configs/sassd/sassd_8xb6-80e_kitti-3d-3class.py new file mode 100755 index 0000000..b145f49 --- /dev/null +++ b/configs/sassd/sassd_8xb6-80e_kitti-3d-3class.py @@ -0,0 +1,99 @@ +_base_ = [ + '../_base_/datasets/kitti-3d-3class.py', + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py' +] + +voxel_size = [0.05, 0.05, 0.1] + +model = dict( + type='SASSD', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=5, + point_cloud_range=[0, -40, -3, 70.4, 40, 1], + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseEncoderSASSD', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) diff --git a/configs/second/README.md b/configs/second/README.md new file mode 100755 index 0000000..182a16e --- /dev/null +++ b/configs/second/README.md @@ -0,0 +1,54 @@ +# Second: Sparsely embedded convolutional detection + +> [SECOND: Sparsely Embedded Convolutional Detection](https://www.mdpi.com/1424-8220/18/10/3337) + + + +## Abstract + +LiDAR-based or RGB-D-based object detection is used in numerous applications, 
ranging from autonomous driving to robot vision. Voxel-based 3D convolutional networks have been used for some time to enhance the retention of information when processing point cloud LiDAR data. However, problems remain, including a slow inference speed and low orientation estimation performance. We therefore investigate an improved sparse convolution method for such networks, which significantly increases the speed of both training and inference. We also introduce a new form of angle loss regression to improve the orientation estimation performance and a new data augmentation approach that can enhance the convergence speed and performance. The proposed network produces state-of-the-art results on the KITTI 3D object detection benchmarks while maintaining a fast inference speed. + +
    + +## Introduction + +We implement SECOND and provide the results and checkpoints on KITTI dataset. + +## Results and models + +### KITTI + +| Backbone | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :-----------------------------------------------------------------: | :-----: | :--------: | :------: | :------------: | :---: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./second_hv_secfpn_8xb6-80e_kitti-3d-car.py) | Car | cyclic 80e | 5.4 | | 79.07 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/second/hv_second_secfpn_6x8_80e_kitti-3d-car/hv_second_secfpn_6x8_80e_kitti-3d-car_20200620_230238-393f000c.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/second/hv_second_secfpn_6x8_80e_kitti-3d-car/hv_second_secfpn_6x8_80e_kitti-3d-car_20200620_230238.log.json) | +| [SECFPN (FP16)](./hv_second_secfpn_fp16_6x8_80e_kitti-3d-car.py) | Car | cyclic 80e | 2.9 | | 78.72 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_second_secfpn_fp16_6x8_80e_kitti-3d-car/hv_second_secfpn_fp16_6x8_80e_kitti-3d-car_20200924_211301-1f5ad833.pth)\| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_second_secfpn_fp16_6x8_80e_kitti-3d-car/hv_second_secfpn_fp16_6x8_80e_kitti-3d-car_20200924_211301.log.json) | +| [SECFPN](./second_hv_secfpn_8xb6-80e_kitti-3d-3class.py) | 3 Class | cyclic 80e | 5.4 | | 65.74 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/second/hv_second_secfpn_6x8_80e_kitti-3d-3class/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/second/hv_second_secfpn_6x8_80e_kitti-3d-3class/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017log.json) | +| [SECFPN (FP16)](./hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py) | 3 Class | cyclic 80e | 2.9 | | 67.4 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class_20200925_110059-05f67bdf.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class_20200925_110059.log.json) | + +### Waymo + +| Backbone | Load Interval | Class | Lr schd | Mem (GB) | Inf time (fps) | mAP@L1 | mAPH@L1 | mAP@L2 | **mAPH@L2** | Download | +| :----------------------------------------------------------------: | :-----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :---------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](./second_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py) | 5 | 3 Class | 2x | 8.12 | | 65.3 | 61.7 | 58.9 | 55.7 | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/second/hv_second_secfpn_sbn_4x8_2x_waymoD5-3d-3class/hv_second_secfpn_sbn_4x8_2x_waymoD5-3d-3class_20201115_112448.log.json) | +| above @ Car | | | 2x | 8.12 | | 67.1 | 66.6 | 58.7 | 58.2 | | +| above @ 
Pedestrian | | | 2x | 8.12 | | 68.1 | 59.1 | 59.5 | 51.5 | | +| above @ Cyclist | | | 2x | 8.12 | | 60.7 | 59.5 | 58.4 | 57.3 | | + +Note: + +- See more details about metrics and data split on Waymo [HERE](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/pointpillars). For implementation details, we basically follow the original settings. All of these results are achieved without bells-and-whistles, e.g. ensemble, multi-scale training and test augmentation. +- `FP16` means Mixed Precision (FP16) is adopted in training. + +## Citation + +```latex +@article{yan2018second, + title={Second: Sparsely embedded convolutional detection}, + author={Yan, Yan and Mao, Yuxing and Li, Bo}, + journal={Sensors}, + year={2018}, + publisher={Multidisciplinary Digital Publishing Institute} +} +``` diff --git a/configs/second/metafile.yml b/configs/second/metafile.yml new file mode 100755 index 0000000..f26f7aa --- /dev/null +++ b/configs/second/metafile.yml @@ -0,0 +1,97 @@ +Collections: + - Name: SECOND + Metadata: + Training Techniques: + - AdamW + Architecture: + - Hard Voxelization + Paper: + URL: https://www.mdpi.com/1424-8220/18/10/3337 + Title: 'SECOND: Sparsely Embedded Convolutional Detection' + README: configs/second/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/backbones/second.py#L11 + Version: v0.5.0 + +Models: + - Name: second_hv_secfpn_8xb6-80e_kitti-3d-car + In Collection: SECOND + Config: configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-car.py + Metadata: + Training Data: KITTI + Training Memory (GB): 5.4 + Training Resources: 8x V100 GPUs + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 79.07 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/second/hv_second_secfpn_6x8_80e_kitti-3d-car/hv_second_secfpn_6x8_80e_kitti-3d-car_20200620_230238-393f000c.pth + + - Name: second_hv_secfpn_8xb6-80e_kitti-3d-3class + In Collection: SECOND + Config: configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py + Metadata: + Training Data: KITTI + Training Memory (GB): 5.4 + Training Resources: 8x V100 GPUs + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 65.74 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/second/hv_second_secfpn_6x8_80e_kitti-3d-3class/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth + + - Name: second_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class + In Collection: SECOND + Config: configs/second/second_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py + Metadata: + Training Data: Waymo + Training Memory (GB): 8.12 + Training Resources: 8x GeForce GTX 1080 Ti + Results: + - Task: 3D Object Detection + Dataset: Waymo + Metrics: + mAP@L1: 65.3 + mAPH@L1: 61.7 + mAP@L2: 58.9 + mAPH@L2: 55.7 + + - Name: second_hv_secfpn_8xb6-amp-80e_kitti-3d-car + In Collection: SECOND + Config: configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-car.py + Metadata: + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x TITAN Xp + Training Data: KITTI + Training Memory (GB): 2.9 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 78.72 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_second_secfpn_fp16_6x8_80e_kitti-3d-car/hv_second_secfpn_fp16_6x8_80e_kitti-3d-car_20200924_211301-1f5ad833.pth + Code: + Version: v0.7.0 + + - Name: second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class + In Collection: SECOND + Config: 
configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py + Metadata: + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x TITAN Xp + Training Data: KITTI + Training Memory (GB): 2.9 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 67.4 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fp16/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class_20200925_110059-05f67bdf.pth + Code: + Version: v0.7.0 diff --git a/configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py b/configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py new file mode 100755 index 0000000..ecd04ee --- /dev/null +++ b/configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/second_hv_secfpn_kitti.py', + '../_base_/datasets/kitti-3d-3class.py', + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py' +] diff --git a/configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-car.py b/configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-car.py new file mode 100755 index 0000000..a2c4083 --- /dev/null +++ b/configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-car.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/second_hv_secfpn_kitti.py', + '../_base_/datasets/kitti-3d-car.py', '../_base_/schedules/cyclic-40e.py', + '../_base_/default_runtime.py' +] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +model = dict( + bbox_head=dict( + type='Anchor3DHead', + num_classes=1, + anchor_generator=dict( + _delete_=True, + type='Anchor3DRangeGenerator', + ranges=[[0, -40.0, -1.78, 70.4, 40.0, -1.78]], + sizes=[[3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=True)), + # model training and testing settings + train_cfg=dict( + _delete_=True, + assigner=dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + allowed_border=0, + pos_weight=-1, + debug=False)) diff --git a/configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py b/configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py new file mode 100755 index 0000000..dca1743 --- /dev/null +++ b/configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py @@ -0,0 +1,4 @@ +_base_ = 'second_hv_secfpn_8xb6-80e_kitti-3d-3class.py' + +# schedule settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=512.) diff --git a/configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-car.py b/configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-car.py new file mode 100755 index 0000000..3e719dd --- /dev/null +++ b/configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-car.py @@ -0,0 +1,4 @@ +_base_ = 'second_hv_secfpn_8xb6-80e_kitti-3d-car.py' + +# schedule settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=512.) 
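The SECOND configs above are thin compositions of `_base_` files, so a quick way to exercise one of them is through the Python inference API with the released checkpoint listed in the README table. The snippet below is a usage sketch rather than part of this patch: the point-cloud path is a placeholder, and the exact return value of `inference_detector` may differ between mmdet3d versions.

```python
# Usage sketch (not part of this patch): load the Car-only SECOND config with
# its released checkpoint and run inference on a KITTI-format point cloud.
from mmdet3d.apis import inference_detector, init_model

config_file = 'configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-car.py'
checkpoint = (
    'https://download.openmmlab.com/mmdetection3d/v0.1.0_models/second/'
    'hv_second_secfpn_6x8_80e_kitti-3d-car/'
    'hv_second_secfpn_6x8_80e_kitti-3d-car_20200620_230238-393f000c.pth')

model = init_model(config_file, checkpoint, device='cuda:0')
# Placeholder path; depending on the mmdet3d version the call returns a
# Det3DDataSample or a (predictions, data) tuple with the 3D boxes inside.
output = inference_detector(model, 'path/to/velodyne/000008.bin')
print(output)
```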
diff --git a/configs/second/second_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py b/configs/second/second_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py new file mode 100755 index 0000000..3f31302 --- /dev/null +++ b/configs/second/second_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py @@ -0,0 +1,145 @@ +_base_ = [ + '../_base_/models/second_hv_secfpn_waymo.py', + '../_base_/datasets/waymoD5-3d-3class.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] + +dataset_type = 'WaymoDataset' +data_root = 'data/waymo/kitti_format/' +class_names = ['Car', 'Pedestrian', 'Cyclist'] +metainfo = dict(classes=class_names) + +point_cloud_range = [-76.8, -51.2, -2, 76.8, 51.2, 4] +input_modality = dict(use_lidar=True, use_camera=False) +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + # dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='Pack3DDetInputs', keys=['points']), + ]) +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_train.pkl', + data_prefix=dict(pts='training/velodyne'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR', + # load one frame every five frames + load_interval=5, + backend_args=backend_args))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne'), + ann_file='waymo_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne'), + ann_file='waymo_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/smoke/README.md b/configs/smoke/README.md new file mode 100755 index 0000000..b03c940 --- /dev/null +++ b/configs/smoke/README.md @@ -0,0 +1,47 @@ +# SMOKE: Single-Stage Monocular 3D Object Detection via Keypoint Estimation + +> [SMOKE: Single-Stage Monocular 3D Object Detection via Keypoint Estimation](https://arxiv.org/abs/2002.10111) + + + +## Abstract + +Estimating 3D orientation and translation of objects is essential for infrastructure-less autonomous navigation and driving. In case of monocular vision, successful methods have been mainly based on two ingredients: (i) a network generating 2D region proposals, (ii) a R-CNN structure predicting 3D object pose by utilizing the acquired regions of interest. We argue that the 2D detection network is redundant and introduces non-negligible noise for 3D detection. Hence, we propose a novel 3D object detection method, named SMOKE, in this paper that predicts a 3D bounding box for each detected object by combining a single keypoint estimate with regressed 3D variables. As a second contribution, we propose a multi-step disentangling approach for constructing the 3D bounding box, which significantly improves both training convergence and detection accuracy. In contrast to previous 3D detection techniques, our method does not require complicated pre/post-processing, extra data, and a refinement stage. Despite of its structural simplicity, our proposed SMOKE network outperforms all existing monocular 3D detection methods on the KITTI dataset, giving the best state-of-the-art result on both 3D object detection and Bird's eye view evaluation. + +
    + +## Introduction + +We implement SMOKE and provide the results and checkpoints on KITTI dataset. + +## Results and models + +### KITTI + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | +| :-----------------------------------------------------------: | :-----: | :------: | :------------: | :---: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [DLA34](./smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py) | 6x | 9.64 | | 13.85 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/smoke/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d_20210929_015553-d46d9bb0.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/smoke/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d_20210929_015553.log.json) | + +Note: mAP represents Car moderate 3D strict AP11 results. + +Detailed performance on KITTI 3D detection (3D/BEV) is as follows, evaluated by AP11 metric: + +| | Easy | Moderate | Hard | +| ---------- | :-----------: | :-----------: | :-----------: | +| Car | 16.92 / 22.97 | 13.85 / 18.32 | 11.90 / 15.88 | +| Pedestrian | 11.13 / 12.61 | 11.10 / 11.32 | 10.67 / 11.14 | +| Cyclist | 0.99 / 1.47 | 0.54 / 0.65 | 0.55 / 0.67 | + +## Citation + +```latex +@inproceedings{liu2020smoke, + title={Smoke: Single-stage monocular 3d object detection via keypoint estimation}, + author={Liu, Zechen and Wu, Zizhang and T{\'o}th, Roland}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops}, + pages={996--997}, + year={2020} +} +``` diff --git a/configs/smoke/metafile.yml b/configs/smoke/metafile.yml new file mode 100755 index 0000000..df555d8 --- /dev/null +++ b/configs/smoke/metafile.yml @@ -0,0 +1,30 @@ +Collections: + - Name: SMOKE + Metadata: + Training Data: KITTI + Training Techniques: + - Adam + Training Resources: 4x V100 GPUS + Architecture: + - SMOKEMono3DHead + - DLA + Paper: + URL: https://arxiv.org/abs/2002.10111 + Title: 'SMOKE: Single-Stage Monocular 3D Object Detection via Keypoint Estimation' + README: configs/smoke/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/mmdet3d/models/detectors/smoke_mono3d.py#L7 + Version: v1.0.0 + +Models: + - Name: smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d + In Collection: SMOKE + Config: configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py + Metadata: + Training Memory (GB): 9.6 + Results: + - Task: 3D Object Detection + Dataset: KITTI + Metrics: + mAP: 13.8 + Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/smoke/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d_20210929_015553-d46d9bb0.pth diff --git a/configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py b/configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py new file mode 100755 index 0000000..8ca6b44 --- /dev/null +++ b/configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py @@ -0,0 +1,63 @@ +_base_ = [ + '../_base_/datasets/kitti-mono3d.py', '../_base_/models/smoke.py', + '../_base_/default_runtime.py' +] + +backend_args = None + +train_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=False, + 
with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='RandomShiftScale', shift_scale=(0.2, 0.4), aug_prob=0.3), + dict(type='AffineResize', img_scale=(1280, 384), down_ratio=4), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_bboxes_3d', + 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] +test_pipeline = [ + dict(type='LoadImageFromFileMono3D', backend_args=backend_args), + dict(type='AffineResize', img_scale=(1280, 384), down_ratio=4), + dict(type='Pack3DDetInputs', keys=['img']) +] + +train_dataloader = dict( + batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# training schedule for 6x +max_epochs = 72 +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning rate +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[50], + gamma=0.1) +] + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='Adam', lr=2.5e-4), + clip_grad=None) + +find_unused_parameters = True diff --git a/configs/spvcnn/README.md b/configs/spvcnn/README.md new file mode 100755 index 0000000..4f27c4b --- /dev/null +++ b/configs/spvcnn/README.md @@ -0,0 +1,44 @@ +# Searching Efficient 3D Architectures with Sparse Point-Voxel Convolution + +> [Searching Efficient 3D Architectures with Sparse Point-Voxel Convolution ](https://arxiv.org/abs/2007.16100) + + + +## Abstract + +Self-driving cars need to understand 3D scenes efficiently and accurately in order to drive safely. Given the limited hardware resources, existing 3D perception models are not able to recognize small instances (e.g., pedestrians, cyclists) very well due to the low-resolution voxelization and aggressive downsampling. To this end, we propose Sparse Point-Voxel Convolution (SPVConv), a lightweight 3D module that equips the vanilla Sparse Convolution with the high-resolution point-based branch. With negligible overhead, this point-based branch is able to preserve the fine details even from large outdoor scenes. To explore the spectrum of efficient 3D models, we first define a flexible architecture design space based on SPVConv, and we then present 3D Neural Architecture Search (3D-NAS) to search the optimal network architecture over this diverse design space efficiently and effectively. Experimental results validate that the resulting SPVNAS model is fast and accurate: it outperforms the state-of-the-art MinkowskiNet by 3.3%, ranking 1st on the competitive SemanticKITTI leaderboard. It also achieves 8x computation reduction and 3x measured speedup over MinkowskiNet with higher accuracy. Finally, we transfer our method to 3D object detection, and it achieves consistent improvements over the one-stage detection baseline on KITTI. + +
    + +## Introduction + +We implement SPVCNN with [TorchSparse](https://github.com/mit-han-lab/torchsparse) backend and provide the result and checkpoints on SemanticKITTI datasets. + +## Results and models + +### SemanticKITTI + +| Method | Lr schd | Mem (GB) | mIoU | Download | +| :--------: | :-----: | :------: | :--: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| SPVCNN-W16 | 15e | 3.9 | 61.8 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/spvcnn_w16_8xb2-15e_semantickitti/spvcnn_w16_8xb2-15e_semantickitti_20230321_011645-a2734d85.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/spvcnn_w16_8xb2-15e_semantickitti/spvcnn_w16_8xb2-15e_semantickitti_20230321_011645.log) | +| SPVCNN-W20 | 15e | 4.2 | 62.6 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/spvcnn_w20_8xb2-15e_semantickitti/spvcnn_w20_8xb2-15e_semantickitti_20230321_011649-519e7eff.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/spvcnn_w20_8xb2-15e_semantickitti/spvcnn_w20_8xb2-15e_semantickitti_20230321_011649.log) | +| SPVCNN-W32 | 15e | 5.4 | 64.3 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/spvcnn_w32_8xb2-15e_semantickitti/spvcnn_w32_8xb2-15e_semantickitti_20230308_113324-f7c0c5b4.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/pvcnn_w32_8xb2-15e_semantickitti/spvcnn_w32_8xb2-15e_semantickitti_20230308_113324.log) | + +**Note:** We follow the implementation in SPVNAS original [repo](https://github.com/mit-han-lab/spvnas) and W16\\W20\\W32 indicates different number of channels. + +**Note:** Due to TorchSparse backend, the model performance is unstable with TorchSparse backend and may fluctuate by about 1.5 mIoU for different random seeds. 
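Because of this run-to-run variance, the configs in this patch pin the random seed (the W32 base sets `randomness = dict(seed=0, deterministic=False, diff_rank_seed=True)` and the W16 variant fixes its own seed); a minimal sketch of such a seed-pinning override, with an arbitrary illustrative seed value, looks like the following:

```python
# Hypothetical override config: inherit the W32 recipe and pin the seed so that
# repeated runs are comparable (the seed value here is illustrative only).
_base_ = ['./spvcnn_w32_8xb2-15e_semantickitti.py']

randomness = dict(seed=42, deterministic=False)
```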
+ +## Citation + +```latex +@inproceedings{tang2020searching, + title={Searching efficient 3d architectures with sparse point-voxel convolution}, + author={Tang, Haotian and Liu, Zhijian and Zhao, Shengyu and Lin, Yujun and Lin, Ji and Wang, Hanrui and Han, Song}, + booktitle={Computer Vision--ECCV 2020: 16th European Conference, Glasgow, UK, August 23--28, 2020, Proceedings, Part XXVIII}, + pages={685--702}, + year={2020}, + organization={Springer} +} +``` diff --git a/configs/spvcnn/metafile.yml b/configs/spvcnn/metafile.yml new file mode 100755 index 0000000..0f3ce9f --- /dev/null +++ b/configs/spvcnn/metafile.yml @@ -0,0 +1,57 @@ +Collections: + - Name: SPVCNN + Metadata: + Training Techniques: + - AdamW + Architecture: + - SPVCNN + Paper: + URL: https://arxiv.org/abs/2007.16100 + Title: 'Searching Efficient 3D Architectures with Sparse Point-Voxel Convolution' + README: configs/spvcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/1.1/mmdet3d/models/backbones/spvcnn_backone.py#L22 + Version: v1.1.0 + +Models: + - Name: spvcnn_w16_8xb2-15e_semantickitti + In Collection: SPVCNN + Config: configs/spvcnn/spvcnn_w16_8xb2-15e_semantickitti.py + Metadata: + Training Data: SemanticKITTI + Training Memory (GB): 3.9 + Training Resources: 8x A100 GPUs + Results: + - Task: 3D Semantic Segmentation + Dataset: SemanticKITTI + Metrics: + mIOU: 61.7 + Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/spvcnn_w16_8xb2-15e_semantickitti/spvcnn_w16_8xb2-15e_semantickitti_20230321_011645-a2734d85.pth + + - Name: spvcnn_w20_8xb2-15e_semantickitti + In Collection: SPVCNN + Config: configs/spvcnn/spvcnn_w20_8xb2-15e_semantickitti.py + Metadata: + Training Data: SemanticKITTI + Training Memory (GB): 4.2 + Training Resources: 8x A100 GPUs + Results: + - Task: 3D Semantic Segmentation + Dataset: SemanticKITTI + Metrics: + mIOU: 62.9 + Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/spvcnn_w20_8xb2-15e_semantickitti/spvcnn_w20_8xb2-15e_semantickitti_20230321_011649-519e7eff.pth + + - Name: spvcnn_w32_8xb2-15e_semantickitti + In Collection: SPVCNN + Config: configs/spvcnn/spvcnn_w32_8xb2-15e_semantickitti.py + Metadata: + Training Data: SemanticKITTI + Training Memory (GB): 5.4 + Training Resources: 8x A100 GPUs + Results: + - Task: 3D Semantic Segmentation + Dataset: SemanticKITTI + Metrics: + mIOU: 64.3 + Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/spvcnn/spvcnn_w32_8xb2-15e_semantickitti/spvcnn_w32_8xb2-15e_semantickitti_20230308_113324-f7c0c5b4.pth diff --git a/configs/spvcnn/spvcnn_w16_8xb2-15e_semantickitti.py b/configs/spvcnn/spvcnn_w16_8xb2-15e_semantickitti.py new file mode 100755 index 0000000..2bfcd2d --- /dev/null +++ b/configs/spvcnn/spvcnn_w16_8xb2-15e_semantickitti.py @@ -0,0 +1,10 @@ +_base_ = ['./spvcnn_w32_8xb2-15e_semantickitti.py'] + +model = dict( + backbone=dict( + base_channels=16, + encoder_channels=[16, 32, 64, 128], + decoder_channels=[128, 64, 48, 48]), + decode_head=dict(channels=48)) + +randomness = dict(seed=1588147245) diff --git a/configs/spvcnn/spvcnn_w20_8xb2-15e_semantickitti.py b/configs/spvcnn/spvcnn_w20_8xb2-15e_semantickitti.py new file mode 100755 index 0000000..cdafb16 --- /dev/null +++ b/configs/spvcnn/spvcnn_w20_8xb2-15e_semantickitti.py @@ -0,0 +1,8 @@ +_base_ = ['./spvcnn_w32_8xb2-15e_semantickitti.py'] + +model = dict( + backbone=dict( + base_channels=20, + encoder_channels=[20, 40, 81, 163], + decoder_channels=[163, 81, 61, 61]), + decode_head=dict(channels=61)) 
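Each width variant can also be loaded directly for inference from its config plus the released checkpoint listed in the table above; a minimal sketch using the `init_model` helper (the same API the demo scripts in this patch build on), assuming a CUDA device and the TorchSparse backend are available:

```python
# Sketch: build SPVCNN-W32 from its config and load the released weights
# (checkpoint URL copied from the results table; downloaded on first use).
from mmdet3d.apis import init_model

config = 'configs/spvcnn/spvcnn_w32_8xb2-15e_semantickitti.py'
checkpoint = ('https://download.openmmlab.com/mmdetection3d/v1.1.0_models/'
              'spvcnn/spvcnn_w32_8xb2-15e_semantickitti/'
              'spvcnn_w32_8xb2-15e_semantickitti_20230308_113324-f7c0c5b4.pth')
model = init_model(config, checkpoint, device='cuda:0')
```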
diff --git a/configs/spvcnn/spvcnn_w32_8xb2-15e_semantickitti.py b/configs/spvcnn/spvcnn_w32_8xb2-15e_semantickitti.py new file mode 100755 index 0000000..0d3f30e --- /dev/null +++ b/configs/spvcnn/spvcnn_w32_8xb2-15e_semantickitti.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/datasets/semantickitti.py', '../_base_/models/spvcnn.py', + '../_base_/default_runtime.py' +] + +train_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_seg_3d=True, + seg_3d_dtype='np.int32', + seg_offset=2**16, + dataset_type='semantickitti'), + dict(type='PointSegClassMapping'), + dict( + type='GlobalRotScaleTrans', + rot_range=[0., 6.28318531], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + ), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] + +train_dataloader = dict( + sampler=dict(seed=0), dataset=dict(dataset=dict(pipeline=train_pipeline))) + +lr = 0.24 +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='SGD', lr=lr, weight_decay=0.0001, momentum=0.9, nesterov=True)) + +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.008, by_epoch=False, begin=0, end=125), + dict( + type='CosineAnnealingLR', + begin=0, + T_max=15, + by_epoch=True, + eta_min=1e-5, + convert_to_iter_based=True) +] + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=15, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=1)) +randomness = dict(seed=0, deterministic=False, diff_rank_seed=True) +env_cfg = dict(cudnn_benchmark=True) diff --git a/configs/ssn/README.md b/configs/ssn/README.md new file mode 100755 index 0000000..89af3f4 --- /dev/null +++ b/configs/ssn/README.md @@ -0,0 +1,53 @@ +# SSN: Shape Signature Networks for Multi-class Object Detection from Point Clouds + +> [SSN: Shape Signature Networks for Multi-class Object Detection from Point Clouds](https://arxiv.org/abs/2004.02774) + + + +## Abstract + +Multi-class 3D object detection aims to localize and classify objects of multiple categories from point clouds. Due to the nature of point clouds, i.e. unstructured, sparse and noisy, some features benefit-ting multi-class discrimination are underexploited, such as shape information. In this paper, we propose a novel 3D shape signature to explore the shape information from point clouds. By incorporating operations of symmetry, convex hull and chebyshev fitting, the proposed shape sig-nature is not only compact and effective but also robust to the noise, which serves as a soft constraint to improve the feature capability of multi-class discrimination. Based on the proposed shape signature, we develop the shape signature networks (SSN) for 3D object detection, which consist of pyramid feature encoding part, shape-aware grouping heads and explicit shape encoding objective. Experiments show that the proposed method performs remarkably better than existing methods on two large-scale datasets. Furthermore, our shape signature can act as a plug-and-play component and ablation study shows its effectiveness and good scalability. + +
    + +## Introduction + +We implement PointPillars with Shape-aware grouping heads used in the SSN and provide the results and checkpoints on the nuScenes and Lyft dataset. + +## Results and models + +### NuScenes + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | NDS | Download | +| :---------------------------------------------------------------------------------------------: | :-----: | :------: | :------------: | :---: | :---: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](../pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 16.4 | | 35.17 | 49.76 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230725-0817d270.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230725.log.json) | +| [SSN](./ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py) | 2x | 3.6 | | 40.91 | 54.44 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_secfpn_sbn-all_2x16_2x_nus-3d/hv_ssn_secfpn_sbn-all_2x16_2x_nus-3d_20210830_101351-51915986.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_secfpn_sbn-all_2x16_2x_nus-3d/hv_ssn_secfpn_sbn-all_2x16_2x_nus-3d_20210830_101351.log.json) | +| [RegNetX-400MF-SECFPN](../regnet/pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py) | 2x | 16.4 | | 41.15 | 55.20 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230334-53044f32.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/regnet/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d/hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d_20200620_230334.log.json) | +| [RegNetX-400MF-SSN](./ssn_hv_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py) | 2x | 5.1 | | 46.65 | 58.24 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_2x16_2x_nus-3d/hv_ssn_regnet-400mf_secfpn_sbn-all_2x16_2x_nus-3d_20210829_210615-361e5e04.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_2x16_2x_nus-3d/hv_ssn_regnet-400mf_secfpn_sbn-all_2x16_2x_nus-3d_20210829_210615.log.json) | + +### Lyft + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | Private Score | Public Score | Download | +| :---------------------------------------------------------------------------: | :-----: | :------: | :------------: | :-----------: | :----------: | 
:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [SECFPN](../pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py) | 2x | 12.2 | | 13.9 | 14.1 | [model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210517_204807-2518e3de.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d/hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d_20210517_204807.log.json) | +| [SSN](./ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py) | 2x | 8.5 | | 17.5 | 17.5 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d/hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d_20210822_134731-46841b41.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d/hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d_20210822_134731.log.json) | +| [RegNetX-400MF-SSN](./ssn_hv_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py) | 2x | 7.4 | | 17.9 | 18 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_1x16_2x_lyft-3d/hv_ssn_regnet-400mf_secfpn_sbn-all_1x16_2x_lyft-3d_20210829_122825-d93475a1.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_1x16_2x_lyft-3d/hv_ssn_regnet-400mf_secfpn_sbn-all_1x16_2x_lyft-3d_20210829_122825.log.json) | + +Note: + +The main difference of the shape-aware grouping heads with the original SECOND FPN heads is that the former groups objects with similar sizes and shapes together, and design shape-specific heads for each group. Heavier heads (with more convolutions and large strides) are designed for large objects while smaller heads for small objects. Note that there may appear different feature map sizes in the outputs, so an anchor generator tailored to these feature maps is also needed in the implementation. + +Users could try other settings in terms of the head design. Here we basically refer to the implementation [HERE](https://github.com/xinge008/SSN). 
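Concretely, the grouping appears in the configs below as the `tasks` list of the `ShapeAwareHead`: classes with similar shapes share one head, and the larger classes get a heavier head with an extra conv layer and a stride-2 first conv. A trimmed excerpt (taken from the Lyft config later in this patch, with the shared `norm_cfg` omitted for brevity):

```python
# Trimmed from ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py below: small and large
# object classes are routed to different shape-specific heads.
tasks = [
    dict(num_class=2, class_names=['bicycle', 'motorcycle'],
         shared_conv_channels=(64, 64), shared_conv_strides=(1, 1)),
    dict(num_class=3, class_names=['bus', 'other_vehicle', 'truck'],
         shared_conv_channels=(64, 64, 64), shared_conv_strides=(2, 1, 1)),
]
```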
+ +## Citation + +```latex +@inproceedings{zhu2020ssn, + title={SSN: Shape Signature Networks for Multi-class Object Detection from Point Clouds}, + author={Zhu, Xinge and Ma, Yuexin and Wang, Tai and Xu, Yan and Shi, Jianping and Lin, Dahua}, + booktitle={Proceedings of the European Conference on Computer Vision}, + year={2020} +} +``` diff --git a/configs/ssn/metafile.yml b/configs/ssn/metafile.yml new file mode 100755 index 0000000..00b15b9 --- /dev/null +++ b/configs/ssn/metafile.yml @@ -0,0 +1,72 @@ +Collections: + - Name: SSN + Metadata: + Training Techniques: + - AdamW + Training Resources: 8x GeForce GTX 1080 Ti + Architecture: + - Hard Voxelization + Paper: + URL: https://arxiv.org/abs/2004.02774 + Title: 'SSN: Shape Signature Networks for Multi-class Object Detection from Point Clouds' + README: configs/ssn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/dense_heads/shape_aware_head.py#L166 + Version: v0.7.0 + +Models: + - Name: hv_ssn_secfpn_sbn-all_16xb2-2x_nus-3d + In Collection: SSN + Config: configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py + Metadata: + Training Data: nuScenes + Training Memory (GB): 3.6 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 40.91 + NDS: 54.44 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_secfpn_sbn-all_2x16_2x_nus-3d/hv_ssn_secfpn_sbn-all_2x16_2x_nus-3d_20210830_101351-51915986.pth + + - Name: hv_ssn_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d + In Collection: SSN + Config: configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py + Metadata: + Training Data: nuScenes + Training Memory (GB): 5.1 + Results: + - Task: 3D Object Detection + Dataset: nuScenes + Metrics: + mAP: 46.65 + NDS: 58.24 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_2x16_2x_nus-3d/hv_ssn_regnet-400mf_secfpn_sbn-all_2x16_2x_nus-3d_20210829_210615-361e5e04.pth + + - Name: hv_ssn_secfpn_sbn-all_16xb2-2x_lyft-3d + In Collection: SSN + Config: configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py + Metadata: + Training Data: Lyft + Training Memory (GB): 8.5 + Results: + - Task: 3D Object Detection + Dataset: Lyft + Metrics: + Private Score: 17.5 + Public Score: 17.5 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d/hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d_20210822_134731-46841b41.pth + + - Name: hv_ssn_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d + In Collection: SSN + Config: configs/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py + Metadata: + Training Data: Lyft + Training Memory (GB): 7.4 + Results: + - Task: 3D Object Detection + Dataset: Lyft + Metrics: + Private Score: 17.9 + Public Score: 18.0 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_1x16_2x_lyft-3d/hv_ssn_regnet-400mf_secfpn_sbn-all_1x16_2x_lyft-3d_20210829_122825-d93475a1.pth diff --git a/configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py b/configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py new file mode 100755 index 0000000..355a645 --- /dev/null +++ b/configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py @@ -0,0 +1,21 @@ +_base_ = './ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py' +# model settings +model = dict( + type='MVXFasterRCNN', + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, 
bot_mul=1.0), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[64, 160, 384])) +# dataset settings +train_dataloader = dict(batch_size=1, num_workers=2) diff --git a/configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py b/configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py new file mode 100755 index 0000000..cd6056f --- /dev/null +++ b/configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py @@ -0,0 +1,20 @@ +_base_ = './ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py' +# model settings +model = dict( + type='MVXFasterRCNN', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'), + out_indices=(1, 2, 3), + frozen_stages=-1, + strides=(1, 2, 2, 2), + base_channels=64, + stem_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_eval=False, + style='pytorch'), + pts_neck=dict(in_channels=[64, 160, 384])) diff --git a/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py b/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py new file mode 100755 index 0000000..cb3a5d4 --- /dev/null +++ b/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py @@ -0,0 +1,244 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_lyft.py', + '../_base_/datasets/lyft-3d.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] +point_cloud_range = [-100, -100, -5, 100, 100, 3] +# Note that the order of class names should be consistent with +# the following anchors' order +class_names = [ + 'bicycle', 'motorcycle', 'pedestrian', 'animal', 'car', + 'emergency_vehicle', 'bus', 'other_vehicle', 'truck' +] +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + 
batch_size=2, num_workers=4, dataset=dict(pipeline=train_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# model settings +model = dict( + data_preprocessor=dict( + voxel_layer=dict(point_cloud_range=[-100, -100, -5, 100, 100, 3])), + pts_voxel_encoder=dict( + feat_channels=[32, 64], + point_cloud_range=[-100, -100, -5, 100, 100, 3]), + pts_middle_encoder=dict(output_shape=[800, 800]), + pts_neck=dict( + _delete_=True, + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + _delete_=True, + type='ShapeAwareHead', + num_classes=9, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGeneratorPerCls', + ranges=[[-100, -100, -1.0709302, 100, 100, -1.0709302], + [-100, -100, -1.3220503, 100, 100, -1.3220503], + [-100, -100, -0.9122268, 100, 100, -0.9122268], + [-100, -100, -1.8012227, 100, 100, -1.8012227], + [-100, -100, -1.0715024, 100, 100, -1.0715024], + [-100, -100, -0.8871424, 100, 100, -0.8871424], + [-100, -100, -0.3519405, 100, 100, -0.3519405], + [-100, -100, -0.6276341, 100, 100, -0.6276341], + [-100, -100, -0.3033737, 100, 100, -0.3033737]], + sizes=[ + [1.76, 0.63, 1.44], # bicycle + [2.35, 0.96, 1.59], # motorcycle + [0.80, 0.76, 1.76], # pedestrian + [0.73, 0.35, 0.50], # animal + [4.75, 1.92, 1.71], # car + [6.52, 2.42, 2.34], # emergency vehicle + [12.70, 2.92, 3.42], # bus + [8.17, 2.75, 3.20], # other vehicle + [10.24, 2.84, 3.44] # truck + ], + custom_values=[], + rotations=[0, 1.57], + reshape_out=False), + tasks=[ + dict( + num_class=2, + class_names=['bicycle', 'motorcycle'], + shared_conv_channels=(64, 64), + shared_conv_strides=(1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)), + dict( + num_class=2, + class_names=['pedestrian', 'animal'], + shared_conv_channels=(64, 64), + shared_conv_strides=(1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)), + dict( + num_class=2, + class_names=['car', 'emergency_vehicle'], + shared_conv_channels=(64, 64, 64), + shared_conv_strides=(2, 1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)), + dict( + num_class=3, + class_names=['bus', 'other_vehicle', 'truck'], + shared_conv_channels=(64, 64, 64), + shared_conv_strides=(2, 1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)) + ], + assign_per_class=True, + diff_rad_by_sin=True, + dir_offset=-0.7854, # -pi/4 + dir_limit_offset=0, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + _delete_=True, + pts=dict( + assigner=[ + dict( # bicycle + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # motorcycle + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # pedestrian + type='Max3DIoUAssigner', + 
iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # animal + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + dict( # emergency vehicle + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # bus + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + dict( # other vehicle + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # truck + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1) + ], + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False))) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=32) diff --git a/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py b/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py new file mode 100755 index 0000000..8d5e67f --- /dev/null +++ b/configs/ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py @@ -0,0 +1,256 @@ +_base_ = [ + '../_base_/models/pointpillars_hv_fpn_nus.py', + '../_base_/datasets/nus-3d.py', + '../_base_/schedules/schedule-2x.py', + '../_base_/default_runtime.py', +] +# Note that the order of class names should be consistent with +# the following anchors' order +point_cloud_range = [-50, -50, -5, 50, 50, 3] +class_names = [ + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier', 'car', + 'truck', 'trailer', 'bus', 'construction_vehicle' +] +backend_args = None + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), 
+ dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=2, + num_workers=4, + dataset=dict(pipeline=train_pipeline, metainfo=dict(classes=class_names))) +test_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, metainfo=dict(classes=class_names))) + +# model settings +model = dict( + data_preprocessor=dict(voxel_layer=dict(max_num_points=20)), + pts_voxel_encoder=dict(feat_channels=[64, 64]), + pts_neck=dict( + _delete_=True, + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + _delete_=True, + type='ShapeAwareHead', + num_classes=10, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGeneratorPerCls', + ranges=[[-50, -50, -1.67339111, 50, 50, -1.67339111], + [-50, -50, -1.71396371, 50, 50, -1.71396371], + [-50, -50, -1.61785072, 50, 50, -1.61785072], + [-50, -50, -1.80984986, 50, 50, -1.80984986], + [-50, -50, -1.76396500, 50, 50, -1.76396500], + [-50, -50, -1.80032795, 50, 50, -1.80032795], + [-50, -50, -1.74440365, 50, 50, -1.74440365], + [-50, -50, -1.68526504, 50, 50, -1.68526504], + [-50, -50, -1.80673031, 50, 50, -1.80673031], + [-50, -50, -1.64824291, 50, 50, -1.64824291]], + sizes=[ + [1.68452161, 0.60058911, 1.27192197], # bicycle + [2.09973778, 0.76279481, 1.44403034], # motorcycle + [0.72564370, 0.66344886, 1.75748069], # pedestrian + [0.40359262, 0.39694519, 1.06232151], # traffic cone + [0.48578221, 2.49008838, 0.98297065], # barrier + [4.60718145, 1.95017717, 1.72270761], # car + [6.73778078, 2.45609390, 2.73004906], # truck + [12.01320693, 2.87427237, 3.81509561], # trailer + [11.1885991, 2.94046906, 3.47030982], # bus + [6.38352896, 2.73050468, 3.13312415] # construction vehicle + ], + custom_values=[0, 0], + rotations=[0, 1.57], + reshape_out=False), + tasks=[ + dict( + num_class=2, + class_names=['bicycle', 'motorcycle'], + shared_conv_channels=(64, 64), + shared_conv_strides=(1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)), + dict( + num_class=1, + class_names=['pedestrian'], + shared_conv_channels=(64, 64), + shared_conv_strides=(1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)), + dict( + num_class=2, + class_names=['traffic_cone', 'barrier'], + shared_conv_channels=(64, 64), + shared_conv_strides=(1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)), + dict( + num_class=1, + class_names=['car'], + shared_conv_channels=(64, 64, 64), + shared_conv_strides=(2, 1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)), + dict( + num_class=4, + class_names=[ + 'truck', 'trailer', 'bus', 'construction_vehicle' + ], + shared_conv_channels=(64, 64, 64), + shared_conv_strides=(2, 1, 1), + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01)) + ], + assign_per_class=True, + diff_rad_by_sin=True, + dir_offset=-0.7854, # -pi/4 + dir_limit_offset=0, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=9), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + 
type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + _delete_=True, + pts=dict( + assigner=[ + dict( # bicycle + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # motorcycle + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + dict( # pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # traffic cone + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # barrier + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + dict( # truck + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # trailer + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # bus + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # construction vehicle + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1) + ], + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + pos_weight=-1, + debug=False))) diff --git a/configs/votenet/README.md b/configs/votenet/README.md new file mode 100755 index 0000000..21fe6db --- /dev/null +++ b/configs/votenet/README.md @@ -0,0 +1,68 @@ +# Deep Hough Voting for 3D Object Detection in Point Clouds + +> [Deep Hough Voting for 3D Object Detection in Point Clouds](https://arxiv.org/abs/1904.09664) + + + +## Abstract + +Current 3D object detection methods are heavily influenced by 2D detectors. In order to leverage architectures in 2D detectors, they often convert 3D point clouds to regular grids (i.e., to voxel grids or to bird's eye view images), or rely on detection in 2D images to propose 3D boxes. Few works have attempted to directly detect objects in point clouds. In this work, we return to first principles to construct a 3D detection pipeline for point cloud data and as generic as possible. However, due to the sparse nature of the data -- samples from 2D manifolds in 3D space -- we face a major challenge when directly predicting bounding box parameters from scene points: a 3D object centroid can be far from any surface point thus hard to regress accurately in one step. To address the challenge, we propose VoteNet, an end-to-end 3D object detection network based on a synergy of deep point set networks and Hough voting. Our model achieves state-of-the-art 3D detection on two large datasets of real 3D scans, ScanNet and SUN RGB-D with a simple design, compact model size and high efficiency. 
Remarkably, VoteNet outperforms previous methods by using purely geometric information without relying on color images. + +
    + +## Introduction + +We implement VoteNet and provide the result and checkpoints on ScanNet and SUNRGBD datasets. + +## Results and models + +### ScanNet + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :----------------------------------------: | :-----: | :------: | :------------: | :-----: | :----: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [PointNet++](./votenet_8xb8_scannet-3d.py) | 3x | 4.1 | | 62.34 | 40.82 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/votenet/votenet_8x8_scannet-3d-18class/votenet_8x8_scannet-3d-18class_20210823_234503-cf8134fa.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/votenet/votenet_8x8_scannet-3d-18class/votenet_8x8_scannet-3d-18class_20210823_234503.log.json) | + +### SUNRGBD + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :-----------------------------------------: | :-----: | :------: | :------------: | :-----: | :----: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [PointNet++](./votenet_8xb16_sunrgbd-3d.py) | 3x | 8.1 | | 59.78 | 35.77 | [model](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/votenet/votenet_16x8_sunrgbd-3d-10class/votenet_16x8_sunrgbd-3d-10class_20210820_162823-bf11f014.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.0.0_models/votenet/votenet_16x8_sunrgbd-3d-10class/votenet_16x8_sunrgbd-3d-10class_20210820_162823.log.json) | + +**Notice**: If your current mmdetection3d version >= 0.6.0, and you are using the checkpoints downloaded from the above links or using checkpoints trained with mmdetection3d version \< 0.6.0, the checkpoints have to be first converted via [tools/model_converters/convert_votenet_checkpoints.py](../../tools/model_converters/convert_votenet_checkpoints.py): + +``` +python ./tools/model_converters/convert_votenet_checkpoints.py ${ORIGINAL_CHECKPOINT_PATH} --out=${NEW_CHECKPOINT_PATH} +``` + +Then you can use the converted checkpoints following [get_started.md](../../docs/en/get_started.md). + +## Indeterminism + +Since test data preparation randomly downsamples the points, and the test script uses fixed random seeds while the random seeds of validation in training are not fixed, the test results may be slightly different from the results reported above. + +## IoU loss + +Adding IoU loss (simply = 1-IoU) boosts VoteNet's performance. 
To use IoU loss, add this loss term to the config file: + +```python +iou_loss=dict(type='AxisAlignedIoULoss', reduction='sum', loss_weight=10.0 / 3.0) +``` + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :-----------------------------------------------------: | :-----: | :------: | :------------: | :-----: | :----: | :------: | +| [PointNet++](./votenet_head-iouloss_8xb8_scannet-3d.py) | 3x | 4.1 | | 63.81 | 44.21 | / | + +For now, we only support calculating IoU loss for axis-aligned bounding boxes since the CUDA op of general 3D IoU calculation does not implement the backward method. Therefore, IoU loss can only be used for ScanNet dataset for now. + +## Citation + +```latex +@inproceedings{qi2019deep, + author = {Qi, Charles R and Litany, Or and He, Kaiming and Guibas, Leonidas J}, + title = {Deep Hough Voting for 3D Object Detection in Point Clouds}, + booktitle = {Proceedings of the IEEE International Conference on Computer Vision}, + year = {2019} +} +``` diff --git a/configs/votenet/metafile.yml b/configs/votenet/metafile.yml new file mode 100755 index 0000000..7353a63 --- /dev/null +++ b/configs/votenet/metafile.yml @@ -0,0 +1,59 @@ +Collections: + - Name: VoteNet + Metadata: + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - PointNet++ + Paper: + URL: https://arxiv.org/abs/1904.09664 + Title: 'Deep Hough Voting for 3D Object Detection in Point Clouds' + README: configs/votenet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/votenet.py#L10 + Version: v0.5.0 + +Models: + - Name: votenet_8xb16_sunrgbd-3d.py + In Collection: VoteNet + Config: configs/votenet/votenet_8xb16_sunrgbd-3d.py + Metadata: + Training Data: SUNRGBD + Training Memory (GB): 8.1 + Results: + - Task: 3D Object Detection + Dataset: SUNRGBD + Metrics: + AP@0.25: 59.78 + AP@0.5: 35.77 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/votenet/votenet_16x8_sunrgbd-3d-10class/votenet_16x8_sunrgbd-3d-10class_20210820_162823-bf11f014.pth + + - Name: votenet_8xb8_scannet-3d.py + In Collection: VoteNet + Config: configs/votenet/votenet_8xb8_scannet-3d.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 4.1 + Results: + - Task: 3D Object Detection + Dataset: ScanNet + Metrics: + AP@0.25: 62.34 + AP@0.5: 40.82 + Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/votenet/votenet_8x8_scannet-3d-18class/votenet_8x8_scannet-3d-18class_20210823_234503-cf8134fa.pth + + - Name: votenet_iouloss_8x8_scannet-3d-18class + In Collection: VoteNet + Config: configs/votenet/votenet_head-iouloss_8xb8_scannet-3d.py + Metadata: + Training Data: ScanNet + Training Memory (GB): 4.1 + Architecture: + - IoU Loss + Results: + - Task: 3D Object Detection + Dataset: ScanNet + Metrics: + AP@0.25: 63.81 + AP@0.5: 44.21 diff --git a/configs/votenet/votenet_8xb16_sunrgbd-3d.py b/configs/votenet/votenet_8xb16_sunrgbd-3d.py new file mode 100755 index 0000000..9472910 --- /dev/null +++ b/configs/votenet/votenet_8xb16_sunrgbd-3d.py @@ -0,0 +1,27 @@ +# TODO refactor the config of sunrgbd +_base_ = [ + '../_base_/datasets/sunrgbd-3d.py', '../_base_/models/votenet.py', + '../_base_/schedules/schedule-3x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + bbox_head=dict( + num_classes=10, + bbox_coder=dict( + type='PartialBinBasedBBoxCoder', + num_sizes=10, + num_dir_bins=12, + with_rot=True, + mean_sizes=[ + [2.114256, 1.620300, 0.927272], [0.791118, 1.279516, 
0.718182], + [0.923508, 1.867419, 0.845495], [0.591958, 0.552978, 0.827272], + [0.699104, 0.454178, 0.75625], [0.69519, 1.346299, 0.736364], + [0.528526, 1.002642, 1.172878], [0.500618, 0.632163, 0.683424], + [0.404671, 1.071108, 1.688889], [0.76584, 1.398258, 0.472728] + ]), + )) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/votenet/votenet_8xb8_scannet-3d.py b/configs/votenet/votenet_8xb8_scannet-3d.py new file mode 100755 index 0000000..e298cba --- /dev/null +++ b/configs/votenet/votenet_8xb8_scannet-3d.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/scannet-3d.py', '../_base_/models/votenet.py', + '../_base_/schedules/schedule-3x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + bbox_head=dict( + num_classes=18, + bbox_coder=dict( + type='PartialBinBasedBBoxCoder', + num_sizes=18, + num_dir_bins=1, + with_rot=False, + mean_sizes=[[0.76966727, 0.8116021, 0.92573744], + [1.876858, 1.8425595, 1.1931566], + [0.61328, 0.6148609, 0.7182701], + [1.3955007, 1.5121545, 0.83443564], + [0.97949594, 1.0675149, 0.6329687], + [0.531663, 0.5955577, 1.7500148], + [0.9624706, 0.72462326, 1.1481868], + [0.83221924, 1.0490936, 1.6875663], + [0.21132214, 0.4206159, 0.5372846], + [1.4440073, 1.8970833, 0.26985747], + [1.0294262, 1.4040797, 0.87554324], + [1.3766412, 0.65521795, 1.6813129], + [0.6650819, 0.71111923, 1.298853], + [0.41999173, 0.37906948, 1.7513971], + [0.59359556, 0.5912492, 0.73919016], + [0.50867593, 0.50656086, 0.30136237], + [1.1511526, 1.0546296, 0.49706793], + [0.47535285, 0.49249494, 0.5802117]]))) + +default_hooks = dict(logger=dict(type='LoggerHook', interval=30)) +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/votenet/votenet_head-iouloss_8xb8_scannet-3d.py b/configs/votenet/votenet_head-iouloss_8xb8_scannet-3d.py new file mode 100755 index 0000000..58d49a5 --- /dev/null +++ b/configs/votenet/votenet_head-iouloss_8xb8_scannet-3d.py @@ -0,0 +1,8 @@ +_base_ = ['./votenet_8xb8_scannet-3d.py'] + +# model settings, add iou loss +model = dict( + bbox_head=dict( + iou_loss=dict( + type='AxisAlignedIoULoss', reduction='sum', loss_weight=10.0 / + 3.0))) diff --git a/demo/mono_det_demo.py b/demo/mono_det_demo.py new file mode 100755 index 0000000..19ab7ae --- /dev/null +++ b/demo/mono_det_demo.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from argparse import ArgumentParser + +import mmcv + +from mmdet3d.apis import inference_mono_3d_detector, init_model +from mmdet3d.registry import VISUALIZERS + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('img', help='image file') + parser.add_argument('ann', help='ann file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--cam-type', + type=str, + default='CAM_BACK', + help='choose camera type to inference') + parser.add_argument( + '--score-thr', type=float, default=0.30, help='bbox score threshold') + parser.add_argument( + '--out-dir', type=str, default='demo', help='dir to save results') + parser.add_argument( + '--show', + action='store_true', + help='show online visualization results') + parser.add_argument( + '--snapshot', + action='store_true', + help='whether to save online visualization results') + args = parser.parse_args() + return args + + +def main(args): + # build the model from a config file and a checkpoint file + model = init_model(args.config, args.checkpoint, device=args.device) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.dataset_meta = model.dataset_meta + + # test a single image + result = inference_mono_3d_detector(model, args.img, args.ann, + args.cam_type) + + img = mmcv.imread(args.img) + img = mmcv.imconvert(img, 'bgr', 'rgb') + + data_input = dict(img=img) + # show the results + visualizer.add_datasample( + 'result', + data_input, + data_sample=result, + draw_gt=False, + show=args.show, + wait_time=0, + out_file=args.out_dir, + pred_score_thr=args.score_thr, + vis_task='mono_det') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/demo/multi_modality_demo.py b/demo/multi_modality_demo.py new file mode 100755 index 0000000..c5486bf --- /dev/null +++ b/demo/multi_modality_demo.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
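+# Example usage (illustrative only; all paths below are placeholders):
+#   python demo/multi_modality_demo.py POINTS.bin IMAGE.jpg INFOS.pkl \
+#       CONFIG.py CHECKPOINT.pth --cam-type CAM_FRONT --show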
+from argparse import ArgumentParser + +import mmcv + +from mmdet3d.apis import inference_multi_modality_detector, init_model +from mmdet3d.registry import VISUALIZERS + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('pcd', help='Point cloud file') + parser.add_argument('img', help='image file') + parser.add_argument('ann', help='ann file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--cam-type', + type=str, + default='CAM_FRONT', + help='choose camera type to inference') + parser.add_argument( + '--score-thr', type=float, default=0.0, help='bbox score threshold') + parser.add_argument( + '--out-dir', type=str, default='demo', help='dir to save results') + parser.add_argument( + '--show', + action='store_true', + help='show online visualization results') + parser.add_argument( + '--snapshot', + action='store_true', + help='whether to save online visualization results') + args = parser.parse_args() + return args + + +def main(args): + # build the model from a config file and a checkpoint file + model = init_model(args.config, args.checkpoint, device=args.device) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.dataset_meta = model.dataset_meta + + # test a single image and point cloud sample + result, data = inference_multi_modality_detector(model, args.pcd, args.img, + args.ann, args.cam_type) + points = data['inputs']['points'] + img = mmcv.imread(args.img) + img = mmcv.imconvert(img, 'bgr', 'rgb') + data_input = dict(points=points, img=img) + + # show the results + visualizer.add_datasample( + 'result', + data_input, + data_sample=result, + draw_gt=False, + show=args.show, + wait_time=0, + out_file=args.out_dir, + pred_score_thr=args.score_thr, + vis_task='multi-modality_det') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/demo/pcd_demo.py b/demo/pcd_demo.py new file mode 100755 index 0000000..4e5cd51 --- /dev/null +++ b/demo/pcd_demo.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser + +from mmdet3d.apis import inference_detector, init_model +from mmdet3d.registry import VISUALIZERS + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('pcd', help='Point cloud file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--score-thr', type=float, default=0.0, help='bbox score threshold') + parser.add_argument( + '--out-dir', type=str, default='demo', help='dir to save results') + parser.add_argument( + '--show', + action='store_true', + help='show online visualization results') + parser.add_argument( + '--snapshot', + action='store_true', + help='whether to save online visualization results') + args = parser.parse_args() + return args + + +def main(args): + # TODO: Support inference of point cloud numpy file. 
+ # build the model from a config file and a checkpoint file + model = init_model(args.config, args.checkpoint, device=args.device) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.dataset_meta = model.dataset_meta + + # test a single point cloud sample + result, data = inference_detector(model, args.pcd) + points = data['inputs']['points'] + data_input = dict(points=points) + + # show the results + visualizer.add_datasample( + 'result', + data_input, + data_sample=result, + draw_gt=False, + show=args.show, + wait_time=0, + out_file=args.out_dir, + pred_score_thr=args.score_thr, + vis_task='lidar_det') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/demo/pcd_seg_demo.py b/demo/pcd_seg_demo.py new file mode 100755 index 0000000..2045fd6 --- /dev/null +++ b/demo/pcd_seg_demo.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser + +from mmdet3d.apis import inference_segmentor, init_model +from mmdet3d.registry import VISUALIZERS + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('pcd', help='Point cloud file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--out-dir', type=str, default='demo', help='dir to save results') + parser.add_argument( + '--show', + action='store_true', + help='show online visualization results') + parser.add_argument( + '--snapshot', + action='store_true', + help='whether to save online visualization results') + args = parser.parse_args() + return args + + +def main(args): + # build the model from a config file and a checkpoint file + model = init_model(args.config, args.checkpoint, device=args.device) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.dataset_meta = model.dataset_meta + + # test a single point cloud sample + result, data = inference_segmentor(model, args.pcd) + points = data['inputs']['points'] + data_input = dict(points=points) + # show the results + visualizer.add_datasample( + 'result', + data_input, + data_sample=result, + draw_gt=False, + show=args.show, + wait_time=0, + out_file=args.out_dir, + vis_task='lidar_seg') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100755 index 0000000..dd6ef6d --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,40 @@ +ARG PYTORCH="1.9.0" +ARG CUDA="11.1" +ARG CUDNN="8" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" \ + TORCH_NVCC_FLAGS="-Xfatbin -compress-all" \ + CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" \ + FORCE_CUDA="1" + +# Avoid Public GPG key error +# https://github.com/NVIDIA/nvidia-docker/issues/1631 +RUN rm /etc/apt/sources.list.d/cuda.list \ + && rm /etc/apt/sources.list.d/nvidia-ml.list \ + && apt-key del 7fa2af80 \ + && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \ + && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + +# (Optional, use Mirror to speed up downloads) +# RUN sed -i 's/http:\/\/archive.ubuntu.com\/ubuntu\//http:\/\/mirrors.aliyun.com\/ubuntu\//g' /etc/apt/sources.list && \ +# pip config set global.index-url 
https://pypi.tuna.tsinghua.edu.cn/simple
+
+# Install the required packages
+RUN apt-get update \
+    && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install MMEngine, MMCV and MMDetection
+RUN pip install openmim && \
+    mim install "mmengine" "mmcv>=2.0.0rc4" "mmdet>=3.0.0"
+
+# Install MMDetection3D
+RUN conda clean --all \
+    && git clone https://github.com/open-mmlab/mmdetection3d.git -b dev-1.x /mmdetection3d \
+    && cd /mmdetection3d \
+    && pip install --no-cache-dir -e .
+
+WORKDIR /mmdetection3d
diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile
new file mode 100755
index 0000000..c39885e
--- /dev/null
+++ b/docker/serve/Dockerfile
@@ -0,0 +1,65 @@
+ARG PYTORCH="1.9.0"
+ARG CUDA="11.1"
+ARG CUDNN="8"
+FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
+
+ARG MMCV="2.0.0rc4"
+ARG MMDET="3.0.0"
+ARG MMDET3D="1.1.0"
+
+ENV PYTHONUNBUFFERED TRUE
+
+# Avoid Public GPG key error
+# https://github.com/NVIDIA/nvidia-docker/issues/1631
+RUN rm /etc/apt/sources.list.d/cuda.list \
+    && rm /etc/apt/sources.list.d/nvidia-ml.list \
+    && apt-key del 7fa2af80 \
+    && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \
+    && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
+
+# (Optional, use Mirror to speed up downloads)
+# RUN sed -i 's/http:\/\/archive.ubuntu.com\/ubuntu\//http:\/\/mirrors.aliyun.com\/ubuntu\//g' /etc/apt/sources.list
+
+# Install the required packages
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
+    ca-certificates \
+    g++ \
+    openjdk-11-jre-headless \
+    # MMDet3D Requirements
+    ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
+    && rm -rf /var/lib/apt/lists/*
+
+ENV PATH="/opt/conda/bin:$PATH" \
+    FORCE_CUDA="1"
+
+# TORCHSERVE
+RUN pip install torchserve torch-model-archiver
+
+# MMLAB
+ARG PYTORCH
+ARG CUDA
+RUN pip install openmim
+RUN mim install mmengine
+RUN mim install mmcv==${MMCV}
+RUN mim install mmdet==${MMDET}
+RUN mim install mmdet3d==${MMDET3D}
+
+RUN useradd -m model-server \
+    && mkdir -p /home/model-server/tmp
+
+COPY entrypoint.sh /usr/local/bin/entrypoint.sh
+
+RUN chmod +x /usr/local/bin/entrypoint.sh \
+    && chown -R model-server /home/model-server
+
+COPY config.properties /home/model-server/config.properties
+RUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store
+
+EXPOSE 8080 8081 8082
+
+USER model-server
+WORKDIR /home/model-server
+ENV TEMP=/home/model-server/tmp
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
+CMD ["serve"]
diff --git a/docker/serve/config.properties b/docker/serve/config.properties
new file mode 100755
index 0000000..efb9c47
--- /dev/null
+++ b/docker/serve/config.properties
@@ -0,0 +1,5 @@
+inference_address=http://0.0.0.0:8080
+management_address=http://0.0.0.0:8081
+metrics_address=http://0.0.0.0:8082
+model_store=/home/model-server/model-store
+load_models=all
diff --git a/docker/serve/entrypoint.sh b/docker/serve/entrypoint.sh
new file mode 100755
index 0000000..41ba00b
--- /dev/null
+++ b/docker/serve/entrypoint.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+
+if [[ "$1" = "serve" ]]; then
+    shift 1
+    torchserve --start --ts-config /home/model-server/config.properties
+else
+    eval "$@"
+fi
+
+# prevent docker exit
+tail -f /dev/null
diff --git a/docs/en/Makefile b/docs/en/Makefile new file mode 100755 index 0000000..d4bb2cb --- /dev/null +++ b/docs/en/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/en/_static/css/readthedocs.css b/docs/en/_static/css/readthedocs.css new file mode 100755 index 0000000..cc61ab8 --- /dev/null +++ b/docs/en/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../image/mmdet3d-logo.png"); + background-size: 182.5px 40px; + height: 40px; + width: 182.5px; +} diff --git a/docs/en/advanced_guides/customize_dataset.md b/docs/en/advanced_guides/customize_dataset.md new file mode 100755 index 0000000..2b57fa0 --- /dev/null +++ b/docs/en/advanced_guides/customize_dataset.md @@ -0,0 +1,503 @@ +# Customize Datasets + +In this note, you will know how to train and test predefined models with customized datasets. + +The basic steps are as below: + +1. Prepare data +2. Prepare a config +3. Train, test and inference models on the customized dataset + +## Data Preparation + +The ideal situation is that we can reorganize the customized raw data and convert the annotation format into KITTI style. However, considering some calibration files and 3D annotations in KITTI format are difficult to obtain for customized datasets, we introduce the basic data format in the doc. + +### Basic Data Format + +#### Point cloud Format + +Currently, we only support `.bin` format point cloud for training and inference. Before training on your own datasets, you need to convert your point cloud files with other formats to `.bin` files. The common point cloud data formats include `.pcd` and `.las`, we list some open-source tools for reference. + +1. Convert `.pcd` to `.bin`: https://github.com/DanielPollithy/pypcd + +- You can install `pypcd` with the following command: + + ```bash + pip install git+https://github.com/DanielPollithy/pypcd.git + ``` + +- You can use the following script to read the `.pcd` file and convert it to `.bin` format for saving: + + ```python + import numpy as np + from pypcd import pypcd + + pcd_data = pypcd.PointCloud.from_path('point_cloud_data.pcd') + points = np.zeros([pcd_data.width, 4], dtype=np.float32) + points[:, 0] = pcd_data.pc_data['x'].copy() + points[:, 1] = pcd_data.pc_data['y'].copy() + points[:, 2] = pcd_data.pc_data['z'].copy() + points[:, 3] = pcd_data.pc_data['intensity'].copy().astype(np.float32) + with open('point_cloud_data.bin', 'wb') as f: + f.write(points.tobytes()) + ``` + +2. Convert `.las` to `.bin`: The common conversion path is `.las -> .pcd -> .bin`, and the conversion path `.las -> .pcd` can be achieved through [this tool](https://github.com/Hitachi-Automotive-And-Industry-Lab/semantic-segmentation-editor). + +#### Label Format + +The most basic information: 3D bounding box and category label of each scene need to be contained in the `.txt` annotation file. 
Each line represents a 3D box in a certain scene as follow: + +``` +# format: [x, y, z, dx, dy, dz, yaw, category_name] +1.23 1.42 0.23 3.96 1.65 1.55 1.56 Car +3.51 2.15 0.42 1.05 0.87 1.86 1.23 Pedestrian +... +``` + +**Note**: Currently we only support KITTI Metric evaluation for customized datasets evaluation. + +The 3D Box should be stored in unified 3D coordinates. + +#### Calibration Format + +For the point cloud data collected by each LiDAR, they are usually fused and converted to a certain LiDAR coordinate. So typically the calibration information file should contain the intrinsic matrix of each camera and the transformation extrinsic matrix from the LiDAR to each camera in `.txt` calibration file, while `Px` represents the intrinsic matrix of `camera_x` and `lidar2camx` represents the transformation extrinsic matrix from the `lidar` to `camera_x`. + +``` +P0 +P1 +P2 +P3 +P4 +... +lidar2cam0 +lidar2cam1 +lidar2cam2 +lidar2cam3 +lidar2cam4 +... +``` + +### Raw Data Structure + +#### LiDAR-Based 3D Detection + +The raw data for LiDAR-based 3D object detection are typically organized as follows, where `ImageSets` contains split files indicating which files belong to training/validation set, `points` includes point cloud data which are supposed to be stored in `.bin` format and `labels` includes label files for 3D detection. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── custom +│ │ ├── ImageSets +│ │ │ ├── train.txt +│ │ │ ├── val.txt +│ │ ├── points +│ │ │ ├── 000000.bin +│ │ │ ├── 000001.bin +│ │ │ ├── ... +│ │ ├── labels +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +``` + +#### Vision-Based 3D Detection + +The raw data for vision-based 3D object detection are typically organized as follows, where `ImageSets` contains split files indicating which files belong to training/validation set, `images` contains the images from different cameras, for example, images from `camera_x` need to be placed in `images/images_x`, `calibs` contains calibration information files which store the camera intrinsic matrix of each camera, and `labels` includes label files for 3D detection. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── custom +│ │ ├── ImageSets +│ │ │ ├── train.txt +│ │ │ ├── val.txt +│ │ ├── calibs +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +│ │ ├── images +│ │ │ ├── images_0 +│ │ │ │ ├── 000000.png +│ │ │ │ ├── 000001.png +│ │ │ │ ├── ... +│ │ │ ├── images_1 +│ │ │ ├── images_2 +│ │ │ ├── ... +│ │ ├── labels +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +``` + +#### Multi-Modality 3D Detection + +The raw data for multi-modality 3D object detection are typically organized as follows. Different from vision-based 3D object detection, calibration information files in `calibs` store the camera intrinsic matrix of each camera and extrinsic matrix. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── custom +│ │ ├── ImageSets +│ │ │ ├── train.txt +│ │ │ ├── val.txt +│ │ ├── calibs +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +│ │ ├── points +│ │ │ ├── 000000.bin +│ │ │ ├── 000001.bin +│ │ │ ├── ... +│ │ ├── images +│ │ │ ├── images_0 +│ │ │ │ ├── 000000.png +│ │ │ │ ├── 000001.png +│ │ │ │ ├── ... +│ │ │ ├── images_1 +│ │ │ ├── images_2 +│ │ │ ├── ... +│ │ ├── labels +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... 
+```
+
+#### LiDAR-Based 3D Semantic Segmentation
+
+The raw data for LiDAR-based 3D semantic segmentation are typically organized as follows, where `ImageSets` contains split files indicating which files belong to the training/validation set, `points` includes the point cloud data and `semantic_mask` includes the point-level labels.
+
+```
+mmdetection3d
+├── mmdet3d
+├── tools
+├── configs
+├── data
+│   ├── custom
+│   │   ├── ImageSets
+│   │   │   ├── train.txt
+│   │   │   ├── val.txt
+│   │   ├── points
+│   │   │   ├── 000000.bin
+│   │   │   ├── 000001.bin
+│   │   │   ├── ...
+│   │   ├── semantic_mask
+│   │   │   ├── 000000.bin
+│   │   │   ├── 000001.bin
+│   │   │   ├── ...
+```
+
+### Data Converter
+
+Once you have prepared the raw data following our instructions, you can directly use the following command to generate training/validation information files.
+
+```bash
+python tools/create_data.py custom --root-path ./data/custom --out-dir ./data/custom --extra-tag custom
+```
+
+## An example of customized dataset
+
+Once we finish data preparation, we can create a new dataset in `mmdet3d/datasets/my_dataset.py` to load the data.
+
+```python
+import numpy as np
+
+from mmdet3d.registry import DATASETS
+from mmdet3d.structures import LiDARInstance3DBoxes
+from .det3d_dataset import Det3DDataset
+
+
+@DATASETS.register_module()
+class MyDataset(Det3DDataset):
+
+    # replace with all the classes in the customized pkl info file
+    METAINFO = {
+        'classes': ('Pedestrian', 'Cyclist', 'Car')
+    }
+
+    def parse_ann_info(self, info):
+        """Process the `instances` in data info to `ann_info`.
+
+        Args:
+            info (dict): Data information of single data sample.
+
+        Returns:
+            dict: Annotation information consists of the following keys:
+
+                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
+                  3D ground truth bboxes.
+                - gt_labels_3d (np.ndarray): Labels of ground truths.
+        """
+        ann_info = super().parse_ann_info(info)
+        if ann_info is None:
+            ann_info = dict()
+            # empty instance
+            ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32)
+            ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64)
+
+        # filter the gt classes not used in training
+        ann_info = self._remove_dontcare(ann_info)
+        gt_bboxes_3d = LiDARInstance3DBoxes(ann_info['gt_bboxes_3d'])
+        ann_info['gt_bboxes_3d'] = gt_bboxes_3d
+        return ann_info
+```
+
+After the data pre-processing, there are two steps for users to train on the customized new dataset:
+
+1. Modify the config file to use the customized dataset.
+2. Check the annotations of the customized dataset.
+
+Here we take training PointPillars on the customized dataset as an example:
+
+### Prepare a config
+
+Here we demonstrate a config sample for pure point cloud training.
+ +#### Prepare dataset config + +In `configs/_base_/datasets/custom.py`: + +```python +# dataset settings +dataset_type = 'MyDataset' +data_root = 'data/custom/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] # replace with your dataset class +point_cloud_range = [0, -40, -3, 70.4, 40, 1] # adjust according to your dataset +input_modality = dict(use_lidar=True, use_camera=False) +metainfo = dict(classes=class_names) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, # replace with your point cloud data dimension + use_dim=4), # replace with the actual dimension used in training and inference + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, # replace with your point cloud data dimension + use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']) +] +# construct a pipeline for data and gt loading in show function +eval_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']), +] +train_dataloader = dict( + batch_size=6, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='custom_infos_train.pkl', # specify your training pkl info + data_prefix=dict(pts='points'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + box_type_3d='LiDAR'))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='points'), + ann_file='custom_infos_val.pkl', # specify your validation pkl info + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'custom_infos_val.pkl', # specify your validation pkl info + metric='bbox') +``` + +#### Prepare model config + +For voxel-based detectors such as SECOND, PointPillars and CenterPoint, the point cloud range and voxel size should be adjusted according to your dataset. +Theoretically, `voxel_size` is linked to the setting of `point_cloud_range`. Setting a smaller `voxel_size` will increase the voxel num and the corresponding memory consumption. In addition, the following issues need to be noted: + +If the `point_cloud_range` and `voxel_size` are set to be `[0, -40, -3, 70.4, 40, 1]` and `[0.05, 0.05, 0.1]` respectively, then the shape of intermediate feature map should be `[(1-(-3))/0.1+1, (40-(-40))/0.05, (70.4-0)/0.05]=[41, 1600, 1408]`. 
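+As a quick sanity check, this arithmetic can be reproduced with a few lines of Python. The snippet below is only an illustration of the computation above and is not part of any config file.
+
+```python
+# Minimal sketch of the shape arithmetic; `point_cloud_range` and
+# `voxel_size` are the example values from this section.
+point_cloud_range = [0, -40, -3, 70.4, 40, 1]
+voxel_size = [0.05, 0.05, 0.1]
+
+# number of voxels along x, y and z
+grid_size = [
+    round((point_cloud_range[i + 3] - point_cloud_range[i]) / voxel_size[i])
+    for i in range(3)
+]
+
+# intermediate feature map shape in [z + 1, y, x] order, as in the example
+print([grid_size[2] + 1, grid_size[1], grid_size[0]])  # [41, 1600, 1408]
+```
+
+Applying the same arithmetic to the PointPillars settings shown below (`voxel_size=[0.16, 0.16, 4]` and `point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1]`) gives the `output_shape=[496, 432]` (in `[y, x]` order) passed to `middle_encoder`.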
When changing `point_cloud_range`, remember to change the shape of intermediate feature map in `middle_encoder` according to the `voxel_size`. + +Regarding the setting of `anchor_range`, it is generally adjusted according to dataset. Note that `z` value needs to be adjusted accordingly to the position of the point cloud, please refer to this [issue](https://github.com/open-mmlab/mmdetection3d/issues/986). + +Regarding the setting of `anchor_size`, it is usually necessary to count the average length, width and height of objects in the entire training dataset as `anchor_size` to obtain the best results. + +In `configs/_base_/models/pointpillars_hv_secfpn_custom.py`: + +```python +voxel_size = [0.16, 0.16, 4] # adjust according to your dataset +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] # adjust according to your dataset +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=32, + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range), + # the `output_shape` should be adjusted according to `point_cloud_range` + # and `voxel_size` + middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + assign_per_class=True, + # adjust the `ranges` and `sizes` according to your dataset + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[ + [0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -1.78, 69.12, 39.68, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) +``` + +#### Prepare overall config + +We combine all the configs above in 
`configs/pointpillars/pointpillars_hv_secfpn_8xb6_custom.py`: + +```python +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_custom.py', + '../_base_/datasets/custom.py', + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py' +] +``` + +#### Visualize your dataset (optional) + +To validate whether your prepared data and config are correct, it's highly recommended to use `tools/misc/browse_dataset.py` script +to visualize your dataset and annotations before training and validation. Please refer to [visualization doc](https://mmdetection3d.readthedocs.io/en/dev-1.x/user_guides/visualization.html) for more details. + +## Evaluation + +Once the data and config have been prepared, you can directly run the training/testing script following our doc. + +**Note**: We only provide an implementation for KITTI style evaluation for the customized dataset. It should be included in the dataset config: + +```python +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'custom_infos_val.pkl', # specify your validation pkl info + metric='bbox') +``` diff --git a/docs/en/advanced_guides/customize_models.md b/docs/en/advanced_guides/customize_models.md new file mode 100755 index 0000000..4a53e24 --- /dev/null +++ b/docs/en/advanced_guides/customize_models.md @@ -0,0 +1,638 @@ +# Customize Models + +We basically categorize model components into 6 types: + +- encoder: Including voxel encoder and middle encoder used in voxel-based methods before backbone, e.g., `HardVFE` and `PointPillarsScatter`. +- backbone: Usually an FCN network to extract feature maps, e.g., `ResNet`, `SECOND`. +- neck: The component between backbones and heads, e.g., `FPN`, `SECONDFPN`. +- head: The component for specific tasks, e.g., `bbox prediction` and `mask prediction`. +- RoI extractor: The part for extracting RoI features from feature maps, e.g., `H3DRoIHead` and `PartAggregationROIHead`. +- loss: The component in heads for calculating losses, e.g., `FocalLoss`, `L1Loss`, and `GHMLoss`. + +## Develop new components + +### Add a new encoder + +Here we show how to develop new components with an example of HardVFE. + +#### 1. Define a new voxel encoder (e.g. HardVFE: Voxel feature encoder used in HV-SECOND) + +Create a new file `mmdet3d/models/voxel_encoders/voxel_encoder.py`. + +```python +import torch.nn as nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class HardVFE(nn.Module): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # should return a tuple + pass +``` + +#### 2. Import the module + +You can either add the following line to `mmdet3d/models/voxel_encoders/__init__.py`: + +```python +from .voxel_encoder import HardVFE +``` + +or alternatively add + +```python +custom_imports = dict( + imports=['mmdet3d.models.voxel_encoders.voxel_encoder'], + allow_failed_imports=False) +``` + +to the config file to avoid modifying the original code. + +#### 3. Use the voxel encoder in your config file + +```python +model = dict( + ... + voxel_encoder=dict( + type='HardVFE', + arg1=xxx, + arg2=yyy), + ... +) +``` + +### Add a new backbone + +Here we show how to develop new components with an example of [SECOND](https://www.mdpi.com/1424-8220/18/10/3337) (Sparsely Embedded Convolutional Detection). + +#### 1. Define a new backbone (e.g. SECOND) + +Create a new file `mmdet3d/models/backbones/second.py`. 
+ +```python +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class SECOND(BaseModule): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # should return a tuple + pass +``` + +#### 2. Import the module + +You can either add the following line to `mmdet3d/models/backbones/__init__.py`: + +```python +from .second import SECOND +``` + +or alternatively add + +```python +custom_imports = dict( + imports=['mmdet3d.models.backbones.second'], + allow_failed_imports=False) +``` + +to the config file to avoid modifying the original code. + +#### 3. Use the backbone in your config file + +```python +model = dict( + ... + backbone=dict( + type='SECOND', + arg1=xxx, + arg2=yyy), + ... +) +``` + +### Add a new neck + +#### 1. Define a new neck (e.g. SECONDFPN) + +Create a new file `mmdet3d/models/necks/second_fpn.py`. + +```python +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class SECONDFPN(BaseModule): + + def __init__(self, + in_channels=[128, 128, 256], + out_channels=[256, 256, 256], + upsample_strides=[1, 2, 4], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + conv_cfg=dict(type='Conv2d', bias=False), + use_conv_for_no_stride=False, + init_cfg=None): + pass + + def forward(self, x): + # implementation is ignored + pass +``` + +#### 2. Import the module + +You can either add the following line to `mmdet3d/models/necks/__init__.py`: + +```python +from .second_fpn import SECONDFPN +``` + +or alternatively add + +```python +custom_imports = dict( + imports=['mmdet3d.models.necks.second_fpn'], + allow_failed_imports=False) +``` + +to the config file to avoid modifying the original code. + +#### 3. Use the neck in your config file + +```python +model = dict( + ... + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + ... +) +``` + +### Add a new head + +Here we show how to develop a new head with the example of [PartA2 Head](https://arxiv.org/abs/1907.03670) as the following. + +**Note**: Here the example of `PartA2 RoI Head` is used in the second stage. For one-stage heads, please refer to examples in `mmdet3d/models/dense_heads/`. They are more commonly used in 3D detection for autonomous driving due to its simplicity and high efficiency. + +First, add a new bbox head in `mmdet3d/models/roi_heads/bbox_heads/parta2_bbox_head.py`. +`PartA2 RoI Head` implements a new bbox head for object detection. +To implement a bbox head, basically we need to implement two functions of the new module as the following. Sometimes other related functions like `loss` and `get_targets` are also required. 
+ +```python +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class PartA2BboxHead(BaseModule): + """PartA2 RoI head.""" + + def __init__(self, + num_classes, + seg_in_channels, + part_in_channels, + seg_conv_channels=None, + part_conv_channels=None, + merge_conv_channels=None, + down_conv_channels=None, + shared_fc_channels=None, + cls_channels=None, + reg_channels=None, + dropout_ratio=0.1, + roi_feat_size=14, + with_corner_loss=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='none', + loss_weight=1.0), + init_cfg=None): + super(PartA2BboxHead, self).__init__(init_cfg=init_cfg) + + def forward(self, seg_feats, part_feats): + pass +``` + +Second, implement a new RoI Head if it is necessary. We plan to inherit the new `PartAggregationROIHead` from `Base3DRoIHead`. We can find that a `Base3DRoIHead` already implements the following functions. + +```python +from mmdet.models.roi_heads import BaseRoIHead + +from mmdet3d.registry import MODELS, TASK_UTILS + + +class Base3DRoIHead(BaseRoIHead): + """Base class for 3d RoIHeads.""" + + def __init__(self, + bbox_head=None, + bbox_roi_extractor=None, + mask_head=None, + mask_roi_extractor=None, + train_cfg=None, + test_cfg=None, + init_cfg=None): + super(Base3DRoIHead, self).__init__( + bbox_head=bbox_head, + bbox_roi_extractor=bbox_roi_extractor, + mask_head=mask_head, + mask_roi_extractor=mask_roi_extractor, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + + def init_bbox_head(self, bbox_roi_extractor: dict, + bbox_head: dict) -> None: + """Initialize box head and box roi extractor. + + Args: + bbox_roi_extractor (dict or ConfigDict): Config of box + roi extractor. + bbox_head (dict or ConfigDict): Config of box in box head. + """ + self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor) + self.bbox_head = MODELS.build(bbox_head) + + def init_assigner_sampler(self): + """Initialize assigner and sampler.""" + self.bbox_assigner = None + self.bbox_sampler = None + if self.train_cfg: + if isinstance(self.train_cfg.assigner, dict): + self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner) + elif isinstance(self.train_cfg.assigner, list): + self.bbox_assigner = [ + TASK_UTILS.build(res) for res in self.train_cfg.assigner + ] + self.bbox_sampler = TASK_UTILS.build(self.train_cfg.sampler) + + def init_mask_head(self): + """Initialize mask head, skip since ``PartAggregationROIHead`` does not + have one.""" + pass +``` + +Double Head's modification is mainly in the bbox_forward logic, and it inherits other logics from the `Base3DRoIHead`. +In the `mmdet3d/models/roi_heads/part_aggregation_roi_head.py`, we implement the new RoI Head as the following: + +```python +from typing import Dict, List, Tuple + +from mmdet.models.task_modules import AssignResult, SamplingResult +from mmengine import ConfigDict +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.structures import bbox3d2roi +from mmdet3d.utils import InstanceList +from ...structures.det3d_data_sample import SampleList +from .base_3droi_head import Base3DRoIHead + + +@MODELS.register_module() +class PartAggregationROIHead(Base3DRoIHead): + """Part aggregation roi head for PartA2. 
+ + Args: + semantic_head (ConfigDict): Config of semantic head. + num_classes (int): The number of classes. + seg_roi_extractor (ConfigDict): Config of seg_roi_extractor. + bbox_roi_extractor (ConfigDict): Config of part_roi_extractor. + bbox_head (ConfigDict): Config of bbox_head. + train_cfg (ConfigDict): Training config. + test_cfg (ConfigDict): Testing config. + """ + + def __init__(self, + semantic_head: dict, + num_classes: int = 3, + seg_roi_extractor: dict = None, + bbox_head: dict = None, + bbox_roi_extractor: dict = None, + train_cfg: dict = None, + test_cfg: dict = None, + init_cfg: dict = None) -> None: + super(PartAggregationROIHead, self).__init__( + bbox_head=bbox_head, + bbox_roi_extractor=bbox_roi_extractor, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + self.num_classes = num_classes + assert semantic_head is not None + self.init_seg_head(seg_roi_extractor, semantic_head) + + def init_seg_head(self, seg_roi_extractor: dict, + semantic_head: dict) -> None: + """Initialize semantic head and seg roi extractor. + + Args: + seg_roi_extractor (dict): Config of seg + roi extractor. + semantic_head (dict): Config of semantic head. + """ + self.semantic_head = MODELS.build(semantic_head) + self.seg_roi_extractor = MODELS.build(seg_roi_extractor) + + @property + def with_semantic(self): + """bool: whether the head has semantic branch""" + return hasattr(self, + 'semantic_head') and self.semantic_head is not None + + def predict(self, + feats_dict: Dict, + rpn_results_list: InstanceList, + batch_data_samples: SampleList, + rescale: bool = False, + **kwargs) -> InstanceList: + """Perform forward propagation of the roi head and predict detection + results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + assert self.with_bbox, 'Bbox head must be implemented in PartA2.' + assert self.with_semantic, 'Semantic head must be implemented' \ + ' in PartA2.' + + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + voxels_dict = feats_dict.pop('voxels_dict') + # TODO: Split predict semantic and bbox + results_list = self.predict_bbox(feats_dict, voxels_dict, + batch_input_metas, rpn_results_list, + self.test_cfg) + return results_list + + def predict_bbox(self, feats_dict: Dict, voxel_dict: Dict, + batch_input_metas: List[dict], + rpn_results_list: InstanceList, + test_cfg: ConfigDict) -> InstanceList: + """Perform forward propagation of the bbox head and predict detection + results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + voxel_dict (dict): Contains information of voxels. + batch_input_metas (list[dict], Optional): Batch image meta info. 
+ Defaults to None. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + test_cfg (Config): Test config. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + ... + + def loss(self, feats_dict: Dict, rpn_results_list: InstanceList, + batch_data_samples: SampleList, **kwargs) -> dict: + """Perform forward propagation and loss calculation of the detection + roi on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + dict[str, Tensor]: A dictionary of loss components + """ + assert len(rpn_results_list) == len(batch_data_samples) + losses = dict() + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + voxels_dict = feats_dict.pop('voxels_dict') + for data_sample in batch_data_samples: + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + if 'ignored_instances' in data_sample: + batch_gt_instances_ignore.append(data_sample.ignored_instances) + else: + batch_gt_instances_ignore.append(None) + if self.with_semantic: + semantic_results = self._semantic_forward_train( + feats_dict, voxels_dict, batch_gt_instances_3d) + losses.update(semantic_results.pop('loss_semantic')) + + sample_results = self._assign_and_sample(rpn_results_list, + batch_gt_instances_3d) + if self.with_bbox: + feats_dict.update(semantic_results) + bbox_results = self._bbox_forward_train(feats_dict, voxels_dict, + sample_results) + losses.update(bbox_results['loss_bbox']) + + return losses +``` + +Here we omit more details related to other functions. Please see the [code](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/models/roi_heads/part_aggregation_roi_head.py) for more details. + +Last, the users need to add the module in +`mmdet3d/models/roi_heads/bbox_heads/__init__.py` and `mmdet3d/models/roi_heads/__init__.py` thus the corresponding registry could find and load them. + +Alternatively, the users can add + +```python +custom_imports=dict( + imports=['mmdet3d.models.roi_heads.part_aggregation_roi_head', 'mmdet3d.models.roi_heads.bbox_heads.parta2_bbox_head'], + allow_failed_imports=False) +``` + +to the config file and achieve the same goal. + +The config file of `PartAggregationROIHead` is as the following: + +```python +model = dict( + ... 
+ roi_head=dict( + type='PartAggregationROIHead', + num_classes=3, + semantic_head=dict( + type='PointwiseSemanticHead', + in_channels=16, + extra_width=0.2, + seg_score_thr=0.3, + num_classes=3, + loss_seg=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_part=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0)), + seg_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='max')), + bbox_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='avg')), + bbox_head=dict( + type='PartA2BboxHead', + num_classes=3, + seg_in_channels=16, + part_in_channels=4, + seg_conv_channels=[64, 64], + part_conv_channels=[64, 64], + merge_conv_channels=[128, 128], + down_conv_channels=[128, 256], + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + shared_fc_channels=[256, 512, 512, 512], + cls_channels=[256, 256], + reg_channels=[256, 256], + dropout_ratio=0.1, + roi_feat_size=14, + with_corner_loss=True, + loss_bbox=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0))), + ... +) +``` + +Since MMDetection 2.0, the config system supports to inherit configs such that the users can focus on the modification. +The second stage of PartA2 Head mainly uses a new `PartAggregationROIHead` and a new +`PartA2BboxHead`, the arguments are set according to the `__init__` function of each module. + +### Add a new loss + +Assume you want to add a new loss as `MyLoss` for bounding box regression. +To add a new loss function, the users need to implement it in `mmdet3d/models/losses/my_loss.py`. +The decorator `weighted_loss` enables the loss to be weighted for each element. + +```python +import torch +import torch.nn as nn +from mmdet.models.losses.utils import weighted_loss + +from mmdet3d.registry import MODELS + + +@weighted_loss +def my_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + +@MODELS.register_module() +class MyLoss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(MyLoss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * my_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss_bbox +``` + +Then the users need to add it in the `mmdet3d/models/losses/__init__.py`. + +```python +from .my_loss import MyLoss, my_loss +``` + +Alternatively, you can add + +```python +custom_imports=dict( + imports=['mmdet3d.models.losses.my_loss'], + allow_failed_imports=False) +``` + +to the config file and achieve the same goal. + +To use it, users should modify the `loss_xxx` field. +Since `MyLoss` is for regression, you need to modify the `loss_bbox` field in the head. 
+ +```python +loss_bbox=dict(type='MyLoss', loss_weight=1.0) +``` diff --git a/docs/en/advanced_guides/customize_runtime.md b/docs/en/advanced_guides/customize_runtime.md new file mode 100755 index 0000000..9ccacd3 --- /dev/null +++ b/docs/en/advanced_guides/customize_runtime.md @@ -0,0 +1,392 @@ +# Customize Runtime Settings + +## Customize optimization settings + +Optimization related configuration is now all managed by `optim_wrapper` which usually has three fields: `optimizer`, `paramwise_cfg`, `clip_grad`. Please refer to [OptimWrapper](https://mmengine.readthedocs.io/en/latest/tutorials/optim_wrapper.html) for more details. See the example below, where `AdamW` is used as an `optimizer`, the learning rate of the backbone is reduced by a factor of 10, and gradient clipping is added. + +```python +optim_wrapper = dict( + type='OptimWrapper', + # optimizer + optimizer=dict( + type='AdamW', + lr=0.0001, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + + # Parameter-level learning rate and weight decay settings + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + }, + norm_decay_mult=0.0), + + # gradient clipping + clip_grad=dict(max_norm=0.01, norm_type=2)) +``` + +### Customize optimizer supported by PyTorch + +We already support to use all the optimizers implemented by PyTorch, and the only modification is to change the `optimizer` field in `optim_wrapper` field of config files. For example, if you want to use `Adam` (note that the performance could drop a lot), the modification could be as the following: + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='Adam', lr=0.0003, weight_decay=0.0001)) +``` + +To modify the learning rate of the model, the users only need to modify the `lr` in `optimizer`. The users can directly set arguments following the [API doc](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) of PyTorch. + +### Customize self-implemented optimizer + +#### 1. Define a new optimizer + +A customized optimizer could be defined as following: + +Assume you want to add a optimizer named `MyOptimizer`, which has arguments `a`, `b`, and `c`. +You need to create a new directory named `mmdet3d/engine/optimizers`, and then implement the new optimizer in a file, e.g., in `mmdet3d/engine/optimizers/my_optimizer.py`: + +```python +from torch.optim import Optimizer + +from mmdet3d.registry import OPTIMIZERS + + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c): + pass +``` + +#### 2. Add the optimizer to registry + +To find the above module defined above, this module should be imported into the main namespace at first. There are two options to achieve it. + +- Modify `mmdet3d/engine/optimizers/__init__.py` to import it. + + The newly defined module should be imported in `mmdet3d/engine/optimizers/__init__.py` so that the registry will find the new module and add it: + + ```python + from .my_optimizer import MyOptimizer + ``` + +- Use `custom_imports` in the config to manually import it. + + ```python + custom_imports = dict(imports=['mmdet3d.engine.optimizers.my_optimizer'], allow_failed_imports=False) + ``` + + The module `mmdet3d.engine.optimizers.my_optimizer` will be imported at the beginning of the program and the class `MyOptimizer` is then automatically registered. + Note that only the package containing the class `MyOptimizer` should be imported. + `mmdet3d.engine.optimizers.my_optimizer.MyOptimizer` **cannot** be imported directly. 
+ + Actually users can use a totally different file directory structure with this importing method, as long as the module root is located in `PYTHONPATH`. + +#### 3. Specify the optimizer in the config file + +Then you can use `MyOptimizer` in `optimizer` field in `optim_wrapper` field of config files. In the configs, the optimizers are defined by the field `optimizer` like the following: + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) +``` + +To use your own optimizer, the field can be changed to: + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value)) +``` + +### Customize optimizer wrapper constructor + +Some models may have some parameter-specific settings for optimization, e.g. weight decay for BatchNorm layers. +The users can do those fine-grained parameter tuning through customizing optimizer wrapper constructor. + +```python +from mmengine.optim import DefaultOptimWrapperConstructor + +from mmdet3d.registry import OPTIM_WRAPPER_CONSTRUCTORS +from .my_optimizer import MyOptimizer + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class MyOptimizerWrapperConstructor(DefaultOptimWrapperConstructor): + + def __init__(self, + optim_wrapper_cfg: dict, + paramwise_cfg: Optional[dict] = None): + pass + + def __call__(self, model: nn.Module) -> OptimWrapper: + + return optim_wrapper +``` + +The default optimizer wrapper constructor is implemented [here](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/default_constructor.py#L18), which could also serve as a template for the new optimizer wrapper constructor. + +### Additional settings + +Tricks not implemented by the optimizer should be implemented through optimizer wrapper constructor (e.g., set parameter-wise learning rates) or hooks. We list some common settings that could stabilize the training or accelerate the training. Feel free to create PR, issue for more settings. + +- __Use gradient clip to stabilize training__: + Some models need gradient clip to clip the gradients to stabilize the training process. An example is as below: + + ```python + optim_wrapper = dict( + _delete_=True, clip_grad=dict(max_norm=35, norm_type=2)) + ``` + + If your config inherits the base config which already sets the `optim_wrapper`, you might need `_delete_=True` to override the unnecessary settings. See the [config documentation](https://mmdetection3d.readthedocs.io/en/dev-1.x/user_guides/config.html) for more details. + +- __Use momentum schedule to accelerate model convergence__: + We support momentum scheduler to modify model's momentum according to learning rate, which could make the model converge in a faster way. + Momentum scheduler is usually used with LR scheduler, for example, the following config is used in [3D detection](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/schedules/cyclic-20e.py) to accelerate convergence. + For more details, please refer to the implementation of [CosineAnnealingLR](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/lr_scheduler.py#L43) and [CosineAnnealingMomentum](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/momentum_scheduler.py#L71). 
+ + ```python + param_scheduler = [ + # learning rate scheduler + # During the first 8 epochs, learning rate increases from 0 to lr * 10 + # during the next 12 epochs, learning rate decreases from lr * 10 to lr * 1e-4 + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=lr * 10, + begin=0, + end=8, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=12, + eta_min=lr * 1e-4, + begin=8, + end=20, + by_epoch=True, + convert_to_iter_based=True), + # momentum scheduler + # During the first 8 epochs, momentum increases from 0 to 0.85 / 0.95 + # during the next 12 epochs, momentum increases from 0.85 / 0.95 to 1 + dict( + type='CosineAnnealingMomentum', + T_max=8, + eta_min=0.85 / 0.95, + begin=0, + end=8, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=12, + eta_min=1, + begin=8, + end=20, + by_epoch=True, + convert_to_iter_based=True) + ] + ``` + +## Customize training schedules + +By default we use step learning rate with 1x schedule, this calls [`MultiStepLR`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/lr_scheduler.py#L144) in MMEngine. +We support many other learning rate schedule [here](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/lr_scheduler.py), such as `CosineAnnealingLR` and `PolyLR` schedules. Here are some examples: + +- Poly schedule: + + ```python + param_scheduler = [ + dict( + type='PolyLR', + power=0.9, + eta_min=1e-4, + begin=0, + end=8, + by_epoch=True)] + ``` + +- CosineAnnealing schedule: + + ```python + param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=lr * 1e-5, + begin=0, + end=8, + by_epoch=True)] + ``` + +## Customize train loop + +By default, `EpochBasedTrainLoop` is used in `train_cfg` and validation is done after every train epoch, as follows: + +```python +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_begin=1, val_interval=1) +``` + +Actually, both [`IterBasedTrainLoop`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L185) and [`EpochBasedTrainLoop`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L18) support dynamic interval, see the following example: + +```python +# Before 365001th iteration, we do evaluation every 5000 iterations. +# After 365000th iteration, we do evaluation every 368750 iterations, +# which means that we do evaluation at the end of training. + +interval = 5000 +max_iters = 368750 +dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] +train_cfg = dict( + type='IterBasedTrainLoop', + max_iters=max_iters, + val_interval=interval, + dynamic_intervals=dynamic_intervals) +``` + +## Customize hooks + +### Customize self-implemented hooks + +#### 1. Implement a new hook + +MMEngine provides many useful [hooks](https://mmengine.readthedocs.io/en/latest/tutorials/hook.html), but there are some occasions when the users might need to implement a new hook. MMDetection3D supports customized hooks in training based on MMEngine after v1.1.0rc0. Thus the users could implement a hook directly in mmdet3d or their mmdet3d-based codebases and use the hook by only modifying the config in training. +Here we give an example of creating a new hook in mmdet3d and using it in training. 
+
+```python
+from typing import Optional, Union
+
+from mmengine.hooks import Hook
+
+from mmdet3d.registry import HOOKS
+
+# type alias used by MMEngine hooks for the input data batch
+DATA_BATCH = Optional[Union[dict, tuple, list]]
+
+
+@HOOKS.register_module()
+class MyHook(Hook):
+
+    def __init__(self, a, b):
+        self.a = a
+        self.b = b
+
+    def before_run(self, runner) -> None:
+        pass
+
+    def after_run(self, runner) -> None:
+        pass
+
+    def before_train(self, runner) -> None:
+        pass
+
+    def after_train(self, runner) -> None:
+        pass
+
+    def before_train_epoch(self, runner) -> None:
+        pass
+
+    def after_train_epoch(self, runner) -> None:
+        pass
+
+    def before_train_iter(self,
+                          runner,
+                          batch_idx: int,
+                          data_batch: DATA_BATCH = None) -> None:
+        pass
+
+    def after_train_iter(self,
+                         runner,
+                         batch_idx: int,
+                         data_batch: DATA_BATCH = None,
+                         outputs: Optional[dict] = None) -> None:
+        pass
+```
+
+Depending on the functionality of the hook, users need to specify what the hook will do at each stage of the training in `before_run`, `after_run`, `before_train`, `after_train`, `before_train_epoch`, `after_train_epoch`, `before_train_iter`, and `after_train_iter`. There are more points where hooks can be inserted; refer to the [base hook class](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/hook.py#L9) for more details.
+
+#### 2. Register the new hook
+
+Then we need to ensure `MyHook` is imported. Assuming the file is `mmdet3d/engine/hooks/my_hook.py`, there are two ways to do that:
+
+- Modify `mmdet3d/engine/hooks/__init__.py` to import it.
+
+  The newly defined module should be imported in `mmdet3d/engine/hooks/__init__.py` so that the registry will find the new module and add it:
+
+  ```python
+  from .my_hook import MyHook
+  ```
+
+- Use `custom_imports` in the config to manually import it.
+
+  ```python
+  custom_imports = dict(imports=['mmdet3d.engine.hooks.my_hook'], allow_failed_imports=False)
+  ```
+
+#### 3. Modify the config
+
+```python
+custom_hooks = [
+    dict(type='MyHook', a=a_value, b=b_value)
+]
+```
+
+You can also set the priority of the hook by setting the key `priority` to `'NORMAL'` or `'HIGHEST'` as below:
+
+```python
+custom_hooks = [
+    dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL')
+]
+```
+
+By default the hook's priority is set as `NORMAL` during registration.
+
+### Use hooks implemented in MMDetection3D
+
+If the hook is already implemented in MMDetection3D, you can directly modify the config to use the hook as below.
+
+#### Example: `DisableObjectSampleHook`
+
+We implement a customized hook named [DisableObjectSampleHook](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/engine/hooks/disable_object_sample_hook.py) to disable the `ObjectSample` augmentation during training after a specified epoch.
+
+We can set it in the config file if needed:
+
+```python
+custom_hooks = [dict(type='DisableObjectSampleHook', disable_after_epoch=15)]
+```
+
+### Modify default runtime hooks
+
+There are some common hooks that are registered through `default_hooks` (see the example after this list); they are:
+
+- `IterTimerHook`: A hook that logs 'data_time' for loading data and 'time' for a model training step.
+- `LoggerHook`: A hook that collects logs from different components of `Runner` and writes them to the terminal, json file, tensorboard, wandb, etc.
+- `ParamSchedulerHook`: A hook that updates some hyper-parameters in the optimizer, e.g. learning rate and momentum.
+- `CheckpointHook`: A hook that saves checkpoints periodically.
+- `DistSamplerSeedHook`: A hook that sets the seed for the sampler and batch_sampler.
+- `Det3DVisualizationHook`: A hook used to visualize prediction results during validation and testing.
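+
+For reference, a minimal sketch of such a `default_hooks` definition is shown below; the hook arguments are illustrative and may differ from the defaults shipped in `configs/_base_/default_runtime.py`.
+
+```python
+default_hooks = dict(
+    timer=dict(type='IterTimerHook'),
+    logger=dict(type='LoggerHook', interval=50),
+    param_scheduler=dict(type='ParamSchedulerHook'),
+    checkpoint=dict(type='CheckpointHook', interval=1),
+    sampler_seed=dict(type='DistSamplerSeedHook'),
+    visualization=dict(type='Det3DVisualizationHook'))
+```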
+
+`IterTimerHook`, `ParamSchedulerHook` and `DistSamplerSeedHook` are simple and usually do not need to be modified, so here we focus on what we can do with `LoggerHook`, `CheckpointHook` and `Det3DVisualizationHook`.
+
+#### CheckpointHook
+
+In addition to saving checkpoints periodically, [`CheckpointHook`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/checkpoint_hook.py#L18) provides other options such as `max_keep_ckpts` and `save_optimizer`. The users could set `max_keep_ckpts` to only keep a small number of checkpoints, or decide whether to store the state dict of the optimizer via `save_optimizer`. More details of the arguments are [here](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/checkpoint_hook.py#L18).
+
+```python
+default_hooks = dict(
+    checkpoint=dict(
+        type='CheckpointHook',
+        interval=1,
+        max_keep_ckpts=3,
+        save_optimizer=True))
+```
+
+#### LoggerHook
+
+The `LoggerHook` enables setting the logging interval. Detailed instructions can be found in the [docstring](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/logger_hook.py#L19).
+
+```python
+default_hooks = dict(logger=dict(type='LoggerHook', interval=50))
+```
+
+#### Det3DVisualizationHook
+
+`Det3DVisualizationHook` uses `Det3DLocalVisualizer` to visualize prediction results, and `Det3DLocalVisualizer` currently supports different backends, e.g. `TensorboardVisBackend` and `WandbVisBackend` (see the [docstring](https://github.com/open-mmlab/mmengine/blob/main/mmengine/visualization/vis_backend.py) for more details). The users could add multiple backends to do visualization as follows.
+
+```python
+default_hooks = dict(
+    visualization=dict(type='Det3DVisualizationHook', draw=True))
+
+vis_backends = [dict(type='LocalVisBackend'),
+                dict(type='TensorboardVisBackend')]
+visualizer = dict(
+    type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+```
diff --git a/docs/en/advanced_guides/datasets/index.rst b/docs/en/advanced_guides/datasets/index.rst
new file mode 100755
index 0000000..1622f78
--- /dev/null
+++ b/docs/en/advanced_guides/datasets/index.rst
@@ -0,0 +1,11 @@
+.. toctree::
+   :maxdepth: 3
+
+   kitti_det.md
+   nuscenes_det.md
+   lyft_det.md
+   waymo_det.md
+   sunrgbd_det.md
+   scannet_det.md
+   scannet_sem_seg.md
+   s3dis_sem_seg.md
diff --git a/docs/en/advanced_guides/datasets/kitti_det.md b/docs/en/advanced_guides/datasets/kitti_det.md
new file mode 100755
index 0000000..bafdca3
--- /dev/null
+++ b/docs/en/advanced_guides/datasets/kitti_det.md
@@ -0,0 +1,206 @@
+# KITTI Dataset for 3D Object Detection
+
+This page provides specific tutorials about the usage of MMDetection3D for the KITTI dataset.
+
+## Prepare dataset
+
+You can download KITTI 3D detection data [HERE](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d) and unzip all zip files. Besides, the road planes could be downloaded from [HERE](https://download.openmmlab.com/mmdetection3d/data/train_planes.zip), which are optional for data augmentation during training for better performance. The road planes are generated by [AVOD](https://github.com/kujason/avod); you can see more details [HERE](https://github.com/kujason/avod/issues/19).
+
+Like the general way to prepare datasets, it is recommended to symlink the dataset root to `$MMDETECTION3D/data`.
+
+The folder structure should be organized as follows before our processing.
+ +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── kitti +│ │ ├── ImageSets +│ │ ├── testing +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── velodyne +│ │ ├── training +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── label_2 +│ │ │ ├── velodyne +│ │ │ ├── planes (optional) +``` + +### Create KITTI dataset + +To create KITTI point cloud data, we load the raw point cloud data and generate the relevant annotations including object labels and bounding boxes. We also generate all single training objects' point cloud in KITTI dataset and save them as `.bin` files in `data/kitti/kitti_gt_database`. Meanwhile, `.pkl` info files are also generated for training or validation. Subsequently, create KITTI data by running: + +```bash +mkdir ./data/kitti/ && mkdir ./data/kitti/ImageSets + +# Download data split +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/test.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/test.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/train.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/train.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/val.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/val.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/trainval.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/trainval.txt + +python tools/create_data.py kitti --root-path ./data/kitti --out-dir ./data/kitti --extra-tag kitti --with-plane +``` + +Note that if your local disk does not have enough space for saving converted data, you can change the `--out-dir` to anywhere else, and you need to remove the `--with-plane` flag if `planes` are not prepared. + +The folder structure after processing should be as below + +``` +kitti +├── ImageSets +│ ├── test.txt +│ ├── train.txt +│ ├── trainval.txt +│ ├── val.txt +├── testing +│ ├── calib +│ ├── image_2 +│ ├── velodyne +│ ├── velodyne_reduced +├── training +│ ├── calib +│ ├── image_2 +│ ├── label_2 +│ ├── velodyne +│ ├── velodyne_reduced +│ ├── planes (optional) +├── kitti_gt_database +│ ├── xxxxx.bin +├── kitti_infos_train.pkl +├── kitti_infos_val.pkl +├── kitti_dbinfos_train.pkl +├── kitti_infos_test.pkl +├── kitti_infos_trainval.pkl +``` + +- `kitti_gt_database/xxxxx.bin`: point cloud data included in each 3D bounding box of the training dataset. +- `kitti_infos_train.pkl`: training dataset, a dict contains two keys: `metainfo` and `data_list`. + `metainfo` contains the basic information for the dataset itself, such as `categories`, `dataset` and `info_version`, while `data_list` is a list of dict, each dict (hereinafter referred to as `info`) contains all the detailed information of single sample as follows: + - info\['sample_idx'\]: The index of this sample in the whole dataset. + - info\['images'\]: Information of images captured by multiple cameras. A dict contains five keys including: `CAM0`, `CAM1`, `CAM2`, `CAM3`, `R0_rect`. + - info\['images'\]\['R0_rect'\]: Rectifying rotation matrix with shape (4, 4). + - info\['images'\]\['CAM2'\]: Include some information about the `CAM2` camera sensor. + - info\['images'\]\['CAM2'\]\['img_path'\]: The filename of the image. + - info\['images'\]\['CAM2'\]\['height'\]: The height of the image. + - info\['images'\]\['CAM2'\]\['width'\]: The width of the image. 
+      - info\['images'\]\['CAM2'\]\['cam2img'\]: Transformation matrix from camera to image with shape (4, 4).
+      - info\['images'\]\['CAM2'\]\['lidar2cam'\]: Transformation matrix from lidar to camera with shape (4, 4).
+      - info\['images'\]\['CAM2'\]\['lidar2img'\]: Transformation matrix from lidar to image with shape (4, 4).
+  - info\['lidar_points'\]: A dict containing all the information related to the lidar points.
+    - info\['lidar_points'\]\['lidar_path'\]: The filename of the lidar point cloud data.
+    - info\['lidar_points'\]\['num_pts_feats'\]: The feature dimension of each point.
+    - info\['lidar_points'\]\['Tr_velo_to_cam'\]: Transformation from Velodyne coordinate to camera coordinate with shape (4, 4).
+    - info\['lidar_points'\]\['Tr_imu_to_velo'\]: Transformation from IMU coordinate to Velodyne coordinate with shape (4, 4).
+  - info\['instances'\]: It is a list of dict. Each dict contains all annotation information of a single instance. For the i-th instance:
+    - info\['instances'\]\[i\]\['bbox'\]: List of 4 numbers representing the 2D bounding box of the instance, in (x1, y1, x2, y2) order.
+    - info\['instances'\]\[i\]\['bbox_3d'\]: List of 7 numbers representing the 3D bounding box of the instance, in (x, y, z, l, h, w, yaw) order.
+    - info\['instances'\]\[i\]\['bbox_label'\]: An int indicating the 2D label of the instance, where -1 indicates ignore.
+    - info\['instances'\]\[i\]\['bbox_label_3d'\]: An int indicating the 3D label of the instance, where -1 indicates ignore.
+    - info\['instances'\]\[i\]\['depth'\]: Projected center depth of the 3D bounding box with respect to the image plane.
+    - info\['instances'\]\[i\]\['num_lidar_pts'\]: The number of LiDAR points in the 3D bounding box.
+    - info\['instances'\]\[i\]\['center_2d'\]: Projected 2D center of the 3D bounding box.
+    - info\['instances'\]\[i\]\['difficulty'\]: KITTI difficulty: 'Easy', 'Moderate', 'Hard'.
+    - info\['instances'\]\[i\]\['truncated'\]: Float from 0 (non-truncated) to 1 (truncated), where truncated refers to the object leaving image boundaries.
+    - info\['instances'\]\[i\]\['occluded'\]: Integer (0, 1, 2, 3) indicating occlusion state: 0 = fully visible, 1 = partly occluded, 2 = largely occluded, 3 = unknown.
+    - info\['instances'\]\[i\]\['group_ids'\]: Used for multi-part objects.
+  - info\['plane'\] (optional): Road level information.
+
+Please refer to [kitti_converter.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/kitti_converter.py) and [update_infos_to_v2.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/update_infos_to_v2.py) for more details.
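+
+To get a feel for this structure, you can load the generated info file and inspect one sample. The following is a minimal sketch; it only uses the standard library and the keys listed above, and the exact content of `metainfo` may vary with the info version.
+
+```python
+import pickle
+
+with open('data/kitti/kitti_infos_train.pkl', 'rb') as f:
+    infos = pickle.load(f)
+
+print(infos['metainfo'])                     # e.g. categories, dataset, info_version
+sample = infos['data_list'][0]               # the first `info` dict
+print(sample['sample_idx'])                  # index of the sample
+print(sample['lidar_points']['lidar_path'])  # filename of the point cloud
+print(len(sample['instances']))              # number of annotated instances
+```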
+ +## Train pipeline + +A typical train pipeline of 3D detection on KITTI is as below: + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, # x, y, z, intensity + use_dim=4), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +``` + +- Data augmentation: + - `ObjectNoise`: apply noise to each GT objects in the scene. + - `RandomFlip3D`: randomly flip input point cloud horizontally or vertically. + - `GlobalRotScaleTrans`: rotate input point cloud. + +## Evaluation + +An example to evaluate PointPillars with 8 GPUs with kitti metrics is as follows: + +```shell +bash tools/dist_test.sh configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py work_dirs/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class/latest.pth 8 +``` + +## Metrics + +KITTI evaluates 3D object detection performance using mean Average Precision (mAP) and Average Orientation Similarity (AOS), Please refer to its [official website](http://www.cvlibs.net/datasets/kitti/eval_3dobject.php) and [original paper](http://www.cvlibs.net/publications/Geiger2012CVPR.pdf) for more details. + +We also adopt this approach for evaluation on KITTI. An example of printed evaluation results is as follows: + +``` +Car AP@0.70, 0.70, 0.70: +bbox AP:97.9252, 89.6183, 88.1564 +bev AP:90.4196, 87.9491, 85.1700 +3d AP:88.3891, 77.1624, 74.4654 +aos AP:97.70, 89.11, 87.38 +Car AP@0.70, 0.50, 0.50: +bbox AP:97.9252, 89.6183, 88.1564 +bev AP:98.3509, 90.2042, 89.6102 +3d AP:98.2800, 90.1480, 89.4736 +aos AP:97.70, 89.11, 87.38 +``` + +## Testing and make a submission + +An example to test PointPillars on KITTI with 8 GPUs and generate a submission to the leaderboard is as follows: + +- First, you need to modify the `test_dataloader` and `test_evaluator` dict in your config file, just like: + + ```python + data_root = 'data/kitti/' + test_dataloader = dict( + dataset=dict( + ann_file='kitti_infos_test.pkl', + load_eval_anns=False, + data_prefix=dict(pts='testing/velodyne_reduced'))) + test_evaluator = dict( + ann_file=data_root + 'kitti_infos_test.pkl', + format_only=True, + pklfile_prefix='results/kitti-3class/kitti_results', + submission_prefix='results/kitti-3class/kitti_results') + ``` + +- And then, you can run the test script. + + ```shell + ./tools/dist_test.sh configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py work_dirs/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class/latest.pth 8 + ``` + +After generating `results/kitti-3class/kitti_results/xxxxx.txt` files, you can submit these files to KITTI benchmark. Please refer to the [KITTI official website](http://www.cvlibs.net/datasets/kitti/index.php) for more details. 
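+
+Before submitting, it may help to sanity-check the generated files. The sketch below assumes the standard KITTI result format (class name, truncation, occlusion, alpha, 2D box, dimensions, location, rotation_y and score, i.e. 16 values per line); adjust the path to your own output directory.
+
+```python
+from pathlib import Path
+
+result_dir = Path('results/kitti-3class/kitti_results')
+for txt in sorted(result_dir.glob('*.txt'))[:3]:  # peek at the first few files
+    for line in txt.read_text().splitlines():
+        fields = line.split()
+        assert len(fields) == 16, f'unexpected field count in {txt.name}'
+        print(txt.name, fields[0], 'score =', fields[-1])
+```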
diff --git a/docs/en/advanced_guides/datasets/lyft_det.md b/docs/en/advanced_guides/datasets/lyft_det.md new file mode 100755 index 0000000..237054e --- /dev/null +++ b/docs/en/advanced_guides/datasets/lyft_det.md @@ -0,0 +1,207 @@ +# Lyft Dataset for 3D Object Detection + +This page provides specific tutorials about the usage of MMDetection3D for Lyft dataset. + +## Before Preparation + +You can download Lyft 3D detection data [HERE](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data) and unzip all zip files. + +Like the general way to prepare a dataset, it is recommended to symlink the dataset root to `$MMDETECTION3D/data`. + +The folder structure should be organized as follows before our processing. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── lyft +│ │ ├── v1.01-train +│ │ │ ├── v1.01-train (train_data) +│ │ │ ├── lidar (train_lidar) +│ │ │ ├── images (train_images) +│ │ │ ├── maps (train_maps) +│ │ ├── v1.01-test +│ │ │ ├── v1.01-test (test_data) +│ │ │ ├── lidar (test_lidar) +│ │ │ ├── images (test_images) +│ │ │ ├── maps (test_maps) +│ │ ├── train.txt +│ │ ├── val.txt +│ │ ├── test.txt +│ │ ├── sample_submission.csv +``` + +Here `v1.01-train` and `v1.01-test` contain the metafiles which are similar to those of nuScenes. `.txt` files contain the data split information. +Lyft does not have an official split for training and validation set, so we provide a split considering the number of objects from different categories in different scenes. +`sample_submission.csv` is the base file for submission on the Kaggle evaluation server. +Note that we follow the original folder names for clear organization. Please rename the raw folders as shown above. + +## Dataset Preparation + +The way to organize Lyft dataset is similar to nuScenes. We also generate the `.pkl` files which share almost the same structure. +Next, we will mainly focus on the difference between these two datasets. For a more detailed explanation of the info structure, please refer to [nuScenes tutorial](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/en/advanced_guides/datasets/nuscenes_det.md). + +To prepare info files for Lyft, run the following commands: + +```bash +python tools/create_data.py lyft --root-path ./data/lyft --out-dir ./data/lyft --extra-tag lyft --version v1.01 +python tools/dataset_converters/lyft_data_fixer.py --version v1.01 --root-folder ./data/lyft +``` + +Note that the second command serves the purpose of fixing a corrupted lidar data file. Please refer to the discussion [here](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000) for more details. + +The folder structure after processing should be as below. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── lyft +│ │ ├── v1.01-train +│ │ │ ├── v1.01-train (train_data) +│ │ │ ├── lidar (train_lidar) +│ │ │ ├── images (train_images) +│ │ │ ├── maps (train_maps) +│ │ ├── v1.01-test +│ │ │ ├── v1.01-test (test_data) +│ │ │ ├── lidar (test_lidar) +│ │ │ ├── images (test_images) +│ │ │ ├── maps (test_maps) +│ │ ├── train.txt +│ │ ├── val.txt +│ │ ├── test.txt +│ │ ├── sample_submission.csv +│ │ ├── lyft_infos_train.pkl +│ │ ├── lyft_infos_val.pkl +│ │ ├── lyft_infos_test.pkl +``` + +- `lyft_infos_train.pkl`: training dataset, a dict contains two keys: `metainfo` and `data_list`. 
+  `metainfo` contains the basic information for the dataset itself, such as `categories`, `dataset` and `info_version`, while `data_list` is a list of dict, each dict (hereinafter referred to as `info`) contains all the detailed information of a single sample as follows:
+  - info\['sample_idx'\]: The index of this sample in the whole dataset.
+  - info\['token'\]: Sample data token.
+  - info\['timestamp'\]: Timestamp of the sample data.
+  - info\['lidar_points'\]: A dict containing all the information related to the lidar points.
+    - info\['lidar_points'\]\['lidar_path'\]: The filename of the lidar point cloud data.
+    - info\['lidar_points'\]\['num_pts_feats'\]: The feature dimension of each point.
+    - info\['lidar_points'\]\['lidar2ego'\]: The transformation matrix from this lidar sensor to the ego vehicle. (4x4 list)
+    - info\['lidar_points'\]\['ego2global'\]: The transformation matrix from the ego vehicle to global coordinates. (4x4 list)
+  - info\['lidar_sweeps'\]: A list containing sweeps information (the intermediate lidar frames without annotations).
+    - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['data_path'\]: The lidar data path of the i-th sweep.
+    - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['lidar2ego'\]: The transformation matrix from this lidar sensor to the ego vehicle at the i-th sweep timestamp. (4x4 list)
+    - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['ego2global'\]: The transformation matrix from the ego vehicle at the i-th sweep timestamp to global coordinates. (4x4 list)
+    - info\['lidar_sweeps'\]\[i\]\['lidar2sensor'\]: The transformation matrix from the keyframe lidar to the i-th frame lidar. (4x4 list)
+    - info\['lidar_sweeps'\]\[i\]\['timestamp'\]: Timestamp of the sweep data.
+    - info\['lidar_sweeps'\]\[i\]\['sample_data_token'\]: The sweep sample data token.
+  - info\['images'\]: A dict containing six keys corresponding to each camera: `'CAM_FRONT'`, `'CAM_FRONT_RIGHT'`, `'CAM_FRONT_LEFT'`, `'CAM_BACK'`, `'CAM_BACK_LEFT'`, `'CAM_BACK_RIGHT'`. Each dict contains all data information related to the corresponding camera.
+    - info\['images'\]\['CAM_XXX'\]\['img_path'\]: The filename of the image.
+    - info\['images'\]\['CAM_XXX'\]\['cam2img'\]: The transformation matrix recording the intrinsic parameters when projecting 3D points to each image plane. (3x3 list)
+    - info\['images'\]\['CAM_XXX'\]\['sample_data_token'\]: Sample data token of the image.
+    - info\['images'\]\['CAM_XXX'\]\['timestamp'\]: Timestamp of the image.
+    - info\['images'\]\['CAM_XXX'\]\['cam2ego'\]: The transformation matrix from this camera sensor to the ego vehicle. (4x4 list)
+    - info\['images'\]\['CAM_XXX'\]\['lidar2cam'\]: The transformation matrix from the lidar sensor to this camera. (4x4 list)
+  - info\['instances'\]: It is a list of dict. Each dict contains all annotation information of a single instance. For the i-th instance:
+    - info\['instances'\]\[i\]\['bbox_3d'\]: List of 7 numbers representing the 3D bounding box of the instance in the lidar coordinate system, in (x, y, z, l, w, h, yaw) order.
+    - info\['instances'\]\[i\]\['bbox_label_3d'\]: An int starting from 0 indicating the label of the instance, while -1 indicates the ignore class.
+    - info\['instances'\]\[i\]\['bbox_3d_isvalid'\]: Whether each bounding box is valid. In general, we only take the 3D boxes that include at least one lidar or radar point as valid boxes.
+
+Next, we will elaborate on the differences from nuScenes in terms of the details recorded in these info files.
+ +- Without `lyft_database/xxxxx.bin`: This folder and `.bin` files are not extracted on the Lyft dataset due to the negligible effect of ground-truth sampling in the experiments. + +- `lyft_infos_train.pkl`: + + - Without info\['instances'\]\[i\]\['velocity'\]: There is no velocity measurement on Lyft. + - Without info\['instances'\]\[i\]\['num_lidar_pts'\] and info\['instances'\]\['num_radar_pts'\] + +Here we only explain the data recorded in the training info files. The same applies to the validation set and test set (without instances). + +Please refer to [lyft_converter.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/lyft_converter.py) for more details about the structure of `lyft_infos_xxx.pkl`. + +## Training pipeline + +### LiDAR-Based Methods + +A typical training pipeline of LiDAR-based 3D detection (including multi-modality methods) on Lyft is almost the same as nuScenes as below. + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +``` + +Similar to nuScenes, models on Lyft also need the `'LoadPointsFromMultiSweeps'` pipeline to load point clouds from consecutive frames. +In addition, considering the intensity of LiDAR points collected by Lyft is invalid, we also set the `use_dim` in `'LoadPointsFromMultiSweeps'` to `[0, 1, 2, 4]` by default, +where the first 3 dimensions refer to point coordinates, and the last refers to timestamp differences. + +## Evaluation + +An example to evaluate PointPillars with 8 GPUs with Lyft metrics is as follows: + +```shell +bash ./tools/dist_test.sh configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py checkpoints/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210517_202818-fc6904c3.pth 8 +``` + +## Metrics + +Lyft proposes a more strict metric for evaluating the predicted 3D bounding boxes. +The basic criteria to judge whether a predicted box is positive or not is the same as KITTI, i.e. the 3D Intersection over Union (IoU). +However, it adopts a way similar to COCO to compute the mean average precision (mAP) -- compute the average precision under different thresholds of 3D IoU from 0.5-0.95. +Actually, overlap more than 0.7 3D IoU is a quite strict criterion for 3D detection methods, so the overall performance seems a little low. +The imbalance of annotations for different categories is another important reason for the finally lower results compared to other datasets. +Please refer to its [official website](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/overview/evaluation) for more details about the definition of this metric. + +We employ this official method for evaluation on Lyft. 
An example of printed evaluation results is as follows: + +``` ++mAPs@0.5:0.95------+--------------+ +| class | mAP@0.5:0.95 | ++-------------------+--------------+ +| animal | 0.0 | +| bicycle | 0.099 | +| bus | 0.177 | +| car | 0.422 | +| emergency_vehicle | 0.0 | +| motorcycle | 0.049 | +| other_vehicle | 0.359 | +| pedestrian | 0.066 | +| truck | 0.176 | +| Overall | 0.15 | ++-------------------+--------------+ +``` + +## Testing and make a submission + +An example to test PointPillars on Lyft with 8 GPUs and generate a submission to the leaderboard is as follows. + +```shell +./tools/dist_test.sh configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py work_dirs/pp-lyft/latest.pth 8 --cfg-options test_evaluator.jsonfile_prefix=work_dirs/pp-lyft/results_challenge test_evaluator.csv_savepath=results/pp-lyft/results_challenge.csv +``` + +After generating the `work_dirs/pp-lyft/results_challenge.csv`, you can submit it to the Kaggle evaluation server. Please refer to the [official website](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles) for more information. + +We can also visualize the prediction results with our developed visualization tools. Please refer to the [visualization doc](https://mmdetection3d.readthedocs.io/en/latest/useful_tools.html#visualization) for more details. diff --git a/docs/en/advanced_guides/datasets/nuscenes_det.md b/docs/en/advanced_guides/datasets/nuscenes_det.md new file mode 100755 index 0000000..9c53395 --- /dev/null +++ b/docs/en/advanced_guides/datasets/nuscenes_det.md @@ -0,0 +1,242 @@ +# NuScenes Dataset for 3D Object Detection + +This page provides specific tutorials about the usage of MMDetection3D for nuScenes dataset. + +## Before Preparation + +You can download nuScenes 3D detection data [HERE](https://www.nuscenes.org/download) and unzip all zip files. + +Like the general way to prepare dataset, it is recommended to symlink the dataset root to `$MMDETECTION3D/data`. + +The folder structure should be organized as follows before our processing. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── nuscenes +│ │ ├── maps +│ │ ├── samples +│ │ ├── sweeps +│ │ ├── v1.0-test +| | ├── v1.0-trainval +``` + +## Dataset Preparation + +We typically need to organize the useful data information with a `.pkl` file in a specific style. +To prepare these files for nuScenes, run the following command: + +```bash +python tools/create_data.py nuscenes --root-path ./data/nuscenes --out-dir ./data/nuscenes --extra-tag nuscenes +``` + +The folder structure after processing should be as below. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── nuscenes +│ │ ├── maps +│ │ ├── samples +│ │ ├── sweeps +│ │ ├── v1.0-test +| | ├── v1.0-trainval +│ │ ├── nuscenes_database +│ │ ├── nuscenes_infos_train.pkl +│ │ ├── nuscenes_infos_val.pkl +│ │ ├── nuscenes_infos_test.pkl +│ │ ├── nuscenes_dbinfos_train.pkl +``` + +- `nuscenes_database/xxxxx.bin`: point cloud data included in each 3D bounding box of the training dataset +- `nuscenes_infos_train.pkl`: training dataset, a dict contains two keys: `metainfo` and `data_list`. + `metainfo` contains the basic information for the dataset itself, such as `categories`, `dataset` and `info_version`, while `data_list` is a list of dict, each dict (hereinafter referred to as `info`) contains all the detailed information of single sample as follows: + - info\['sample_idx'\]: The index of this sample in the whole dataset. + - info\['token'\]: Sample data token. 
+ - info\['timestamp'\]: Timestamp of the sample data. + - info\['lidar_points'\]: A dict containing all the information related to the lidar points. + - info\['lidar_points'\]\['lidar_path'\]: The filename of the lidar point cloud data. + - info\['lidar_points'\]\['num_pts_feats'\]: The feature dimension of point. + - info\['lidar_points'\]\['lidar2ego'\]: The transformation matrix from this lidar sensor to ego vehicle. (4x4 list) + - info\['lidar_points'\]\['ego2global'\]: The transformation matrix from the ego vehicle to global coordinates. (4x4 list) + - info\['lidar_sweeps'\]: A list contains sweeps information (The intermediate lidar frames without annotations) + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['data_path'\]: The lidar data path of i-th sweep. + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['lidar2ego'\]: The transformation matrix from this lidar sensor to ego vehicle. (4x4 list) + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['ego2global'\]: The transformation matrix from the ego vehicle to global coordinates. (4x4 list) + - info\['lidar_sweeps'\]\[i\]\['lidar2sensor'\]: The transformation matrix from the main lidar sensor to the current sensor (for collecting the sweep data). (4x4 list) + - info\['lidar_sweeps'\]\[i\]\['timestamp'\]: Timestamp of the sweep data. + - info\['lidar_sweeps'\]\[i\]\['sample_data_token'\]: The sweep sample data token. + - info\['images'\]: A dict contains six keys corresponding to each camera: `'CAM_FRONT'`, `'CAM_FRONT_RIGHT'`, `'CAM_FRONT_LEFT'`, `'CAM_BACK'`, `'CAM_BACK_LEFT'`, `'CAM_BACK_RIGHT'`. Each dict contains all data information related to corresponding camera. + - info\['images'\]\['CAM_XXX'\]\['img_path'\]: The filename of the image. + - info\['images'\]\['CAM_XXX'\]\['cam2img'\]: The transformation matrix recording the intrinsic parameters when projecting 3D points to each image plane. (3x3 list) + - info\['images'\]\['CAM_XXX'\]\['sample_data_token'\]: Sample data token of image. + - info\['images'\]\['CAM_XXX'\]\['timestamp'\]: Timestamp of the image. + - info\['images'\]\['CAM_XXX'\]\['cam2ego'\]: The transformation matrix from this camera sensor to ego vehicle. (4x4 list) + - info\['images'\]\['CAM_XXX'\]\['lidar2cam'\]: The transformation matrix from lidar sensor to this camera. (4x4 list) + - info\['instances'\]: It is a list of dict. Each dict contains all annotation information of single instance. For the i-th instance: + - info\['instances'\]\[i\]\['bbox_3d'\]: List of 7 numbers representing the 3D bounding box of the instance, in (x, y, z, l, w, h, yaw) order. + - info\['instances'\]\[i\]\['bbox_label_3d'\]: A int indicate the label of instance and the -1 indicate ignore. + - info\['instances'\]\[i\]\['velocity'\]: Velocities of 3D bounding boxes (no vertical measurements due to inaccuracy), a list has shape (2.). + - info\['instances'\]\[i\]\['num_lidar_pts'\]: Number of lidar points included in each 3D bounding box. + - info\['instances'\]\[i\]\['num_radar_pts'\]: Number of radar points included in each 3D bounding box. + - info\['instances'\]\[i\]\['bbox_3d_isvalid'\]: Whether each bounding box is valid. In general, we only take the 3D boxes that include at least one lidar or radar point as valid boxes. + - info\['cam_instances'\]: It is a dict containing keys `'CAM_FRONT'`, `'CAM_FRONT_RIGHT'`, `'CAM_FRONT_LEFT'`, `'CAM_BACK'`, `'CAM_BACK_LEFT'`, `'CAM_BACK_RIGHT'`. For vision-based 3D object detection task, we split 3D annotations of the whole scenes according to the camera they belong to. 
For the i-th instance: + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['bbox_label'\]: Label of instance. + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['bbox_label_3d'\]: Label of instance. + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['bbox'\]: 2D bounding box annotation (exterior rectangle of the projected 3D box), a list arrange as \[x1, y1, x2, y2\]. + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['center_2d'\]: Projected center location on the image, a list has shape (2,), . + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['depth'\]: The depth of projected center. + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['velocity'\]: Velocities of 3D bounding boxes (no vertical measurements due to inaccuracy), a list has shape (2,). + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['attr_label'\]: The attr label of instance. We maintain a default attribute collection and mapping for attribute classification. + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['bbox_3d'\]: List of 7 numbers representing the 3D bounding box of the instance, in (x, y, z, l, h, w, yaw) order. + +Note: + +1. The differences between `bbox_3d` in `instances` and that in `cam_instances`. + Both `bbox_3d` have been converted to MMDet3D coordinate system, but `bboxes_3d` in `instances` is in LiDAR coordinate format and `bboxes_3d` in `cam_instances` is in Camera coordinate format. Mind the difference between them in 3D Box representation ('l, w, h' and 'l, h, w'). + +2. Here we only explain the data recorded in the training info files. The same applies to validation and testing set (the `.pkl` file of test set does not contains `instances` and `cam_instances`). + +The core function to get `nuscenes_infos_xxx.pkl` is [\_fill_trainval_infos](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/nuscenes_converter.py#L146). +Please refer to [nuscenes_converter.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/nuscenes_converter.py) for more details. + +## Training pipeline + +### LiDAR-Based Methods + +A typical training pipeline of LiDAR-based 3D detection (including multi-modality methods) on nuScenes is as below. + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +``` + +Compared to general cases, nuScenes has a specific `'LoadPointsFromMultiSweeps'` pipeline to load point clouds from consecutive frames. This is a common practice used in this setting. +Please refer to the nuScenes [original paper](https://arxiv.org/abs/1903.11027) for more details. +The default `use_dim` in `'LoadPointsFromMultiSweeps'` is `[0, 1, 2, 4]`, where the first 3 dimensions refer to point coordinates and the last refers to timestamp differences. +Intensity is not used by default due to its yielded noise when concatenating the points from different frames. 
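+
+If intensity turns out to be useful for your model, you could keep it by overriding `use_dim` in the sweep loader. A sketch under the assumption that dimension 3 is the intensity channel, as in the single-frame loader above:
+
+```python
+train_pipeline = [
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=5,
+        use_dim=5),
+    dict(
+        type='LoadPointsFromMultiSweeps',
+        sweeps_num=10,
+        use_dim=[0, 1, 2, 3, 4]),  # keep x, y, z, intensity and the timestamp difference
+    # ... keep the remaining transforms the same as above
+]
+```
+
+Note that the number of selected dimensions must match the input channels expected by the point or voxel encoder in the model config.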
+ +### Vision-Based Methods + +A typical training pipeline of image-based 3D detection on nuScenes is as below. + +```python +train_pipeline = [ + dict(type='LoadImageFromFileMono3D'), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=True, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='mmdet.Resize', scale=(1600, 900), keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'attr_labels', 'gt_bboxes_3d', + 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] +``` + +It follows the general pipeline of 2D detection while differs in some details: + +- It uses monocular pipelines to load images, which includes additional required information like camera intrinsics. +- It needs to load 3D annotations. +- Some data augmentation techniques need to be adjusted, such as `RandomFlip3D`. + Currently we do not support more augmentation methods, because how to transfer and apply other techniques is still under explored. + +## Evaluation + +An example to evaluate PointPillars with 8 GPUs with nuScenes metrics is as follows. + +```shell +bash ./tools/dist_test.sh configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py checkpoints/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405-2fa62f3d.pth 8 +``` + +## Metrics + +NuScenes proposes a comprehensive metric, namely nuScenes detection score (NDS), to evaluate different methods and set up the benchmark. +It consists of mean Average Precision (mAP), Average Translation Error (ATE), Average Scale Error (ASE), Average Orientation Error (AOE), Average Velocity Error (AVE) and Average Attribute Error (AAE). +Please refer to its [official website](https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Any) for more details. + +We also adopt this approach for evaluation on nuScenes. An example of printed evaluation results is as follows: + +``` +mAP: 0.3197 +mATE: 0.7595 +mASE: 0.2700 +mAOE: 0.4918 +mAVE: 1.3307 +mAAE: 0.1724 +NDS: 0.3905 +Eval time: 170.8s + +Per-class results: +Object Class AP ATE ASE AOE AVE AAE +car 0.503 0.577 0.152 0.111 2.096 0.136 +truck 0.223 0.857 0.224 0.220 1.389 0.179 +bus 0.294 0.855 0.204 0.190 2.689 0.283 +trailer 0.081 1.094 0.243 0.553 0.742 0.167 +construction_vehicle 0.058 1.017 0.450 1.019 0.137 0.341 +pedestrian 0.392 0.687 0.284 0.694 0.876 0.158 +motorcycle 0.317 0.737 0.265 0.580 2.033 0.104 +bicycle 0.308 0.704 0.299 0.892 0.683 0.010 +traffic_cone 0.555 0.486 0.309 nan nan nan +barrier 0.466 0.581 0.269 0.169 nan nan +``` + +## Testing and make a submission + +An example to test PointPillars on nuScenes with 8 GPUs and generate a submission to the leaderboard is as follows. + +You should modify the `jsonfile_prefix` in the `test_evaluator` of corresponding configuration. For example, adding `test_evaluator = dict(type='NuScenesMetric', jsonfile_prefix='work_dirs/pp-nus/results_eval.json')` or using `--cfg-options "test_evaluator.jsonfile_prefix=work_dirs/pp-nus/results_eval.json)` after the test command. 
+ +```shell +./tools/dist_test.sh configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py work_dirs/pp-nus/latest.pth 8 --cfg-options 'test_evaluator.jsonfile_prefix=work_dirs/pp-nus/results_eval' +``` + +Note that the testing info should be changed to that for testing set instead of validation set [here](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/datasets/nus-3d.py#L132). + +After generating the `work_dirs/pp-nus/results_eval.json`, you can compress it and submit it to nuScenes benchmark. Please refer to the [nuScenes official website](https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Any) for more information. + +We can also visualize the prediction results with our developed visualization tools. Please refer to the [visualization doc](https://mmdetection3d.readthedocs.io/en/latest/useful_tools.html#visualization) for more details. + +## Notes + +### Transformation between `NuScenesBox` and our `CameraInstanceBoxes`. + +In general, the main difference of `NuScenesBox` and our `CameraInstanceBoxes` is mainly reflected in the yaw definition. `NuScenesBox` defines the rotation with a quaternion or three Euler angles while ours only defines one yaw angle due to the practical scenario. It requires us to add some additional rotations manually in the pre-processing and post-processing, such as [here](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L673). + +In addition, please note that the definition of corners and locations are detached in the `NuScenesBox`. For example, in monocular 3D detection, the definition of the box location is in its camera coordinate (see its official [illustration](https://www.nuscenes.org/nuscenes#data-collection) for car setup), which is consistent with [ours](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/cam_box3d.py). In contrast, its corners are defined with the [convention](https://github.com/nutonomy/nuscenes-devkit/blob/02e9200218977193a1058dd7234f935834378319/python-sdk/nuscenes/utils/data_classes.py#L527) "x points forward, y to the left, z up". It results in different philosophy of dimension and rotation definitions from our `CameraInstanceBoxes`. An example to remove similar hacks is PR [#744](https://github.com/open-mmlab/mmdetection3d/pull/744). The same problem also exists in the LiDAR system. To deal with them, we typically add some transformation in the pre-processing and post-processing to guarantee the box will be in our coordinate system during the entire training and inference procedure. diff --git a/docs/en/advanced_guides/datasets/s3dis_sem_seg.md b/docs/en/advanced_guides/datasets/s3dis_sem_seg.md new file mode 100755 index 0000000..23331a0 --- /dev/null +++ b/docs/en/advanced_guides/datasets/s3dis_sem_seg.md @@ -0,0 +1,262 @@ +# S3DIS for 3D Semantic Segmentation + +## Dataset preparation + +For the overall process, please refer to the [README](https://github.com/open-mmlab/mmdetection3d/blob/master/data/s3dis/README.md/) page for S3DIS. + +### Export S3DIS data + +By exporting S3DIS data, we load the raw point cloud data and generate the relevant annotations including semantic labels and instance labels. 
+ +The directory structure before exporting should be as below: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── s3dis +│ │ ├── meta_data +│ │ ├── Stanford3dDataset_v1.2_Aligned_Version +│ │ │ ├── Area_1 +│ │ │ │ ├── conferenceRoom_1 +│ │ │ │ ├── office_1 +│ │ │ │ ├── ... +│ │ │ ├── Area_2 +│ │ │ ├── Area_3 +│ │ │ ├── Area_4 +│ │ │ ├── Area_5 +│ │ │ ├── Area_6 +│ │ ├── indoor3d_util.py +│ │ ├── collect_indoor3d_data.py +│ │ ├── README.md +``` + +Under folder `Stanford3dDataset_v1.2_Aligned_Version`, the rooms are spilted into 6 areas. We use 5 areas for training and 1 for evaluation (typically `Area_5`). Under the directory of each area, there are folders in which raw point cloud data and relevant annotations are saved. For instance, under folder `Area_1/office_1` the files are as below: + +- `office_1.txt`: A txt file storing coordinates and colors of each point in the raw point cloud data. + +- `Annotations/`: This folder contains txt files for different object instances. Each txt file represents one instance, e.g. + + - `chair_1.txt`: A txt file storing raw point cloud data of one chair in this room. + + If we concat all the txt files under `Annotations/`, we will get the same point cloud as denoted by `office_1.txt`. + +Export S3DIS data by running `python collect_indoor3d_data.py`. The main steps include: + +- Export original txt files to point cloud, instance label and semantic label. +- Save point cloud data and relevant annotation files. + +And the core function `export` in `indoor3d_util.py` is as follows: + +```python +def export(anno_path, out_filename): + """Convert original dataset files to points, instance mask and semantic + mask files. We aggregated all the points from each instance in the room. + + Args: + anno_path (str): path to annotations. e.g. Area_1/office_2/Annotations/ + out_filename (str): path to save collected points and labels. + file_format (str): txt or numpy, determines what file format to save. + + Note: + the points are shifted before save, the most negative point is now + at origin. + """ + points_list = [] + ins_idx = 1 # instance ids should be indexed from 1, so 0 is unannotated + + # an example of `anno_path`: Area_1/office_1/Annotations + # which contains all object instances in this room as txt files + for f in glob.glob(osp.join(anno_path, '*.txt')): + # get class name of this instance + one_class = osp.basename(f).split('_')[0] + if one_class not in class_names: # some rooms have 'staris' class + one_class = 'clutter' + points = np.loadtxt(f) + labels = np.ones((points.shape[0], 1)) * class2label[one_class] + ins_labels = np.ones((points.shape[0], 1)) * ins_idx + ins_idx += 1 + points_list.append(np.concatenate([points, labels, ins_labels], 1)) + + data_label = np.concatenate(points_list, 0) # [N, 8], (pts, rgb, sem, ins) + # align point cloud to the origin + xyz_min = np.amin(data_label, axis=0)[0:3] + data_label[:, 0:3] -= xyz_min + + np.save(f'{out_filename}_point.npy', data_label[:, :6].astype(np.float32)) + np.save(f'{out_filename}_sem_label.npy', data_label[:, 6].astype(np.int64)) + np.save(f'{out_filename}_ins_label.npy', data_label[:, 7].astype(np.int64)) + +``` + +where we load and concatenate all the point cloud instances under `Annotations/` to form raw point cloud and generate semantic/instance labels. After exporting each room, the point cloud data, semantic labels and instance labels should be saved in `.npy` files. 
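+
+After the export finishes, a quick way to sanity-check one room is to load the three `.npy` files back. A minimal sketch, where the output prefix `Area_1_office_1` is only an assumed example:
+
+```python
+import numpy as np
+
+prefix = 'Area_1_office_1'  # assumed output prefix of one exported room
+points = np.load(f'{prefix}_point.npy')         # (N, 6): xyz + rgb
+sem_label = np.load(f'{prefix}_sem_label.npy')  # (N,): semantic class ids
+ins_label = np.load(f'{prefix}_ins_label.npy')  # (N,): instance ids, 0 means unannotated
+print(points.shape, sem_label.shape, ins_label.shape)
+print('classes in this room:', np.unique(sem_label))
+```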
+ +### Create dataset + +```shell +python tools/create_data.py s3dis --root-path ./data/s3dis \ +--out-dir ./data/s3dis --extra-tag s3dis +``` + +The above exported point cloud files, semantic label files and instance label files are further saved in `.bin` format. Meanwhile `.pkl` info files are also generated for each area. + +The directory structure after process should be as below: + +``` +s3dis +├── meta_data +├── indoor3d_util.py +├── collect_indoor3d_data.py +├── README.md +├── Stanford3dDataset_v1.2_Aligned_Version +├── s3dis_data +├── points +│ ├── xxxxx.bin +├── instance_mask +│ ├── xxxxx.bin +├── semantic_mask +│ ├── xxxxx.bin +├── seg_info +│ ├── Area_1_label_weight.npy +│ ├── Area_1_resampled_scene_idxs.npy +│ ├── Area_2_label_weight.npy +│ ├── Area_2_resampled_scene_idxs.npy +│ ├── Area_3_label_weight.npy +│ ├── Area_3_resampled_scene_idxs.npy +│ ├── Area_4_label_weight.npy +│ ├── Area_4_resampled_scene_idxs.npy +│ ├── Area_5_label_weight.npy +│ ├── Area_5_resampled_scene_idxs.npy +│ ├── Area_6_label_weight.npy +│ ├── Area_6_resampled_scene_idxs.npy +├── s3dis_infos_Area_1.pkl +├── s3dis_infos_Area_2.pkl +├── s3dis_infos_Area_3.pkl +├── s3dis_infos_Area_4.pkl +├── s3dis_infos_Area_5.pkl +├── s3dis_infos_Area_6.pkl +``` + +- `points/xxxxx.bin`: The exported point cloud data. +- `instance_mask/xxxxx.bin`: The instance label for each point, value range: \[0, ${NUM_INSTANCES}\], 0: unannotated. +- `semantic_mask/xxxxx.bin`: The semantic label for each point, value range: \[0, 12\]. +- `s3dis_infos_Area_1.pkl`: Area 1 data infos, the detailed info of each room is as follows: + - info\['point_cloud'\]: {'num_features': 6, 'lidar_idx': sample_idx}. + - info\['pts_path'\]: The path of `points/xxxxx.bin`. + - info\['pts_instance_mask_path'\]: The path of `instance_mask/xxxxx.bin`. + - info\['pts_semantic_mask_path'\]: The path of `semantic_mask/xxxxx.bin`. +- `seg_info`: The generated infos to support semantic segmentation model training. + - `Area_1_label_weight.npy`: Weighting factor for each semantic class. Since the number of points in different classes varies greatly, it's a common practice to use label re-weighting to get a better performance. + - `Area_1_resampled_scene_idxs.npy`: Re-sampling index for each scene. Different rooms will be sampled multiple times according to their number of points to balance training data. + +## Training pipeline + +A typical training pipeline of S3DIS for 3D semantic segmentation is as below. 
+ +```python +class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter') +num_points = 4096 +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.0, + ignore_index=None, + use_normalized_coord=True, + enlarge_size=None, + min_unique_num=num_points // 4, + eps=0.0), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='GlobalRotScaleTrans', + rot_range=[-3.141592653589793, 3.141592653589793], # [-pi, pi] + scale_ratio_range=[0.8, 1.2], + translation_std=[0, 0, 0]), + dict( + type='RandomJitterPoints', + jitter_std=[0.01, 0.01, 0.01], + clip_range=[-0.05, 0.05]), + dict(type='RandomDropPointsColor', drop_ratio=0.2), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +``` + +- `PointSegClassMapping`: Only the valid category ids will be mapped to class label ids like \[0, 13) during training. Other class ids will be converted to `ignore_index` which equals to `13`. +- `IndoorPatchPointSample`: Crop a patch containing a fixed number of points from input point cloud. `block_size` indicates the size of the cropped block, typically `1.0` for S3DIS. +- `NormalizePointsColor`: Normalize the RGB color values of input point cloud by dividing `255`. +- Data augmentation: + - `GlobalRotScaleTrans`: randomly rotate and scale input point cloud. + - `RandomJitterPoints`: randomly jitter point cloud by adding different noise vector to each point. + - `RandomDropPointsColor`: set the colors of point cloud to all zeros by a probability `drop_ratio`. + +## Metrics + +Typically mean intersection over union (mIoU) is used for evaluation on S3DIS. In detail, we first compute IoU for multiple classes and then average them to get mIoU, please refer to [seg_eval.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/seg_eval.py). + +As introduced in section `Export S3DIS data`, S3DIS trains on 5 areas and evaluates on the remaining 1 area. But there are also other area split schemes in different papers. +To enable flexible combination of train-val splits, we use sub-dataset to represent one area, and concatenate them to form a larger training set. 
An example of training on area 1, 2, 3, 4, 6 and evaluating on area 5 is shown as below: + +```python +dataset_type = 'S3DISSegDataset' +data_root = './data/s3dis/' +class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter') +train_area = [1, 2, 3, 4, 6] +test_area = 5 +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area], + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=train_pipeline, + modality=input_modality, + ignore_index=len(class_names), + scene_idxs=[ + f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area + ], + test_mode=False)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_files=f's3dis_infos_Area_{test_area}.pkl', + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=test_pipeline, + modality=input_modality, + ignore_index=len(class_names), + scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy', + test_mode=True)) +val_dataloader = test_dataloader +``` + +where we specify the areas used for training/validation by setting `ann_files` and `scene_idxs` with lists that include corresponding paths. The train-val split can be simply modified via changing the `train_area` and `test_area` variables. diff --git a/docs/en/advanced_guides/datasets/scannet_det.md b/docs/en/advanced_guides/datasets/scannet_det.md new file mode 100755 index 0000000..959f6ef --- /dev/null +++ b/docs/en/advanced_guides/datasets/scannet_det.md @@ -0,0 +1,292 @@ +# ScanNet for 3D Object Detection + +## Dataset preparation + +For the overall process, please refer to the [README](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/data/scannet/README.md) page for ScanNet. + +### Export ScanNet point cloud data + +By exporting ScanNet data, we load the raw point cloud data and generate the relevant annotations including semantic labels, instance labels and ground truth bounding boxes. + +```shell +python batch_load_scannet_data.py +``` + +The directory structure before data preparation should be as below + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── scannet +│ │ ├── meta_data +│ │ ├── scans +│ │ │ ├── scenexxxx_xx +│ │ ├── batch_load_scannet_data.py +│ │ ├── load_scannet_data.py +│ │ ├── scannet_utils.py +│ │ ├── README.md +``` + +Under folder `scans` there are overall 1201 train and 312 validation folders in which raw point cloud data and relevant annotations are saved. For instance, under folder `scene0001_01` the files are as below: + +- `scene0001_01_vh_clean_2.ply`: Mesh file storing coordinates and colors of each vertex. The mesh's vertices are taken as raw point cloud data. +- `scene0001_01.aggregation.json`: Aggregation file including object ID, segments ID and label. +- `scene0001_01_vh_clean_2.0.010000.segs.json`: Segmentation file including segments ID and vertex. +- `scene0001_01.txt`: Meta file including axis-aligned matrix, etc. +- `scene0001_01_vh_clean_2.labels.ply`: Annotation file containing the category of each vertex. + +Export ScanNet data by running `python batch_load_scannet_data.py`. 
The main steps include: + +- Export original files to point cloud, instance label, semantic label and bounding box file. +- Downsample raw point cloud and filter invalid classes. +- Save point cloud data and relevant annotation files. + +And the core function `export` in `load_scannet_data.py` is as follows: + +```python +def export(mesh_file, + agg_file, + seg_file, + meta_file, + label_map_file, + output_file=None, + test_mode=False): + + # label map file: ./data/scannet/meta_data/scannetv2-labels.combined.tsv + # the various label standards in the label map file, e.g. 'nyu40id' + label_map = scannet_utils.read_label_mapping( + label_map_file, label_from='raw_category', label_to='nyu40id') + # load raw point cloud data, 6-dims feature: XYZRGB + mesh_vertices = scannet_utils.read_mesh_vertices_rgb(mesh_file) + + # Load scene axis alignment matrix: a 4x4 transformation matrix + # transform raw points in sensor coordinate system to a coordinate system + # which is axis-aligned with the length/width of the room + lines = open(meta_file).readlines() + # test set data doesn't have align_matrix + axis_align_matrix = np.eye(4) + for line in lines: + if 'axisAlignment' in line: + axis_align_matrix = [ + float(x) + for x in line.rstrip().strip('axisAlignment = ').split(' ') + ] + break + axis_align_matrix = np.array(axis_align_matrix).reshape((4, 4)) + + # perform global alignment of mesh vertices + pts = np.ones((mesh_vertices.shape[0], 4)) + # raw point cloud in homogeneous coordinates, each row: [x, y, z, 1] + pts[:, 0:3] = mesh_vertices[:, 0:3] + # transform raw mesh vertices to aligned mesh vertices + pts = np.dot(pts, axis_align_matrix.transpose()) # Nx4 + aligned_mesh_vertices = np.concatenate([pts[:, 0:3], mesh_vertices[:, 3:]], + axis=1) + + # Load semantic and instance labels + if not test_mode: + # each object has one semantic label and consists of several segments + object_id_to_segs, label_to_segs = read_aggregation(agg_file) + # many points may belong to the same segment + seg_to_verts, num_verts = read_segmentation(seg_file) + label_ids = np.zeros(shape=(num_verts), dtype=np.uint32) + object_id_to_label_id = {} + for label, segs in label_to_segs.items(): + label_id = label_map[label] + for seg in segs: + verts = seg_to_verts[seg] + # each point has one semantic label + label_ids[verts] = label_id + instance_ids = np.zeros( + shape=(num_verts), dtype=np.uint32) # 0: unannotated + for object_id, segs in object_id_to_segs.items(): + for seg in segs: + verts = seg_to_verts[seg] + # object_id is 1-indexed, i.e. 1,2,3,.,,,.NUM_INSTANCES + # each point belongs to one object + instance_ids[verts] = object_id + if object_id not in object_id_to_label_id: + object_id_to_label_id[object_id] = label_ids[verts][0] + # bbox format is [x, y, z, x_size, y_size, z_size, label_id] + # [x, y, z] is gravity center of bbox, [x_size, y_size, z_size] is axis-aligned + # [label_id] is semantic label id in 'nyu40id' standard + # Note: since 3D bbox is axis-aligned, the yaw is 0. + unaligned_bboxes = extract_bbox(mesh_vertices, object_id_to_segs, + object_id_to_label_id, instance_ids) + aligned_bboxes = extract_bbox(aligned_mesh_vertices, object_id_to_segs, + object_id_to_label_id, instance_ids) + ... + + return mesh_vertices, label_ids, instance_ids, unaligned_bboxes, \ + aligned_bboxes, object_id_to_label_id, axis_align_matrix + +``` + +After exporting each scan, the raw point cloud could be downsampled, e.g. 
to 50000, if the number of points is too large (the raw point cloud won't be downsampled if it is also used in the 3D semantic segmentation task). In addition, invalid semantic labels outside the `nyu40id` standard or the optional `DONOT CARE` classes should be filtered. Finally, the point cloud data, semantic labels, instance labels and ground truth bounding boxes should be saved in `.npy` files. + +### Export ScanNet RGB data (optional) + +By exporting ScanNet RGB data, for each scene we load a set of RGB images with corresponding 4x4 pose matrices, and a single 4x4 camera intrinsic matrix. Note that this step is optional and can be skipped if you do not plan to use multi-view detection. + +```shell +python extract_posed_images.py +``` + +Each of the 1201 train, 312 validation and 100 test scenes contains a single `.sens` file. For instance, for scene `0001_01` we have `data/scannet/scans/scene0001_01/0001_01.sens`. For this scene all images and poses are extracted to `data/scannet/posed_images/scene0001_01`. Specifically, there will be 300 image files xxxxx.jpg, 300 camera pose files xxxxx.txt and a single `intrinsic.txt` file. Typically, a single scene contains several thousand images; by default, we extract only 300 of them, which takes less than 100 GB of disk space in total. To extract more images, use the `--max-images-per-scene` parameter. + +### Create dataset + +```shell +python tools/create_data.py scannet --root-path ./data/scannet \ +--out-dir ./data/scannet --extra-tag scannet +``` + +The above exported point cloud files, semantic label files and instance label files are further saved in `.bin` format. Meanwhile, `.pkl` info files are also generated for training and validation. The core function `process_single_scene` for getting data infos is as follows. + +```python +def process_single_scene(sample_idx): + + # save point cloud, instance label and semantic label in .bin file respectively, get info['pts_path'], info['pts_instance_mask_path'] and info['pts_semantic_mask_path'] + ...
+ + # get annotations + if has_label: + annotations = {} + # box is of shape [k, 6 + class] + aligned_box_label = self.get_aligned_box_label(sample_idx) + unaligned_box_label = self.get_unaligned_box_label(sample_idx) + annotations['gt_num'] = aligned_box_label.shape[0] + if annotations['gt_num'] != 0: + aligned_box = aligned_box_label[:, :-1] # k, 6 + unaligned_box = unaligned_box_label[:, :-1] + classes = aligned_box_label[:, -1] # k + annotations['name'] = np.array([ + self.label2cat[self.cat_ids2class[classes[i]]] + for i in range(annotations['gt_num']) + ]) + # default names are given to aligned bbox for compatibility + # we also save unaligned bbox info with marked names + annotations['location'] = aligned_box[:, :3] + annotations['dimensions'] = aligned_box[:, 3:6] + annotations['gt_boxes_upright_depth'] = aligned_box + annotations['unaligned_location'] = unaligned_box[:, :3] + annotations['unaligned_dimensions'] = unaligned_box[:, 3:6] + annotations[ + 'unaligned_gt_boxes_upright_depth'] = unaligned_box + annotations['index'] = np.arange( + annotations['gt_num'], dtype=np.int32) + annotations['class'] = np.array([ + self.cat_ids2class[classes[i]] + for i in range(annotations['gt_num']) + ]) + axis_align_matrix = self.get_axis_align_matrix(sample_idx) + annotations['axis_align_matrix'] = axis_align_matrix # 4x4 + info['annos'] = annotations + return info +``` + +The directory structure after process should be as below: + +``` +scannet +├── meta_data +├── batch_load_scannet_data.py +├── load_scannet_data.py +├── scannet_utils.py +├── README.md +├── scans +├── scans_test +├── scannet_instance_data +├── points +│ ├── xxxxx.bin +├── instance_mask +│ ├── xxxxx.bin +├── semantic_mask +│ ├── xxxxx.bin +├── seg_info +│ ├── train_label_weight.npy +│ ├── train_resampled_scene_idxs.npy +│ ├── val_label_weight.npy +│ ├── val_resampled_scene_idxs.npy +├── posed_images +│ ├── scenexxxx_xx +│ │ ├── xxxxxx.txt +│ │ ├── xxxxxx.jpg +│ │ ├── intrinsic.txt +├── scannet_infos_train.pkl +├── scannet_infos_val.pkl +├── scannet_infos_test.pkl +``` + +- `points/xxxxx.bin`: The `axis-unaligned` point cloud data after downsample. Since ScanNet 3D detection task takes axis-aligned point clouds as input, while ScanNet 3D semantic segmentation task takes unaligned points, we choose to store unaligned points and their axis-align transform matrix. Note: the points would be axis-aligned in pre-processing pipeline [`GlobalAlignment`](https://github.com/open-mmlab/mmdetection3d/blob/9f0b01caf6aefed861ef4c3eb197c09362d26b32/mmdet3d/datasets/pipelines/transforms_3d.py#L423) of 3D detection task. +- `instance_mask/xxxxx.bin`: The instance label for each point, value range: \[0, NUM_INSTANCES\], 0: unannotated. +- `semantic_mask/xxxxx.bin`: The semantic label for each point, value range: \[1, 40\], i.e. `nyu40id` standard. Note: the `nyu40id` ID will be mapped to train ID in train pipeline `PointSegClassMapping`. +- `posed_images/scenexxxx_xx`: The set of `.jpg` images with `.txt` 4x4 poses and the single `.txt` file with camera intrinsic matrix. +- `scannet_infos_train.pkl`: The train data infos, the detailed info of each scan is as follows: + - info\['lidar_points'\]: A dict containing all information related to the lidar points. + - info\['lidar_points'\]\['lidar_path'\]: The filename of the lidar point cloud data. + - info\['lidar_points'\]\['num_pts_feats'\]: The feature dimension of point. + - info\['lidar_points'\]\['axis_align_matrix'\]: The transformation matrix to align the axis. 
+ - info\['pts_semantic_mask_path'\]: The filename of the semantic mask annotation. + - info\['pts_instance_mask_path'\]: The filename of the instance mask annotation. + - info\['instances'\]: A list of dict contains all annotations, each dict contains all annotation information of single instance. For the i-th instance: + - info\['instances'\]\[i\]\['bbox_3d'\]: List of 6 numbers representing the axis-aligned 3D bounding box of the instance in depth coordinate system, in (x, y, z, l, w, h) order. + - info\['instances'\]\[i\]\['bbox_label_3d'\]: The label of each 3d bounding boxes. +- `scannet_infos_val.pkl`: The val data infos, which shares the same format as `scannet_infos_train.pkl`. +- `scannet_infos_test.pkl`: The test data infos, which almost shares the same format as `scannet_infos_train.pkl` except for the lack of annotation. + +## Training pipeline + +A typical training pipeline of ScanNet for 3D detection is as follows. + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=40000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0], + shift_height=True), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +``` + +- `GlobalAlignment`: The previous point cloud would be axis-aligned using the axis-aligned matrix. +- `PointSegClassMapping`: Only the valid category IDs will be mapped to class label IDs like \[0, 18) during training. +- Data augmentation: + - `PointSample`: downsample the input point cloud. + - `RandomFlip3D`: randomly flip the input point cloud horizontally or vertically. + - `GlobalRotScaleTrans`: rotate the input point cloud, usually in the range of \[-5, 5\] (degrees) for ScanNet; then scale the input point cloud, usually by 1.0 for ScanNet (which means no scaling); finally translate the input point cloud, usually by 0 for ScanNet (which means no translation). + +## Metrics + +Typically mean Average Precision (mAP) is used for evaluation on ScanNet, e.g. `mAP@0.25` and `mAP@0.5`. In detail, a generic function to compute precision and recall for 3D object detection for multiple classes is called. Please refer to [indoor_eval](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/indoor_eval.py) for more details. + +As introduced in section `Export ScanNet data`, all ground truth 3D bounding box are axis-aligned, i.e. the yaw is zero. So the yaw target of network predicted 3D bounding box is also zero and axis-aligned 3D Non-Maximum Suppression (NMS), which is regardless of rotation, is adopted during post-processing . diff --git a/docs/en/advanced_guides/datasets/scannet_sem_seg.md b/docs/en/advanced_guides/datasets/scannet_sem_seg.md new file mode 100755 index 0000000..b56a114 --- /dev/null +++ b/docs/en/advanced_guides/datasets/scannet_sem_seg.md @@ -0,0 +1,128 @@ +# ScanNet for 3D Semantic Segmentation + +## Dataset preparation + +The overall process is similar to ScanNet 3D detection task. 
Please refer to this [section](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/en/advanced_guides/datasets/scannet_det.md#dataset-preparation). Only a few differences and additional information about the 3D semantic segmentation data will be listed below. + +### Export ScanNet data + +Since ScanNet provides an online benchmark for 3D semantic segmentation evaluation on the test set, we also need to download the test scans and put them under the `scannet` folder. + +The directory structure before data preparation should be as below: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│   ├── scannet +│   │   ├── meta_data +│   │   ├── scans +│   │   │   ├── scenexxxx_xx +│   │   ├── scans_test +│   │   │   ├── scenexxxx_xx +│   │   ├── batch_load_scannet_data.py +│   │   ├── load_scannet_data.py +│   │   ├── scannet_utils.py +│   │   ├── README.md +``` + +Under the folder `scans_test` there are 100 test folders, in which only the raw point cloud data and its meta file are saved. For instance, under the folder `scene0707_00` the files are as below: + +- `scene0707_00_vh_clean_2.ply`: Mesh file storing coordinates and colors of each vertex. The mesh's vertices are taken as raw point cloud data. +- `scene0707_00.txt`: Meta file including sensor parameters, etc. Note: different from data under `scans`, the axis-aligned matrix is not provided for test scans. + +Export ScanNet data by running `python batch_load_scannet_data.py`. Note: only point cloud data will be saved for test set scans because no annotations are provided. + +### Create dataset + +Similar to the 3D detection task, we create the dataset by running `python tools/create_data.py scannet --root-path ./data/scannet --out-dir ./data/scannet --extra-tag scannet`. +The directory structure after processing should be as below: + +``` +scannet +├── scannet_utils.py +├── batch_load_scannet_data.py +├── load_scannet_data.py +├── README.md +├── scans +├── scans_test +├── scannet_instance_data +├── points +│   ├── xxxxx.bin +├── instance_mask +│   ├── xxxxx.bin +├── semantic_mask +│   ├── xxxxx.bin +├── seg_info +│   ├── train_label_weight.npy +│   ├── train_resampled_scene_idxs.npy +│   ├── val_label_weight.npy +│   ├── val_resampled_scene_idxs.npy +├── scannet_infos_train.pkl +├── scannet_infos_val.pkl +├── scannet_infos_test.pkl +``` + +- `seg_info`: The generated infos to support semantic segmentation model training. + - `train_label_weight.npy`: Weighting factor for each semantic class. Since the number of points in different classes varies greatly, it's a common practice to use label re-weighting to get a better performance. + - `train_resampled_scene_idxs.npy`: Re-sampling index for each scene. Different rooms will be sampled multiple times according to their number of points to balance training data (see the sketch below).
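+
+These two files are produced automatically by the data converter, but the idea behind them can be summarized in a short sketch. The snippet below only illustrates how class weights and resampled scene indices could be derived from per-scene semantic labels; the `scene_labels` input, the log-smoothed inverse-frequency weighting and the `avg_num_points` value are assumptions for illustration, not the converter's actual implementation.
+
+```python
+import numpy as np
+
+
+def build_seg_info(scene_labels, num_classes, avg_num_points=5000):
+    """Illustrative computation of label weights and resampled scene indices.
+
+    Args:
+        scene_labels (list[np.ndarray]): Per-scene semantic labels with
+            values in [0, num_classes).
+        num_classes (int): Number of valid classes.
+        avg_num_points (int): Assumed number of points drawn per sample.
+    """
+    # count how many points each class has over all scenes
+    counts = np.zeros(num_classes, dtype=np.int64)
+    for labels in scene_labels:
+        counts += np.bincount(labels, minlength=num_classes)
+    freq = counts / counts.sum()
+    # rarer classes get larger weights (log-smoothed inverse frequency)
+    label_weight = (1.0 / np.log(1.2 + freq)).astype(np.float32)
+
+    # repeat each scene roughly in proportion to its number of points
+    scene_idxs = []
+    for idx, labels in enumerate(scene_labels):
+        repeat = max(1, round(len(labels) / avg_num_points))
+        scene_idxs.extend([idx] * repeat)
+
+    return label_weight, np.array(scene_idxs, dtype=np.int64)
+```
+
+Conceptually, training then iterates over the (repeated) scene indices, so larger rooms contribute more samples per epoch.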
+ +## Training pipeline + +A typical training pipeline of ScanNet for 3D semantic segmentation is as below: + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.5, + ignore_index=len(class_names), + use_normalized_coord=False, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +``` + +- `PointSegClassMapping`: Only the valid category ids will be mapped to class label ids like \[0, 20) during training. Other class ids will be converted to `ignore_index` which equals to `20`. +- `IndoorPatchPointSample`: Crop a patch containing a fixed number of points from input point cloud. `block_size` indicates the size of the cropped block, typically `1.5` for ScanNet. +- `NormalizePointsColor`: Normalize the RGB color values of input point cloud by dividing `255`. + +## Metrics + +Typically mean Intersection over Union (mIoU) is used for evaluation on ScanNet. In detail, we first compute IoU for multiple classes and then average them to get mIoU, please refer to [seg_eval](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/seg_eval.py). + +## Testing and Making a Submission + +By default, our codebase evaluates semantic segmentation results on the validation set. +If you would like to test the model performance on the online benchmark, add `--format-only` flag in the evaluation script and change `ann_file=data_root + 'scannet_infos_val.pkl'` to `ann_file=data_root + 'scannet_infos_test.pkl'` in the ScanNet dataset's [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/datasets/scannet_seg-3d-20class.py#L126). Remember to specify the `txt_prefix` as the directory to save the testing results. + +Taking PointNet++ (SSG) on ScanNet for example, the following command can be used to do inference on test set: + +``` +./tools/dist_test.sh configs/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class.py \ + work_dirs/pointnet2_ssg/latest.pth --format-only \ + --eval-options txt_prefix=work_dirs/pointnet2_ssg/test_submission +``` + +After generating the results, you can basically compress the folder and upload to the [ScanNet evaluation server](http://kaldir.vc.in.tum.de/scannet_benchmark/semantic_label_3d). diff --git a/docs/en/advanced_guides/datasets/sunrgbd_det.md b/docs/en/advanced_guides/datasets/sunrgbd_det.md new file mode 100755 index 0000000..9103fb4 --- /dev/null +++ b/docs/en/advanced_guides/datasets/sunrgbd_det.md @@ -0,0 +1,250 @@ +# SUN RGB-D for 3D Object Detection + +## Dataset preparation + +For the overall process, please refer to the [README](https://github.com/open-mmlab/mmdetection3d/blob/master/data/sunrgbd/README.md) page for SUN RGB-D. + +### Download SUN RGB-D data and toolbox + +Download SUNRGBD data [HERE](http://rgbd.cs.princeton.edu/data/). Then, move `SUNRGBD.zip`, `SUNRGBDMeta2DBB_v2.mat`, `SUNRGBDMeta3DBB_v2.mat` and `SUNRGBDtoolbox.zip` to the `OFFICIAL_SUNRGBD` folder, unzip the zip files. 
+ +The directory structure before data preparation should be as below: + +``` +sunrgbd +├── README.md +├── matlab +│ ├── extract_rgbd_data_v1.m +│ ├── extract_rgbd_data_v2.m +│ ├── extract_split.m +├── OFFICIAL_SUNRGBD +│ ├── SUNRGBD +│ ├── SUNRGBDMeta2DBB_v2.mat +│ ├── SUNRGBDMeta3DBB_v2.mat +│ ├── SUNRGBDtoolbox +``` + +### Extract data and annotations for 3D detection from raw data + +Extract SUN RGB-D annotation data from raw annotation data by running (this requires MATLAB installed on your machine): + +```bash +matlab -nosplash -nodesktop -r 'extract_split;quit;' +matlab -nosplash -nodesktop -r 'extract_rgbd_data_v2;quit;' +matlab -nosplash -nodesktop -r 'extract_rgbd_data_v1;quit;' +``` + +The main steps include: + +- Extract train and val split. +- Extract data for 3D detection from raw data. +- Extract and format detection annotation from raw data. + +The main component of `extract_rgbd_data_v2.m` which extracts point cloud data from depth map is as follows: + +```matlab +data = SUNRGBDMeta(imageId); +data.depthpath(1:16) = ''; +data.depthpath = strcat('../OFFICIAL_SUNRGBD', data.depthpath); +data.rgbpath(1:16) = ''; +data.rgbpath = strcat('../OFFICIAL_SUNRGBD', data.rgbpath); + +% extract point cloud from depth map +[rgb,points3d,depthInpaint,imsize]=read3dPoints(data); +rgb(isnan(points3d(:,1)),:) = []; +points3d(isnan(points3d(:,1)),:) = []; +points3d_rgb = [points3d, rgb]; + +% MAT files are 3x smaller than TXT files. In Python we can use +% scipy.io.loadmat('xxx.mat')['points3d_rgb'] to load the data. +mat_filename = strcat(num2str(imageId,'%06d'), '.mat'); +txt_filename = strcat(num2str(imageId,'%06d'), '.txt'); +% save point cloud data +parsave(strcat(depth_folder, mat_filename), points3d_rgb); +``` + +The main component of `extract_rgbd_data_v1.m` which extracts annotation is as follows: + +```matlab +% Write 2D and 3D box label +data2d = data; +fid = fopen(strcat(det_label_folder, txt_filename), 'w'); +for j = 1:length(data.groundtruth3DBB) + centroid = data.groundtruth3DBB(j).centroid; % 3D bbox center + classname = data.groundtruth3DBB(j).classname; % class name + orientation = data.groundtruth3DBB(j).orientation; % 3D bbox orientation + coeffs = abs(data.groundtruth3DBB(j).coeffs); % 3D bbox size + box2d = data2d.groundtruth2DBB(j).gtBb2D; % 2D bbox + fprintf(fid, '%s %d %d %d %d %f %f %f %f %f %f %f %f\n', classname, box2d(1), box2d(2), box2d(3), box2d(4), centroid(1), centroid(2), centroid(3), coeffs(1), coeffs(2), coeffs(3), orientation(1), orientation(2)); +end +fclose(fid); +``` + +The above two scripts call functions such as `read3dPoints` from the [toolbox](https://rgbd.cs.princeton.edu/data/SUNRGBDtoolbox.zip) provided by SUN RGB-D. + +The directory structure after extraction should be as follows. 
+ +``` +sunrgbd +├── README.md +├── matlab +│   ├── extract_rgbd_data_v1.m +│   ├── extract_rgbd_data_v2.m +│   ├── extract_split.m +├── OFFICIAL_SUNRGBD +│   ├── SUNRGBD +│   ├── SUNRGBDMeta2DBB_v2.mat +│   ├── SUNRGBDMeta3DBB_v2.mat +│   ├── SUNRGBDtoolbox +├── sunrgbd_trainval +│   ├── calib +│   ├── depth +│   ├── image +│   ├── label +│   ├── label_v1 +│   ├── seg_label +│   ├── train_data_idx.txt +│   ├── val_data_idx.txt +``` + +Each of the following folders contains 5285 train files and 5050 val files overall: + +- `calib`: Camera calibration information in `.txt` +- `depth`: Point cloud saved in `.mat` (xyz+rgb) +- `image`: Image data in `.jpg` +- `label`: Detection annotation data in `.txt` (version 2) +- `label_v1`: Detection annotation data in `.txt` (version 1) +- `seg_label`: Segmentation annotation data in `.txt` + +Currently, we use v1 data for training and testing, so the version 2 labels are unused. + +### Create dataset + +Please run the command below to create the dataset. + +```shell +python tools/create_data.py sunrgbd --root-path ./data/sunrgbd \ +--out-dir ./data/sunrgbd --extra-tag sunrgbd +``` + +or (if in a slurm environment) + +``` +bash tools/create_data.sh sunrgbd +``` + +The above point cloud data are further saved in `.bin` format. Meanwhile, `.pkl` info files are also generated for saving annotations and metadata. + +The directory structure after processing should be as follows. + +``` +sunrgbd +├── README.md +├── matlab +│   ├── ... +├── OFFICIAL_SUNRGBD +│   ├── ... +├── sunrgbd_trainval +│   ├── ... +├── points +├── sunrgbd_infos_train.pkl +├── sunrgbd_infos_val.pkl +``` + +- `points/xxxxxx.bin`: The point cloud data after downsampling. +- `sunrgbd_infos_train.pkl`: The train data infos; the detailed info of each scene is as follows: + - info\['lidar_points'\]: A dict containing all information related to the lidar points. + - info\['lidar_points'\]\['num_pts_feats'\]: The feature dimension of point. + - info\['lidar_points'\]\['lidar_path'\]: The filename of the lidar point cloud data. + - info\['images'\]: A dict containing all information related to the image data. + - info\['images'\]\['CAM0'\]\['img_path'\]: The filename of the image. + - info\['images'\]\['CAM0'\]\['depth2img'\]: Transformation matrix from depth to image with shape (4, 4). + - info\['images'\]\['CAM0'\]\['height'\]: The height of the image. + - info\['images'\]\['CAM0'\]\['width'\]: The width of the image. + - info\['instances'\]: A list of dicts containing all the annotations of this frame. Each dict corresponds to the annotations of a single instance. For the i-th instance: + - info\['instances'\]\[i\]\['bbox_3d'\]: List of 7 numbers representing the 3D bounding box of the instance in the depth coordinate system. + - info\['instances'\]\[i\]\['bbox'\]: List of 4 numbers representing the 2D bounding box of the instance, in (x1, y1, x2, y2) order. + - info\['instances'\]\[i\]\['bbox_label_3d'\]: An int indicating the 3D label of the instance; -1 indicates the ignore class. + - info\['instances'\]\[i\]\['bbox_label'\]: An int indicating the 2D label of the instance; -1 indicates the ignore class. +- `sunrgbd_infos_val.pkl`: The val data infos, which shares the same format as `sunrgbd_infos_train.pkl`. + +## Train pipeline + +A typical train pipeline of SUN RGB-D for point-cloud-only 3D detection is as follows.
+ +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='LoadAnnotations3D'), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + shift_height=True), + dict(type='PointSample', num_points=20000), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +``` + +Data augmentation for point clouds: + +- `RandomFlip3D`: randomly flip the input point cloud horizontally or vertically. +- `GlobalRotScaleTrans`: rotate the input point cloud, usually in the range of \[-30, 30\] (degrees) for SUN RGB-D; then scale the input point cloud, usually in the range of \[0.85, 1.15\] for SUN RGB-D; finally translate the input point cloud, usually by 0 for SUN RGB-D (which means no translation). +- `PointSample`: downsample the input point cloud. + +A typical train pipeline of SUN RGB-D for multi-modality (point cloud and image) 3D detection is as follows. + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations3D'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 600), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.0), + dict(type='Pad', size_divisor=32), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + shift_height=True), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d','img', 'gt_bboxes', 'gt_bboxes_labels']) +] +``` + +Data augmentation for images: + +- `Resize`: resize the input image, `keep_ratio=True` means the ratio of the image is kept unchanged. +- `RandomFlip`: randomly flip the input image. + +The image augmentation functions are implemented in [MMDetection](https://github.com/open-mmlab/mmdetection/tree/dev-3.x/mmdet/datasets/transforms). + +## Metrics + +Same as ScanNet, typically mean Average Precision (mAP) is used for evaluation on SUN RGB-D, e.g. `mAP@0.25` and `mAP@0.5`. In detail, a generic function to compute precision and recall for 3D object detection for multiple classes is called. Please refer to [indoor_eval](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/indoor_eval.py) for more details. + +Since SUN RGB-D consists of image data, detection on image data is also feasible. For instance, in ImVoteNet, we first train an image detector, and we also use mAP for evaluation, e.g. `mAP@0.5`. We use the `eval_map` function from [MMDetection](https://github.com/open-mmlab/mmdetection) to calculate mAP. diff --git a/docs/en/advanced_guides/datasets/waymo_det.md b/docs/en/advanced_guides/datasets/waymo_det.md new file mode 100755 index 0000000..2e52b9d --- /dev/null +++ b/docs/en/advanced_guides/datasets/waymo_det.md @@ -0,0 +1,168 @@ +# Waymo Dataset + +This page provides specific tutorials about the usage of MMDetection3D for Waymo dataset. + +## Prepare dataset + +Before preparing Waymo dataset, if you only installed requirements in `requirements/build.txt` and `requirements/runtime.txt` before, please install the official package for this dataset at first by running + +``` +# tf 2.1.0. 
+pip install waymo-open-dataset-tf-2-1-0==1.2.0 +# tf 2.0.0 +# pip install waymo-open-dataset-tf-2-0-0==1.2.0 +# tf 1.15.0 +# pip install waymo-open-dataset-tf-1-15-0==1.2.0 +``` + +or + +``` +pip install -r requirements/optional.txt +``` + +As with the general way of preparing datasets, it is recommended to symlink the dataset root to `$MMDETECTION3D/data`. +Since the original Waymo data format is based on `tfrecord`, we need to preprocess the raw data for convenient usage in the training and evaluation procedures. Our approach is to convert them into KITTI format. + +The folder structure should be organized as follows before our processing. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│   ├── waymo +│   │   ├── waymo_format +│   │   │   ├── training +│   │   │   ├── validation +│   │   │   ├── testing +│   │   │   ├── gt.bin +│   │   ├── kitti_format +│   │   │   ├── ImageSets + +``` + +You can download Waymo open dataset V1.2 [HERE](https://waymo.com/open/download/) and its data split [HERE](https://drive.google.com/drive/folders/18BVuF_RYJF0NjZpt8SnfzANiakoRMf0o?usp=sharing). Then put the `tfrecord` files into the corresponding folders in `data/waymo/waymo_format/` and the data split txt files into `data/waymo/kitti_format/ImageSets`. Download the ground truth bin files for the validation set [HERE](https://console.cloud.google.com/storage/browser/waymo_open_dataset_v_1_2_0/validation/ground_truth_objects) and put them into `data/waymo/waymo_format/`. A tip: you can use `gsutil` to download this large-scale dataset from the command line. You can take this [tool](https://github.com/RalphMao/Waymo-Dataset-Tool) as an example for more details. Subsequently, prepare Waymo data by running + +```bash +python tools/create_data.py waymo --root-path ./data/waymo/ --out-dir ./data/waymo/ --workers 128 --extra-tag waymo +``` + +Note that if your local disk does not have enough space for saving the converted data, you can change the `--out-dir` to anywhere else. Just remember to create the folders and prepare the data there in advance, and link them back to `data/waymo/kitti_format` after the data conversion. + +After the data conversion, the folder structure and info files should be organized as below. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│   ├── waymo +│   │   ├── waymo_format +│   │   │   ├── training +│   │   │   ├── validation +│   │   │   ├── testing +│   │   │   ├── gt.bin +│   │   ├── kitti_format +│   │   │   ├── ImageSets +│   │   │   ├── training +│   │   │   │   ├── calib +│   │   │   │   ├── image_0 +│   │   │   │   ├── image_1 +│   │   │   │   ├── image_2 +│   │   │   │   ├── image_3 +│   │   │   │   ├── image_4 +│   │   │   │   ├── label_0 +│   │   │   │   ├── label_1 +│   │   │   │   ├── label_2 +│   │   │   │   ├── label_3 +│   │   │   │   ├── label_4 +│   │   │   │   ├── label_all +│   │   │   │   ├── pose +│   │   │   │   ├── velodyne +│   │   │   ├── testing +│   │   │   │   ├── (the same as training) +│   │   │   ├── waymo_gt_database +│   │   │   ├── waymo_infos_trainval.pkl +│   │   │   ├── waymo_infos_train.pkl +│   │   │   ├── waymo_infos_val.pkl +│   │   │   ├── waymo_infos_test.pkl +│   │   │   ├── waymo_dbinfos_train.pkl + +``` + +Because there are several cameras, we store the images and the labels that can be projected to each camera respectively, and we save the pose for further usage of consecutive-frame point clouds. We use a coding way `{a}{bbb}{ccc}` to name the data for each frame, where `a` is the prefix for different split (`0` for training, `1` for validation and `2` for testing), `bbb` for segment index and `ccc` for frame index. You can easily locate the required frame according to this naming rule.
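+
+For illustration, the small helper below splits such a frame name back into its parts; it is only a sketch based on the naming rule described above and is not part of the provided tools.
+
+```python
+def parse_frame_name(name: str):
+    """Split a frame name like '1000123' that follows `{a}{bbb}{ccc}`.
+
+    `a` is the 1-digit split prefix, `bbb` the 3-digit segment index and
+    `ccc` the 3-digit frame index.
+    """
+    splits = {'0': 'training', '1': 'validation', '2': 'testing'}
+    return splits[name[0]], int(name[1:4]), int(name[4:7])
+
+
+# hypothetical file: data/waymo/kitti_format/training/velodyne/1000123.bin
+print(parse_frame_name('1000123'))  # ('validation', 0, 123)
+```
+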
We gather the data for training and validation together as KITTI and store the indices for the different splits in the `ImageSets` files. + +## Training + +Considering that there are many similar frames in the original dataset, we basically use a subset of them to train our model. In our preliminary baselines, we load one frame out of every five, and thanks to our hyperparameter settings and data augmentation, we obtain a better result than the performance reported in the original dataset [paper](https://arxiv.org/pdf/1912.04838.pdf). For more details about the configuration and performance, please refer to the README.md in `configs/pointpillars/`. A more complete benchmark based on other settings and methods is coming soon. + +## Evaluation + +For evaluation on Waymo, please follow the [instruction](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md) to build the binary file `compute_detection_metrics_main` for metrics computation and put it into `mmdet3d/evaluation/functional/waymo_utils/`. Basically, you can follow the commands below to install `bazel` and build the file. + +```shell +# download the code and enter the base directory +git clone https://github.com/waymo-research/waymo-open-dataset.git waymo-od +# git clone https://github.com/Abyssaledge/waymo-open-dataset-master waymo-od # if you want to use faster multi-thread version. +cd waymo-od +git checkout remotes/origin/master + +# use the Bazel build system +sudo apt-get install --assume-yes pkg-config zip g++ zlib1g-dev unzip python3 python3-pip +BAZEL_VERSION=3.1.0 +wget https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh +sudo bash bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh +sudo apt install build-essential + +# configure .bazelrc +./configure.sh +# delete previous bazel outputs and reset internal caches +bazel clean + +bazel build waymo_open_dataset/metrics/tools/compute_detection_metrics_main +cp bazel-bin/waymo_open_dataset/metrics/tools/compute_detection_metrics_main ../mmdetection3d/mmdet3d/evaluation/functional/waymo_utils/ +``` + +Then you can evaluate your models on Waymo. An example of evaluating PointPillars on Waymo with 8 GPUs using the Waymo metrics is as follows. + +```shell +./tools/dist_test.sh configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth +``` + +`pklfile_prefix` should be set in the `test_evaluator` of the configuration if you need to generate the result bin file, so you can add `--cfg-options "test_evaluator.pklfile_prefix=xxxx"` at the end of the command if you want to do it. + +**Notice**: + +1. Sometimes when using `bazel` to build `compute_detection_metrics_main`, an error `'round' is not a member of 'std'` may appear. We just need to remove the `std::` before `round` in that file. + +2. Considering that a single evaluation takes quite a long time, we recommend evaluating only once at the end of model training. + +3. To use TensorFlow with CUDA 9, it is recommended to compile it from source. Apart from official tutorials, you can refer to this [link](https://github.com/SmileTM/Tensorflow2.X-GPU-CUDA9.0) for possibly suitable precompiled packages and useful information for compiling it from source. + +## Testing and making a submission + +The following is an example of testing PointPillars on Waymo with 8 GPUs, generating the bin files and making a submission to the leaderboard.
+ +`submission_prefix` should be set in `test_evaluator` of configuration before you run the test command if you want to generate the bin files and make a submission to the leaderboard.. + +After generating the bin file, you can simply build the binary file `create_submission` and use them to create a submission file by following the [instruction](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md/). Basically, here are some example commands. + +```shell +cd ../waymo-od/ +bazel build waymo_open_dataset/metrics/tools/create_submission +cp bazel-bin/waymo_open_dataset/metrics/tools/create_submission ../mmdetection3d/mmdet3d/core/evaluation/waymo_utils/ +vim waymo_open_dataset/metrics/tools/submission.txtpb # set the metadata information +cp waymo_open_dataset/metrics/tools/submission.txtpb ../mmdetection3d/mmdet3d/evaluation/functional/waymo_utils/ + +cd ../mmdetection3d +# suppose the result bin is in `results/waymo-car/submission` +mmdet3d/core/evaluation/waymo_utils/create_submission --input_filenames='results/waymo-car/kitti_results_test.bin' --output_filename='results/waymo-car/submission/model' --submission_filename='mmdet3d/evaluation/functional/waymo_utils/submission.txtpb' + +tar cvf results/waymo-car/submission/my_model.tar results/waymo-car/submission/my_model/ +gzip results/waymo-car/submission/my_model.tar +``` + +For evaluation on the validation set with the eval server, you can also use the same way to generate a submission. Make sure you change the fields in `submission.txtpb` before running the command above. diff --git a/docs/en/advanced_guides/index.rst b/docs/en/advanced_guides/index.rst new file mode 100755 index 0000000..1faa4c5 --- /dev/null +++ b/docs/en/advanced_guides/index.rst @@ -0,0 +1,27 @@ +Datasets +************** + +.. toctree:: + :maxdepth: 1 + + datasets/index.rst + + +Supported Tasks +************** + +.. toctree:: + :maxdepth: 2 + + supported_tasks/index.rst + + +Customization +************** + +.. toctree:: + :maxdepth: 2 + + customize_dataset.md + customize_models.md + customize_runtime.md diff --git a/docs/en/advanced_guides/pure_point_cloud_dataset.md b/docs/en/advanced_guides/pure_point_cloud_dataset.md new file mode 100755 index 0000000..dc38546 --- /dev/null +++ b/docs/en/advanced_guides/pure_point_cloud_dataset.md @@ -0,0 +1,461 @@ +# Use Pure Point Cloud Dataset + +## Data Pre-Processing + +### Convert Point cloud format + +Currently, we only support bin format point cloud training and inference, before training on your own datasets, you need to transform your point cloud format to bin file. The common point cloud data formats include pcd and las, we provide some open-source tools for reference. + +1. Convert pcd to bin: https://github.com/leofansq/Tools_RosBag2KITTI +2. Convert las to bin: The common conversion path is las -> pcd -> bin, and the conversion from las -> pcd can be achieved through [this tool](https://github.com/Hitachi-Automotive-And-Industry-Lab/semantic-segmentation-editor). + +### Point cloud annotation + +MMDetection3D does not support point cloud annotation. Some open-source annotation tools are offered for reference: + +- [SUSTechPOINTS](https://github.com/naurril/SUSTechPOINTS) +- [LATTE](https://github.com/bernwang/latte) + +Besides, we improved [LATTE](https://github.com/bernwang/latte) for better usage. More details can be found [here](https://arxiv.org/abs/2011.10174). 
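+
+Coming back to the point cloud format conversion above, if your `.pcd` files are easy to read in Python, a few lines are often enough instead of a full conversion tool. The sketch below converts a single `.pcd` file into a KITTI-style `.bin` file; it assumes that `open3d` is installed, that only the XYZ coordinates are needed, and that the intensity channel is simply padded with zeros, so treat it as an illustration rather than a recommended tool.
+
+```python
+import numpy as np
+import open3d as o3d
+
+
+def pcd_to_bin(pcd_path, bin_path):
+    """Convert a .pcd file to a float32 .bin file with (x, y, z, intensity)."""
+    pcd = o3d.io.read_point_cloud(pcd_path)
+    xyz = np.asarray(pcd.points, dtype=np.float32)  # (N, 3) coordinates
+    # pad a zero intensity channel, since it is not read here
+    intensity = np.zeros((xyz.shape[0], 1), dtype=np.float32)
+    np.hstack([xyz, intensity]).tofile(bin_path)  # (N, 4) float32
+
+
+pcd_to_bin('./example.pcd', './example.bin')
+```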
+ +## Support new data format + +To support a new data format, you can either convert them to existing formats or directly convert them to the middle format. You could also choose to convert them offline (before training by a script) or online (implement a new dataset and do the conversion at training). + +### Reorganize new data formats to existing format + +Once your datasets only contain point cloud file and 3D Bounding box annotations, without calib file. We recommend converting it into the basic formats, the annotations files in basic format has the following necessary keys: + +```python + +[ + {'sample_idx': + 'lidar_points': {'lidar_path': velodyne_path, + .... + }, + 'annos': {'box_type_3d': (str) 'LiDAR/Camera/Depth' + 'gt_bboxes_3d': (n, 7) + 'gt_names': [list] + .... + } + 'calib': { .....} + 'images': { .....} + } +] + +``` + +In MMDetection3D, for the data that is inconvenient to read directly online, we recommend converting it into into basic format as above and do the conversion offline, thus you only need to modify the config's data annotation paths and classes after the conversion. +To use data that share a similar format as the existing datasets, e.g., Lyft has a similar format as the nuScenes dataset, we recommend directly implementing a new data converter and a dataset class to convert the data and load the data, respectively. In this procedure, the code can inherit from the existing dataset classes to reuse the code. + +### Reorganize new data format to middle format + +There is also a way if users do not want to convert the annotation format to existing formats. +Actually, we convert all the supported datasets into pickle files, which summarize useful information for model training and inference. + +The annotation of a dataset is a list of dict, each dict corresponds to a frame. +A basic example (used in KITTI) is as follows. A frame consists of several keys, like `image`, `point_cloud`, `calib` and `annos`. +As long as we could directly read data according to these information, the organization of raw data could also be different from existing ones. +With this design, we provide an alternative choice for customizing datasets. + +```python + +[ + {'image': {'image_idx': 0, 'image_path': 'training/image_2/000000.png', 'image_shape': array([ 370, 1224], dtype=int32)}, + 'point_cloud': {'num_features': 4, 'velodyne_path': 'training/velodyne/000000.bin'}, + 'calib': {'P0': array([[707.0493, 0. , 604.0814, 0. ], + [ 0. , 707.0493, 180.5066, 0. ], + [ 0. , 0. , 1. , 0. ], + [ 0. , 0. , 0. , 1. ]]), + 'P1': array([[ 707.0493, 0. , 604.0814, -379.7842], + [ 0. , 707.0493, 180.5066, 0. ], + [ 0. , 0. , 1. , 0. ], + [ 0. , 0. , 0. , 1. ]]), + 'P2': array([[ 7.070493e+02, 0.000000e+00, 6.040814e+02, 4.575831e+01], + [ 0.000000e+00, 7.070493e+02, 1.805066e+02, -3.454157e-01], + [ 0.000000e+00, 0.000000e+00, 1.000000e+00, 4.981016e-03], + [ 0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]]), + 'P3': array([[ 7.070493e+02, 0.000000e+00, 6.040814e+02, -3.341081e+02], + [ 0.000000e+00, 7.070493e+02, 1.805066e+02, 2.330660e+00], + [ 0.000000e+00, 0.000000e+00, 1.000000e+00, 3.201153e-03], + [ 0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]]), + 'R0_rect': array([[ 0.9999128 , 0.01009263, -0.00851193, 0. ], + [-0.01012729, 0.9999406 , -0.00403767, 0. ], + [ 0.00847068, 0.00412352, 0.9999556 , 0. ], + [ 0. , 0. , 0. , 1. 
]]), + 'Tr_velo_to_cam': array([[ 0.00692796, -0.9999722 , -0.00275783, -0.02457729], + [-0.00116298, 0.00274984, -0.9999955 , -0.06127237], + [ 0.9999753 , 0.00693114, -0.0011439 , -0.3321029 ], + [ 0. , 0. , 0. , 1. ]]), + 'Tr_imu_to_velo': array([[ 9.999976e-01, 7.553071e-04, -2.035826e-03, -8.086759e-01], + [-7.854027e-04, 9.998898e-01, -1.482298e-02, 3.195559e-01], + [ 2.024406e-03, 1.482454e-02, 9.998881e-01, -7.997231e-01], + [ 0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]])}, + 'annos': {'name': array(['Pedestrian'], dtype='=2.0.0rc4' +mim install 'mmdet>=3.0.0' +``` + +**Note**: In MMCV-v2.x, `mmcv-full` is renamed to `mmcv`, if you want to install `mmcv` without CUDA ops, you can use `mim install "mmcv-lite>=2.0.0rc4"` to install the lite version. + +**Step 1.** Install MMDetection3D. + +Case a: If you develop and run mmdet3d directly, install it from source: + +```shell +git clone https://github.com/open-mmlab/mmdetection3d.git -b dev-1.x +# "-b dev-1.x" means checkout to the `dev-1.x` branch. +cd mmdetection3d +pip install -v -e . +# "-v" means verbose, or more output +# "-e" means installing a project in edtiable mode, +# thus any local modifications made to the code will take effect without reinstallation. +``` + +Case b: If you use mmdet3d as a dependency or third-party package, install it with MIM: + +```shell +mim install "mmdet3d>=1.1.0rc0" +``` + +Note: + +1. If you would like to use `opencv-python-headless` instead of `opencv-python`, + you can install it before installing MMCV. + +2. Some dependencies are optional. Simply running `pip install -v -e .` will only install the minimum runtime requirements. To use optional dependencies like `albumentations` and `imagecorruptions` either install them manually with `pip install -r requirements/optional.txt` or specify desired extras when calling `pip` (e.g. `pip install -v -e .[optional]`). Valid keys for the extras field are: `all`, `tests`, `build`, and `optional`. + + We have supported `spconv 2.0`. If the user has installed `spconv 2.0`, the code will use `spconv 2.0` first, which will take up less GPU memory than using the default `mmcv spconv`. Users can use the following commands to install `spconv 2.0`: + + ```shell + pip install cumm-cuxxx + pip install spconv-cuxxx + ``` + + Where `xxx` is the CUDA version in the environment. + + For example, using CUDA 10.2, the command will be `pip install cumm-cu102 && pip install spconv-cu102`. + + Supported CUDA versions include 10.2, 11.1, 11.3, and 11.4. Users can also install it by building from the source. For more details please refer to [spconv v2.x](https://github.com/traveller59/spconv). + + We also support `Minkowski Engine` as a sparse convolution backend. If necessary please follow original [installation guide](https://github.com/NVIDIA/MinkowskiEngine#installation) or use `pip` to install it: + + ```shell + conda install openblas-devel -c anaconda + pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps --install-option="--blas_include_dirs=/opt/conda/include" --install-option="--blas=openblas" + ``` + + We also support `Torchsparse` as a sparse convolution backend. 
If necessary please follow original [installation guide](https://github.com/mit-han-lab/torchsparse#installation) or use `pip` to install it: + + ```shell + sudo apt-get install libsparsehash-dev + pip install --upgrade git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0 + ``` + + or omit sudo install by following command: + + ```shell + conda install -c bioconda sparsehash + export CPLUS_INCLUDE_PATH=CPLUS_INCLUDE_PATH:${YOUR_CONDA_ENVS_DIR}/include + pip install --upgrade git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0 + ``` + +3. The code can not be built for CPU only environment (where CUDA isn't available) for now. + +### Verify the Installation + +To verify whether MMDetection3D is installed correctly, we provide some sample codes to run an inference demo. + +**Step 1.** We need to download config and checkpoint files. + +```shell +mim download mmdet3d --config pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car --dest . +``` + +The downloading will take several seconds or more, depending on your network environment. When it is done, you will find two files `pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py` and `hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth` in your current folder. + +**Step 2.** Verify the inference demo. + +Case a: If you install MMDetection3D from source, just run the following command. + +```shell +python demo/pcd_demo.py demo/data/kitti/000008.bin pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth --show +``` + +You will see a visualizer interface with point cloud, where bounding boxes are plotted on cars. + +**Note**: + +If you want to input a `.ply` file, you can use the following function and convert it to `.bin` format. Then you can use the converted `.bin` file to run demo. +Note that you need to install `pandas` and `plyfile` before using this script. This function can also be used for data preprocessing for training `ply data`. + +```python +import numpy as np +import pandas as pd +from plyfile import PlyData + +def convert_ply(input_path, output_path): + plydata = PlyData.read(input_path) # read file + data = plydata.elements[0].data # read data + data_pd = pd.DataFrame(data) # convert to DataFrame + data_np = np.zeros(data_pd.shape, dtype=np.float) # initialize array to store data + property_names = data[0].dtype.names # read names of properties + for i, name in enumerate( + property_names): # read data by property + data_np[:, i] = data_pd[name] + data_np.astype(np.float32).tofile(output_path) +``` + +Examples: + +```python +convert_ply('./test.ply', './test.bin') +``` + +If you have point clouds in other format (`.off`, `.obj`, etc.), you can use `trimesh` to convert them into `.ply`. + +```python +import trimesh + +def to_ply(input_path, output_path, original_type): + mesh = trimesh.load(input_path, file_type=original_type) # read file + mesh.export(output_path, file_type='ply') # convert to ply +``` + +Examples: + +```python +to_ply('./test.obj', './test.ply', 'obj') +``` + +Case b: If you install MMDetection3D with MIM, open your python interpreter and copy&paste the following codes. 
+ +```python +from mmdet3d.apis import init_model, inference_detector + +config_file = 'pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py' +checkpoint_file = 'hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth' +model = init_model(config_file, checkpoint_file) +inference_detector(model, 'demo/data/kitti/000008.bin') +``` + +You will see a list of `Det3DDataSample`, and the predictions are in the `pred_instances_3d`, indicating the detected bounding boxes, labels, and scores. + +### Customize Installation + +#### CUDA Versions + +When installing PyTorch, you need to specify the version of CUDA. If you are not clear on which to choose, follow our recommendations: + +- For Ampere-based NVIDIA GPUs, such as GeForce 30 series and NVIDIA A100, CUDA 11 is a must. +- For older NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 offers better compatibility and is more lightweight. + +Please make sure the GPU driver satisfies the minimum version requirements. See [this table](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) for more information. + +```{note} +Installing CUDA runtime libraries is enough if you follow our best practices, because no CUDA code will be compiled locally. However if you hope to compile MMCV from source or develop other CUDA operators, you need to install the complete CUDA toolkit from NVIDIA's [website](https://developer.nvidia.com/cuda-downloads), and its version should match the CUDA version of PyTorch. i.e., the specified version of cudatoolkit in `conda install` command. +``` + +#### Install MMEngine without MIM + +To install MMEngine with pip instead of MIM, please follow [MMEngine installation guides](https://mmengine.readthedocs.io/en/latest/get_started/installation.html). + +For example, you can install MMEngine by the following command: + +```shell +pip install mmengine +``` + +#### Install MMCV without MIM + +MMCV contains C++ and CUDA extensions, thus depending on PyTorch in a complex way. MIM solves such dependencies automatically and makes the installation easier. However, it is not a must. + +To install MMCV with pip instead of MIM, please follow [MMCV installation guides](https://mmcv.readthedocs.io/en/2.x/get_started/installation.html). This requires manually specifying a find-url based on PyTorch version and its CUDA version. + +For example, the following command install MMCV built for PyTorch 1.12.x and CUDA 11.6: + +```shell +pip install "mmcv>=2.0.0rc4" -f https://download.openmmlab.com/mmcv/dist/cu116/torch1.12.0/index.html +``` + +#### Install on Google Colab + +[Google Colab](https://colab.research.google.com/) usually has PyTorch installed, thus we only need to install MMEngine, MMCV, MMDetection, and MMDetection3D with the following commands. + +**Step 1.** Install [MMEngine](https://github.com/open-mmlab/mmengine), [MMCV](https://github.com/open-mmlab/mmcv) and [MMDetection](https://github.com/open-mmlab/mmdetection) using [MIM](https://github.com/open-mmlab/mim). + +```shell +!pip3 install openmim +!mim install mmengine +!mim install "mmcv>=2.0.0rc4,<2.1.0" +!mim install "mmdet>=3.0.0,<3.1.0" +``` + +**Step 2.** Install MMDetection3D from source. + +```shell +!git clone https://github.com/open-mmlab/mmdetection3d.git -b dev-1.x +%cd mmdetection3d +!pip install -e . +``` + +**Step 3.** Verification. + +```python +import mmdet3d +print(mmdet3d.__version__) +# Example output: 1.1.0rc0, or an another version. 
+``` + +```{note} +Within Jupyter, the exclamation mark `!` is used to call external executables and `%cd` is a [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd) to change the current working directory of Python. +``` + +#### Using MMDetection3D with Docker + +We provide a [Dockerfile](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docker/Dockerfile) to build an image. Ensure that your [docker version](https://docs.docker.com/engine/install/) >= 19.03. + +```shell +# build an image with PyTorch 1.9, CUDA 11.1 +# If you prefer other versions, just modified the Dockerfile +docker build -t mmdetection3d docker/ +``` + +Run it with: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmdetection3d/data mmdetection3d +``` + +### Troubleshooting + +If you have some issues during the installation, please first view the [FAQ](notes/faq.md) page. +You may [open an issue](https://github.com/open-mmlab/mmdetection3d/issues/new/choose) on GitHub if no solution is found. + +### Use Multiple Versions of MMDetection3D in Development + +Training and testing scripts have already been modified in `PYTHONPATH` in order to make sure the scripts are using their own versions of MMDetection3D. + +To install the default version of MMDetection3D in your environment, you can exclude the following code in the related scripts: + +```shell +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +``` diff --git a/docs/en/index.rst b/docs/en/index.rst new file mode 100755 index 0000000..4af0e33 --- /dev/null +++ b/docs/en/index.rst @@ -0,0 +1,56 @@ +Welcome to MMDetection3D's documentation! +========================================== + +.. toctree:: + :maxdepth: 1 + :caption: Get Started + + overview.md + get_started.md + +.. toctree:: + :maxdepth: 2 + :caption: User Guides + + user_guides/index.rst + +.. toctree:: + :maxdepth: 2 + :caption: Advanced Guides + + advanced_guides/index.rst + +.. toctree:: + :maxdepth: 1 + :caption: Migrating from MMDetection3D 1.0 + + migration.md + +.. toctree:: + :maxdepth: 1 + :caption: API Reference + + api.rst + +.. toctree:: + :maxdepth: 1 + :caption: Model Zoo + + model_zoo.md + +.. toctree:: + :maxdepth: 1 + :caption: Notes + + notes/index.rst + +.. toctree:: + :caption: Switch Language + + switch_language.md + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/en/make.bat b/docs/en/make.bat new file mode 100755 index 0000000..922152e --- /dev/null +++ b/docs/en/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/en/migration.md b/docs/en/migration.md new file mode 100755 index 0000000..66b1de8 --- /dev/null +++ b/docs/en/migration.md @@ -0,0 +1,33 @@ +Along with the release of OpenMMLab 2.0, MMDetection3D (namely MMDet3D) 1.1 made many significant changes, resulting in less redundant, more efficient code and a more consistent overall design. These changes break backward compatibility. Therefore, we prepared this migration guide to make the transition as smooth as possible so that all users can enjoy the productivity benefits of the new MMDet3D and the entire OpenMMLab 2.0 ecosystem. + +## Environment + +MMDet3D 1.1 depends on the new foundational library [MMEngine](https://github.com/open-mmlab/mmengine) for training deep learning models, and therefore has an entirely different dependency chain compared with MMDet3D 1.0. Even if you have a well-rounded MMDet3D 1.0 / 0.x environment before, you still need to create a new Python environment for MMDet3D 1.1. We provide a detailed [installation guide](./get_started.md) for reference. + +The configuration files in our new version have a lot of modifications because of the differences between MMCV 1.x and MMEngine. The guides for migration from MMCV to MMEngine can be seen [here](https://github.com/open-mmlab/mmengine/tree/main/docs/en/migration). + +We have renamed the names of the remote branches in MMDet3D 1.1 (renaming 1.1 to main, master to 1.0, and dev to dev-1.0). If your local branches in the git system are not aligned with branches of the remote repo, you can use the following commands to resolve it: + +``` +git fetch origin +git checkout main +git branch main_backup # backup your main branch +git reset --hard origin/main +``` + +## Dataset + +You should update the annotation files generated in the 1.0 version since some key words and structures of annotation in MMDet3D 1.1 have changed. Taking KITTI as an example, the update script is as follows: + +```python +python tools/dataset_converters/update_infos_to_v2.py + --dataset kitti + --pkl-path ./data/kitti/kitti_infos_train.pkl + --out-dir ./kitti_v2/ +``` + +If your annotation files are generated in the 0.x version, you should first update them to 1.0 version using this [script](../../tools/update_data_coords.py). Alternatively, you can re-generate annotation files from scratch using this [script](../../tools/create_data.py). + +## Model + +MMDet3D 1.1 supports loading weights trained on the old version (1.0 version). For models that are important or frequently used, we have thoroughly verified their precisions in the 1.1 version. Especially for some models that may experience potential performance drop or training bugs in the old version, such as [centerpoint](https://github.com/open-mmlab/mmdetection3d/issues/2390), we have checked them and ensured the right precision in the new version. If you encounter any problem, please feel free to raise an [issue](https://github.com/open-mmlab/mmdetection3d/issues). Additionally, we have added some of the latest SOTA methods in our [package](../../configs/) and [projects](../../projects/), making MMDet3D 1.1 a highly recommended choice for implementing your project. 
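+
+As a quick sanity check after migrating, you can try loading a checkpoint trained with the old version through the high-level inference API (`init_model` / `inference_detector`); the config and checkpoint paths below are placeholders for your own files, not shipped examples.
+
+```python
+from mmdet3d.apis import inference_detector, init_model
+
+# placeholder paths: your updated 1.1 config and the checkpoint trained with 1.0
+config_file = 'configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py'
+checkpoint_file = 'work_dirs/old_pointpillars/epoch_80.pth'
+
+model = init_model(config_file, checkpoint_file)
+result = inference_detector(model, 'demo/data/kitti/000008.bin')
+print(type(result))  # predictions are stored in `pred_instances_3d`
+```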
diff --git a/docs/en/model_zoo.md b/docs/en/model_zoo.md new file mode 100755 index 0000000..a8956d4 --- /dev/null +++ b/docs/en/model_zoo.md @@ -0,0 +1,117 @@ +# Model Zoo + +## Common settings + +- We use distributed training. +- For fair comparison with other codebases, we report the GPU memory as the maximum value of `torch.cuda.max_memory_allocated()` for all 8 GPUs. Note that this value is usually less than what `nvidia-smi` shows. +- We report the inference time as the total time of network forwarding and post-processing, excluding the data loading time. Results are obtained with the script [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) which computes the average time on 2000 images. + +## Baselines + +### SECOND + +Please refer to [SECOND](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/second) for details. We provide SECOND baselines on KITTI and Waymo datasets. + +### PointPillars + +Please refer to [PointPillars](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars) for details. We provide pointpillars baselines on KITTI, nuScenes, Lyft, and Waymo datasets. + +### Part-A2 + +Please refer to [Part-A2](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/parta2) for details. + +### VoteNet + +Please refer to [VoteNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/votenet) for details. We provide VoteNet baselines on ScanNet and SUNRGBD datasets. + +### Dynamic Voxelization + +Please refer to [Dynamic Voxelization](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/dynamic_voxelization) for details. + +### MVXNet + +Please refer to [MVXNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/mvxnet) for details. + +### RegNetX + +Please refer to [RegNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/regnet) for details. We provide pointpillars baselines with RegNetX backbones on nuScenes and Lyft datasets currently. + +### nuImages + +We also support baseline models on [nuImages dataset](https://www.nuscenes.org/nuimages). Please refer to [nuImages](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/nuimages) for details. We report Mask R-CNN, Cascade Mask R-CNN and HTC results currently. + +### H3DNet + +Please refer to [H3DNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/h3dnet) for details. + +### 3DSSD + +Please refer to [3DSSD](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/3dssd) for details. + +### CenterPoint + +Please refer to [CenterPoint](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/centerpoint) for details. + +### SSN + +Please refer to [SSN](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/ssn) for details. We provide pointpillars with shape-aware grouping heads used in SSN on the nuScenes and Lyft datasets currently. + +### ImVoteNet + +Please refer to [ImVoteNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/imvotenet) for details. We provide ImVoteNet baselines on SUNRGBD dataset. + +### FCOS3D + +Please refer to [FCOS3D](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/fcos3d) for details. We provide FCOS3D baselines on the nuScenes dataset. + +### PointNet++ + +Please refer to [PointNet++](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointnet2) for details. We provide PointNet++ baselines on ScanNet and S3DIS datasets. 
+ +### Group-Free-3D + +Please refer to [Group-Free-3D](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/groupfree3d) for details. We provide Group-Free-3D baselines on ScanNet dataset. + +### ImVoxelNet + +Please refer to [ImVoxelNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/imvoxelnet) for details. We provide ImVoxelNet baselines on KITTI dataset. + +### PAConv + +Please refer to [PAConv](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/paconv) for details. We provide PAConv baselines on S3DIS dataset. + +### DGCNN + +Please refer to [DGCNN](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/dgcnn) for details. We provide DGCNN baselines on S3DIS dataset. + +### SMOKE + +Please refer to [SMOKE](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/smoke) for details. We provide SMOKE baselines on KITTI dataset. + +### PGD + +Please refer to [PGD](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/pgd) for details. We provide PGD baselines on KITTI and nuScenes dataset. + +### PointRCNN + +Please refer to [PointRCNN](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/point_rcnn) for details. We provide PointRCNN baselines on KITTI dataset. + +### MonoFlex + +Please refer to [MonoFlex](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/monoflex) for details. We provide MonoFlex baselines on KITTI dataset. + +### SA-SSD + +Please refer to [SA-SSD](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/sassd) for details. We provide SA-SSD baselines on the KITTI dataset. + +### FCAF3D + +Please refer to [FCAF3D](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/fcaf3d) for details. We provide FCAF3D baselines on the ScanNet, S3DIS, and SUN RGB-D datasets. + +### PV-RCNN + +Please refer to [PV-RCNN](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/pv_rcnn) for details. We provide PV-RCNN baselines on the KITTI dataset. + +### Mixed Precision (FP16) Training + +Please refer to [Mixed Precision (FP16) Training on PointPillars](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/pointpillars/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d.py) for details. diff --git a/docs/en/notes/benchmarks.md b/docs/en/notes/benchmarks.md new file mode 100755 index 0000000..4db2c39 --- /dev/null +++ b/docs/en/notes/benchmarks.md @@ -0,0 +1,286 @@ +# Benchmarks + +Here we benchmark the training and testing speed of models in MMDetection3D, +with some other open source 3D detection codebases. + +## Settings + +- Hardwares: 8 NVIDIA Tesla V100 (32G) GPUs, Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz +- Software: Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.3, numba 0.48.0. +- Model: Since all the other codebases implements different models, we compare the corresponding models including SECOND, PointPillars, Part-A2, and VoteNet with them separately. +- Metrics: We use the average throughput in iterations of the entire training run and skip the first 50 iterations of each epoch to skip GPU warmup time. + +## Main Results + +We compare the training speed (samples/s) with other codebases if they implement the similar models. The results are as below, the greater the numbers in the table, the faster of the training process. The models that are not supported by other codebases are marked by `×`. 
+ +| Methods | MMDetection3D | OpenPCDet | votenet | Det3D | +| :-----------------: | :-----------: | :-------: | :-----: | :---: | +| VoteNet | 358 | × | 77 | × | +| PointPillars-car | 141 | × | × | 140 | +| PointPillars-3class | 107 | 44 | × | × | +| SECOND | 40 | 30 | × | × | +| Part-A2 | 17 | 14 | × | × | + +## Details of Comparison + +### Modification for Calculating Speed + +- __MMDetection3D__: We try to use as similar settings as those of other codebases as possible using [benchmark configs](https://github.com/open-mmlab/MMDetection3D/blob/master/configs/benchmark). + +- __Det3D__: For comparison with Det3D, we use the commit [519251e](https://github.com/poodarchu/Det3D/tree/519251e72a5c1fdd58972eabeac67808676b9bb7). + +- __OpenPCDet__: For comparison with OpenPCDet, we use the commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2). + + For training speed, we add code to record the running time in the file `./tools/train_utils/train_utils.py`. We calculate the speed of each epoch, and report the average speed of all the epochs. + +
    + + (diff to make it use the same method for benchmarking speed - click to expand) + + + ```diff + diff --git a/tools/train_utils/train_utils.py b/tools/train_utils/train_utils.py + index 91f21dd..021359d 100644 + --- a/tools/train_utils/train_utils.py + +++ b/tools/train_utils/train_utils.py + @@ -2,6 +2,7 @@ import torch + import os + import glob + import tqdm + +import datetime + from torch.nn.utils import clip_grad_norm_ + + + @@ -13,7 +14,10 @@ def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, ac + if rank == 0: + pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True) + + + start_time = None + for cur_it in range(total_it_each_epoch): + + if cur_it > 49 and start_time is None: + + start_time = datetime.datetime.now() + try: + batch = next(dataloader_iter) + except StopIteration: + @@ -55,9 +59,11 @@ def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, ac + tb_log.add_scalar('learning_rate', cur_lr, accumulated_iter) + for key, val in tb_dict.items(): + tb_log.add_scalar('train_' + key, val, accumulated_iter) + + endtime = datetime.datetime.now() + + speed = (endtime - start_time).seconds / (total_it_each_epoch - 50) + if rank == 0: + pbar.close() + - return accumulated_iter + + return accumulated_iter, speed + + + def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg, + @@ -65,6 +71,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_ + lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50, + merge_all_iters_to_one_epoch=False): + accumulated_iter = start_iter + + speeds = [] + with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar: + total_it_each_epoch = len(train_loader) + if merge_all_iters_to_one_epoch: + @@ -82,7 +89,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_ + cur_scheduler = lr_warmup_scheduler + else: + cur_scheduler = lr_scheduler + - accumulated_iter = train_one_epoch( + + accumulated_iter, speed = train_one_epoch( + model, optimizer, train_loader, model_func, + lr_scheduler=cur_scheduler, + accumulated_iter=accumulated_iter, optim_cfg=optim_cfg, + @@ -91,7 +98,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_ + total_it_each_epoch=total_it_each_epoch, + dataloader_iter=dataloader_iter + ) + - + + speeds.append(speed) + # save trained model + trained_epoch = cur_epoch + 1 + if trained_epoch % ckpt_save_interval == 0 and rank == 0: + @@ -107,6 +114,8 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_ + save_checkpoint( + checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name, + ) + + print(speed) + + print(f'*******{sum(speeds) / len(speeds)}******') + + + def model_state_to_cpu(model_state): + ``` + +
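+
+The per-codebase modifications above all implement the same bookkeeping: time every iteration after the first 50 of each epoch and average the resulting throughput. The following is a minimal, codebase-agnostic sketch of that metric; the function and argument names are illustrative and do not belong to any of the benchmarked codebases.
+
+```python
+import time
+
+
+def epoch_throughput(data_iter, run_iteration, batch_size, warmup_iters=50):
+    """Average training throughput (samples/s) for one epoch, skipping warm-up.
+
+    ``run_iteration`` is any callable that consumes one batch and performs a
+    full training step. The first ``warmup_iters`` iterations are excluded so
+    that GPU warm-up does not skew the measurement.
+    """
+    num_timed, start = 0, None
+    for it, batch in enumerate(data_iter):
+        if it == warmup_iters:
+            start = time.perf_counter()  # start the clock after warm-up
+        run_iteration(batch)
+        if start is not None:
+            num_timed += 1
+    return num_timed * batch_size / (time.perf_counter() - start)
+```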
    + +### VoteNet + +- __MMDetection3D__: With release v0.1.0, run + + ```bash + ./tools/dist_train.sh configs/votenet/votenet_8xb16_sunrgbd-3d.py 8 --no-validate + ``` + +- __votenet__: At commit [2f6d6d3](https://github.com/facebookresearch/votenet/tree/2f6d6d36ff98d96901182e935afe48ccee82d566), run + + ```bash + python train.py --dataset sunrgbd --batch_size 16 + ``` + + Then benchmark the test speed by running + + ```bash + python eval.py --dataset sunrgbd --checkpoint_path log_sunrgbd/checkpoint.tar --batch_size 1 --dump_dir eval_sunrgbd --cluster_sampling seed_fps --use_3d_nms --use_cls_nms --per_class_proposal + ``` + + Note that eval.py is modified to compute inference time. + +
    + + (diff to benchmark the similar models - click to expand) + + + ```diff + diff --git a/eval.py b/eval.py + index c0b2886..04921e9 100644 + --- a/eval.py + +++ b/eval.py + @@ -10,6 +10,7 @@ import os + import sys + import numpy as np + from datetime import datetime + +import time + import argparse + import importlib + import torch + @@ -28,7 +29,7 @@ parser.add_argument('--checkpoint_path', default=None, help='Model checkpoint pa + parser.add_argument('--dump_dir', default=None, help='Dump dir to save sample outputs [default: None]') + parser.add_argument('--num_point', type=int, default=20000, help='Point Number [default: 20000]') + parser.add_argument('--num_target', type=int, default=256, help='Point Number [default: 256]') + -parser.add_argument('--batch_size', type=int, default=8, help='Batch Size during training [default: 8]') + +parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 8]') + parser.add_argument('--vote_factor', type=int, default=1, help='Number of votes generated from each seed [default: 1]') + parser.add_argument('--cluster_sampling', default='vote_fps', help='Sampling strategy for vote clusters: vote_fps, seed_fps, random [default: vote_fps]') + parser.add_argument('--ap_iou_thresholds', default='0.25,0.5', help='A list of AP IoU thresholds [default: 0.25,0.5]') + @@ -132,6 +133,7 @@ CONFIG_DICT = {'remove_empty_box': (not FLAGS.faster_eval), 'use_3d_nms': FLAGS. + # ------------------------------------------------------------------------- GLOBAL CONFIG END + + def evaluate_one_epoch(): + + time_list = list() + stat_dict = {} + ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \ + for iou_thresh in AP_IOU_THRESHOLDS] + @@ -144,6 +146,8 @@ def evaluate_one_epoch(): + + # Forward pass + inputs = {'point_clouds': batch_data_label['point_clouds']} + + torch.cuda.synchronize() + + start_time = time.perf_counter() + with torch.no_grad(): + end_points = net(inputs) + + @@ -161,6 +165,12 @@ def evaluate_one_epoch(): + + batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT) + batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT) + + torch.cuda.synchronize() + + elapsed = time.perf_counter() - start_time + + time_list.append(elapsed) + + + + if len(time_list==200): + + print("average inference time: %4f"%(sum(time_list[5:])/len(time_list[5:]))) + for ap_calculator in ap_calculator_list: + ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls) + + ``` + +### PointPillars-car + +- __MMDetection3D__: With release v0.1.0, run + + ```bash + ./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py 8 --no-validate + ``` + +- __Det3D__: At commit [519251e](https://github.com/poodarchu/Det3D/tree/519251e72a5c1fdd58972eabeac67808676b9bb7), use `kitti_point_pillars_mghead_syncbn.py` and run + + ```bash + ./tools/scripts/train.sh --launcher=slurm --gpus=8 + ``` + + Note that the config in train.sh is modified to train point pillars. + +
    + + (diff to benchmark the similar models - click to expand) + + + ```diff + diff --git a/tools/scripts/train.sh b/tools/scripts/train.sh + index 3a93f95..461e0ea 100755 + --- a/tools/scripts/train.sh + +++ b/tools/scripts/train.sh + @@ -16,9 +16,9 @@ then + fi + + # Voxelnet + -python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py examples/second/configs/ kitti_car_vfev3_spmiddlefhd_rpn1_mghead_syncbn.py --work_dir=$SECOND_WORK_DIR + +# python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py examples/second/configs/ kitti_car_vfev3_spmiddlefhd_rpn1_mghead_syncbn.py --work_dir=$SECOND_WORK_DIR + # python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py examples/cbgs/configs/ nusc_all_vfev3_spmiddleresnetfhd_rpn2_mghead_syncbn.py --work_dir=$NUSC_CBGS_WORK_DIR + # python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py examples/second/configs/ lyft_all_vfev3_spmiddleresnetfhd_rpn2_mghead_syncbn.py --work_dir=$LYFT_CBGS_WORK_DIR + + # PointPillars + -# python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py ./examples/point_pillars/configs/ original_pp_mghead_syncbn_kitti.py --work_dir=$PP_WORK_DIR + +python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py ./examples/point_pillars/configs/ kitti_point_pillars_mghead_syncbn.py + ``` + +
    + +### PointPillars-3class + +- __MMDetection3D__: With release v0.1.0, run + + ```bash + ./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py 8 --no-validate + ``` + +- __OpenPCDet__: At commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2), run + + ```bash + cd tools + sh scripts/slurm_train.sh ${PARTITION} ${JOB_NAME} 8 --cfg_file ./cfgs/kitti_models/pointpillar.yaml --batch_size 32 --workers 32 --epochs 80 + ``` + +### SECOND + +For SECOND, we mean the [SECONDv1.5](https://github.com/traveller59/second.pytorch/blob/master/second/configs/all.fhd.config) that was first implemented in [second.Pytorch](https://github.com/traveller59/second.pytorch). Det3D's implementation of SECOND uses its self-implemented Multi-Group Head, so its speed is not compatible with other codebases. + +- __MMDetection3D__: With release v0.1.0, run + + ```bash + ./tools/dist_train.sh configs/benchmark/hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py 8 --no-validate + ``` + +- __OpenPCDet__: At commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2), run + + ```bash + cd tools + sh ./scripts/slurm_train.sh ${PARTITION} ${JOB_NAME} 8 --cfg_file ./cfgs/kitti_models/second.yaml --batch_size 32 --workers 32 --epochs 80 + ``` + +### Part-A2 + +- __MMDetection3D__: With release v0.1.0, run + + ```bash + ./tools/dist_train.sh configs/benchmark/hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py 8 --no-validate + ``` + +- __OpenPCDet__: At commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2), train the model by running + + ```bash + cd tools + sh ./scripts/slurm_train.sh ${PARTITION} ${JOB_NAME} 8 --cfg_file ./cfgs/kitti_models/PartA2.yaml --batch_size 32 --workers 32 --epochs 80 + ``` diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md new file mode 100755 index 0000000..0a07b7f --- /dev/null +++ b/docs/en/notes/changelog.md @@ -0,0 +1,268 @@ +# Changelog of v1.1 + +### v1.1.0 (6/4/2023) + +#### Highlights + +- Support [Cylinder3D](https://arxiv.org/pdf/2011.10033.pdf) (#2291, #2344, #2350) +- Support [MinkUnet](https://arxiv.org/abs/1904.08755) (#2294, #2358) +- Support [SPVCNN](https://arxiv.org/abs/2007.16100) (#2320,#2372) +- Support [TR3D](https://arxiv.org/abs/2302.02858) detector in `projects` (#2274) +- Support the inference of [BEVFusion](https://arxiv.org/abs/2205.13542) in `projects` (#2175) +- Support [DETR3D](https://arxiv.org/abs/2110.06922) in `projects` (#2173) + +#### New Features + +- Support [Cylinder3D](https://arxiv.org/pdf/2011.10033.pdf) (#2291, #2344, #2350) +- Support [MinkUnet](https://arxiv.org/abs/1904.08755) (#2294, #2358) +- Support [SPVCNN](https://arxiv.org/abs/2007.16100) (#2320,#2372) +- Support [TR3D](https://arxiv.org/abs/2302.02858) detector in `projects` (#2274) +- Support the inference of [BEVFusion](https://arxiv.org/abs/2205.13542) in `projects` (#2175) +- Support [DETR3D](https://arxiv.org/abs/2110.06922) in `projects` (#2173) +- Support PolarMix and LaserMix augmentation (#2265, #2302) +- Support loading annotation of panoptic segmentation (#2223) +- Support panoptic segmentation metric (#2230) +- Add inferencer for LiDAR-based, monocular and multi-modality 3D detection (#2208, #2190, #2342) +- Add inferencer for LiDAR-based segmentation (#2304) + +#### Improvements + +- Support `lazy_init` for CBGSDataset (#2271) +- Support generating annotation files for test 
set on Waymo (#2180) +- Enhance the support for SemanticKitti (#2253, #2323) +- File I/O migration and reconstruction (#2319) +- Support `format_only` option for Lyft, NuScenes and Waymo datasets (#2333, #2151) +- Replace `np.transpose` with `torch.permute` to speed up (#2277) +- Allow setting local-rank for pytorch 2.0 (#2387) + +#### Bug Fixes + +- Fix the problem of reversal of length and width when drawing heatmap in CenterFormer (#2362) +- Deprecate old type alias due to the new version of numpy (#2339) +- Lose `trimesh` version requirements to fix numpy random state (#2340) +- Fix the device mismatch error in CenterPoint (#2308) +- Fix bug of visualization when there are no bboxes (#2231) +- Fix bug of counting ignore index in IOU in segmentation evaluation (#2229) + +#### Contributors + +A total of 14 developers contributed to this release. + +@ZLTJohn, @SekiroRong, @shufanwu, @vansin, @triple-Mu, @404Vector, @filaPro, @sunjiahao1999, @Ginray, @Xiangxu-0103, @JingweiZhang12, @DezeZhao, @ZCMax, @roger-lcc + +### v1.1.0rc3 (7/1/2023) + +#### Highlights + +- Support [CenterFormer](https://arxiv.org/abs/2209.05588) in `projects` (#2175) +- Support [PETR](https://arxiv.org/abs/2203.05625) in `projects` (#2173) + +#### New Features + +- Support [CenterFormer](https://arxiv.org/abs/2209.05588) in `projects` (#2175) +- Support [PETR](https://arxiv.org/abs/2203.05625) in `projects` (#2173) +- Refactor ImVoxelNet on SUN RGB-D into mmdet3d v1.1 (#2141) + +#### Improvements + +- Remove legacy builder.py (#2061) +- Update `customize_dataset` documentation (#2153) +- Update tutorial of LiDAR-based detection (#2120) + +#### Bug Fixes + +- Fix the configs of FCOS3D and PGD (#2191) +- Fix numpy's `ValueError` in update_infos_to_v2.py (#2162) +- Fix parameter missing in Det3DVisualizationHook (#2118) +- Fix memory overflow in the rotated box IoU calculation (#2134) +- Fix lidar2cam error in update_infos_to_v2.py for nus and lyft dataset (#2110) +- Fix error of data type in Waymo metrics (#2109) +- Update `bbox_3d` information in `cam_instances` for mono3d detection task (#2046) +- Fix label saving of Waymo dataset (#2096) + +#### Contributors + +A total of 10 developers contributed to this release. 
+ +@SekiroRong, @ZLTJohn, @vansin, @shanmo, @VVsssssk, @ZCMax, @Xiangxu-0103, @JingweiZhang12, @Tai-Wang, @lianqing11 + +### v1.1.0rc2 (2/12/2022) + +#### Highlights + +- Support [PV-RCNN](https://arxiv.org/abs/1912.13192) +- Speed up evaluation on Waymo dataset + +#### New Features + +- Support [PV-RCNN](https://arxiv.org/abs/1912.13192) (#1597, #2045) +- Speed up evaluation on Waymo dataset (#2008) +- Refactor FCAF3D into the framework of mmdet3d v1.1 (#1945) +- Refactor S3DIS dataset into the framework of mmdet3d v1.1 (#1984) +- Add `Projects/` folder and the first example project (#2042) + +#### Improvements + +- Rename `CLASSES` and `PALETTE` to `classes` and `palette` respectively (#1932) +- Update `metainfo` in pkl files and add `categories` into metainfo (#1934) +- Show instance statistics before and after through the pipeline (#1863) +- Add configs of DGCNN for different testing areas (#1967) +- Remove testing utils from `tests/utils/` to `mmdet3d/testing/` (#2012) +- Add typehint for code in `models/layers/` (#2014) +- Refine documentation (#1891, #1994) +- Refine voxelization for better speed (#2062) + +#### Bug Fixes + +- Fix loop visualization error about point cloud (#1914) +- Fix image conversion of Waymo to avoid information loss (#1979) +- Fix evaluation on KITTI testset (#2005) +- Fix sampling bug in `IoUNegPiecewiseSampler` (#2017) +- Fix point cloud range in CenterPoint (#1998) +- Fix some loading bugs and support FOV-image-based mode on Waymo dataset (#1942) +- Fix dataset conversion utils (#1923, #2040, #1971) +- Update metafiles in all the configs (#2006) + +#### Contributors + +A total of 12 developers contributed to this release. + +@vavanade, @oyel, @thinkthinking, @PeterH0323, @274869388, @cxiang26, @lianqing11, @VVsssssk, @ZCMax, @Xiangxu-0103, @JingweiZhang12, @Tai-Wang + +### v1.1.0rc1 (11/10/2022) + +#### Highlights + +- Support a camera-only 3D detection baseline on Waymo, [MV-FCOS3D++](https://arxiv.org/abs/2207.12716) + +#### New Features + +- Support a camera-only 3D detection baseline on Waymo, [MV-FCOS3D++](https://arxiv.org/abs/2207.12716), with new evaluation metrics and transformations (#1716) +- Refactor PointRCNN in the framework of mmdet3d v1.1 (#1819) + +#### Improvements + +- Add `auto_scale_lr` in config to support training with auto-scale learning rates (#1807) +- Fix CI (#1813, #1865, #1877) +- Update `browse_dataset.py` script (#1817) +- Update SUN RGB-D and Lyft datasets documentation (#1833) +- Rename `convert_to_datasample` to `add_pred_to_datasample` in detectors (#1843) +- Update customized dataset documentation (#1845) +- Update `Det3DLocalVisualization` and visualization documentation (#1857) +- Add the code of generating `cam_sync_labels` for Waymo dataset (#1870) +- Update dataset transforms typehints (#1875) + +#### Bug Fixes + +- Fix missing registration of models in [setup_env.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/utils/setup_env.py) (#1808) +- Fix the data base sampler bugs when using the ground plane data (#1812) +- Add output directory existing check during visualization (#1828) +- Fix bugs of nuScenes dataset for monocular 3D detection (#1837) +- Fix visualization hook to support the visualization of different data modalities (#1839) +- Fix monocular 3D detection demo (#1864) +- Fix the lack of `num_pts_feats` key in nuscenes dataset and complete docstring (#1882) + +#### Contributors + +A total of 10 developers contributed to this release. 
+ +@ZwwWayne, @Tai-Wang, @lianqing11, @VVsssssk, @ZCMax, @Xiangxu-0103, @JingweiZhang12, @tpoisonooo, @ice-tong, @jshilong + +### v1.1.0rc0 (1/9/2022) + +We are excited to announce the release of MMDetection3D 1.1.0rc0. +MMDet3D 1.1.0rc0 is the first version of MMDetection3D 1.1, a part of the OpenMMLab 2.0 projects. +Built upon the new [training engine](https://github.com/open-mmlab/mmengine) and [MMDet 3.x](https://github.com/open-mmlab/mmdetection/tree/3.x), +MMDet3D 1.1 unifies the interfaces of dataset, models, evaluation, and visualization with faster training and testing speed. +It also provides a standard data protocol for different datasets, modalities, and tasks for 3D perception. +We will support more strong baselines in the future release, with our latest exploration on camera-only 3D detection from videos. + +### Highlights + +1. **New engines**. MMDet3D 1.1 is based on [MMEngine](https://github.com/open-mmlab/mmengine) and [MMDet 3.x](https://github.com/open-mmlab/mmdetection/tree/3.x), which provides a universal and powerful runner that allows more flexible customizations and significantly simplifies the entry points of high-level interfaces. + +2. **Unified interfaces**. As a part of the OpenMMLab 2.0 projects, MMDet3D 1.1 unifies and refactors the interfaces and internal logics of train, testing, datasets, models, evaluation, and visualization. All the OpenMMLab 2.0 projects share the same design in those interfaces and logics to allow the emergence of multi-task/modality algorithms. + +3. **Standard data protocol for all the datasets, modalities, and tasks for 3D perception**. Based on the unified base datasets inherited from MMEngine, we also design a standard data protocol that defines and unifies the common keys across different datasets, tasks, and modalities. It significantly simplifies the usage of multiple datasets and data modalities for multi-task frameworks and eases dataset customization. Please refer to the [documentation of customized datasets](../advanced_guides/customize_dataset.md) for details. + +4. **Strong baselines**. We will release strong baselines of many popular models to enable fair comparisons among state-of-the-art models. + +5. **More documentation and tutorials**. We add a bunch of documentation and tutorials to help users get started more smoothly. Read it [here](https://mmdetection3d.readthedocs.io/en/1.1/). + +### Breaking Changes + +MMDet3D 1.1 has undergone significant changes to have better design, higher efficiency, more flexibility, and more unified interfaces. +Besides the changes of API, we briefly list the major breaking changes in this section. +We will update the [migration guide](../migration.md) to provide complete details and migration instructions. +Users can also refer to the [compatibility documentation](./compatibility.md) and [API doc](https://mmdetection3d.readthedocs.io/en/1.1/) for more details. + +#### Dependencies + +- MMDet3D 1.1 runs on PyTorch>=1.6. We have deprecated the support of PyTorch 1.5 to embrace the mixed precision training and other new features since PyTorch 1.6. Some models can still run on PyTorch 1.5, but the full functionality of MMDet3D 1.1 is not guaranteed. +- MMDet3D 1.1 relies on MMEngine to run. MMEngine is a new foundational library for training deep learning models of OpenMMLab and are widely depended by OpenMMLab 2.0 projects. The dependencies of file IO and training are migrated from MMCV 1.x to MMEngine. +- MMDet3D 1.1 relies on MMCV>=2.0.0rc0. 
Although MMCV no longer maintains the training functionalities since 2.0.0rc0, MMDet3D 1.1 relies on the data transforms, CUDA operators, and image processing interfaces in MMCV. Note that the package `mmcv` is the version that provides pre-built CUDA operators and `mmcv-lite` does not since MMCV 2.0.0rc0, while `mmcv-full` has been deprecated since 2.0.0rc0. +- MMDet3D 1.1 is based on MMDet 3.x, which is also a part of OpenMMLab 2.0 projects. + +#### Training and testing + +- MMDet3D 1.1 uses Runner in [MMEngine](https://github.com/open-mmlab/mmengine) rather than that in MMCV. The new Runner implements and unifies the building logic of dataset, model, evaluation, and visualizer. Therefore, MMDet3D 1.1 no longer relies on the building logics of those modules in `mmdet3d.train.apis` and `tools/train.py`. Those code have been migrated into [MMEngine](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/runner.py). Please refer to the [migration guide of Runner in MMEngine](https://mmengine.readthedocs.io/en/latest/migration/runner.html) for more details. +- The Runner in MMEngine also supports testing and validation. The testing scripts are also simplified, which has similar logic as that in training scripts to build the runner. +- The execution points of hooks in the new Runner have been enriched to allow more flexible customization. Please refer to the [migration guide of Hook in MMEngine](https://mmengine.readthedocs.io/en/latest/migration/hook.html) for more details. +- Learning rate and momentum scheduling has been migrated from Hook to [Parameter Scheduler in MMEngine](https://mmengine.readthedocs.io/en/latest/tutorials/param_scheduler.html). Please refer to the [migration guide of Parameter Scheduler in MMEngine](https://mmengine.readthedocs.io/en/latest/migration/param_scheduler.html) for more details. + +#### Configs + +- The [Runner in MMEngine](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/runner.py) uses a different config structure to ease the understanding of the components in runner. Users can read the [config example of MMDet3D 1.1](../user_guides/config.md) or refer to the [migration guide in MMEngine](https://mmengine.readthedocs.io/en/latest/migration/runner.html) for migration details. +- The file names of configs and models are also refactored to follow the new rules unified across OpenMMLab 2.0 projects. The names of checkpoints are not updated for now as there is no BC-breaking of model weights between MMDet3D 1.1 and 1.0.x. We will progressively replace all the model weights by those trained in MMDet3D 1.1. Please refer to the [user guides of config](../user_guides/config.md) for more details. + +#### Dataset + +The Dataset classes implemented in MMDet3D 1.1 all inherits from the `Det3DDataset` and `Seg3DDataset`, which inherits from the [BaseDataset in MMEngine](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/basedataset.html). In addition to the changes of interfaces, there are several changes of Dataset in MMDet3D 1.1. + +- All the datasets support to serialize the internal data list to reduce the memory when multiple workers are built for data loading. +- The internal data structure in the dataset is changed to be self-contained (without losing information like class names in MMDet3D 1.0.x) while keeping simplicity. +- Common keys across different datasets and data modalities are defined and all the info files are unified into a standard protocol. 
+- The evaluation functionality of each dataset has been removed from the dataset class, so that specific evaluation metrics like KITTI AP can also be used to evaluate predictions on other datasets.
+
+#### Data Transforms
+
+The data transforms in MMDet3D 1.1 all inherit from `BaseTransform` in MMCV>=2.0.0rc0, which defines a new convention in OpenMMLab 2.0 projects.
+Besides the interface changes, there are several changes listed below:
+
+- The functionality of some data transforms (e.g., `Resize`) is decomposed into several transforms to simplify and clarify their usage.
+- The format of the data dict processed by each data transform is changed according to the new data structure of the dataset.
+- Some inefficient data transforms (e.g., normalization and padding) are moved into the data preprocessor of the model to improve data loading and training speed.
+- The same data transforms in different OpenMMLab 2.0 libraries share the same augmentation implementation and logic given the same arguments, i.e., `Resize` in MMDet 3.x and MMSeg 1.x will resize the image in the exact same manner given the same arguments.
+
+#### Model
+
+The models in MMDet3D 1.1 all inherit from `BaseModel` in MMEngine, which defines a new convention for models in OpenMMLab 2.0 projects.
+Users can refer to [the tutorial of model in MMEngine](https://mmengine.readthedocs.io/en/latest/tutorials/model.html) for more details.
+Accordingly, there are several changes as follows:
+
+- The model interfaces, including the input and output formats, are significantly simplified and unified following the new convention in MMDet3D 1.1.
+  Specifically, all the input data in training and testing are packed into `inputs` and `data_samples`, where `inputs` contains model inputs such as a dict containing a list of image tensors and the point cloud data, and `data_samples` contains other information of the current data sample such as ground truths, region proposals, and model predictions. In this way, different tasks in MMDet3D 1.1 can share the same input arguments, which makes the models more general and suitable for multi-task learning and flexible training paradigms like semi-supervised learning.
+- The model has a data preprocessor module, which is used to pre-process the input data of the model. In MMDet3D 1.1, the data preprocessor usually performs the necessary steps to form the input images into a batch, such as padding. It can also serve as a place for some special data augmentations or more efficient data transformations like normalization.
+- The internal logic of the model has been changed. In MMDet3D 1.0.x, the model used `forward_train`, `forward_test`, `simple_test`, and `aug_test` to deal with different forward logics. In MMDet3D 1.1 and OpenMMLab 2.0, the forward function has three modes: 'loss', 'predict', and 'tensor' for training, inference, and tracing or other purposes, respectively.
+  The forward function calls `self.loss`, `self.predict`, and `self._forward` given the modes 'loss', 'predict', and 'tensor', respectively.
+
+#### Evaluation
+
+The evaluation in MMDet3D 1.0.x is strictly bound to the dataset. In contrast, MMDet3D 1.1 decouples the evaluation from the dataset, so that all detection datasets can be evaluated with KITTI AP and other metrics implemented in MMDet3D 1.1.
+MMDet3D 1.1 mainly implements corresponding metrics for each dataset, which are manipulated by an [Evaluator](https://mmengine.readthedocs.io/en/latest/design/evaluator.html) to complete the evaluation.
+Users can build evaluator in MMDet3D 1.1 to conduct offline evaluation, i.e., evaluate predictions that may not produced in MMDet3D 1.1 with the dataset as long as the dataset and the prediction follows the dataset conventions. More details can be find in the [tutorial in mmengine](https://mmengine.readthedocs.io/en/latest/tutorials/evaluation.html). + +#### Visualization + +The functions of visualization in MMDet3D 1.1 are removed. Instead, in OpenMMLab 2.0 projects, we use [Visualizer](https://mmengine.readthedocs.io/en/latest/design/visualization.html) to visualize data. MMDet3D 1.1 implements `Det3DLocalVisualizer` to allow visualization of 2D and 3D data, ground truths, model predictions, and feature maps, etc., at any place. It also supports to send the visualization data to any external visualization backends such as Tensorboard. + +### Planned changes + +We list several planned changes of MMDet3D 1.1.0rc0 so that the community could more comprehensively know the progress of MMDet3D 1.1. Feel free to create a PR, issue, or discussion if you are interested, have any suggestions and feedbacks, or want to participate. + +1. Test-time augmentation: which is supported in MMDet3D 1.0.x, is not implemented in this version due to limited time slot. We will support it in the following releases with a new and simplified design. +2. Inference interfaces: a unified inference interfaces will be supported in the future to ease the use of released models. +3. Interfaces of useful tools that can be used in notebook: more useful tools that implemented in the `tools` directory will have their python interfaces so that they can be used through notebook and in downstream libraries. +4. Documentation: we will add more design docs, tutorials, and migration guidance so that the community can deep dive into our new design, participate the future development, and smoothly migrate downstream libraries to MMDet3D 1.1. +5. Wandb visualization: MMDet 2.x supports data visualization since v2.25.0, which has not been migrated to MMDet 3.x for now. Since Wandb provides strong visualization and experiment management capabilities, a `DetWandbVisualizer` and maybe a hook are planned to fully migrated those functionalities in MMDet 2.x and a `Det3DWandbVisualizer` will be supported in MMDet3D 1.1 accordingly. +6. Will support recent new features added in MMDet3D 1.0.x and our recent exploration on camera-only 3D detection from videos: we will refactor these models and support them with benchmarks and models soon. diff --git a/docs/en/notes/changelog_v1.0.x.md b/docs/en/notes/changelog_v1.0.x.md new file mode 100755 index 0000000..76d2ba0 --- /dev/null +++ b/docs/en/notes/changelog_v1.0.x.md @@ -0,0 +1,930 @@ +# Changelog of v1.0.x + +### v1.0.0 (6/4/2023) + +#### Improvements + +- Add BN in FPN to avoid loss Nan in MVXNet (#2282) +- Update `s3dis_data_utils.py` (#2232) + +#### Bug Fixes + +- Fix precision error when using mixed precision on CenterPoint (#2341) +- Replace `np.transpose` with `torch.permute` to speed up (@2273) +- Update links of SECOND checkpoints (#2185) + +#### Contributors + +A total of 7 developers contributed to this release. +@JingweiZhang12, @ZCMax, @Xiangxu-0103, @vansinhu, @cs1488, @sunjiahao1999, @Ginray + +### v1.0.0rc7 (7/1/2023) + +#### Improvements + +- Support training and testing on MLU (#2167) + +#### Contributors + +A total of 1 developers contributed to this release. 
+@mengpenghui + +### v1.0.0rc6 (2/12/2022) + +#### New Features + +- Add `Projects/` folder and the first example project (#2082) + +#### Improvements + +- Update Waymo converter to save storage space (#1759) +- Update model link and performance of CenterPoint (#1916) + +#### Bug Fixes + +- Fix GPU memory occupancy problem in PointRCNN (#1928) +- Fix sampling bug in `IoUNegPiecewiseSampler` (#2018) + +#### Contributors + +A total of 6 developers contributed to this release. + +@oyel, @zzj403, @VVsssssk, @Tai-Wang, @tpoisonooo, @JingweiZhang12, @ZCMax + +### v1.0.0rc5 (11/10/2022) + +#### New Features + +- Support ImVoxelNet on SUN RGB-D (#1738) + +#### Improvements + +- Fix the cross-codebase reference problem in metafile README (#1644) +- Update the Chinese documentation about getting started (#1715) +- Fix docs link and add docs link checker (#1811) + +#### Bug Fixes + +- Fix a visualization bug that is potentially triggered by empty prediction labels (#1725) +- Fix point cloud segmentation visualization bug due to wrong parameter passing (#1858) +- Fix Nan loss bug during PointRCNN training (#1874) + +#### Contributors + +A total of 9 developers contributed to this release. + +@ZwwWayne, @Tai-Wang, @filaPro, @VVsssssk, @ZCMax, @Xiangxu-0103, @holtvogt, @tpoisonooo, @lianqing01 + +### v1.0.0rc4 (8/8/2022) + +#### Highlights + +- Support [FCAF3D](https://arxiv.org/pdf/2112.00322.pdf) + +#### New Features + +- Support [FCAF3D](https://arxiv.org/pdf/2112.00322.pdf) (#1547) +- Add the transformation to support multi-camera 3D object detection (#1580) +- Support lift-splat-shoot view transformer (#1598) + +#### Improvements + +- Remove the limitation of the maximum number of points during SUN RGB-D preprocessing (#1555) +- Support circle CI (#1647) +- Add mim to extras_require in setup.py (#1560, #1574) +- Update dockerfile package version (#1697) + +#### Bug Fixes + +- Flip yaw angle for DepthInstance3DBoxes.overlaps (#1548, #1556) +- Fix DGCNN configs (#1587) +- Fix bbox head not registered bug (#1625) +- Fix missing objects in S3DIS preprocessing (#1665) +- Fix spconv2.0 model loading bug (#1699) + +#### Contributors + +A total of 9 developers contributed to this release. 
+ +@Tai-Wang, @ZwwWayne, @filaPro, @lianqing11, @ZCMax, @HuangJunJie2017, @Xiangxu-0103, @ChonghaoSima, @VVsssssk + +### v1.0.0rc3 (8/6/2022) + +#### Highlights + +- Support [SA-SSD](https://openaccess.thecvf.com/content_CVPR_2020/papers/He_Structure_Aware_Single-Stage_3D_Object_Detection_From_Point_Cloud_CVPR_2020_paper.pdf) + +#### New Features + +- Support [SA-SSD](https://openaccess.thecvf.com/content_CVPR_2020/papers/He_Structure_Aware_Single-Stage_3D_Object_Detection_From_Point_Cloud_CVPR_2020_paper.pdf) (#1337) + +#### Improvements + +- Add Chinese documentation for vision-only 3D detection (#1438) +- Update CenterPoint pretrained models that are compatible with refactored coordinate systems (#1450) +- Configure myst-parser to parse anchor tag in the documentation (#1488) +- Replace markdownlint with mdformat for avoiding installing ruby (#1489) +- Add missing `gt_names` when getting annotation info in Custom3DDataset (#1519) +- Support S3DIS full ceph training (#1542) +- Rewrite the installation and FAQ documentation (#1545) + +#### Bug Fixes + +- Fix the incorrect registry name when building RoI extractors (#1460) +- Fix the potential problems caused by the registry scope update when composing pipelines (#1466) and using CocoDataset (#1536) +- Fix the missing selection with `order` in the [box3d_nms](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/post_processing/box3d_nms.py) introduced by [#1403](https://github.com/open-mmlab/mmdetection3d/pull/1403) (#1479) +- Update the [PointPillars config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py) to make it consistent with the log (#1486) +- Fix heading anchor in documentation (#1490) +- Fix the compatibility of mmcv in the dockerfile (#1508) +- Make overwrite_spconv packaged when building whl (#1516) +- Fix the requirement of mmcv and mmdet (#1537) +- Update configs of PartA2 and support its compatibility with spconv 2.0 (#1538) + +#### Contributors + +A total of 13 developers contributed to this release. + +@Xiangxu-0103, @ZCMax, @jshilong, @filaPro, @atinfinity, @Tai-Wang, @wenbo-yu, @yi-chen-isuzu, @ZwwWayne, @wchen61, @VVsssssk, @AlexPasqua, @lianqing11 + +### v1.0.0rc2 (1/5/2022) + +#### Highlights + +- Support spconv 2.0 +- Support MinkowskiEngine with MinkResNet +- Support training models on custom datasets with only point clouds +- Update Registry to distinguish the scope of built functions +- Replace mmcv.iou3d with a set of bird-eye-view (BEV) operators to unify the operations of rotated boxes + +#### New Features + +- Add loader arguments in the configuration files (#1388) +- Support [spconv 2.0](https://github.com/traveller59/spconv) when the package is installed. 
Users can still use spconv 1.x in MMCV with CUDA 9.0 (only cost more memory) without losing the compatibility of model weights between two versions (#1421) +- Support MinkowskiEngine with MinkResNet (#1422) + +#### Improvements + +- Add the documentation for model deployment (#1373, #1436) +- Add Chinese documentation of + - Speed benchmark (#1379) + - LiDAR-based 3D detection (#1368) + - LiDAR 3D segmentation (#1420) + - Coordinate system refactoring (#1384) +- Support training models on custom datasets with only point clouds (#1393) +- Replace mmcv.iou3d with a set of bird-eye-view (BEV) operators to unify the operations of rotated boxes (#1403, #1418) +- Update Registry to distinguish the scope of building functions (#1412, #1443) +- Replace recommonmark with myst_parser for documentation rendering (#1414) + +#### Bug Fixes + +- Fix the show pipeline in the [browse_dataset.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/misc/browse_dataset.py) (#1376) +- Fix missing __init__ files after coordinate system refactoring (#1383) +- Fix the incorrect yaw in the visualization caused by coordinate system refactoring (#1407) +- Fix `NaiveSyncBatchNorm1d` and `NaiveSyncBatchNorm2d` to support non-distributed cases and more general inputs (#1435) + +#### Contributors + +A total of 11 developers contributed to this release. + +@ZCMax, @ZwwWayne, @Tai-Wang, @VVsssssk, @HanaRo, @JoeyforJoy, @ansonlcy, @filaPro, @jshilong, @Xiangxu-0103, @deleomike + +### v1.0.0rc1 (1/4/2022) + +#### Compatibility + +- We migrate all the mmdet3d ops to mmcv and do not need to compile them when installing mmdet3d. +- To fix the imprecise timestamp and optimize its saving method, we reformat the point cloud data during Waymo data conversion. The data conversion time is also optimized significantly by supporting parallel processing. Please re-generate KITTI format Waymo data if necessary. See more details in the [compatibility documentation](https://github.com/open-mmlab/mmdetection3d/blob/master/docs/en/compatibility.md). +- We update some of the model checkpoints after the refactor of coordinate systems. Please stay tuned for the release of the remaining model checkpoints. 
+ +| | Fully Updated | Partially Updated | In Progress | No Influcence | +| ------------- | :-----------: | :---------------: | :---------: | :-----------: | +| SECOND | | ✓ | | | +| PointPillars | | ✓ | | | +| FreeAnchor | ✓ | | | | +| VoteNet | ✓ | | | | +| H3DNet | ✓ | | | | +| 3DSSD | | ✓ | | | +| Part-A2 | ✓ | | | | +| MVXNet | ✓ | | | | +| CenterPoint | | | ✓ | | +| SSN | ✓ | | | | +| ImVoteNet | ✓ | | | | +| FCOS3D | | | | ✓ | +| PointNet++ | | | | ✓ | +| Group-Free-3D | | | | ✓ | +| ImVoxelNet | ✓ | | | | +| PAConv | | | | ✓ | +| DGCNN | | | | ✓ | +| SMOKE | | | | ✓ | +| PGD | | | | ✓ | +| MonoFlex | | | | ✓ | + +#### Highlights + +- Migrate all the mmdet3d ops to mmcv +- Support parallel waymo data converter +- Add ScanNet instance segmentation dataset with metrics +- Better compatibility for windows with CI support, op migration and bug fixes +- Support loading annotations from Ceph + +#### New Features + +- Add ScanNet instance segmentation dataset with metrics (#1230) +- Support different random seeds for different ranks (#1321) +- Support loading annotations from Ceph (#1325) +- Support resuming from the latest checkpoint automatically (#1329) +- Add windows CI (#1345) + +#### Improvements + +- Update the table format and OpenMMLab project orders in [README.md](https://github.com/open-mmlab/mmdetection3d/blob/master/README.md) (#1272, #1283) +- Migrate all the mmdet3d ops to mmcv (#1240, #1286, #1290, #1333) +- Add `with_plane` flag in the KITTI data conversion (#1278) +- Update instructions and links in the documentation (#1300, 1309, #1319) +- Support parallel Waymo dataset converter and ground truth database generator (#1327) +- Add quick installation commands to [getting_started.md](https://github.com/open-mmlab/mmdetection3d/blob/master/docs/en/getting_started.md) (#1366) + +#### Bug Fixes + +- Update nuimages configs to use new nms config style (#1258) +- Fix the usage of np.long for windows compatibility (#1270) +- Fix the incorrect indexing in `BasePoints` (#1274) +- Fix the incorrect indexing in the [pillar_scatter.forward_single](https://github.com/open-mmlab/mmdetection3d/blob/dev/mmdet3d/models/middle_encoders/pillar_scatter.py#L38) (#1280) +- Fix unit tests that use GPUs (#1301) +- Fix incorrect feature dimensions in `DynamicPillarFeatureNet` caused by previous upgrading of `PillarFeatureNet` (#1302) +- Remove the `CameraPoints` constraint in `PointSample` (#1314) +- Fix imprecise timestamps saving of Waymo dataset (#1327) + +#### Contributors + +A total of 9 developers contributed to this release. + +@ZCMax, @ZwwWayne, @wHao-Wu, @Tai-Wang, @wangruohui, @zjwzcx, @Xiangxu-0103, @EdAyers, @hongye-dev, @zhanggefan + +### v1.0.0rc0 (18/2/2022) + +#### Compatibility + +- We refactor our three coordinate systems to make their rotation directions and origins more consistent, and further remove unnecessary hacks in different datasets and models. Therefore, please re-generate data infos or convert the old version to the new one with our provided scripts. We will also provide updated checkpoints in the next version. Please refer to the [compatibility documentation](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/docs/en/compatibility.md) for more details. +- Unify the camera keys for consistent transformation between coordinate systems on different datasets. The modification changes the key names to `lidar2img`, `depth2img`, `cam2img`, etc., for easier understanding. Customized codes using legacy keys may be influenced. 
+- The next release will begin to move files of CUDA ops to [MMCV](https://github.com/open-mmlab/mmcv). It will influence the way to import related functions. We will not break the compatibility but will raise a warning first and please prepare to migrate it. + +#### Highlights + +- Support new monocular 3D detectors: [PGD](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/pgd), [SMOKE](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/smoke), [MonoFlex](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/monoflex) +- Support a new LiDAR-based detector: [PointRCNN](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/point_rcnn) +- Support a new backbone: [DGCNN](https://github.com/open-mmlab/mmdetection3d/tree/v1.0.0.dev0/configs/dgcnn) +- Support 3D object detection on the S3DIS dataset +- Support compilation on Windows +- Full benchmark for PAConv on S3DIS +- Further enhancement for documentation, especially on the Chinese documentation + +#### New Features + +- Support 3D object detection on the S3DIS dataset (#835) +- Support PointRCNN (#842, #843, #856, #974, #1022, #1109, #1125) +- Support DGCNN (#896) +- Support PGD (#938, #940, #948, #950, #964, #1014, #1065, #1070, #1157) +- Support SMOKE (#939, #955, #959, #975, #988, #999, #1029) +- Support MonoFlex (#1026, #1044, #1114, #1115, #1183) +- Support CPU Training (#1196) + +#### Improvements + +- Support point sampling based on distance metric (#667, #840) +- Refactor coordinate systems (#677, #774, #803, #899, #906, #912, #968, #1001) +- Unify camera keys in PointFusion and transformations between different systems (#791, #805) +- Refine documentation (#792, #827, #829, #836, #849, #854, #859, #1111, #1113, #1116, #1121, #1132, #1135, #1185, #1193, #1226) +- Add a script to support benchmark regression (#808) +- Benchmark PAConvCUDA on S3DIS (#847) +- Support to download pdf and epub documentation (#850) +- Change the `repeat` setting in Group-Free-3D configs to reduce training epochs (#855) +- Support KITTI AP40 evaluation metric (#927) +- Add the mmdet3d2torchserve tool for SECOND (#977) +- Add code-spell pre-commit hook and fix typos (#995) +- Support the latest numba version (#1043) +- Set a default seed to use when the random seed is not specified (#1072) +- Distribute mix-precision models to each algorithm folder (#1074) +- Add abstract and a representative figure for each algorithm (#1086) +- Upgrade pre-commit hook (#1088, #1217) +- Support augmented data and ground truth visualization (#1092) +- Add local yaw property for `CameraInstance3DBoxes` (#1130) +- Lock the required numba version to 0.53.0 (#1159) +- Support the usage of plane information for KITTI dataset (#1162) +- Deprecate the support for "python setup.py test" (#1164) +- Reduce the number of multi-process threads to accelerate training (#1168) +- Support 3D flip augmentation for semantic segmentation (#1181) +- Update README format for each model (#1195) + +#### Bug Fixes + +- Fix compiling errors on Windows (#766) +- Fix the deprecated nms setting in the ImVoteNet config (#828) +- Use the latest `wrap_fp16_model` import from mmcv (#861) +- Remove 2D annotations generation on Lyft (#867) +- Update index files for the Chinese documentation to be consistent with the English version (#873) +- Fix the nested list transpose in the CenterPoint head (#879) +- Fix deprecated pretrained model loading for RegNet (#889) +- Fix the incorrect dimension indices of rotations and testing config in the 
CenterPoint test time augmentation (#892) +- Fix and improve visualization tools (#956, #1066, #1073) +- Fix PointPillars FLOPs calculation error (#1075) +- Fix missing dimension information in the SUN RGB-D data generation (#1120) +- Fix incorrect anchor range settings in the PointPillars [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/models/hv_pointpillars_secfpn_kitti.py) for KITTI (#1163) +- Fix incorrect model information in the RegNet metafile (#1184) +- Fix bugs in non-distributed multi-gpu training and testing (#1197) +- Fix a potential assertion error when generating corners from an empty box (#1212) +- Upgrade bazel version according to the requirement of Waymo Devkit (#1223) + +#### Contributors + +A total of 12 developers contributed to this release. + +@THU17cyz, @wHao-Wu, @wangruohui, @Wuziyi616, @filaPro, @ZwwWayne, @Tai-Wang, @DCNSW, @xieenze, @robin-karlsson0, @ZCMax, @Otteri + +### v0.18.1 (1/2/2022) + +#### Improvements + +- Support Flip3D augmentation in semantic segmentation task (#1182) +- Update regnet metafile (#1184) +- Add point cloud annotation tools introduction in FAQ (#1185) +- Add missing explanations of `cam_intrinsic` in the nuScenes dataset doc (#1193) + +#### Bug Fixes + +- Deprecate the support for "python setup.py test" (#1164) +- Fix the rotation matrix while rotation axis=0 (#1182) +- Fix the bug in non-distributed multi-gpu training/testing (#1197) +- Fix a potential bug when generating corners for empty bounding boxes (#1212) + +#### Contributors + +A total of 4 developers contributed to this release. + +@ZwwWayne, @ZCMax, @Tai-Wang, @wHao-Wu + +### v0.18.0 (1/1/2022) + +#### Highlights + +- Update the required minimum version of mmdet and mmseg + +#### Improvements + +- Use the official markdownlint hook and add codespell hook for pre-committing (#1088) +- Improve CI operation (#1095, #1102, #1103) +- Use shared menu content from OpenMMLab's theme and remove duplicated contents from config (#1111) +- Refactor the structure of documentation (#1113, #1121) +- Update the required minimum version of mmdet and mmseg (#1147) + +#### Bug Fixes + +- Fix symlink failure on Windows (#1096) +- Fix the upper bound of mmcv version in the mminstall requirements (#1104) +- Fix API documentation compilation and mmcv build errors (#1116) +- Fix figure links and pdf documentation compilation (#1132, #1135) + +#### Contributors + +A total of 4 developers contributed to this release. + +@ZwwWayne, @ZCMax, @Tai-Wang, @wHao-Wu + +### v0.17.3 (1/12/2021) + +#### Improvements + +- Change the default show value to `False` in show_result function to avoid unnecessary errors (#1034) +- Improve the visualization of detection results with colorized points in [single_gpu_test](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/apis/test.py#L11) (#1050) +- Clean unnecessary custom_imports in entrypoints (#1068) + +#### Bug Fixes + +- Update mmcv version in the Dockerfile (#1036) +- Fix the memory-leak problem when loading checkpoints in [init_model](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/apis/inference.py#L36) (#1045) +- Fix incorrect velocity indexing when formatting boxes on nuScenes (#1049) +- Explicitly set cuda device ID in [init_model](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/apis/inference.py#L36) to avoid memory allocation on unexpected devices (#1056) +- Fix PointPillars FLOPs calculation error (#1076) + +#### Contributors + +A total of 5 developers contributed to this release. 
+ +@wHao-Wu, @Tai-Wang, @ZCMax, @MilkClouds, @aldakata + +### v0.17.2 (1/11/2021) + +#### Improvements + +- Update Group-Free-3D and FCOS3D bibtex (#985) +- Update the solutions for incompatibility of pycocotools in the FAQ (#993) +- Add Chinese documentation for the KITTI (#1003) and Lyft (#1010) dataset tutorial +- Add the H3DNet checkpoint converter for incompatible keys (#1007) + +#### Bug Fixes + +- Update mmdetection and mmsegmentation version in the Dockerfile (#992) +- Fix links in the Chinese documentation (#1015) + +#### Contributors + +A total of 4 developers contributed to this release. + +@Tai-Wang, @wHao-Wu, @ZwwWayne, @ZCMax + +### v0.17.1 (1/10/2021) + +#### Highlights + +- Support a faster but non-deterministic version of hard voxelization +- Completion of dataset tutorials and the Chinese documentation +- Improved the aesthetics of the documentation format + +#### Improvements + +- Add Chinese documentation for training on customized datasets and designing customized models (#729, #820) +- Support a faster but non-deterministic version of hard voxelization (#904) +- Update paper titles and code details for metafiles (#917) +- Add a tutorial for KITTI dataset (#953) +- Use Pytorch sphinx theme to improve the format of documentation (#958) +- Use the docker to accelerate CI (#971) + +#### Bug Fixes + +- Fix the sphinx version used in the documentation (#902) +- Fix a dynamic scatter bug that discards the first voxel by mistake when all input points are valid (#915) +- Fix the inconsistent variable names used in the [unit test](https://github.com/open-mmlab/mmdetection3d/blob/master/tests/test_models/test_voxel_encoder/test_voxel_generator.py) for voxel generator (#919) +- Upgrade to use `build_prior_generator` to replace the legacy `build_anchor_generator` (#941) +- Fix a minor bug caused by a too small difference set in the FreeAnchor Head (#944) + +#### Contributors + +A total of 8 developers contributed to this release. + +@DCNSW, @zhanggefan, @mickeyouyou, @ZCMax, @wHao-Wu, @tojimahammatov, @xiliu8006, @Tai-Wang + +### v0.17.0 (1/9/2021) + +#### Compatibility + +- Unify the camera keys for consistent transformation between coordinate systems on different datasets. The modification change the key names to `lidar2img`, `depth2img`, `cam2img`, etc. for easier understanding. Customized codes using legacy keys may be influenced. +- The next release will begin to move files of CUDA ops to [MMCV](https://github.com/open-mmlab/mmcv). It will influence the way to import related functions. We will not break the compatibility but will raise a warning first and please prepare to migrate it. 
+ +#### Highlights + +- Support 3D object detection on the S3DIS dataset +- Support compilation on Windows +- Full benchmark for PAConv on S3DIS +- Further enhancement for documentation, especially on the Chinese documentation + +#### New Features + +- Support 3D object detection on the S3DIS dataset (#835) + +#### Improvements + +- Support point sampling based on distance metric (#667, #840) +- Update PointFusion to support unified camera keys (#791) +- Add Chinese documentation for customized dataset (#792), data pipeline (#827), customized runtime (#829), 3D Detection on ScanNet (#836), nuScenes (#854) and Waymo (#859) +- Unify camera keys used in transformation between different systems (#805) +- Add a script to support benchmark regression (#808) +- Benchmark PAConvCUDA on S3DIS (#847) +- Add a tutorial for 3D detection on the Lyft dataset (#849) +- Support to download pdf and epub documentation (#850) +- Change the `repeat` setting in Group-Free-3D configs to reduce training epochs (#855) + +#### Bug Fixes + +- Fix compiling errors on Windows (#766) +- Fix the deprecated nms setting in the ImVoteNet config (#828) +- Use the latest `wrap_fp16_model` import from mmcv (#861) +- Remove 2D annotations generation on Lyft (#867) +- Update index files for the Chinese documentation to be consistent with the English version (#873) +- Fix the nested list transpose in the CenterPoint head (#879) +- Fix deprecated pretrained model loading for RegNet (#889) + +#### Contributors + +A total of 11 developers contributed to this release. + +@THU17cyz, @wHao-Wu, @wangruohui, @Wuziyi616, @filaPro, @ZwwWayne, @Tai-Wang, @DCNSW, @xieenze, @robin-karlsson0, @ZCMax + +### v0.16.0 (1/8/2021) + +#### Compatibility + +- Remove the rotation and dimension hack in the monocular 3D detection on nuScenes by applying corresponding transformation in the pre-processing and post-processing. The modification only influences nuScenes coco-style json files. Please re-run the data preparation scripts if necessary. See more details in the PR #744. +- Add a new pre-processing module for the ScanNet dataset in order to support multi-view detectors. Please run the updated scripts to extract the RGB data and its annotations. See more details in the PR #696. 
+ +#### Highlights + +- Support to use [MIM](https://github.com/open-mmlab/mim) with pip installation +- Support PAConv [models and benchmarks](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/paconv) on S3DIS +- Enhance the documentation especially on dataset tutorials + +#### New Features + +- Support RGB images on ScanNet for multi-view detectors (#696) +- Support FLOPs and number of parameters calculation (#736) +- Support to use [MIM](https://github.com/open-mmlab/mim) with pip installation (#782) +- Support PAConv models and benchmarks on the S3DIS dataset (#783, #809) + +#### Improvements + +- Refactor Group-Free-3D to make it inherit BaseModule from MMCV (#704) +- Modify the initialization methods of FCOS3D to be consistent with the refactored approach (#705) +- Benchmark the Group-Free-3D [models](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/groupfree3d) on ScanNet (#710) +- Add Chinese documentation for Getting Started (#725), FAQ (#730), Model Zoo (#735), Demo (#745), Quick Run (#746), Data Preparation (#787) and Configs (#788) +- Add documentation for semantic segmentation on ScanNet and S3DIS (#743, #747, #806, #807) +- Add a parameter `max_keep_ckpts` to limit the maximum number of saved Group-Free-3D checkpoints (#765) +- Add documentation for 3D detection on SUN RGB-D and nuScenes (#770, #793) +- Remove mmpycocotools in the Dockerfile (#785) + +#### Bug Fixes + +- Fix versions of OpenMMLab dependencies (#708) +- Convert `rt_mat` to `torch.Tensor` in coordinate transformation for compatibility (#709) +- Fix the `bev_range` initialization in `ObjectRangeFilter` according to the `gt_bboxes_3d` type (#717) +- Fix Chinese documentation and incorrect doc format due to the incompatible Sphinx version (#718) +- Fix a potential bug when setting `interval == 1` in [analyze_logs.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/analysis_tools/analyze_logs.py) (#720) +- Update the structure of Chinese documentation (#722) +- Fix FCOS3D FPN BC-Breaking caused by the code refactoring in MMDetection (#739) +- Fix wrong `in_channels` when `with_distance=True` in the [Dynamic VFE Layers](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/voxel_encoders/voxel_encoder.py#L87) (#749) +- Fix the dimension and yaw hack of FCOS3D on nuScenes (#744, #794, #795, #818) +- Fix the missing default `bbox_mode` in the `show_multi_modality_result` (#825) + +#### Contributors + +A total of 12 developers contributed to this release. + +@yinchimaoliang, @gopi231091, @filaPro, @ZwwWayne, @ZCMax, @hjin2902, @wHao-Wu, @Wuziyi616, @xiliu8006, @THU17cyz, @DCNSW, @Tai-Wang + +### v0.15.0 (1/7/2021) + +#### Compatibility + +In order to fix the problem that the priority of EvalHook is too low, all hook priorities have been re-adjusted in 1.3.8, so MMDetection 2.14.0 needs to rely on the latest MMCV 1.3.8 version. For related information, please refer to [#1120](https://github.com/open-mmlab/mmcv/pull/1120), for related issues, please refer to [#5343](https://github.com/open-mmlab/mmdetection/issues/5343). 
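+
+If you need to satisfy this requirement manually, an install command of the following shape can be used (a sketch; substitute the CUDA and PyTorch identifiers of your own environment):
+
+```shell
+# Install a specific MMCV build; cu102/torch1.8.0 below are placeholders.
+pip install mmcv-full==1.3.8 -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.8.0/index.html
+```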
+ +#### Highlights + +- Support [PAConv](https://arxiv.org/abs/2103.14635) +- Support monocular/multi-view 3D detector [ImVoxelNet](https://arxiv.org/abs/2106.01178) on KITTI +- Support Transformer-based 3D detection method [Group-Free-3D](https://arxiv.org/abs/2104.00678) on ScanNet +- Add documentation for tasks including LiDAR-based 3D detection, vision-only 3D detection and point-based 3D semantic segmentation +- Add dataset documents like ScanNet + +#### New Features + +- Support Group-Free-3D on ScanNet (#539) +- Support PAConv modules (#598, #599) +- Support ImVoxelNet on KITTI (#627, #654) + +#### Improvements + +- Add unit tests for pipeline functions `LoadImageFromFileMono3D`, `ObjectNameFilter` and `ObjectRangeFilter` (#615) +- Enhance [IndoorPatchPointSample](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/pipelines/transforms_3d.py) (#617) +- Refactor model initialization methods based MMCV (#622) +- Add Chinese docs (#629) +- Add documentation for LiDAR-based 3D detection (#642) +- Unify intrinsic and extrinsic matrices for all datasets (#653) +- Add documentation for point-based 3D semantic segmentation (#663) +- Add documentation of ScanNet for 3D detection (#664) +- Refine docs for tutorials (#666) +- Add documentation for vision-only 3D detection (#669) +- Refine docs for Quick Run and Useful Tools (#686) + +#### Bug Fixes + +- Fix the bug of [BackgroundPointsFilter](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/pipelines/transforms_3d.py) using the bottom center of ground truth (#609) +- Fix [LoadMultiViewImageFromFiles](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/pipelines/loading.py) to unravel stacked multi-view images to list to be consistent with DefaultFormatBundle (#611) +- Fix the potential bug in [analyze_logs](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/analysis_tools/analyze_logs.py) when the training resumes from a checkpoint or is stopped before evaluation (#634) +- Fix test commands in docs and make some refinements (#635) +- Fix wrong config paths in unit tests (#641) + +### v0.14.0 (1/6/2021) + +#### Highlights + +- Support the point cloud segmentation method [PointNet++](https://arxiv.org/abs/1706.02413) + +#### New Features + +- Support PointNet++ (#479, #528, #532, #541) +- Support RandomJitterPoints transform for point cloud segmentation (#584) +- Support RandomDropPointsColor transform for point cloud segmentation (#585) + +#### Improvements + +- Move the point alignment of ScanNet from data pre-processing to pipeline (#439, #470) +- Add compatibility document to provide detailed descriptions of BC-breaking changes (#504) +- Add MMSegmentation installation requirement (#535) +- Support points rotation even without bounding box in GlobalRotScaleTrans for point cloud segmentaiton (#540) +- Support visualization of detection results and dataset browse for nuScenes Mono-3D dataset (#542, #582) +- Support faster implementation of KNN (#586) +- Support RegNetX models on Lyft dataset (#589) +- Remove a useless parameter `label_weight` from segmentation datasets including `Custom3DSegDataset`, `ScanNetSegDataset` and `S3DISSegDataset` (#607) + +#### Bug Fixes + +- Fix a corrupted lidar data file in Lyft dataset in [data_preparation](https://github.com/open-mmlab/mmdetection3d/tree/master/docs/data_preparation.md) (#546) +- Fix evaluation bugs in nuScenes and Lyft dataset (#549) +- Fix converting points between coordinates with specific transformation matrix in 
the [coord_3d_mode.py](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/coord_3d_mode.py) (#556) +- Support PointPillars models on Lyft dataset (#578) +- Fix the bug of demo with pre-trained VoteNet model on ScanNet (#600) + +### v0.13.0 (1/5/2021) + +#### Highlights + +- Support a monocular 3D detection method [FCOS3D](https://arxiv.org/abs/2104.10956) +- Support ScanNet and S3DIS semantic segmentation dataset +- Enhancement of visualization tools for dataset browsing and demos, including support of visualization for multi-modality data and point cloud segmentation. + +#### New Features + +- Support ScanNet semantic segmentation dataset (#390) +- Support monocular 3D detection on nuScenes (#392) +- Support multi-modality visualization (#405) +- Support nuimages visualization (#408) +- Support monocular 3D detection on KITTI (#415) +- Support online visualization of semantic segmentation results (#416) +- Support ScanNet test results submission to online benchmark (#418) +- Support S3DIS data pre-processing and dataset class (#433) +- Support FCOS3D (#436, #442, #482, #484) +- Support dataset browse for multiple types of datasets (#467) +- Adding paper-with-code (PWC) metafile for each model in the model zoo (#485) + +#### Improvements + +- Support dataset browsing for SUNRGBD, ScanNet or KITTI points and detection results (#367) +- Add the pipeline to load data using file client (#430) +- Support to customize the type of runner (#437) +- Make pipeline functions process points and masks simultaneously when sampling points (#444) +- Add waymo unit tests (#455) +- Split the visualization of projecting points onto image from that for only points (#480) +- Efficient implementation of PointSegClassMapping (#489) +- Use the new model registry from mmcv (#495) + +#### Bug Fixes + +- Fix Pytorch 1.8 Compilation issue in the [scatter_points_cuda.cu](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/voxel/src/scatter_points_cuda.cu) (#404) +- Fix [dynamic_scatter](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/voxel/src/scatter_points_cuda.cu) errors triggered by empty point input (#417) +- Fix the bug of missing points caused by using break incorrectly in the voxelization (#423) +- Fix the missing `coord_type` in the waymo dataset [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/datasets/waymoD5-3d-3class.py) (#441) +- Fix errors in four unittest functions of [configs](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/ssn/hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d.py), [test_detectors.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tests/test_models/test_detectors.py), [test_heads.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tests/test_models/test_heads/test_heads.py) (#453) +- Fix 3DSSD training errors and simplify configs (#462) +- Clamp 3D votes projections to image boundaries in ImVoteNet (#463) +- Update out-of-date names of pipelines in the [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py) of pointpillars benchmark (#474) +- Fix the lack of a placeholder when unpacking RPN targets in the [h3d_bbox_head.py](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/roi_heads/bbox_heads/h3d_bbox_head.py) (#508) +- Fix the incorrect value of `K` when creating pickle files for SUN RGB-D (#511) + +### v0.12.0 (1/4/2021) + +#### Highlights + +- Support a new 
multi-modality method [ImVoteNet](https://arxiv.org/abs/2001.10692). +- Support PyTorch 1.7 and 1.8 +- Refactor the structure of tools and [train.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/train.py)/[test.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/test.py) + +#### New Features + +- Support LiDAR-based semantic segmentation metrics (#332) +- Support [ImVoteNet](https://arxiv.org/abs/2001.10692) (#352, #384) +- Support the KNN GPU operation (#360, #371) + +#### Improvements + +- Add FAQ for common problems in the documentation (#333) +- Refactor the structure of tools (#339) +- Refactor [train.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/train.py) and [test.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/test.py) (#343) +- Support demo on nuScenes (#353) +- Add 3DSSD checkpoints (#359) +- Update the Bibtex of CenterPoint (#368) +- Add citation format and reference to other OpenMMLab projects in the README (#374) +- Upgrade the mmcv version requirements (#376) +- Add numba and numpy version requirements in FAQ (#379) +- Avoid unnecessary for-loop execution of vfe layer creation (#389) +- Update SUNRGBD dataset documentation to stress the requirements for training ImVoteNet (#391) +- Modify vote head to support 3DSSD (#396) + +#### Bug Fixes + +- Fix missing keys `coord_type` in database sampler config (#345) +- Rename H3DNet configs (#349) +- Fix CI by using ubuntu 18.04 in github workflow (#350) +- Add assertions to avoid 4-dim points being input to [points_in_boxes](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/roiaware_pool3d/points_in_boxes.py) (#357) +- Fix the SECOND results on Waymo in the corresponding [README](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/second) (#363) +- Fix the incorrect adopted pipeline when adding val to workflow (#370) +- Fix a potential bug when indices used in the backwarding in ThreeNN (#377) +- Fix a compilation error triggered by [scatter_points_cuda.cu](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/voxel/src/scatter_points_cuda.cu) in PyTorch 1.7 (#393) + +### v0.11.0 (1/3/2021) + +#### Highlights + +- Support more friendly visualization interfaces based on open3d +- Support a faster and more memory-efficient implementation of DynamicScatter +- Refactor unit tests and details of configs + +#### New Features + +- Support new visualization methods based on open3d (#284, #323) + +#### Improvements + +- Refactor unit tests (#303) +- Move the key `train_cfg` and `test_cfg` into the model configs (#307) +- Update [README](https://github.com/open-mmlab/mmdetection3d/blob/master/README.md/) with [Chinese version](https://github.com/open-mmlab/mmdetection3d/blob/master/README_zh-CN.md/) and [instructions for getting started](https://github.com/open-mmlab/mmdetection3d/blob/master/docs/getting_started.md/). (#310, #316) +- Support a faster and more memory-efficient implementation of DynamicScatter (#318, #326) + +#### Bug Fixes + +- Fix an unsupported bias setting in the unit test for centerpoint head (#304) +- Fix errors due to typos in the centerpoint head (#308) +- Fix a minor bug in [points_in_boxes.py](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/roiaware_pool3d/points_in_boxes.py) when tensors are not in the same device. 
(#317) +- Fix warning of deprecated usages of nonzero during training with PyTorch 1.6 (#330) + +### v0.10.0 (1/2/2021) + +#### Highlights + +- Preliminary release of API for SemanticKITTI dataset. +- Documentation and demo enhancement for better user experience. +- Fix a number of underlying minor bugs and add some corresponding important unit tests. + +#### New Features + +- Support SemanticKITTI dataset preliminarily (#287) + +#### Improvements + +- Add tag to README in configurations for specifying different uses (#262) +- Update instructions for evaluation metrics in the documentation (#265) +- Add nuImages entry in [README.md](https://github.com/open-mmlab/mmdetection3d/blob/master/README.md/) and gif demo (#266, #268) +- Add unit test for voxelization (#275) + +#### Bug Fixes + +- Fixed the issue of unpacking size in [furthest_point_sample.py](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/furthest_point_sample/furthest_point_sample.py) (#248) +- Fix bugs for 3DSSD triggered by empty ground truths (#258) +- Remove models without checkpoints in model zoo statistics of documentation (#259) +- Fix some unclear installation instructions in [getting_started.md](https://github.com/open-mmlab/mmdetection3d/blob/master/docs/getting_started.md/) (#269) +- Fix relative paths/links in the documentation (#271) +- Fix a minor bug in [scatter_points_cuda.cu](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/voxel/src/scatter_points_cuda.cu) when num_features != 4 (#275) +- Fix the bug about missing text files when testing on KITTI (#278) +- Fix issues caused by inplace modification of tensors in `BaseInstance3DBoxes` (#283) +- Fix log analysis for evaluation and adjust the documentation accordingly (#285) + +### v0.9.0 (31/12/2020) + +#### Highlights + +- Documentation refactoring with better structure, especially about how to implement new models and customized datasets. +- More compatible with refactored point structure by bug fixes in ground truth sampling. + +#### Improvements + +- Documentation refactoring (#242) + +#### Bug Fixes + +- Fix point structure related bugs in ground truth sampling (#211) +- Fix loading points in ground truth sampling augmentation on nuScenes (#221) +- Fix channel setting in the SeparateHead of CenterPoint (#228) +- Fix evaluation for indoors 3D detection in case of less classes in prediction (#231) +- Remove unreachable lines in nuScenes data converter (#235) +- Minor adjustments of numpy implementation for perspective projection and prediction filtering criterion in KITTI evaluation (#241) + +### v0.8.0 (30/11/2020) + +#### Highlights + +- Refactor points structure with more constructive and clearer implementation. +- Support axis-aligned IoU loss for VoteNet with better performance. +- Update and enhance [SECOND](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/second) benchmark on Waymo. + +#### New Features + +- Support axis-aligned IoU loss for VoteNet. (#194) +- Support points structure for consistent processing of all the point related representation. (#196, #204) + +#### Improvements + +- Enhance [SECOND](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/second) benchmark on Waymo with stronger baselines. (#205) +- Add model zoo statistics and polish the documentation. (#201) + +### v0.7.0 (1/11/2020) + +#### Highlights + +- Support a new method [SSN](https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123700579.pdf) with benchmarks on nuScenes and Lyft datasets. 
+- Update benchmarks for SECOND on Waymo, CenterPoint with TTA on nuScenes and models with mixed precision training on KITTI and nuScenes. +- Support semantic segmentation on nuImages and provide [HTC](https://arxiv.org/abs/1901.07518) models with configurations and performance for reference. + +#### New Features + +- Modified primitive head which can support the setting on SUN-RGBD dataset (#136) +- Support semantic segmentation and [HTC](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/nuimages) with models for reference on nuImages dataset (#155) +- Support [SSN](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/ssn) on nuScenes and Lyft datasets (#147, #174, #166, #182) +- Support double flip for test time augmentation of CenterPoint with updated benchmark (#143) + +#### Improvements + +- Update [SECOND](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/second) benchmark with configurations for reference on Waymo (#166) +- Delete checkpoints on Waymo to comply its specific license agreement (#180) +- Update models and instructions with [mixed precision training](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/fp16) on KITTI and nuScenes (#178) + +#### Bug Fixes + +- Fix incorrect code weights in anchor3d_head when introducing mixed precision training (#173) +- Fix the incorrect label mapping on nuImages dataset (#155) + +### v0.6.1 (11/10/2020) + +#### Highlights + +- Support mixed precision training of voxel-based methods +- Support docker with PyTorch 1.6.0 +- Update baseline configs and results ([CenterPoint](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/centerpoint) on nuScenes and [PointPillars](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/pointpillars) on Waymo with full dataset) +- Switch model zoo to download.openmmlab.com + +#### New Features + +- Support dataset pipeline `VoxelBasedPointSampler` to sample multi-sweep points based on voxelization. (#125) +- Support mixed precision training of voxel-based methods (#132) +- Support docker with PyTorch 1.6.0 (#160) + +#### Improvements + +- Reduce requirements for the case exclusive of Waymo (#121) +- Switch model zoo to download.openmmlab.com (#126) +- Update docs related to Waymo (#128) +- Add version assertion in the [init file](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/__init__.py) (#129) +- Add evaluation interval setting for CenterPoint (#131) +- Add unit test for CenterPoint (#133) +- Update [PointPillars](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/pointpillars) baselines on Waymo with full dataset (#142) +- Update [CenterPoint](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/centerpoint) results with models and logs (#154) + +#### Bug Fixes + +- Fix a bug of visualization in multi-batch case (#120) +- Fix bugs in dcn unit test (#130) +- Fix dcn bias bug in centerpoint (#137) +- Fix dataset mapping in the evaluation of nuScenes mini dataset (#140) +- Fix origin initialization in `CameraInstance3DBoxes` (#148, #150) +- Correct documentation link in the getting_started.md (#159) +- Fix model save path bug in gather_models.py (#153) +- Fix image padding shape bug in `PointFusion` (#162) + +### v0.6.0 (20/9/2020) + +#### Highlights + +- Support new methods [H3DNet](https://arxiv.org/abs/2006.05682), [3DSSD](https://arxiv.org/abs/2002.10187), [CenterPoint](https://arxiv.org/abs/2006.11275). 
+- Support new dataset [Waymo](https://waymo.com/open/) (with PointPillars baselines) and [nuImages](https://www.nuscenes.org/nuimages) (with Mask R-CNN and Cascade Mask R-CNN baselines). +- Support Batch Inference +- Support Pytorch 1.6 +- Start to publish `mmdet3d` package to PyPI since v0.5.0. You can use mmdet3d through `pip install mmdet3d`. + +#### Backwards Incompatible Changes + +- Support Batch Inference (#95, #103, #116): MMDetection3D v0.6.0 migrates to support batch inference based on MMDetection >= v2.4.0. This change influences all the test APIs in MMDetection3D and downstream codebases. +- Start to use collect environment function from MMCV (#113): MMDetection3D v0.6.0 migrates to use `collect_env` function in MMCV. + `get_compiler_version` and `get_compiling_cuda_version` compiled in `mmdet3d.ops.utils` are removed. Please import these two functions from `mmcv.ops`. + +#### New Features + +- Support [nuImages](https://www.nuscenes.org/nuimages) dataset by converting them into coco format and release Mask R-CNN and Cascade Mask R-CNN baseline models (#91, #94) +- Support to publish to PyPI in github-action (#17, #19, #25, #39, #40) +- Support CBGSDataset and make it generally applicable to all the supported datasets (#75, #94) +- Support [H3DNet](https://arxiv.org/abs/2006.05682) and release models on ScanNet dataset (#53, #58, #105) +- Support Fusion Point Sampling used in [3DSSD](https://arxiv.org/abs/2002.10187) (#66) +- Add `BackgroundPointsFilter` to filter background points in data pipeline (#84) +- Support pointnet2 with multi-scale grouping in backbone and refactor pointnets (#82) +- Support dilated ball query used in [3DSSD](https://arxiv.org/abs/2002.10187) (#96) +- Support [3DSSD](https://arxiv.org/abs/2002.10187) and release models on KITTI dataset (#83, #100, #104) +- Support [CenterPoint](https://arxiv.org/abs/2006.11275) and release models on nuScenes dataset (#49, #92) +- Support [Waymo](https://waymo.com/open/) dataset and release PointPillars baseline models (#118) +- Allow `LoadPointsFromMultiSweeps` to pad empty sweeps and select multiple sweeps randomly (#67) + +#### Improvements + +- Fix all warnings and bugs in PyTorch 1.6.0 (#70, #72) +- Update issue templates (#43) +- Update unit tests (#20, #24, #30) +- Update documentation for using `ply` format point cloud data (#41) +- Use points loader to load point cloud data in ground truth (GT) samplers (#87) +- Unify version file of OpenMMLab projects by using `version.py` (#112) +- Remove unnecessary data preprocessing commands of SUN RGB-D dataset (#110) + +#### Bug Fixes + +- Rename CosineAnealing to CosineAnnealing (#57) +- Fix device inconsistent bug in 3D IoU computation (#69) +- Fix a minor bug in json2csv of lyft dataset (#78) +- Add missed test data for pointnet modules (#85) +- Fix `use_valid_flag` bug in `CustomDataset` (#106) + +### v0.5.0 (9/7/2020) + +MMDetection3D is released. diff --git a/docs/en/notes/compatibility.md b/docs/en/notes/compatibility.md new file mode 100755 index 0000000..878f01c --- /dev/null +++ b/docs/en/notes/compatibility.md @@ -0,0 +1,207 @@ +# Compatibility + +## v1.1.0rc0 + +### OpenMMLab v2.0 Refactoring + +In this version, we make large refactoring based on MMEngine to achieve unified data elements, model interfaces, visualizers, evaluators and other runtime modules across different datasets, tasks and even codebases. 
A brief summary of this refactoring is as follows:
+
+- Data Element:
+  - We add [`Det3DDataSample`](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/structures/det3d_data_sample.py) as the common data element passed between datasets and models. It inherits from [`DetDataSample`](https://github.com/open-mmlab/mmdetection/blob/dev-3.x/mmdet/structures/det_data_sample.py) in MMDetection and uses `InstanceData`, `PixelData` and `LabelData` (all inheriting from `BaseDataElement` in MMEngine) to represent different types of ground truth labels or predictions.
+- Datasets:
+  - We add [`Det3DDataset`](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/datasets/det3d_dataset.py) and [`Seg3DDataset`](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/datasets/seg3d_dataset.py) as the base datasets, which inherit from the unified `BaseDataset` in MMEngine. They implement most functions that are commonly used across different datasets and simplify the info loading/processing in the current datasets. Most of the re-defined input arguments and functions can be re-used across datasets, which is important for implementing customized datasets.
+  - We define common keys across different datasets and unify all the info files with a standard protocol. Sharing the same keys across dataset infos makes them clearer for users. Besides, for different settings, such as camera-only and LiDAR-only methods, we no longer need different info formats (like the previous pkl and json files). We can simply revise `parse_data_info` to read the necessary information from a complete info file.
+  - We add `train_dataloader`, `val_dataloader` and `test_dataloader` to replace the original `data` in the config. This simplifies the hierarchy of data-related fields.
+- Data Transforms
+  - Based on the basic transforms and wrappers re-implemented and simplified in the latest MMCV, we refactor data transforms to inherit from them.
+  - We also adjust the implementation of the current data pipelines to make them compatible with our latest data protocol.
+  - Normalization, image padding and voxelization operations are moved to the data pre-processing.
+  - `DefaultFormatBundle3D` and `Collect3D` are replaced with `Pack3DDetInputs` to pack the data into the element format as model input.
+- Models
+  - Unify the model interface as `inputs`, `data_samples`, `return_loss=False`
+  - The basic pre-processing before the model forward pass includes: 1) converting inputs from CPU to GPU tensors; 2) padding images; 3) normalizing images; 4) voxelization.
+  - Return a `loss_dict` during training and a `list[data_sample]` during inference
+  - Simplify function interfaces in the models
+  - Add `preprocess_cfg` in the model configs for pre-processing
+- Visualizer
+  - Design a unified visualizer, [`Det3DLocalVisualizer`](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/visualization/local_visualizer.py), based on MMEngine for different 3D tasks and settings
+  - Support dataset browsing and visualization hooks based on the [`Det3DLocalVisualizer`](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/visualization/local_visualizer.py)
+- Evaluator
+  - Decouple evaluators from datasets to make them more flexible: the evaluation code of each dataset is implemented in a dedicated metric class.
+ - Add evaluator information to the current dataset configs +- Registry + - Refactor all the registries to inherit from root registries in MMEngine + - When using modules from other codebases, it is necessary to specify the registry scope, such as `mmdet.ResNet` +- Others: Refactor logging, hooks, scheduler, runner and other runtime configs based on MMEngine + +## v1.0.0rc1 + +### Operators Migration + +We have adopted CUDA operators compiled from [mmcv](https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/__init__.py) and removed all the CUDA operators in mmdet3d. We now do not need to compile the CUDA operators in mmdet3d anymore. + +### Waymo dataset converter refactoring + +In this version we did a major code refactoring that boosted the performance of waymo dataset conversion by multiprocessing. +Meanwhile, we also fixed the imprecise timestamps saving issue in waymo dataset conversion. This change introduces following backward compatibility breaks: + +- The point cloud .bin files of waymo dataset need to be regenerated. + In the .bin files each point occupies 6 `float32` and the meaning of the last `float32` now changed from **imprecise timestamps** to **range frame offset**. + The **range frame offset** for each point is calculated as`ri * h * w + row * w + col` if the point is from the **TOP** lidar or `-1` otherwise. + The `h`, `w` denote the height and width of the TOP lidar's range frame. + The `ri`, `row`, `col` denote the return index, the row and the column of the range frame where each point locates. + Following tables show the difference across the change: + +Before + +| Element offset (float32) | 0 | 1 | 2 | 3 | 4 | 5 | +| ------------------------ | :-: | :-: | :-: | :-------: | :--------: | :---------------------: | +| Bytes offset | 0 | 4 | 8 | 12 | 16 | 20 | +| Meaning | x | y | z | intensity | elongation | **imprecise timestamp** | + +After + +| Element offset (float32) | 0 | 1 | 2 | 3 | 4 | 5 | +| ------------------------ | :-: | :-: | :-: | :-------: | :--------: | :--------------------: | +| Bytes offset | 0 | 4 | 8 | 12 | 16 | 20 | +| Meaning | x | y | z | intensity | elongation | **range frame offset** | + +- The objects' point cloud .bin files in the GT-database of waymo dataset need to be regenerated because we also dumped the range frame offset for each point into it. + Following tables show the difference across the change: + +Before + +| Element offset (float32) | 0 | 1 | 2 | 3 | 4 | +| ------------------------ | :-: | :-: | :-: | :-------: | :--------: | +| Bytes offset | 0 | 4 | 8 | 12 | 16 | +| Meaning | x | y | z | intensity | elongation | + +After + +| Element offset (float32) | 0 | 1 | 2 | 3 | 4 | 5 | +| ------------------------ | :-: | :-: | :-: | :-------: | :--------: | :--------------------: | +| Bytes offset | 0 | 4 | 8 | 12 | 16 | 20 | +| Meaning | x | y | z | intensity | elongation | **range frame offset** | + +- Any configuration that uses waymo dataset with GT Augmentation should change the `db_sampler.points_loader.load_dim` from `5` to `6`. + +## v1.0.0rc0 + +### Coordinate system refactoring + +In this version, we did a major code refactoring which improved the consistency among the three coordinate systems (and corresponding box representation), LiDAR, Camera, and Depth. A brief summary for this refactoring is as follows: + +- The three coordinate systems are all right-handed now (which means the yaw angle increases in the counterclockwise direction). 
+
+- The LiDAR system `(x_size, y_size, z_size)` corresponds to `(l, w, h)` instead of `(w, l, h)`. This is more natural since `l` is parallel with the direction where the yaw angle is zero, and we prefer using the positive direction of the `x` axis as that direction, which is exactly how we define the yaw angle in the Depth and Camera coordinate systems.
+- The APIs for box-related operations are improved and are now more user-friendly.
+
+#### ***NOTICE!!***
+
+Since the definitions of the box representation have changed, the annotation data of most datasets require updating:
+
+- SUN RGB-D: Yaw angles in the annotation should be reversed.
+- KITTI: For LiDAR boxes in GT databases, (x_size, y_size, z_size, yaw) out of (x, y, z, x_size, y_size, z_size) should be converted from the old LiDAR coordinate system to the new one. The training/validation data annotations should be left unchanged since they are under the Camera coordinate system, which is unmodified after the refactoring.
+- Waymo: Same as KITTI.
+- nuScenes: For LiDAR boxes in training/validation data and GT databases, (x_size, y_size, z_size, yaw) out of (x, y, z, x_size, y_size, z_size) should be converted.
+- Lyft: Same as nuScenes.
+
+Please regenerate the data annotation/GT database files or use [`update_data_coords.py`](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/tools/update_data_coords.py) to update the data.
+
+To use boxes under the Depth and LiDAR coordinate systems, or to convert boxes between different coordinate systems, users should be aware of the difference between the old and new definitions. For example, the rotation, flipping, and bev functions of [`DepthInstance3DBoxes`](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/structures/depth_box3d.py) and [`LiDARInstance3DBoxes`](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/structures/lidar_box3d.py), as well as the box conversion [functions](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/structures/box_3d_mode.py), have all been reimplemented in the refactoring.
+
+Consequently, functions like [`output_to_lyft_box`](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/datasets/lyft_dataset.py) undergo small modifications to adapt to the new LiDAR/Depth boxes.
+
+Since the LiDAR system `(x_size, y_size, z_size)` now corresponds to `(l, w, h)` instead of `(w, l, h)`, the anchor sizes for LiDAR boxes are also changed, e.g., from `[1.6, 3.9, 1.56]` to `[3.9, 1.6, 1.56]`.
+
+Functions only involving points are generally unaffected unless they rely on some refactored utility functions such as `rotation_3d_in_axis`.
+
+#### Other BC-breaking or new features:
+
+- `array_converter`: Please refer to [array_converter.py](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/utils/array_converter.py). Functions wrapped with `array_converter` can convert array-like input types of `torch.Tensor`, `np.ndarray`, and `list/tuple/float` to `torch.Tensor` to be processed in a unified PyTorch pipeline. The result may finally be converted back to the input type. Most functions in [utils.py](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/structures/utils.py) are wrapped with `array_converter`.
+- [`points_in_boxes`](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/structures/base_box3d.py) and [`points_in_boxes_batch`](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/structures/base_box3d.py) will be deprecated soon. They are renamed to `points_in_boxes_part` and `points_in_boxes_all` respectively, with more detailed docstrings. The major difference between the two functions is that if a point is enclosed by multiple boxes, `points_in_boxes_part` will only return the index of the first enclosing box, while `points_in_boxes_all` will return all the indices of the enclosing boxes.
+- `rotation_3d_in_axis`: Please refer to [utils.py](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/structures/utils.py). Now this function supports multiple input types and more options. The function with the same name in [box_np_ops.py](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/box_np_ops.py) is deleted since we no longer need a separate function to handle NumPy data. `rotation_2d`, `points_cam2img`, and `limit_period` in box_np_ops.py are also deleted for the same reason.
+- `bev` method of [`CameraInstance3DBoxes`](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/core/bbox/structures/cam_box3d.py): It is changed to be consistent with the definition of bev in the Depth and LiDAR coordinate systems.
+- Data augmentation utils in [data_augment_utils.py](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/datasets/pipelines/data_augment_utils.py) now follow the rules of a right-handed system.
+- We do not need the yaw hacking in KITTI anymore after refining [`get_direction_target`](https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0rc0/mmdet3d/models/dense_heads/train_mixins.py). Interested users may refer to PR [#677](https://github.com/open-mmlab/mmdetection3d/pull/677).
+
+## 0.16.0
+
+### Returned values of `QueryAndGroup` operation
+
+We modified the returned `grouped_xyz` value of the `QueryAndGroup` operation to support the PAConv segmentor. Originally, `grouped_xyz` was centered by subtracting the grouping centers, so it represented the relative positions of the grouped points. Now we no longer perform this subtraction, and the returned `grouped_xyz` stands for the absolute coordinates of these points.
+
+Note that the other returned variables of `QueryAndGroup`, such as `new_features`, `unique_cnt` and `grouped_idx`, are not affected.
+
+### NuScenes coco-style data pre-processing
+
+We remove the rotation and dimension hack in the monocular 3D detection on nuScenes. Specifically, we transform the rotation and dimension of boxes defined by the nuScenes devkit to the coordinate system of our `CameraInstance3DBoxes` in the pre-processing and transform them back in the post-processing. In this way, we can remove the corresponding [hack](https://github.com/open-mmlab/mmdetection3d/pull/744/files#diff-5bee5062bd84e6fa25a2fdd71353f6f283dfdc4a66a0316c3b1ca26078c978b6L165) used in the visualization tools. The modification also guarantees the correctness of all the operations based on our `CameraInstance3DBoxes` (such as NMS and flip augmentation) when training monocular 3D detectors.
+
+The modification only influences nuScenes coco-style json files. Please re-run the nuScenes data preparation script if necessary. See more details in the PR [#744](https://github.com/open-mmlab/mmdetection3d/pull/744).
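+
+For reference, the info files can be regenerated with the standard converter entry point; the paths and tag below are placeholders for your local data layout:
+
+```shell
+# Re-run the nuScenes converter to refresh the coco-style json/info files.
+python tools/create_data.py nuscenes --root-path ./data/nuscenes --out-dir ./data/nuscenes --extra-tag nuscenes
+```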
+
+### ScanNet dataset for ImVoxelNet
+
+We adopt a new pre-processing procedure for the ScanNet dataset in order to support ImVoxelNet, which is a multi-view method requiring image data. In previous versions of MMDetection3D, the ScanNet dataset was only used for point cloud based 3D detection and segmentation methods. We plan to add ImVoxelNet to our model zoo, and thus update ScanNet accordingly by adding image-related pre-processing steps. Specifically, we made these changes:
+
+- Add a [script](https://github.com/open-mmlab/mmdetection3d/blob/master/data/scannet/extract_posed_images.py) for extracting RGB data.
+- Update the [script](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/dataset_converters/scannet_data_utils.py) for creating annotations.
+- Add instructions in the documents on preparing image data.
+
+Please refer to the ScanNet [README.md](https://github.com/open-mmlab/mmdetection3d/blob/master/data/scannet/README.md/) for more details.
+
+## 0.15.0
+
+### MMCV Version
+
+In order to fix the problem that the priority of EvalHook was too low, all hook priorities were re-adjusted in MMCV 1.3.8, so MMDetection 2.14.0 needs to rely on the latest MMCV 1.3.8 version. For related information, please refer to [#1120](https://github.com/open-mmlab/mmcv/pull/1120); for related issues, please refer to [#5343](https://github.com/open-mmlab/mmdetection/issues/5343).
+
+### Unified parameter initialization
+
+To unify the parameter initialization in OpenMMLab projects, MMCV supports `BaseModule`, which accepts `init_cfg` to allow the modules' parameters to be initialized in a flexible and unified manner. Now users need to explicitly call `model.init_weights()` in the training script to initialize the model (as done [here](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/train.py#L183)); previously this was handled by the detector. Please refer to PR [#622](https://github.com/open-mmlab/mmdetection3d/pull/622) for details.
+
+### BackgroundPointsFilter
+
+We modified the dataset augmentation function `BackgroundPointsFilter` ([here](https://github.com/open-mmlab/mmdetection3d/blob/v0.15.0/mmdet3d/datasets/pipelines/transforms_3d.py#L1132)). In previous versions of MMDetection3D, `BackgroundPointsFilter` changed the bottom center of `gt_bboxes_3d` to the gravity center. In MMDetection3D 0.15.0, `BackgroundPointsFilter` no longer changes it. Please refer to PR [#609](https://github.com/open-mmlab/mmdetection3d/pull/609) for details.
+
+### Enhance `IndoorPatchPointSample` transform
+
+We enhance the pipeline function `IndoorPatchPointSample` used in the point cloud segmentation task by adding more choices for patch selection. Also, we plan to remove the unused parameter `sample_rate` in the future. Please modify the code as well as the config files accordingly if you use this transform.
+
+## 0.14.0
+
+### Dataset class for 3D segmentation task
+
+We remove the unused parameter `label_weight` from segmentation datasets, including `Custom3DSegDataset`, `ScanNetSegDataset` and `S3DISSegDataset`, since this weight is handled in the loss function of the model class. Please modify the code as well as the config files accordingly if you use or inherit from these classes.
+
+### ScanNet data pre-processing
+
+We adopt new pre-processing and conversion steps for the ScanNet dataset. In previous versions of MMDetection3D, the ScanNet dataset was only used for the 3D detection task, where we trained on the training set and tested on the validation set.
In MMDetection3D 0.14.0, we further support the 3D segmentation task on ScanNet, which includes online benchmarking on the test set. Since the alignment matrix is not provided for the test set data, we skip the alignment of points in the data generation step to support both tasks. Besides, as 3D segmentation requires per-point predictions, we also remove the down-sampling step in data generation.
+
+- In the new ScanNet processing scripts, we save the unaligned points for all of the training, validation and test sets. For the training and validation sets, which have annotations, we also store the `axis_align_matrix` in the data infos. For ground-truth bounding boxes, we store boxes in both aligned and unaligned coordinates under the keys `gt_boxes_upright_depth` and `unaligned_gt_boxes_upright_depth`, respectively, in the data infos.
+
+- In `ScanNetDataset`, we now load the `axis_align_matrix` as a part of the data annotations. If it is not contained in old data infos, we use an identity matrix for compatibility. We also add a transform function `GlobalAlignment` to the ScanNet detection data pipeline to align the points.
+
+- Since the aligned boxes share the same key as in the old data infos, we do not need to modify the related code. But do remember that they are not in the same coordinate system as the saved points.
+
+- There is a `PointSample` transform in the data pipelines for the ScanNet detection task which down-samples points, so removing the down-sampling from data generation will not affect the code.
+
+We have trained a [VoteNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/votenet/votenet_8x8_scannet-3d-18class.py) model on the newly processed ScanNet dataset and obtained similar benchmark results. In order to prepare ScanNet data for both detection and segmentation tasks, please re-run the new pre-processing scripts following the ScanNet [README.md](https://github.com/open-mmlab/mmdetection3d/blob/master/data/scannet/README.md/).
+
+## 0.12.0
+
+### SUNRGBD dataset for ImVoteNet
+
+We adopt a new pre-processing procedure for the SUNRGBD dataset in order to support ImVoteNet, which is a multi-modality method requiring both image and point cloud data. In previous versions of MMDetection3D, the SUNRGBD dataset was only used for point cloud based 3D detection methods. In MMDetection3D 0.12.0, we add ImVoteNet to our model zoo, and thus update SUNRGBD accordingly by adding image-related pre-processing steps. Specifically, we made these changes:
+
+- Fix a bug in the image file path in the meta data.
+- Convert calibration matrices from double to float to avoid type mismatches in further operations.
+- Add instructions in the documents on preparing image data.
+
+Please refer to the SUNRGBD [README.md](https://github.com/open-mmlab/mmdetection3d/blob/master/data/sunrgbd/README.md/) for more details.
+
+## 0.6.0
+
+### VoteNet and H3DNet model structure update
+
+In MMDetection3D 0.6.0, we updated the model structures of VoteNet and H3DNet. Therefore, model checkpoints generated by MMDetection3D \< 0.6.0 should first be converted to a format compatible with the latest structures via [convert_votenet_checkpoints.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/model_converters/convert_votenet_checkpoints.py) and [convert_h3dnet_checkpoints.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/model_converters/convert_h3dnet_checkpoints.py).
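+
+A typical invocation looks like the following sketch (the checkpoint paths are placeholders, and the `--out` option is assumed from the converter scripts' usage):
+
+```shell
+# Convert an old VoteNet checkpoint to the updated model structure.
+python tools/model_converters/convert_votenet_checkpoints.py old_votenet.pth --out votenet_converted.pth
+```
+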
For more details, please refer to the VoteNet [README.md](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/votenet/README.md/) and H3DNet [README.md](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/h3dnet/README.md/). diff --git a/docs/en/notes/contribution_guides.md b/docs/en/notes/contribution_guides.md new file mode 100755 index 0000000..e201b6c --- /dev/null +++ b/docs/en/notes/contribution_guides.md @@ -0,0 +1,139 @@ +# Contribution Guide + +OpenMMLab welcomes everyone who is interested in contributing to our projects and accepts contribution in the form of PR. + +## What is PR + +`PR` is the abbreviation of `Pull Request`. Here's the definition of `PR` in the [official document](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests) of Github. + +``` +Pull requests let you tell others about changes you have pushed to a branch in a repository on GitHub. Once a pull request is opened, you can discuss and review the potential changes with collaborators and add follow-up commits before your changes are merged into the base branch. +``` + +## Basic Workflow + +1. Get the most recent codebase + +2. Checkout a new branch from `dev-1.x` or `dev` branch, depending on the version of the codebase you want to contribute to. The main differences between `dev-1.x` and `dev` is that `dev-1.x` depends on MMEngine additionally and it's the main branch we maintains. We strongly recommend you pull request based on more advanced `dev-1.x` branch. + +3. Commit your changes ([Don't forget to use pre-commit hooks!](#3-commit-your-changes)) + +4. Push your changes and create a PR + +5. Discuss and review your code + +6. Merge your branch to `dev-1.x` / `dev` branch + +## Procedures in detail + +### 1. Get the most recent codebase + +- When you work on your first PR + + Fork the OpenMMLab repository: click the **fork** button at the top right corner of Github page + ![avatar](https://user-images.githubusercontent.com/34888372/224920532-dc11f696-1175-436a-8c0f-1966f5ca33d1.png) + + Clone forked repository to local + + ```bash + git clone git@github.com:XXX/mmdetection3d.git + ``` + + Add source repository to upstream + + ```bash + git remote add upstream git@github.com:open-mmlab/mmdetection3d + ``` + +- After your first PR + + Checkout the latest branch of the local repository and pull the latest branch of the source repository. Here we assume that you are working on the `dev-1.x` branch. + + ```bash + git checkout dev-1.x + git pull upstream dev-1.x + ``` + +### 2. Checkout a new branch from the `dev-1.x` / `dev` branch + +```bash +git checkout -b branchname +``` + +```{tip} +To make commit history clear, we strongly recommend you checkout the `dev-1.x` branch before creating a new branch. +``` + +### 3. Commit your changes + +- If you are a first-time contributor, please install and initialize pre-commit hooks from the repository root directory first. + + ```bash + pip install -U pre-commit + pre-commit install + ``` + +- Commit your changes as usual. Pre-commit hooks will be triggered to stylize your code before each commit. + + ```bash + # coding + git add [files] + git commit -m 'messages' + ``` + + ```{note} + Sometimes your code may be changed by pre-commit hooks. In this case, please remember to re-stage the modified files and commit again. + ``` + +### 4. 
Push your changes to the forked repository and create a PR + +- Push the branch to your forked remote repository + + ```bash + git push origin branchname + ``` + +- Create a PR + ![avatar](https://user-images.githubusercontent.com/34888372/224922548-69455db9-68d1-4d92-a007-afcd2814b1c1.png) + +- Revise PR message template to describe your motivation and modifications made in this PR. You can also link the related issue to the PR manually in the PR message (For more information, checkout the [official guidance](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue)). + +- Specifically, if you are contributing to `dev-1.x`, you will have to change the base branch of the PR to `dev-1.x` in the PR page, since the default base branch is `master`. + + ![avatar](https://user-images.githubusercontent.com/34888372/224923009-1d611a30-0bfc-4fe5-93a2-96cc88a18886.png) + +- You can also ask a specific person to review the changes you've proposed. + +### 5. Discuss and review your code + +- Modify your codes according to reviewers' suggestions and then push your changes. + +### 6. Merge your branch to the `dev-1.x` / `dev` branch and delete the branch + +- After the PR is merged by the maintainer, you can delete the branch you created in your forked repository. + + ```bash + git branch -d branchname # delete local branch + git push origin --delete branchname # delete remote branch + ``` + +## PR Specs + +1. Use [pre-commit](https://pre-commit.com) hook to avoid issues of code style + +2. One short-time branch should be matched with only one PR + +3. Accomplish a detailed change in one PR. Avoid large PR + + - Bad: Support Faster R-CNN + - Acceptable: Add a box head to Faster R-CNN + - Good: Add a parameter to box head to support custom conv-layer number + +4. Provide clear and significant commit message + +5. Provide clear and meaningful PR description + + - Task name should be clarified in title. The general format is: \[Prefix\] Short description of the PR (Suffix) + - Prefix: add new feature \[Feature\], fix bug \[Fix\], related to documents \[Docs\], in developing \[WIP\] (which will not be reviewed temporarily) + - Introduce main changes, results and influences on other modules in short description + - Associate related issues and pull requests with a milestone diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md new file mode 100755 index 0000000..d1bc10e --- /dev/null +++ b/docs/en/notes/faq.md @@ -0,0 +1,58 @@ +# FAQ + +We list some potential troubles encountered by users and developers, along with their corresponding solutions. Feel free to enrich the list if you find any frequent issues and contribute your solutions to solve them. If you have any trouble with environment configuration, model training, etc, please create an issue using the [provided templates](https://github.com/open-mmlab/mmdetection3d/blob/master/.github/ISSUE_TEMPLATE/error-report.md) and fill in all required information in the template. + +## MMEngine/MMCV/MMDet/MMDet3D Installation + +- Compatibility issue between MMEngine, MMCV, MMDetection and MMDetection3D; "ConvWS is already registered in conv layer"; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." + +- The required versions of MMEngine, MMCV and MMDetection for different versions of MMDetection3D are as below. Please install the correct version of MMEngine, MMCV and MMDetection to avoid installation issues. 
+ + | MMDetection3D version | MMEngine version | MMCV version | MMDetection version | + | --------------------- | :----------------------: | :---------------------: | :----------------------: | + | dev-1.x | mmengine>=0.7.1, \<1.0.0 | mmcv>=2.0.0rc4, \<2.1.0 | mmdet>=3.0.0, \<3.1.0 | + | main | mmengine>=0.7.1, \<1.0.0 | mmcv>=2.0.0rc4, \<2.1.0 | mmdet>=3.0.0, \<3.1.0 | + | v1.1.0rc3 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc3, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 | + | v1.1.0rc2 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc3, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 | + | v1.1.0rc1 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 | + | v1.1.0rc0 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 | + + **Note:** If you want to install mmdet3d-v1.0.0rcx, the compatible MMDetection, MMSegmentation and MMCV versions table can be found at [here](https://mmdetection3d.readthedocs.io/en/latest/faq.html#mmcv-mmdet-mmdet3d-installation). Please choose the correct version of MMCV, MMDetection and MMSegmentation to avoid installation issues. + +- If you faced the error shown below when importing open3d: + + `OSError: /lib/x86_64-linux-gnu/libm.so.6: version 'GLIBC_2.27' not found` + + please downgrade open3d to 0.9.0.0, because the latest open3d needs the support of file 'GLIBC_2.27', which only exists in Ubuntu 18.04, not in Ubuntu 16.04. + +- If you faced the error when importing pycocotools, this is because nuscenes-devkit installs pycocotools but mmdet relies on mmpycocotools. The current workaround is as below. We will migrate to use pycocotools in the future. + + ```shell + pip uninstall pycocotools mmpycocotools + pip install mmpycocotools + ``` + + **NOTE**: We have migrated to use pycocotools in mmdet3d >= 0.13.0. + +- If you face the error shown below when importing pycocotools: + + `ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject` + + please downgrade pycocotools to 2.0.1 because of the incompatibility between the newest pycocotools and numpy \< 1.20.0. Or you can compile and install the latest pycocotools from source as below: + + `pip install -e "git+https://github.com/cocodataset/cocoapi#egg=pycocotools&subdirectory=PythonAPI"` + + or + + `pip install -e "git+https://github.com/ppwwyyxx/cocoapi#egg=pycocotools&subdirectory=PythonAPI"` + +- If you face some errors about numba in cuda-9.0 environment, you should check the version of numba. In cuda-9.0 environment, the high version of numba is not supported and we suggest you could install numba==0.53.0. + +## How to annotate point cloud? + +MMDetection3D does not support point cloud annotation. Some open-source annotation tool are offered for reference: + +- [SUSTechPOINTS](https://github.com/naurril/SUSTechPOINTS) +- [LATTE](https://github.com/bernwang/latte) + +Besides, we improved [LATTE](https://github.com/bernwang/latte) for better use. More details can be found [here](https://arxiv.org/abs/2011.10174). diff --git a/docs/en/notes/index.rst b/docs/en/notes/index.rst new file mode 100755 index 0000000..66a907a --- /dev/null +++ b/docs/en/notes/index.rst @@ -0,0 +1,8 @@ +.. 
toctree:: + :maxdepth: 1 + + benchmarks.md + changelog_v1.0.x.md + changelog.md + compatibility.md + faq.md diff --git a/docs/en/stat.py b/docs/en/stat.py new file mode 100755 index 0000000..b5f10a8 --- /dev/null +++ b/docs/en/stat.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +import functools as func +import glob +import re +from os import path as osp + +import numpy as np + +url_prefix = 'https://github.com/open-mmlab/mmdetection3d/blob/master/' + +files = sorted(glob.glob('../configs/*/README.md')) + +stats = [] +titles = [] +num_ckpts = 0 + +for f in files: + url = osp.dirname(f.replace('../', url_prefix)) + + with open(f, 'r') as content_file: + content = content_file.read() + + title = content.split('\n')[0].replace('#', '').strip() + ckpts = set(x.lower().strip() + for x in re.findall(r'https?://download.*\.pth', content) + if 'mmdetection3d' in x) + if len(ckpts) == 0: + continue + + _papertype = [x for x in re.findall(r'', content)] + assert len(_papertype) > 0 + papertype = _papertype[0] + + paper = set([(papertype, title)]) + + titles.append(title) + num_ckpts += len(ckpts) + statsmsg = f""" +\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) +""" + stats.append((paper, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) +msglist = '\n'.join(x for _, _, x in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +\n## Model Zoo Statistics + +* Number of papers: {len(set(titles))} +{countstr} + +* Number of checkpoints: {num_ckpts} +{msglist} +""" + +with open('model_zoo.md', 'a') as f: + f.write(modelzoo) diff --git a/docs/en/switch_language.md b/docs/en/switch_language.md new file mode 100755 index 0000000..d33d080 --- /dev/null +++ b/docs/en/switch_language.md @@ -0,0 +1,3 @@ +## English + +## 简体中文 diff --git a/docs/en/user_guides/2_new_data_model.md b/docs/en/user_guides/2_new_data_model.md new file mode 100755 index 0000000..15e847f --- /dev/null +++ b/docs/en/user_guides/2_new_data_model.md @@ -0,0 +1,105 @@ +# 2: Train with customized datasets + +In this note, you will know how to train and test predefined models with customized datasets. We use the Waymo dataset as an example to describe the whole process. + +The basic steps are as below: + +1. Prepare the customized dataset +2. Prepare a config +3. Train, test, inference models on the customized dataset. + +## Prepare the customized dataset + +There are three ways to support a new dataset in MMDetection3D: + +1. reorganize the dataset into existing format. +2. reorganize the dataset into a standard format. +3. implement a new dataset. + +Usually we recommend to use the first two methods which are usually easier than the third. + +In this note, we give an example for converting the data into KITTI format, you can refer to this to reorganize your dataset into kitti format. About the standard format dataset, and you can refer to [customize_dataset.md](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/en/advanced_guides/customize_dataset.md). + +**Note**: We take Waymo as the example here considering its format is totally different from other existing formats. For other datasets using similar methods to organize data, like Lyft compared to nuScenes, it would be easier to directly implement the new data converter (for the second approach above) instead of converting it to another format (for the first approach above). 
+ +### KITTI dataset format + +Firstly, the raw data for 3D object detection from KITTI are typically organized as follows, where `ImageSets` contains split files indicating which files belong to training/validation/testing set, `calib` contains calibration information files, `image_2` and `velodyne` include image data and point cloud data, and `label_2` includes label files for 3D detection. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── kitti +│ │ ├── ImageSets +│ │ ├── testing +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── velodyne +│ │ ├── training +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── label_2 +│ │ │ ├── velodyne +``` + +Specific annotation format is described in the official object development [kit](https://s3.eu-central-1.amazonaws.com/avg-kitti/devkit_object.zip). For example, it consists of the following labels: + +``` +#Values Name Description +---------------------------------------------------------------------------- + 1 type Describes the type of object: 'Car', 'Van', 'Truck', + 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', + 'Misc' or 'DontCare' + 1 truncated Float from 0 (non-truncated) to 1 (truncated), where + truncated refers to the object leaving image boundaries + 1 occluded Integer (0,1,2,3) indicating occlusion state: + 0 = fully visible, 1 = partly occluded + 2 = largely occluded, 3 = unknown + 1 alpha Observation angle of object, ranging [-pi..pi] + 4 bbox 2D bounding box of object in the image (0-based index): + contains left, top, right, bottom pixel coordinates + 3 dimensions 3D object dimensions: height, width, length (in meters) + 3 location 3D object location x,y,z in camera coordinates (in meters) + 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi] + 1 score Only for results: Float, indicating confidence in + detection, needed for p/r curves, higher is better. +``` + +Assume we use the Waymo dataset. + +After downloading the data, we need to implement a function to convert both the input data and annotation format into the KITTI style. Then we can implement `WaymoDataset` inherited from `KittiDataset` to load the data and perform training, and implement `WaymoMetric` inherited from `KittiMetric` for evaluation. + +Specifically, we implement a waymo [converter](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/waymo_converter.py) to convert Waymo data into KITTI format and a waymo dataset [class](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/datasets/waymo_dataset.py) to process it, in addition need to add a waymo [metric](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/metrics/waymo_metric.py) to evaluate results. Because we preprocess the raw data and reorganize it like KITTI, the dataset class could be implemented more easily by inheriting from KittiDataset. Regarding the dataset evaluation metric, because Waymo has its own evaluation approach, we need further implement a new Waymo metric; more about the metric could refer to [metric_and_evaluator.md](https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/metric_and_evaluator.md). Afterward, users can successfully convert the data format and use `WaymoDataset` to train and evaluate the model by `WaymoMetric`. + +For more details about the intermediate results of preprocessing of Waymo dataset, please refer to its [waymo_det.md](https://mmdetection3d.readthedocs.io/en/latest/datasets/waymo_det.html). 
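+
+As a rough illustration of the dataset part (a hedged sketch against the dev-1.x registry API, **not** the actual `WaymoDataset` implementation in the repository), a KITTI-style dataset class mainly needs to inherit from `KittiDataset` and register itself:
+
+```python
+# Hedged sketch of a KITTI-style dataset class. The class name, metainfo and
+# the parsing hook are illustrative only, not the real WaymoDataset.
+from mmdet3d.datasets import KittiDataset
+from mmdet3d.registry import DATASETS
+
+
+@DATASETS.register_module()
+class MyWaymoStyleDataset(KittiDataset):
+    """Loads Waymo data that has already been converted to KITTI format."""
+
+    METAINFO = {'classes': ('Car', 'Pedestrian', 'Cyclist')}
+
+    def parse_data_info(self, info: dict) -> dict:
+        # Reuse the KITTI parsing logic and add dataset-specific tweaks here,
+        # e.g. remapping camera names or dropping empty annotations.
+        return super().parse_data_info(info)
+```
+
+The evaluation side follows the same pattern: a metric class (here `WaymoMetric`) is registered separately and implements the dataset-specific evaluation protocol.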
+ +## Prepare a config + +The second step is to prepare configs such that the dataset could be successfully loaded. In addition, adjusting hyperparameters is usually necessary to obtain decent performance in 3D detection. + +Suppose we would like to train PointPillars on Waymo to achieve 3D detection for 3 classes, vehicle, cyclist and pedestrian, we need to prepare dataset config like [this](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/datasets/waymoD5-3d-3class.py), model config like [this](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/models/pointpillars_hv_secfpn_waymo.py) and combine them like [this](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py), compared to KITTI [dataset config](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/datasets/kitti-3d-3class.py), [model config](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/models/pointpillars_hv_secfpn_kitti.py) and [overall](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py). + +## Train a new model + +To train a model with the new config, you can simply run + +```shell +python tools/train.py configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py +``` + +For more detailed usages, please refer to the [Case 1](https://mmdetection3d.readthedocs.io/en/latest/1_exist_data_model.html). + +## Test and inference + +To test the trained model, you can simply run + +```shell +python tools/test.py configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py work_dirs/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class/latest.pth +``` + +**Note**: To use Waymo evaluation protocol, you need to follow the [tutorial](https://mmdetection3d.readthedocs.io/en/latest/datasets/waymo_det.html) and prepare files related to metrics computation as official instructions. + +For more detailed usages for test and inference, please refer to the [Case 1](https://mmdetection3d.readthedocs.io/en/latest/1_exist_data_model.html). diff --git a/docs/en/user_guides/backends_support.md b/docs/en/user_guides/backends_support.md new file mode 100755 index 0000000..838fede --- /dev/null +++ b/docs/en/user_guides/backends_support.md @@ -0,0 +1,154 @@ +# Backends Support + +We support different file client backends: Disk, Ceph and LMDB, etc. Here is an example of how to modify configs for Ceph-based data loading and saving. 
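+
+The config snippets in the following sections all revolve around a `backend_args` dict that is forwarded to MMEngine's file I/O helpers. As a quick sanity check before wiring it into a config, you can read a file through the same backend directly (a hedged sketch: it assumes `mmengine.fileio.load` accepts `backend_args` and that the `petrel` client is installed; the paths are placeholders):
+
+```python
+# Hedged sketch: read an info file through the Ceph backend using the same
+# kind of backend_args dict as the dataset configs below. Paths are placeholders.
+from mmengine.fileio import load
+
+backend_args = dict(
+    backend='petrel',
+    path_mapping=dict({
+        './data/kitti/': 's3://openmmlab/datasets/detection3d/kitti/',
+        'data/kitti/': 's3://openmmlab/datasets/detection3d/kitti/'
+    }))
+
+# A local-looking path is transparently redirected to Ceph by path_mapping.
+infos = load('./data/kitti/kitti_infos_train.pkl', backend_args=backend_args)
+print(infos.keys())  # e.g. dict_keys(['metainfo', 'data_list']) for v1.1 infos
+```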
+ +## Load data and annotations from Ceph + +We support loading data and generated annotation info files (pkl and json) from Ceph: + +```python +# set file client backends as Ceph +backend_args = dict( + backend='petrel', + path_mapping=dict({ + './data/nuscenes/': + 's3://openmmlab/datasets/detection3d/nuscenes/', # replace the path with your data path on Ceph + 'data/nuscenes/': + 's3://openmmlab/datasets/detection3d/nuscenes/' # replace the path with your data path on Ceph + })) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + sample_groups=dict(Car=15), + classes=class_names, + # set file client for points loader to load training data + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + # set file client for data base sampler to load db info file + backend_args=backend_args) + +train_pipeline = [ + # set file client for loading training data + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, backend_args=backend_args), + # set file client for loading training data annotations + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, backend_args=backend_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[0.25, 0.25, 0.25], + global_rot_range=[0.0, 0.0], + rot_range=[-0.15707963267, 0.15707963267]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + # set file client for loading validation/testing data + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] + +data = dict( + # set file client for loading training info files (.pkl) + train=dict( + type='RepeatDataset', + times=2, + dataset=dict(pipeline=train_pipeline, classes=class_names, backend_args=backend_args)), + # set file client for loading validation info files (.pkl) + val=dict(pipeline=test_pipeline, classes=class_names,backend_args=backend_args), + # set file client for loading testing info files (.pkl) + test=dict(pipeline=test_pipeline, classes=class_names, backend_args=backend_args)) +``` + +## Load pretrained model from Ceph + +```python +model = dict( + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch='regnetx_1.6gf', + init_cfg=dict( + type='Pretrained', checkpoint='s3://openmmlab/checkpoints/mmdetection3d/regnetx_1.6gf'), # replace the path with your pretrained model path on Ceph + ... 
+``` + +## Load checkpoint from Ceph + +```python +# replace the path with your checkpoint path on Ceph +load_from = 's3://openmmlab/checkpoints/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20200620_230614-77663cd6.pth.pth' +resume_from = None +workflow = [('train', 1)] +``` + +## Save checkpoint into Ceph + +```python +# checkpoint saving +# replace the path with your checkpoint saving path on Ceph +checkpoint_config = dict(interval=1, max_keep_ckpts=2, out_dir='s3://openmmlab/mmdetection3d') +``` + +## EvalHook saves the best checkpoint into Ceph + +```python +# replace the path with your checkpoint saving path on Ceph +evaluation = dict(interval=1, save_best='bbox', out_dir='s3://openmmlab/mmdetection3d') +``` + +## Save the training log into Ceph + +The training log will be backed up to the specified Ceph path after training. + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', out_dir='s3://openmmlab/mmdetection3d'), + ]) +``` + +You can also delete the local training log after backing up to the specified Ceph path by setting `keep_local = False`. + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', out_dir='s3://openmmlab/mmdetection3d', keep_local=False), + ]) +``` diff --git a/docs/en/user_guides/config.md b/docs/en/user_guides/config.md new file mode 100755 index 0000000..2d4358e --- /dev/null +++ b/docs/en/user_guides/config.md @@ -0,0 +1,573 @@ +# Learn about Configs + +MMDetection3D and other OpenMMLab repositories use [MMEngine's config system](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html). It has a modular and inheritance design, which is convenient to conduct various experiments. + +## Config file content + +MMDetection3D uses a modular design, all modules with different functions can be configured through the config. Taking PointPillars as an example, we will introduce each field in the config according to different function modules. + +### Model config + +In MMDetection3D's config, we use `model` to setup detection algorithm components. In addition to neural network components such as `voxel_encoder`, `backbone` etc, it also requires `data_preprocessor`, `train_cfg`, and `test_cfg`. `data_preprocessor` is responsible for processing a batch of data output by dataloader. `train_cfg` and `test_cfg` in the model config are training and testing hyperparameters of the components. 
+ +```python +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=32, + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1], + voxel_size=[0.16, 0.16, 4], + max_voxels=(16000, 40000))), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=[0.16, 0.16, 4], + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1]), + middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + assign_per_class=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -1.78, 69.12, 39.68, -1.78]], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', + beta=0.1111111111111111, + loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + train_cfg=dict( + assigner=[ + dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1) + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) +``` + +### Dataset and evaluator config + +[Dataloaders](https://pytorch.org/docs/stable/data.html?highlight=data%20loader#torch.utils.data.DataLoader) are required for the training, validation, and testing of the [runner](https://mmengine.readthedocs.io/en/latest/tutorials/runner.html). Dataset and data pipeline need to be set to build the dataloader. Due to the complexity of this part, we use intermediate variables to simplify the writing of dataloader configs. 
+ +```python +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] +input_modality = dict(use_lidar=True, use_camera=False) +metainfo = dict(classes=class_names) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=15, Cyclist=15), + points_loader=dict( + type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4)) + +train_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler, use_ground_plane=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +eval_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=6, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + box_type_3d='LiDAR'))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +``` + +[Evaluators](https://mmengine.readthedocs.io/en/latest/tutorials/evaluation.html) are used to compute the metrics of the trained model on the validation and testing datasets. 
The config of evaluators consists of one or a list of metric configs: + +```python +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox') +test_evaluator = val_evaluator +``` + +Since the test dataset has no annotation files, the test_dataloader and test_evaluator config in MMDetection3D are generally equal to the val's. If you want to save the detection results on the test dataset, you can write the config like this: + +```python +# inference on test dataset and +# format the output results for submission. +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='testing/velodyne_reduced'), + ann_file='kitti_infos_test.pkl', + load_eval_anns=False, + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +test_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_test.pkl', + metric='bbox', + format_only=True, + submission_prefix='results/kitti-3class/kitti_results') +``` + +### Training and testing config + +MMEngine's runner uses Loop to control the training, validation, and testing processes. +Users can set the maximum training epochs and validation intervals with these fields: + +```python +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=80, + val_interval=2) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +``` + +### Optimization config + +`optim_wrapper` is the field to configure optimization-related settings. The optimizer wrapper not only provides the functions of the optimizer, but also supports functions such as gradient clipping, mixed precision training, etc. Find more in [optimizer wrapper tutorial](https://mmengine.readthedocs.io/en/latest/tutorials/optim_wrapper.html). + +```python +optim_wrapper = dict( # Optimizer wrapper config + type='OptimWrapper', # Optimizer wrapper type, switch to AmpOptimWrapper to enable mixed precision training. + optimizer=dict( # Optimizer config. Support all kinds of optimizers in PyTorch. Refer to https://pytorch.org/docs/stable/optim.html#algorithms + type='AdamW', lr=0.001, betas=(0.95, 0.99), weight_decay=0.01), + clip_grad=dict(max_norm=35, norm_type=2)) # Gradient clip option. Set None to disable gradient clip. Find usage in https://mmengine.readthedocs.io/en/latest/tutorials/optim_wrapper.html +``` + +`param_scheduler` is a field that configures methods of adjusting optimization hyperparameters such as learning rate and momentum. Users can combine multiple schedulers to create a desired parameter adjustment strategy. Find more in [parameter scheduler tutorial](https://mmengine.readthedocs.io/en/latest/tutorials/param_scheduler.html) and [parameter scheduler API documents](https://mmengine.readthedocs.io/en/latest/api/optim.html#scheduler). 
+ +```python +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=32, + eta_min=0.01, + begin=0, + end=32, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=48, + eta_min=1.0000000000000001e-07, + begin=32, + end=80, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=32, + eta_min=0.8947368421052632, + begin=0, + end=32, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=48, + eta_min=1, + begin=32, + end=80, + by_epoch=True, + convert_to_iter_based=True), +] +``` + +### Hook config + +Users can attach Hooks to training, validation, and testing loops to insert some operations during running. There are two different hook fields, one is `default_hooks` and the other is `custom_hooks`. + +`default_hooks` is a dict of hook configs, and they are the hooks must be required at the runtime. They have default priority which should not be modified. If not set, runner will use the default values. To disable a default hook, users can set its config to `None`. + +```python +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=-1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='Det3DVisualizationHook')) +``` + +`custom_hooks` is a list of all other hook configs. Users can develop their own hooks and insert them in this field. + +```python +custom_hooks = [] +``` + +### Runtime config + +```python +default_scope = 'mmdet3d' # The default registry scope to find modules. Refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/registry.html + +env_cfg = dict( + cudnn_benchmark=False, # Whether to enable cudnn benchmark + mp_cfg=dict( # Multi-processing config + mp_start_method='fork', # Use fork to start multi-processing threads. 'fork' usually faster than 'spawn' but maybe unsafe. See discussion in https://github.com/pytorch/pytorch/issues/1355 + opencv_num_threads=0), # Disable opencv multi-threads to avoid system being overloaded + dist_cfg=dict(backend='nccl')) # Distribution configs + +vis_backends = [dict(type='LocalVisBackend')] # Visualization backends. Refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/visualization.html +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +log_processor = dict( + type='LogProcessor', # Log processor to process runtime logs + window_size=50, # Smooth interval of log values + by_epoch=True) # Whether to format logs with epoch type. Should be consistent with the train loop's type. + +log_level = 'INFO' # The level of logging. +load_from = None # Load model checkpoint as a pre-trained model from a given path. This will not resume training. +resume = False # Whether to resume from the checkpoint defined in `load_from`. If `load_from` is None, it will resume the latest checkpoint in the `work_dir`. +``` + +## Config file inheritance + +There are 4 basic component types under `configs/_base_`, dataset, model, schedule, default_runtime. +Many methods could be easily constructed with one of these models like SECOND, PointPillars, PartA2, VoteNet. +The configs that are composed of components from `_base_` are called _primitive_. + +For all configs under the same folder, it is recommended to have only **one** _primitive_ config. 
All other configs should inherit from the _primitive_ config. In this way, the maximum of inheritance level is 3. + +For easy understanding, we recommend contributors to inherit from existing methods. +For example, if some modification is made based on PointPillars, users may first inherit the basic PointPillars structure by specifying `_base_ = '../pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py'`, then modify the necessary fields in the config files. + +If you are building an entirely new method that does not share the structure with any of the existing methods, you may create a folder `xxx_rcnn` under `configs`. + +Please refer to [MMEngine config tutorial](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html) for detailed documentation. + +By setting the `_base_` field, we can set which files the current configuration file inherits from. + +When `_base_` is a string of a file path, it means inheriting the contents from one config file. + +```python +_base_ = './pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py' +``` + +When `_base_` is a list of multiple file paths, it means inheriting from multiple files. + +```python +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_kitti.py', + '../_base_/datasets/kitti-3d-3class.py', + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py' +] +``` + +If you wish to inspect the config file, you may run `python tools/misc/print_config.py /PATH/TO/CONFIG` to see the complete config. + +### Ignore some fields in the base configs + +Sometimes, you may set `_delete_=True` to ignore some of the fields in base configs. +You may refer to [MMEngine config tutorial](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html) for a simple illustration. + +In MMDetection3D, for example, to change the neck of PointPillars with the following config: + +```python +model = dict( + type='MVXFasterRCNN', + data_preprocessor=dict(voxel_layer=dict(...)), + pts_voxel_encoder=dict(...), + pts_middle_encoder=dict(...), + pts_backbone=dict(...), + pts_neck=dict( + type='FPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + act_cfg=dict(type='ReLU'), + in_channels=[64, 128, 256], + out_channels=256, + start_level=0, + num_outs=3), + pts_bbox_head=dict(...)) +``` + +`FPN` and `SECONDFPN` use different keywords to construct: + +```python +_base_ = '../_base_/models/pointpillars_hv_fpn_nus.py' +model = dict( + pts_neck=dict( + _delete_=True, + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict(...)) +``` + +The `_delete_=True` would replace all old keys in `pts_neck` field with new keys. + +### Use intermediate variables in configs + +Some intermediate variables are used in the configs files, like `train_pipeline`/`test_pipeline` in datasets. +It's worth noting that when modifying intermediate variables in the children configs, user needs to pass the intermediate variables into corresponding fields again. +For example, we would like to use a multi-scale strategy to train and test a PointPillars, `train_pipeline`/`test_pipeline` are intermediate variables we would like to modify. 
+ +```python +_base_ = './nus-3d.py' +train_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=[0.95, 1.0, 1.05], + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +``` + +We first define the new `train_pipeline`/`test_pipeline` and pass them into dataloader fields. + +### Reuse variables in \_base\_ file + +If the users want to reuse the variables in the base file, they can get a copy of the corresponding variable by using `{{_base_.xxx}}`. E.g: + +```python +_base_ = './pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py' + +a = {{_base_.model}} # variable `a` is equal to the `model` defined in `_base_` +``` + +## Modify config through script arguments + +When submitting jobs using `tools/train.py` or `tools/test.py`, you may specify `--cfg-options` to in-place modify the config. + +- Update config keys of dict chains + + The config options can be specified following the order of the dict keys in the original config. + For example, `--cfg-options model.backbone.norm_eval=False` changes the all BN modules in model backbones to `train` mode. + +- Update keys inside a list of configs + + Some config dicts are composed as a list in your config. For example, the training pipeline `train_dataloader.dataset.pipeline` is normally a list + e.g. `[dict(type='LoadPointsFromFile'), ...]`. If you want to change `'LoadPointsFromFile'` to `'LoadPointsFromDict'` in the pipeline, + you may specify `--cfg-options data.train.pipeline.0.type=LoadPointsFromDict`. + +- Update values of list/tuple + + If the value to be updated is a list or a tuple. For example, the config file normally sets `model.data_preprocessor.mean=[123.675, 116.28, 103.53]`. If you want to + change the mean values, you may specify `--cfg-options model.data_preprocessor.mean="[127,127,127]"`. Note that the quotation mark `"` is necessary to + support list/tuple data types, and that **NO** white space is allowed inside the quotation marks in the specified value. + +## Config Name Style + +We follow the below style to name config files. Contributors are advised to follow the same style. 
+ +``` +{algorithm name}_{model component names [component1]_[component2]_[...]}_{training settings}_{training dataset information}_{testing dataset information}.py +``` + +The file name is divided to five parts. All parts and components are connected with `_` and words of each part or component should be connected with `-`. + +- `{algorithm name}`: The name of the algorithm. It can be a detector name such as `pointpillars`, `fcos3d`, etc. +- `{model component names}`: Names of the components used in the algorithm such as voxel_encoder, backbone, neck, etc. For example, `second_secfpn_head-dcn-circlenms` means using SECOND's SparseEncoder, SECONDFPN and a detection head with DCN and circle NMS. +- `{training settings}`: Information of training settings such as batch size, augmentations, loss trick, scheduler, and epochs/iterations. For example: `8xb4-tta-cyclic-20e` means using 8-gpus x 4-samples-per-gpu, test time augmentation, cyclic annealing learning rate, and train 20 epochs. + Some abbreviations: + - `{gpu x batch_per_gpu}`: GPUs and samples per GPU. `bN` indicates N batch size per GPU. E.g. `4xb4` is the short term of 4-GPUs x 4-samples-per-GPU. + - `{schedule}`: training schedule, options are `schedule-2x`, `schedule-3x`, `cyclic-20e`, etc. + `schedule-2x` and `schedule-3x` mean 24 epochs and 36 epochs respectively. + `cyclic-20e` means 20 epochs respectively. +- `{training dataset information}`: Training dataset names like `kitti-3d-3class`, `nus-3d`, `s3dis-seg`, `scannet-seg`, `waymoD5-3d-car`. Here `3d` means dataset used for 3D object detection, and `seg` means dataset used for point cloud segmentation. +- `{testing dataset information}` (optional): Testing dataset name for models trained on one dataset but tested on another. If not mentioned, it means the model was trained and tested on the same dataset type. diff --git a/docs/en/user_guides/coord_sys_tutorial.md b/docs/en/user_guides/coord_sys_tutorial.md new file mode 100755 index 0000000..cdfc313 --- /dev/null +++ b/docs/en/user_guides/coord_sys_tutorial.md @@ -0,0 +1,245 @@ +# Coordinate System + +## Overview + +MMDetection3D uses three different coordinate systems. The existence of different coordinate systems in the society of 3D object detection is necessary, because for various 3D data collection devices, such as LiDAR, depth camera, etc., the coordinate systems are not consistent, and different 3D datasets also follow different data formats. Early works, such as SECOND, VoteNet, convert the raw data to another format, forming conventions that some later works also follow, making the conversion between coordinate systems even more complicated. + +Despite the variety of datasets and equipment, by summarizing the line of works on 3D object detection we can roughly categorize coordinate systems into three: + +- Camera coordinate system -- the coordinate system of most cameras, in which the positive direction of the y-axis points to the ground, the positive direction of the x-axis points to the right, and the positive direction of the z-axis points to the front. + + ``` + up z front + | ^ + | / + | / + | / + |/ + left ------ 0 ------> x right + | + | + | + | + v + y down + ``` + +- LiDAR coordinate system -- the coordinate system of many LiDARs, in which the negative direction of the z-axis points to the ground, the positive direction of the x-axis points to the front, and the positive direction of the y-axis points to the left. 
+ + ``` + z up x front + ^ ^ + | / + | / + | / + |/ + y left <------ 0 ------ right + ``` + +- Depth coordinate system -- the coordinate system used by VoteNet, H3DNet, etc., in which the negative direction of the z-axis points to the ground, the positive direction of the x-axis points to the right, and the positive direction of the y-axis points to the front. + + ``` + z up y front + ^ ^ + | / + | / + | / + |/ + left ------ 0 ------> x right + ``` + +The definition of coordinate systems in this tutorial is actually **more than just defining the three axes**. For a box in the form of `` $$`(x, y, z, dx, dy, dz, r)`$$ ``, our coordinate systems also define how to interpret the box dimensions `` $$`(dx, dy, dz)`$$ `` and the yaw angle `` $$`r`$$ ``. + +The illustration of the three coordinate systems is shown below: + +![](https://raw.githubusercontent.com/open-mmlab/mmdetection3d/master/resources/coord_sys_all.png) + +The three figures above are the 3D coordinate systems while the three figures below are the bird's eye view. + +We will stick to the three coordinate systems defined in this tutorial in the future. + +## Definition of the yaw angle + +Please refer to [wikipedia](https://en.wikipedia.org/wiki/Euler_angles#Tait%E2%80%93Bryan_angles) for the standard definition of the yaw angle. In object detection, we choose an axis as the gravity axis, and a reference direction on the plane `` $$`\Pi`$$ `` perpendicular to the gravity axis, then the reference direction has a yaw angle of 0, and other directions on `` $$`\Pi`$$ `` have non-zero yaw angles depending on its angle with the reference direction. + +Currently, for all supported datasets, annotations do not include pitch angle and roll angle, which means we need only consider the yaw angle when predicting boxes and calculating overlap between boxes. + +In MMDetection3D, all three coordinate systems are right-handed coordinate systems, which means the ascending direction of the yaw angle is counter-clockwise if viewed from the negative direction of the gravity axis (the axis is pointing at one's eyes). + +The figure below shows that, in this right-handed coordinate system, if we set the positive direction of the x-axis as a reference direction, then the positive direction of the y-axis has a yaw angle of `` $$`\frac{\pi}{2}`$$ ``. + +``` + z up y front (yaw=0.5*pi) + ^ ^ + | / + | / + | / + |/ +left (yaw=pi) ------ 0 ------> x right (yaw=0) +``` + +For a box, the value of its yaw angle equals its direction minus a reference direction. In all three coordinate systems in MMDetection3D, the reference direction is always the positive direction of the x-axis, while the direction of a box is defined to be parallel with the x-axis if its yaw angle is 0. The definition of the yaw angle of a box is illustrated in the figure below. + +``` +y front + ^ box direction (yaw=0.5*pi) + /|\ ^ + | /|\ + | ____|____ + | | | | + | | | | +__|____|____|____|______\ x right + | | | | / + | | | | + | |____|____| + | +``` + +## Definition of the box dimensions + +The definition of the box dimensions cannot be disentangled with the definition of the yaw angle. In the previous section, we said that the direction of a box is defined to be parallel with the x-axis if its yaw angle is 0. Then naturally, the dimension of a box which corresponds to the x-axis should be `` $$`dx`$$ ``. However, this is not always the case in some datasets (we will address that later). 
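+
+To make the `` $$`(x, y, z, dx, dy, dz, r)`$$ `` convention concrete, here is a minimal sketch (assuming the box classes in `mmdet3d.structures`; attribute names follow the dev-1.x API):
+
+```python
+import torch
+
+from mmdet3d.structures import LiDARInstance3DBoxes
+
+# One LiDAR-coordinate box: center (x, y, z), dims (dx, dy, dz), yaw r.
+# With yaw = 0 the box direction is parallel to the x-axis, so dx is the
+# extent along x and dy the extent along y, matching the figures below.
+boxes = LiDARInstance3DBoxes(
+    torch.tensor([[0.0, 0.0, -1.0, 3.9, 1.6, 1.5, 0.0]]))
+
+print(boxes.dims)  # tensor([[3.9000, 1.6000, 1.5000]]) -> (dx, dy, dz)
+print(boxes.yaw)   # tensor([0.])
+```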
+ +The following figures show the meaning of the correspondence between the x-axis and `` $$`dx`$$ ``, and between the y-axis and `` $$`dy`$$ ``. + +``` +y front + ^ box direction (yaw=0.5*pi) + /|\ ^ + | /|\ + | ____|____ + | | | | + | | | | dx +__|____|____|____|______\ x right + | | | | / + | | | | + | |____|____| + | dy +``` + +Note that the box direction is always parallel with the edge `` $$`dx`$$ ``. + +``` +y front + ^ _________ + /|\ | | | + | | | | + | | | | dy + | |____|____|____\ box direction (yaw=0) + | | | | / +__|____|____|____|_________\ x right + | | | | / + | |____|____| + | dx + | +``` + +## Relation with raw coordinate systems of supported datasets + +### KITTI + +The raw annotation of KITTI is under camera coordinate system, see [get_label_anno](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/dataset_converters/kitti_data_utils.py). In MMDetection3D, to train LiDAR-based models on KITTI, the data is first converted from camera coordinate system to LiDAR coordinate system, see [get_ann_info](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/kitti_dataset.py). For training vision-based models, the data is kept in the camera coordinate system. + +In SECOND, the LiDAR coordinate system for a box is defined as follows (a bird's eye view): + +![](https://raw.githubusercontent.com/traveller59/second.pytorch/master/images/kittibox.png) + +For each box, the dimensions are `` $$`(w, l, h)`$$ ``, and the reference direction for the yaw angle is the positive direction of the y axis. For more details, refer to the [repo](https://github.com/traveller59/second.pytorch#concepts). + +Our LiDAR coordinate system has two changes: + +- The yaw angle is defined to be right-handed instead of left-handed for consistency; +- The box dimensions are `` $$`(l, w, h)`$$ `` instead of `` $$`(w, l, h)`$$ ``, since `` $$`w`$$ `` corresponds to `` $$`dy`$$ `` and `` $$`l`$$ `` corresponds to `` $$`dx`$$ `` in KITTI. + +### Waymo + +We use the KITTI-format data of Waymo dataset. Therefore, KITTI and Waymo also share the same coordinate system in our implementation. + +### NuScenes + +NuScenes provides a toolkit for evaluation, in which each box is wrapped into a `Box` instance. The coordinate system of `Box` is different from our LiDAR coordinate system in that the first two elements of the box dimension correspond to `` $$`(dy, dx)`$$ ``, or `` $$`(w, l)`$$ ``, respectively, instead of the reverse. For more details, please refer to the NuScenes [tutorial](https://github.com/open-mmlab/mmdetection3d/blob/master/docs/en/datasets/nuscenes_det.md#notes). + +Readers may refer to the [NuScenes development kit](https://github.com/nutonomy/nuscenes-devkit/tree/master/python-sdk/nuscenes/eval/detection) for the definition of a [NuScenes box](https://github.com/nutonomy/nuscenes-devkit/blob/2c6a752319f23910d5f55cc995abc547a9e54142/python-sdk/nuscenes/utils/data_classes.py#L457) and implementation of [NuScenes evaluation](https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/eval/detection/evaluate.py). + +### Lyft + +Lyft shares the same data format with NuScenes as far as coordinate system is involved. + +Please refer to the [official website](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data) for more information. + +### ScanNet + +The raw data of ScanNet is not point cloud but mesh. The sampled point cloud data is under our depth coordinate system. 
For ScanNet detection task, the box annotations are axis-aligned, and the yaw angle is always zero. Therefore the direction of the yaw angle in our depth coordinate system makes no difference regarding ScanNet. + +### SUN RGB-D + +The raw data of SUN RGB-D is not point cloud but RGB-D image. By back projection, we obtain the corresponding point cloud for each image, which is under our Depth coordinate system. However, the annotation is not under our system and thus needs conversion. + +For the conversion from raw annotation to annotation under our Depth coordinate system, please refer to [sunrgbd_data_utils.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/dataset_converters/sunrgbd_data_utils.py). + +### S3DIS + +S3DIS shares the same coordinate system as ScanNet in our implementation. However, S3DIS is a segmentation-task-only dataset, and thus no annotation is coordinate system sensitive. + +## Examples + +### Box conversion (between different coordinate systems) + +Take the conversion between our Camera coordinate system and LiDAR coordinate system as an example: + +First, for points and box centers, the coordinates before and after the conversion satisfy the following relationship: + +- `` $$`x_{LiDAR}=z_{camera}`$$ `` +- `` $$`y_{LiDAR}=-x_{camera}`$$ `` +- `` $$`z_{LiDAR}=-y_{camera}`$$ `` + +Then, the box dimensions before and after the conversion satisfy the following relationship: + +- `` $$`dx_{LiDAR}=dx_{camera}`$$ `` +- `` $$`dy_{LiDAR}=dz_{camera}`$$ `` +- `` $$`dz_{LiDAR}=dy_{camera}`$$ `` + +Finally, the yaw angle should also be converted: + +- `` $$`r_{LiDAR}=-\frac{\pi}{2}-r_{camera}`$$ `` + +See the code [here](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/box_3d_mode.py) for more details. + +### Bird's Eye View + +The BEV of a camera coordinate system box is `` $$`(x, z, dx, dz, -r)`$$ `` if the 3D box is `` $$`(x, y, z, dx, dy, dz, r)`$$ ``. The inversion of the sign of the yaw angle is because the positive direction of the gravity axis of the Camera coordinate system points to the ground. + +See the code [here](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/cam_box3d.py) for more details. + +### Rotation of boxes + +We set the rotation of all kinds of boxes to be counter-clockwise about the gravity axis. Therefore, to rotate a 3D box we first calculate the new box center, and then we add the rotation angle to the yaw angle. + +See the code [here](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/cam_box3d.py) for more details. + +## Common FAQ + +#### Q1: Are the box related ops universal to all coordinate system types? + +No. For example, [RoI-Aware Pooling ops](https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/roiaware_pool3d.py) is applicable to boxes under Depth or LiDAR coordinate system only. The evaluation functions for KITTI dataset [here](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/evaluation/kitti_utils) are only applicable to boxes under Camera coordinate system since the rotation is clockwise if viewed from above. + +For each box related op, we have marked the type of boxes to which we can apply the op. + +#### Q2: In every coordinate system, do the three axes point exactly to the right, the front, and the ground, respectively? + +No. For example, in KITTI, we need a calibration matrix when converting from Camera coordinate system to LiDAR coordinate system. 
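+
+For example, converting KITTI camera-coordinate boxes to LiDAR coordinates with the `Box3DMode` utilities (see the link in the "Box conversion" section above) requires passing the calibration matrix explicitly. A hedged sketch follows; the import path is the dev-1.x layout (`mmdet3d.structures`) and may differ in older versions, and the identity matrix is only a placeholder for the real calibration derived from KITTI's calib files:
+
+```python
+import torch
+
+from mmdet3d.structures import Box3DMode, CameraInstance3DBoxes
+
+# One box in camera coordinates: (x, y, z, dx, dy, dz, r).
+cam_boxes = CameraInstance3DBoxes(
+    torch.tensor([[1.0, 1.5, 10.0, 1.6, 1.5, 3.9, 0.3]]))
+
+# Placeholder: on KITTI this should be the camera-to-LiDAR calibration matrix.
+rt_mat = torch.eye(4)
+
+lidar_boxes = Box3DMode.convert(
+    cam_boxes, Box3DMode.CAM, Box3DMode.LIDAR, rt_mat=rt_mat)
+print(lidar_boxes.tensor)  # (x, y, z, dx, dy, dz, r) in LiDAR coordinates
+```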
+
+#### Q3: How does a phase difference of `` $$`2\pi`$$ `` in the yaw angle of a box affect evaluation?
+
+For IoU calculation, a phase difference of `` $$`2\pi`$$ `` in the yaw angle will result in the same box, thus not affecting evaluation.
+
+For angle prediction evaluation such as the NDS metric in NuScenes and the AOS metric in KITTI, the angle of predicted boxes will be first standardized, so the phase difference of `` $$`2\pi`$$ `` will not change the result.
+
+#### Q4: How does a phase difference of `` $$`\pi`$$ `` in the yaw angle of a box affect evaluation?
+
+For IoU calculation, a phase difference of `` $$`\pi`$$ `` in the yaw angle will result in the same box, thus not affecting evaluation.
+
+However, for angle prediction evaluation, this will result in the exact opposite direction.
+
+Just think about a car. The yaw angle is the angle between the direction of the car front and the positive direction of the x-axis. If we add `` $$`\pi`$$ `` to this angle, the car front will become the car rear.
+
+For categories such as barrier, the front and the rear have no difference, therefore a phase difference of `` $$`\pi`$$ `` will not affect the angle prediction score.
diff --git a/docs/en/user_guides/data_pipeline.md b/docs/en/user_guides/data_pipeline.md
new file mode 100755
index 0000000..01f5c61
--- /dev/null
+++ b/docs/en/user_guides/data_pipeline.md
@@ -0,0 +1,199 @@
+# Customize Data Pipelines
+
+## Design of Data pipelines
+
+Following typical conventions, we use `Dataset` and `DataLoader` for data loading with multiple workers. `Dataset` returns a dict of data items corresponding to the arguments of the model's forward method. Since the data in object detection may not be the same size (point number, gt bbox size, etc.), we introduce a new `DataContainer` type in MMCV to help collect and distribute data of different sizes. See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details.
+
+The data preparation pipeline and the dataset are decoupled. Usually a dataset defines how to process the annotations, and a data pipeline defines all the steps to prepare a data dict. A pipeline consists of a sequence of operations. Each operation takes a dict as input and also outputs a dict for the next transform.
+
+We present a classical pipeline in the following figure. The blue blocks are pipeline operations. As the pipeline proceeds, each operator can add new keys (marked as green) to the result dict or update the existing keys (marked as orange).
+
+![](../../../resources/data_pipeline.png)
+
+The operations are categorized into data loading, pre-processing, formatting and test-time augmentation.
+
+Here is a pipeline example for PointPillars.
+ +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + pts_scale_ratio=1.0, + flip=False, + pcd_horizontal_flip=False, + pcd_vertical_flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +``` + +For each operation, we list the related dict fields that are added/updated/removed. + +### Data loading + +`LoadPointsFromFile` + +- add: points + +`LoadPointsFromMultiSweeps` + +- update: points + +`LoadAnnotations3D` + +- add: gt_bboxes_3d, gt_labels_3d, gt_bboxes, gt_labels, pts_instance_mask, pts_semantic_mask, bbox3d_fields, pts_mask_fields, pts_seg_fields + +### Pre-processing + +`GlobalRotScaleTrans` + +- add: pcd_trans, pcd_rotation, pcd_scale_factor +- update: points, \*bbox3d_fields + +`RandomFlip3D` + +- add: flip, pcd_horizontal_flip, pcd_vertical_flip +- update: points, \*bbox3d_fields + +`PointsRangeFilter` + +- update: points + +`ObjectRangeFilter` + +- update: gt_bboxes_3d, gt_labels_3d + +`ObjectNameFilter` + +- update: gt_bboxes_3d, gt_labels_3d + +`PointShuffle` + +- update: points + +`PointsRangeFilter` + +- update: points + +### Formatting + +`DefaultFormatBundle3D` + +- update: points, gt_bboxes_3d, gt_labels_3d, gt_bboxes, gt_labels + +`Collect3D` + +- add: img_meta (the keys of img_meta is specified by `meta_keys`) +- remove: all other keys except for those specified by `keys` + +### Test time augmentation + +`MultiScaleFlipAug` + +- update: scale, pcd_scale_factor, flip, flip_direction, pcd_horizontal_flip, pcd_vertical_flip with list of augmented data with these specific parameters + +## Extend and use custom pipelines + +1. Write a new pipeline in any file, e.g., `my_pipeline.py`. It takes a dict as input and return a dict. + + ```python + from mmdet.datasets import PIPELINES + + @PIPELINES.register_module() + class MyTransform: + + def __call__(self, results): + results['dummy'] = True + return results + ``` + +2. Import the new class. + + ```python + from .my_pipeline import MyTransform + ``` + +3. Use it in config files. 
+ + ```python + train_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='MyTransform'), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + ] + ``` diff --git a/docs/en/user_guides/dataset_prepare.md b/docs/en/user_guides/dataset_prepare.md new file mode 100755 index 0000000..345d31d --- /dev/null +++ b/docs/en/user_guides/dataset_prepare.md @@ -0,0 +1,180 @@ +# Dataset Preparation + +## Before Preparation + +It is recommended to symlink the dataset root to `$MMDETECTION3D/data`. +If your folder structure is different from the following, you may need to change the corresponding paths in config files. + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── nuscenes +│ │ ├── maps +│ │ ├── samples +│ │ ├── sweeps +│ │ ├── v1.0-test +| | ├── v1.0-trainval +│ ├── kitti +│ │ ├── ImageSets +│ │ ├── testing +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── velodyne +│ │ ├── training +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── label_2 +│ │ │ ├── velodyne +│ ├── waymo +│ │ ├── waymo_format +│ │ │ ├── training +│ │ │ ├── validation +│ │ │ ├── testing +│ │ │ ├── gt.bin +│ │ ├── kitti_format +│ │ │ ├── ImageSets +│ ├── lyft +│ │ ├── v1.01-train +│ │ │ ├── v1.01-train (train_data) +│ │ │ ├── lidar (train_lidar) +│ │ │ ├── images (train_images) +│ │ │ ├── maps (train_maps) +│ │ ├── v1.01-test +│ │ │ ├── v1.01-test (test_data) +│ │ │ ├── lidar (test_lidar) +│ │ │ ├── images (test_images) +│ │ │ ├── maps (test_maps) +│ │ ├── train.txt +│ │ ├── val.txt +│ │ ├── test.txt +│ │ ├── sample_submission.csv +│ ├── s3dis +│ │ ├── meta_data +│ │ ├── Stanford3dDataset_v1.2_Aligned_Version +│ │ ├── collect_indoor3d_data.py +│ │ ├── indoor3d_util.py +│ │ ├── README.md +│ ├── scannet +│ │ ├── meta_data +│ │ ├── scans +│ │ ├── scans_test +│ │ ├── batch_load_scannet_data.py +│ │ ├── load_scannet_data.py +│ │ ├── scannet_utils.py +│ │ ├── README.md +│ ├── sunrgbd +│ │ ├── OFFICIAL_SUNRGBD +│ │ ├── matlab +│ │ ├── sunrgbd_data.py +│ │ ├── sunrgbd_utils.py +│ │ ├── README.md + +``` + +## Download and Data Preparation + +### KITTI + +Download KITTI 3D detection data [HERE](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d). 
Prepare KITTI data splits by running: + +```bash +mkdir ./data/kitti/ && mkdir ./data/kitti/ImageSets + +# Download data split +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/test.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/test.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/train.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/train.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/val.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/val.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/trainval.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/trainval.txt +``` + +Then generate info files by running: + +```bash +python tools/create_data.py kitti --root-path ./data/kitti --out-dir ./data/kitti --extra-tag kitti +``` + +In an environment using slurm, users may run the following command instead: + +```bash +sh tools/create_data.sh kitti +``` + +### Waymo + +Download Waymo open dataset V1.2 [HERE](https://waymo.com/open/download/) and its data split [HERE](https://drive.google.com/drive/folders/18BVuF_RYJF0NjZpt8SnfzANiakoRMf0o?usp=sharing). Then put `.tfrecord` files into corresponding folders in `data/waymo/waymo_format/` and put the data split `.txt` files into `data/waymo/kitti_format/ImageSets`. Download ground truth `.bin` file for validation set [HERE](https://console.cloud.google.com/storage/browser/waymo_open_dataset_v_1_2_0/validation/ground_truth_objects) and put it into `data/waymo/waymo_format/`. A tip is that you can use `gsutil` to download the large-scale dataset with commands. You can take this [tool](https://github.com/RalphMao/Waymo-Dataset-Tool) as an example for more details. Subsequently, prepare waymo data by running: + +```bash +python tools/create_data.py waymo --root-path ./data/waymo/ --out-dir ./data/waymo/ --workers 128 --extra-tag waymo +``` + +Note that: + +- If your local disk does not have enough space for saving converted data, you can change the `--out-dir` to anywhere else. Just remember to create folders and prepare data there in advance and link them back to `data/waymo/kitti_format` after the data conversion. + +- If you want faster evaluation on Waymo, you can download the preprocessed [metainfo](https://download.openmmlab.com/mmdetection3d/data/waymo/idx2metainfo.pkl) containing `contextname` and `timestamp` to the directory `data/waymo/waymo_format/`. Then, the dataset config is modified like the following: + + ```python + val_evaluator = dict( + type='WaymoMetric', + ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl', + waymo_bin_file='./data/waymo/waymo_format/gt.bin', + data_root='./data/waymo/waymo_format', + backend_args=backend_args, + convert_kitti_format=True, + idx2metainfo='data/waymo/waymo_format/idx2metainfo.pkl' + ) + ``` + + Now, this trick is only used for LiDAR-based detection methods. + +### NuScenes + +Download nuScenes V1.0 full dataset data [HERE](https://www.nuscenes.org/download). Prepare nuscenes data by running: + +```bash +python tools/create_data.py nuscenes --root-path ./data/nuscenes --out-dir ./data/nuscenes --extra-tag nuscenes +``` + +### Lyft + +Download Lyft 3D detection data [HERE](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data). 
Prepare Lyft data by running: + +```bash +python tools/create_data.py lyft --root-path ./data/lyft --out-dir ./data/lyft --extra-tag lyft --version v1.01 +python tools/dataset_converters/lyft_data_fixer.py --version v1.01 --root-folder ./data/lyft +``` + +Note that we follow the original folder names for clear organization. Please rename the raw folders as shown above. Also note that the second command serves the purpose of fixing a corrupted lidar data file. Please refer to the [discussion](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000) for more details. + +### S3DIS, ScanNet and SUN RGB-D + +To prepare S3DIS data, please see its [README](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/data/s3dis/README.md). + +To prepare ScanNet data, please see its [README](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/data/scannet/README.md). + +To prepare SUN RGB-D data, please see its [README](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/data/sunrgbd/README.md). + +### Customized Datasets + +For using custom datasets, please refer to [Customize Datasets](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/en/advanced_guides/customize_dataset.md). + +### Update data infos + +If you have used v1.0.0rc1-v1.0.0rc4 mmdetection3d to create data infos before, and now you want to use the newest v1.1.0 mmdetection3d, you need to update the data infos file. + +```bash +python tools/dataset_converters/update_infos_to_v2.py --dataset ${DATA_SET} --pkl-path ${PKL_PATH} --out-dir ${OUT_DIR} +``` + +- `--dataset` : Name of dataset. +- `--pkl-path` : Specify the data infos pkl file path. +- `--out-dir` : Output direction of the data infos pkl file. + +Example: + +```bash +python tools/dataset_converters/update_infos_to_v2.py --dataset kitti --pkl-path ./data/kitti/kitti_infos_trainval.pkl --out-dir ./data/kitti +``` diff --git a/docs/en/user_guides/index.rst b/docs/en/user_guides/index.rst new file mode 100755 index 0000000..cd11405 --- /dev/null +++ b/docs/en/user_guides/index.rst @@ -0,0 +1,15 @@ +.. toctree:: + :maxdepth: 3 + + 2_new_data_model.md + backends_support.md + config.md + coord_sys_tutorial.md + data_pipeline.md + dataset_prepare.md + inference.md + index.rst + model_deployment.md + train_test.md + useful_tools.md + visualization.md diff --git a/docs/en/user_guides/inference.md b/docs/en/user_guides/inference.md new file mode 100755 index 0000000..e14888b --- /dev/null +++ b/docs/en/user_guides/inference.md @@ -0,0 +1,89 @@ +# Inference + +## Introduction + +We provide scripts for multi-modality/single-modality (LiDAR-based/vision-based), indoor/outdoor 3D detection and 3D semantic segmentation demos. The pre-trained models can be downloaded from [model zoo](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/en/model_zoo.md). We provide pre-processed sample data from KITTI, SUN RGB-D, nuScenes and ScanNet dataset. You can use any other data following our pre-processing steps. + +## Testing + +### 3D Detection + +#### Single-modality demo + +To test a 3D detector on point cloud data, simply run: + +```shell +python demo/pcd_demo.py ${PCD_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--score-thr ${SCORE_THR}] [--out-dir ${OUT_DIR}] [--show] +``` + +The visualization results including a point cloud and predicted 3D bounding boxes will be saved in `${OUT_DIR}/PCD_NAME`, which you can open using [MeshLab](http://www.meshlab.net/). 
Note that if you set the flag `--show`, the prediction result will be displayed online using [Open3D](http://www.open3d.org/). + +Example on KITTI data using [SECOND](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/second) model: + +```shell +python demo/pcd_demo.py demo/data/kitti/000008.bin configs/second/second_hv-secfpn_8xb6-80e_kitti-3d-car.py checkpoints/second_hv-secfpn_8xb6-80e_kitti-3d-car_20200620_230238-393f000c.pth +``` + +Example on SUN RGB-D data using [VoteNet](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/votenet) model: + +```shell +python demo/pcd_demo.py demo/data/sunrgbd/sunrgbd_000017.bin configs/votenet/votenet_8xb16_sunrgbd-3d.py checkpoints/votenet_8xb16_sunrgbd-3d_20200620_230238-4483c0c0.pth +``` + +Remember to convert the VoteNet checkpoint if you are using mmdetection3d version >= 0.6.0. See its [README](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/votenet/README.md/) for detailed instructions on how to convert the checkpoint. + +#### Multi-modality demo + +To test a 3D detector on multi-modality data (typically point cloud and image), simply run: + +```shell +python demo/multi_modality_demo.py ${PCD_FILE} ${IMAGE_FILE} ${ANNOTATION_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--score-thr ${SCORE_THR}] [--out-dir ${OUT_DIR}] [--show] +``` + +where the `ANNOTATION_FILE` should provide the 3D to 2D projection matrix. The visualization results including a point cloud, an image, predicted 3D bounding boxes and their projection on the image will be saved in `${OUT_DIR}/PCD_NAME`. + +Example on KITTI data using [MVX-Net](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/mvxnet) model: + +```shell +python demo/multi_modality_demo.py demo/data/kitti/000008.bin demo/data/kitti/000008.png demo/data/kitti/000008.pkl configs/mvxnet/mvx_fpn-dv-second-secfpn_8xb2-80e_kitti-3d-3class.py checkpoints/mvx_fpn-dv-second-secfpn_8xb2-80e_kitti-3d-3class_20200621_003904-10140f2d.pth +``` + +Example on SUN RGB-D data using [ImVoteNet](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/imvotenet) model: + +```shell +python demo/multi_modality_demo.py demo/data/sunrgbd/sunrgbd_000017.bin demo/data/sunrgbd/sunrgbd_000017.jpg demo/data/sunrgbd/sunrgbd_000017_infos.pkl configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd.py checkpoints/imvotenet_stage2_8xb16_sunrgbd_20210323_184021-d44dcb66.pth +``` + +### Monocular 3D Detection + +To test a monocular 3D detector on image data, simply run: + +```shell +python demo/mono_det_demo.py ${IMAGE_FILE} ${ANNOTATION_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--cam-type ${CAM_TYPE}] [--score-thr ${SCORE-THR}] [--out-dir ${OUT_DIR}] [--show] +``` + +where the `ANNOTATION_FILE` should provide the 3D to 2D projection matrix (camera intrinsic matrix), and `CAM_TYPE` should be specified according to dataset. For example, if you want to inference on the front camera image, the `CAM_TYPE` should be set as `CAM_2` for KITTI, and `CAM_FRONT` for nuScenes. By specifying `CAM_TYPE`, you can even infer on any camera images for datasets with multi-view cameras, such as nuScenes and Waymo. `SCORE-THR` is the 3D bbox threshold while visualization. The visualization results including an image and its predicted 3D bounding boxes projected on the image will be saved in `${OUT_DIR}/IMG_NAME`. 
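+
+If you are not sure whether your annotation file actually carries the required projection matrix, you can inspect it before running the demo. Below is a minimal sketch, assuming the KITTI sample info file shipped with the demos (`demo/data/kitti/000008.pkl`) and its `CAM2` camera; replace the path and camera name (e.g. `CAM_FRONT` for nuScenes) to match your own data.
+
+```python
+from mmengine import load
+
+# The path and the 'CAM2' key come from the KITTI demo data; adjust both for your dataset.
+info = load('demo/data/kitti/000008.pkl')
+cam_info = info['data_list'][0]['images']['CAM2']
+print(cam_info['cam2img'])  # camera intrinsic / projection matrix used for 3D-to-2D projection
+```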
+
+Example on nuScenes data using [FCOS3D](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/fcos3d) model:
+
+```shell
+python demo/mono_det_demo.py demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.pkl configs/fcos3d/fcos3d_r101-caffe-dcn-fpn-head-gn_8xb2-1x_nus-mono3d_finetune.py checkpoints/fcos3d_r101-caffe-dcn-fpn-head-gn_8xb2-1x_nus-mono3d_finetune_20210717_095645-8d806dc2.pth
+```
+
+Note that when visualizing results of monocular 3D detection for flipped images, the camera intrinsic matrix should also be modified accordingly. See more details and examples in PR [#744](https://github.com/open-mmlab/mmdetection3d/pull/744).
+
+### 3D Segmentation
+
+To test a 3D segmentor on point cloud data, simply run:
+
+```shell
+python demo/pcd_seg_demo.py ${PCD_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--out-dir ${OUT_DIR}] [--show]
+```
+
+The visualization results including a point cloud and its predicted 3D segmentation mask will be saved in `${OUT_DIR}/PCD_NAME`.
+
+Example on ScanNet data using [PointNet++ (SSG)](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/pointnet2) model:
+
+```shell
+python demo/pc_seg_demo.py demo/data/scannet/scene0000_00.bin configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py checkpoints/pointnet2_ssg_2xb16-cosine-200e_scannet-seg_20210514_143644-ee73704a.pth
+```
diff --git a/docs/en/user_guides/model_deployment.md b/docs/en/user_guides/model_deployment.md
new file mode 100755
index 0000000..6014b3a
--- /dev/null
+++ b/docs/en/user_guides/model_deployment.md
@@ -0,0 +1,4 @@
+# Model Deployment
+
+MMDet3D 1.1 fully relies on [MMDeploy](https://mmdeploy.readthedocs.io/) to deploy models.
+Please stay tuned; this document will be updated soon.
diff --git a/docs/en/user_guides/train_test.md b/docs/en/user_guides/train_test.md
new file mode 100755
index 0000000..cc730f0
--- /dev/null
+++ b/docs/en/user_guides/train_test.md
@@ -0,0 +1,264 @@
+# Inference and train with existing models and standard datasets
+
+## Inference with existing models
+
+Here we provide testing scripts to evaluate a whole dataset (SUN RGB-D, ScanNet, KITTI, etc.).
+
+For high-level APIs that are easier to integrate into other projects, as well as basic demos, please refer to Verification/Demo under [Get Started](https://mmdetection3d.readthedocs.io/en/dev-1.x/inference.html).
+
+### Test existing models on standard datasets
+
+- single GPU
+- CPU
+- single node with multiple GPUs
+- multiple nodes
+
+You can use the following commands to test a dataset.
+
+```shell
+# single-gpu testing
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--cfg-options test_evaluator.pklfile_prefix=${RESULT_FILE}] [--show] [--show-dir ${SHOW_DIR}]
+
+# CPU: disable GPUs and run single-gpu testing script (experimental)
+export CUDA_VISIBLE_DEVICES=-1
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--cfg-options test_evaluator.pklfile_prefix=${RESULT_FILE}] [--show] [--show-dir ${SHOW_DIR}]
+
+# multi-gpu testing
+./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--cfg-options test_evaluator.pklfile_prefix=${RESULT_FILE}] [--show] [--show-dir ${SHOW_DIR}]
+```
+
+**Note**:
+
+For now, CPU testing is only supported for SMOKE.
+
+Optional arguments:
+
+- `--show`: If specified, detection results will be plotted in silent mode. It is only applicable to single GPU testing and used for debugging and visualization.
This should be used with `--show-dir`. +- `--show-dir`: If specified, detection results will be plotted on the `***_points.obj` and `***_pred.obj` files in the specified directory. It is only applicable to single GPU testing and used for debugging and visualization. You do NOT need a GUI available in your environment for using this option. + +All evaluation related arguments are set in the `test_evaluator` in corresponding dataset configuration. such as +`test_evaluator = dict(type='KittiMetric', ann_file=data_root + 'kitti_infos_val.pkl', pklfile_prefix=None, submission_prefix=None)` + +The arguments: + +- `type`: The name of the corresponding metric, usually associated with the dataset. +- `ann_file`: The path of annotation file. +- `pklfile_prefix`: An optional argument. The filename of the output results in pickle format. If not specified, the results will not be saved to a file. +- `submission_prefix`: An optional argument. The results will be saved to a file then you can upload it to do the official evaluation. + +Examples: + +Assume that you have already downloaded the checkpoints to the directory `checkpoints/`. + +1. Test VoteNet on ScanNet and save the points and prediction visualization results. + + ```shell + python tools/test.py configs/votenet/votenet_8xb8_scannet-3d.py \ + checkpoints/votenet_8x8_scannet-3d-18class_20200620_230238-2cea9c3a.pth \ + --show --show-dir ./data/scannet/show_results + ``` + +2. Test VoteNet on ScanNet, save the points, prediction, groundtruth visualization results, and evaluate the mAP. + + ```shell + python tools/test.py configs/votenet/votenet_8xb8_scannet-3d.py \ + checkpoints/votenet_8x8_scannet-3d-18class_20200620_230238-2cea9c3a.pth \ + --show --show-dir ./data/scannet/show_results + ``` + +3. Test VoteNet on ScanNet (without saving the test results) and evaluate the mAP. + + ```shell + python tools/test.py configs/votenet/votenet_8xb8_scannet-3d.py \ + checkpoints/votenet_8x8_scannet-3d-18class_20200620_230238-2cea9c3a.pth + ``` + +4. Test SECOND on KITTI with 8 GPUs, and evaluate the mAP. + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py \ + checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20200620_230238-9208083a.pth + ``` + +5. Test PointPillars on nuScenes with 8 GPUs, and generate the json file to be submit to the official evaluation server. + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py \ + checkpoints/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405-2fa62f3d.pth \ + --cfg-options 'test_evaluator.jsonfile_prefix=./pointpillars_nuscenes_results' + ``` + + The generated results be under `./pointpillars_nuscenes_results` directory. + +6. Test SECOND on KITTI with 8 GPUs, and generate the pkl files and submission data to be submit to the official evaluation server. + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py \ + checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20200620_230238-9208083a.pth \ + --cfg-options 'test_evaluator.pklfile_prefix=./second_kitti_results' 'submission_prefix=./second_kitti_results' + ``` + + The generated results be under `./second_kitti_results` directory. + +7. Test PointPillars on Lyft with 8 GPUs, generate the pkl files and make a submission to the leaderboard. 
+ + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/hv_pointpillars_fpn_sbn-2x8_2x_lyft-3d.py \ + checkpoints/hv_pointpillars_fpn_sbn-2x8_2x_lyft-3d_latest.pth \ + --cfg-options 'test_evaluator.jsonfile_prefix=results/pp_lyft/results_challenge' \ + 'test_evaluator.csv_savepath=results/pp_lyft/results_challenge.csv' \ + 'test_evaluator.pklfile_prefix=results/pp_lyft/results_challenge.pkl' + ``` + + **Notice**: To generate submissions on Lyft, `csv_savepath` must be given in the `--cfg-options`. After generating the csv file, you can make a submission with kaggle commands given on the [website](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/submit). + + Note that in the [config of Lyft dataset](../../configs/_base_/datasets/lyft-3d.py), the value of `ann_file` keyword in `test` is `'lyft_infos_test.pkl'`, which is the official test set of Lyft without annotation. To test on the validation set, please change this to `'lyft_infos_val.pkl'`. + +8. Test PointPillars on waymo with 8 GPUs, and evaluate the mAP with waymo metrics. + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py \ + checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth \ + --cfg-options 'test_evaluator.pklfile_prefix=results/waymo-car/kitti_results' \ + 'test_evaluator.submission_prefix=results/waymo-car/kitti_results' + ``` + + **Notice**: For evaluation on waymo, please follow the [instruction](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md/) to build the binary file `compute_detection_metrics_main` for metrics computation and put it into `mmdet3d/core/evaluation/waymo_utils/`.(Sometimes when using bazel to build `compute_detection_metrics_main`, an error `'round' is not a member of 'std'` may appear. We just need to remove the `std::` before `round` in that file.) `pklfile_prefix` should be given in the `--eval-options` for the bin file generation. For metrics, `waymo` is the recommended official evaluation prototype. Currently, evaluating with choice `kitti` is adapted from KITTI and the results for each difficulty are not exactly the same as the definition of KITTI. Instead, most of objects are marked with difficulty 0 currently, which will be fixed in the future. The reasons of its instability include the large computation for evaluation, the lack of occlusion and truncation in the converted data, different definition of difficulty and different methods of computing average precision. + +9. Test PointPillars on waymo with 8 GPUs, generate the bin files and make a submission to the leaderboard. + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py \ + checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth \ + --cfg-options 'test_evaluator.pklfile_prefix=results/waymo-car/kitti_results' \ + 'test_evaluator.submission_prefix=results/waymo-car/kitti_results' + ``` + + **Notice**: After generating the bin file, you can simply build the binary file `create_submission` and use them to create a submission file by following the [instruction](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md/). For evaluation on the validation set with the eval server, you can also use the same way to generate a submission. 
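+
+For all of the examples above, the output-related evaluator settings can also be written directly in the dataset config instead of being passed through `--cfg-options`. Below is a minimal sketch for a KITTI-style setup; the output paths are placeholders and should be adapted to your own experiment.
+
+```python
+# A config fragment (not a standalone script): the same fields that the examples
+# above override via --cfg-options can be set here once and reused.
+test_evaluator = dict(
+    type='KittiMetric',
+    ann_file='data/kitti/kitti_infos_val.pkl',  # annotation file of the split to test
+    pklfile_prefix='./results/second_kitti_results',  # save raw results in pickle format
+    submission_prefix='./results/second_kitti_results')  # files for the official evaluation server
+```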
+
+## Train predefined models on standard datasets
+
+MMDetection3D implements distributed training and non-distributed training,
+which use `MMDistributedDataParallel` and `MMDataParallel` respectively.
+
+All outputs (log files and checkpoints) will be saved to the working directory,
+which is specified by `work_dir` in the config file.
+
+By default we evaluate the model on the validation set after each epoch. You can change the evaluation interval by modifying the `val_interval` argument in the training config.
+
+```python
+train_cfg = dict(type='EpochBasedTrainLoop', val_interval=1)  # This evaluates the model after every epoch.
+```
+
+**Important**: The default learning rate in config files is for 8 GPUs and the exact batch size is marked by the config's file name, e.g. '2xb8' means 8 samples per GPU using 2 GPUs.
+According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you need to set the learning rate proportional to the batch size if you use different GPUs or images per GPU, e.g., lr=0.01 for 4 GPUs * 2 img/gpu and lr=0.08 for 16 GPUs * 4 img/gpu. However, since most of the models in this repo use Adam rather than SGD for optimization, the rule may not hold and users need to tune the learning rate by themselves.
+
+### Train with a single GPU
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+If you want to specify the working directory in the command, you can add an argument `--work-dir ${YOUR_WORK_DIR}`.
+
+### Training with CPU (experimental)
+
+The process of training on the CPU is consistent with single GPU training. We just need to disable GPUs before the training process.
+
+```shell
+export CUDA_VISIBLE_DEVICES=-1
+```
+
+Then run the single-GPU training script above.
+
+**Note**:
+
+For now, most of the point cloud related algorithms rely on 3D CUDA ops, so they cannot be trained on CPU. Some monocular 3D object detection algorithms, such as FCOS3D and SMOKE, can be trained on CPU. We do not recommend using CPU for training because it is too slow. We support this feature so that users can conveniently debug certain models on machines without a GPU.
+
+### Train with multiple GPUs
+
+```shell
+./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments]
+```
+
+Optional arguments are:
+
+- `--cfg-options 'Key=value'`: Override some settings in the config in use.
+
+### Train with multiple machines
+
+If you run MMDetection3D on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. (This script also supports single machine training.)
+
+```shell
+[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR}
+```
+
+Here is an example of using 16 GPUs to train PointPillars on the dev partition.
+
+```shell
+GPUS=16 ./tools/slurm_train.sh dev pp_kitti_3class configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py /nfs/xxxx/pp_kitti_3class
+```
+
+You can check [slurm_train.sh](https://github.com/open-mmlab/mmdetection/blob/master/tools/slurm_train.sh) for full arguments and environment variables.
+ +If you launch with multiple machines simply connected with ethernet, you can simply run following commands: + +On the first machine: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR ./tools/dist_train.sh $CONFIG $GPUS +``` + +On the second machine: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR ./tools/dist_train.sh $CONFIG $GPUS +``` + +Usually it is slow if you do not have high speed networking like InfiniBand. + +### Launch multiple jobs on a single machine + +If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs, +you need to specify different ports (29500 by default) for each job to avoid communication conflict. + +If you use `dist_train.sh` to launch training jobs, you can set the port in commands. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +If you use launch training jobs with Slurm, there are two ways to specify the ports. + +1. Set the port through `--cfg-options`. This is more recommended since it does not change the original configs. + + ```shell + CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} --cfg-options 'env_cfg.dist_cfg.port=29500' + CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} --cfg-options 'env_cfg.dist_cfg.port=29501' + ``` + +2. Modify the config files (usually the 6th line from the bottom in config files) to set different communication ports. + + In `config1.py`, + + ```python + env_cfg = dict( + dist_cfg=dict(backend='nccl', port=29500) + ) + ``` + + In `config2.py`, + + ```python + env_cfg = dict( + dist_cfg=dict(backend='nccl', port=29501) + ) + ``` + + Then you can launch two jobs with `config1.py` and `config2.py`. + + ```shell + CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} + CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} + ``` diff --git a/docs/en/user_guides/useful_tools.md b/docs/en/user_guides/useful_tools.md new file mode 100755 index 0000000..9b4eb34 --- /dev/null +++ b/docs/en/user_guides/useful_tools.md @@ -0,0 +1,220 @@ +# Useful Tools + +We provide lots of useful tools under `tools/` directory. + +## Log Analysis + +You can plot loss/mAP curves given a training log file. Run `pip install seaborn` first to install the dependency. + +![loss curve image](../../../resources/loss_curve.png) + +```shell +python tools/analysis_tools/analyze_logs.py plot_curve [--keys ${KEYS}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] [--mode ${MODE}] [--interval ${INTERVAL}] +``` + +**Notice**: If the metric you want to plot is calculated in the eval stage, you need to add the flag `--mode eval`. If you perform evaluation with an interval of `${INTERVAL}`, you need to add the args `--interval ${INTERVAL}`. + +Examples: + +- Plot the classification loss of some run. + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls --legend loss_cls + ``` + +- Plot the classification and regression loss of some run, and save the figure to a pdf. 
+ + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls loss_bbox --out losses.pdf + ``` + +- Compare the bbox mAP of two runs in the same figure. + + ```shell + # evaluate PartA2 and second on KITTI according to Car_3D_moderate_strict + python tools/analysis_tools/analyze_logs.py plot_curve tools/logs/PartA2.log.json tools/logs/second.log.json --keys KITTI/Car_3D_moderate_strict --legend PartA2 second --mode eval --interval 1 + # evaluate PointPillars for car and 3 classes on KITTI according to Car_3D_moderate_strict + python tools/analysis_tools/analyze_logs.py plot_curve tools/logs/pp-3class.log.json tools/logs/pp.log.json --keys KITTI/Car_3D_moderate_strict --legend pp-3class pp --mode eval --interval 2 + ``` + +You can also compute the average training speed. + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time log.json [--include-outliers] +``` + +The output is expected to be like the following. + +``` +-----Analyze train time of work_dirs/some_exp/20190611_192040.log.json----- +slowest epoch 11, average time is 1.2024 +fastest epoch 1, average time is 1.1909 +time std over epochs is 0.0028 +average iter time: 1.1959 s/iter +``` + +  + +## Model Serving + +**Note**: This tool is still experimental now, only SECOND is supported to be served with [`TorchServe`](https://pytorch.org/serve/). We'll support more models in the future. + +In order to serve an `MMDetection3D` model with [`TorchServe`](https://pytorch.org/serve/), you can follow the steps: + +### 1. Convert the model from MMDetection3D to TorchServe + +```shell +python tools/deployment/mmdet3d2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +**Note**: ${MODEL_STORE} needs to be an absolute path to a folder. + +### 2. Build `mmdet3d-serve` docker image + +```shell +docker build -t mmdet3d-serve:latest docker/serve/ +``` + +### 3. Run `mmdet3d-serve` + +Check the official docs for [running TorchServe with docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +In order to run it on the GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). You can omit the `--gpus` argument in order to run on the CPU. + +Example: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=$MODEL_STORE,target=/home/model-server/model-store \ +mmdet3d-serve:latest +``` + +[Read the docs](https://github.com/pytorch/serve/blob/072f5d088cce9bb64b2a18af065886c9b01b317b/docs/rest_api.md/) about the Inference (8080), Management (8081) and Metrics (8082) APis + +### 4. Test deployment + +You can use `test_torchserver.py` to compare result of torchserver and pytorch. 
+ +```shell +python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] [--score-thr ${SCORE_THR}] +``` + +Example: + +```shell +python tools/deployment/test_torchserver.py demo/data/kitti/kitti_000008.bin configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-car_20200620_230238-393f000c.pth second +``` + +  + +## Model Complexity + +You can use `tools/analysis_tools/get_flops.py` in MMDetection3D, a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch), to compute the FLOPs and params of a given model. + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +You will get the results like this. + +```text +============================== +Input shape: (40000, 4) +Flops: 5.78 GFLOPs +Params: 953.83 k +============================== +``` + +**Note**: This tool is still experimental and we do not guarantee that the +number is absolutely correct. You may well use the result for simple +comparisons, but double check it before you adopt it in technical reports or papers. + +1. FLOPs are related to the input shape while parameters are not. The default + input shape is (1, 40000, 4). +2. Some operators are not counted into FLOPs like GN and custom operators. Refer to [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) for details. +3. We currently only support FLOPs calculation of single-stage models with single-modality input (point cloud or image). We will support two-stage and multi-modality models in the future. + +  + +## Model Conversion + +### RegNet model to MMDetection + +`tools/model_converters/regnet2mmdet.py` convert keys in pycls pretrained RegNet models to +MMDetection style. + +```shell +python tools/model_converters/regnet2mmdet.py ${SRC} ${DST} [-h] +``` + +### Detectron ResNet to Pytorch + +`tools/detectron2pytorch.py` in MMDetection could convert keys in the original detectron pretrained +ResNet models to PyTorch style. + +```shell +python tools/detectron2pytorch.py ${SRC} ${DST} ${DEPTH} [-h] +``` + +### Prepare a model for publishing + +`tools/model_converters/publish_model.py` helps users to prepare their model for publishing. + +Before you upload a model to AWS, you may want to + +1. convert model weights to CPU tensors +2. delete the optimizer states and +3. compute the hash of the checkpoint file and append the hash id to the + filename. + +```shell +python tools/model_converters/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +E.g., + +```shell +python tools/model_converters/publish_model.py work_dirs/faster_rcnn/latest.pth faster_rcnn_r50_fpn_1x_20190801.pth +``` + +The final output filename will be `faster_rcnn_r50_fpn_1x_20190801-{hash id}.pth`. + +  + +## Dataset Conversion + +`tools/dataset_converters/` contains tools for converting datasets to other formats. Most of them convert datasets to pickle based info files, like kitti, nuscenes and lyft. Waymo converter is used to reorganize waymo raw data like KITTI style. Users could refer to them for our approach to converting data format. It is also convenient to modify them to use as scripts like nuImages converter. 
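+
+Most converters write their output as plain pickle info files, so a quick way to sanity-check a conversion is to load the generated file and look at its structure. Below is a minimal sketch, assuming a KITTI-style info file created by `tools/create_data.py`; the path is a placeholder and the exact keys may vary slightly between datasets.
+
+```python
+from mmengine import load
+
+# Path is a placeholder; point it at the info file produced by your conversion.
+infos = load('./data/kitti/kitti_infos_train.pkl')
+print(infos.keys())              # typically 'metainfo' and 'data_list'
+sample = infos['data_list'][0]
+print(sample.keys())             # per-sample entries such as 'images' and 'instances'
+print(len(sample['instances']))  # number of annotated objects in the first sample
+```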
+ +To convert the nuImages dataset into COCO format, please use the command below: + +```shell +python -u tools/dataset_converters/nuimage_converter.py --data-root ${DATA_ROOT} --version ${VERSIONS} \ + --out-dir ${OUT_DIR} --nproc ${NUM_WORKERS} --extra-tag ${TAG} +``` + +- `--data-root`: the root of the dataset, defaults to `./data/nuimages`. +- `--version`: the version of the dataset, defaults to `v1.0-mini`. To get the full dataset, please use `--version v1.0-train v1.0-val v1.0-mini` +- `--out-dir`: the output directory of annotations and semantic masks, defaults to `./data/nuimages/annotations/`. +- `--nproc`: number of workers for data preparation, defaults to `4`. Larger number could reduce the preparation time as images are processed in parallel. +- `--extra-tag`: extra tag of the annotations, defaults to `nuimages`. This can be used to separate different annotations processed in different time for study. + +More details could be referred to the [doc](https://mmdetection3d.readthedocs.io/en/latest/data_preparation.html) for dataset preparation and [README](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/nuimages/README.md/) for nuImages dataset. + +  + +## Miscellaneous + +### Print the entire config + +`tools/misc/print_config.py` prints the whole config verbatim, expanding all its +imports. + +```shell +python tools/misc/print_config.py ${CONFIG} [-h] [--options ${OPTIONS [OPTIONS...]}] +``` diff --git a/docs/en/user_guides/visualization.md b/docs/en/user_guides/visualization.md new file mode 100755 index 0000000..397e5e3 --- /dev/null +++ b/docs/en/user_guides/visualization.md @@ -0,0 +1,204 @@ +# Visualization + +MMDetection3D provides a `Det3DLocalVisualizer` to visualize and store the state of the model during training and testing, as well as results, with the following features. + +1. Support the basic drawing interface for multi-modality data and multi-task. +2. Support multiple backends such as local, TensorBoard, to write training status such as `loss`, `lr`, or performance evaluation metrics and to a specified single or multiple backends. +3. Support ground truth visualization on multimodal data, and cross-modal visualization of 3D detection results. + +## Basic Drawing Interface + +Inherited from `DetLocalVisualizer`, `Det3DLocalVisualizer` provides an interface for drawing common objects on 2D images, such as drawing detection boxes, points, text, lines, circles, polygons, and binary masks. More details about 2D drawing can refer to the [visualization documentation](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/visualization.html) in MMDetection. Here we introduce the 3D drawing interface: + +### Drawing point cloud on the image + +We support drawing point cloud on the image by using `draw_points_on_image`. 
+ +```python +import mmcv +import numpy as np +from mmengine import load + +from mmdet3d.visualization import Det3DLocalVisualizer + +info_file = load('demo/data/kitti/000008.pkl') +points = np.fromfile('demo/data/kitti/000008.bin', dtype=np.float32) +points = points.reshape(-1, 4)[:, :3] +lidar2img = np.array(info_file['data_list'][0]['images']['CAM2']['lidar2img'], dtype=np.float32) + +visualizer = Det3DLocalVisualizer() +img = mmcv.imread('demo/data/kitti/000008.png') +img = mmcv.imconvert(img, 'bgr', 'rgb') +visualizer.set_image(img) +visualizer.draw_points_on_image(points, lidar2img) +visualizer.show() +``` + +![points_on_image](../../../resources/points_on_image.png) + +### Drawing 3D Boxes on Point Cloud + +We support drawing 3D boxes on point cloud by using `draw_bboxes_3d`. + +```python +import torch + +from mmdet3d.visualization import Det3DLocalVisualizer +from mmdet3d.structures import LiDARInstance3DBoxes + +points = np.fromfile('tests/data/kitti/training/velodyne/000000.bin', dtype=np.float32) +points = points.reshape(-1, 4) +visualizer = Det3DLocalVisualizer() +# set point cloud in visualizer +visualizer.set_points(points) +bboxes_3d = LiDARInstance3DBoxes(torch.tensor( + [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, + -1.5808]])), +# Draw 3D bboxes +visualizer.draw_bboxes_3d(bboxes_3d) +visualizer.show() +``` + +![mono3d](../../../resources/pcd.png) + +### Drawing Projected 3D Boxes on Image + +We support drawing projected 3D boxes on image by using `draw_proj_bboxes_3d`. + +```python +import mmcv +import numpy as np +from mmengine import load + +from mmdet3d.visualization import Det3DLocalVisualizer +from mmdet3d.structures import CameraInstance3DBoxes + +info_file = load('demo/data/kitti/000008.pkl') +cam2img = np.array(info_file['data_list'][0]['images']['CAM2']['cam2img'], dtype=np.float32) +bboxes_3d = [] +for instance in info_file['data_list'][0]['instances']: + bboxes_3d.append(instance['bbox_3d']) +gt_bboxes_3d = np.array(bboxes_3d, dtype=np.float32) +gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d) +input_meta = {'cam2img': cam2img} + +visualizer = Det3DLocalVisualizer() + +img = mmcv.imread('demo/data/kitti/000008.png') +img = mmcv.imconvert(img, 'bgr', 'rgb') +visualizer.set_image(img) +# project 3D bboxes to image +visualizer.draw_proj_bboxes_3d(gt_bboxes_3d, input_meta) +visualizer.show() +``` + +![mono3d](../../../resources/mono3d.png) + +### Drawing BEV Boxes + +We support drawing BEV boxes by using `draw_bev_bboxes`. + +```python +import numpy as np +from mmengine import load + +from mmdet3d.visualization import Det3DLocalVisualizer +from mmdet3d.structures import CameraInstance3DBoxes + +info_file = load('demo/data/kitti/000008.pkl') +bboxes_3d = [] +for instance in info_file['data_list'][0]['instances']: + bboxes_3d.append(instance['bbox_3d']) +gt_bboxes_3d = np.array(bboxes_3d, dtype=np.float32) +gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d) + +visualizer = Det3DLocalVisualizer() +# set bev image in visualizer +visualizer.set_bev_image() +# draw bev bboxes +visualizer.draw_bev_bboxes(gt_bboxes_3d, edge_colors='orange') +visualizer.show() +``` + + + +### Drawing 3D Semantic Mask + +We support draw segmentation mask via per-point colorization by using `draw_seg_mask`. 
+ +```python +import torch + +from mmdet3d.visualization import Det3DLocalVisualizer + +points = np.fromfile('tests/data/s3dis/points/Area_1_office_2.bin', dtype=np.float32) +points = points.reshape(-1, 3) +visualizer = Det3DLocalVisualizer() +mask = np.random.rand(points.shape[0], 3) +points_with_mask = np.concatenate((points, mask), axis=-1) +# Draw 3D points with mask +visualizer.draw_seg_mask(points_with_mask) +visualizer.show() +``` + +## Results + +To see the prediction results of trained models, you can run the following command: + +```bash +python tools/test.py ${CONFIG_FILE} ${CKPT_PATH} --show --show-dir ${SHOW_DIR} +``` + +After running this command, plotted results including input data and the output of networks visualized on the input will be saved in `${SHOW_DIR}`. + +After running this command, you will obtain the input data, the output of networks and ground-truth labels visualized on the input (e.g. `***_gt.png` and `***_pred.png` in multi-modality detection task and vision-based detection task) in `${SHOW_DIR}`. When `show` is enabled, [Open3D](http://www.open3d.org/) will be used to visualize the results online. If you are running test in remote server without GUI, the online visualization is not supported. You can download the `results.pkl` from the remote server, and visualize the prediction results offline in your local machine. + +To visualize the results with `Open3D` backend offline, you can run the following command: + +```bash +python tools/misc/visualize_results.py ${CONFIG_FILE} --result ${RESULTS_PATH} --show-dir ${SHOW_DIR} +``` + +![](../../../resources/open3d_visual.gif) + +This allows the inference and results generation to be done in remote server and the users can open them on their host with GUI. + +## Dataset + +We also provide scripts to visualize the dataset without inference. You can use `tools/misc/browse_dataset.py` to show loaded data and ground-truth online and save them on the disk. Currently we support single-modality 3D detection and 3D segmentation on all the datasets, multi-modality 3D detection on KITTI and SUN RGB-D, as well as monocular 3D detection on nuScenes. To browse the KITTI dataset, you can run the following command: + +```shell +python tools/misc/browse_dataset.py configs/_base_/datasets/kitti-3d-3class.py --task det --output-dir ${OUTPUT_DIR} +``` + +**Notice**: Once specifying `--output-dir`, the images of views specified by users will be saved when pressing `_ESC_` in open3d window. + +To verify the data consistency and the effect of data augmentation, you can also add `--aug` flag to visualize the data after data augmentation using the command as below: + +```shell +python tools/misc/browse_dataset.py configs/_base_/datasets/kitti-3d-3class.py --task lidar_det --aug --output-dir ${OUTPUT_DIR} +``` + +If you also want to show 2D images with 3D bounding boxes projected onto them, you need to find a config that supports multi-modality data loading, and then change the `--task` args to `multi-modality_det`. An example is showed below: + +```shell +python tools/misc/browse_dataset.py configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py --task multi-modality_det --output-dir ${OUTPUT_DIR} +``` + +![](../../../resources/browse_dataset_multi_modality.png) + +You can simply browse different datasets using different configs, e.g. 
visualizing the ScanNet dataset in 3D semantic segmentation task: + +```shell +python tools/misc/browse_dataset.py configs/_base_/datasets/scannet_seg-3d-20class.py --task lidar_seg --output-dir ${OUTPUT_DIR} --online +``` + +![](../../../resources/browse_dataset_seg.png) + +And browsing the nuScenes dataset in monocular 3D detection task: + +```shell +python tools/misc/browse_dataset.py configs/_base_/datasets/nus-mono3d.py --task mono_det --output-dir ${OUTPUT_DIR} --online +``` + +![](../../../resources/browse_dataset_mono.png) diff --git a/docs/zh_cn/Makefile b/docs/zh_cn/Makefile new file mode 100755 index 0000000..d4bb2cb --- /dev/null +++ b/docs/zh_cn/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh_cn/_static/css/readthedocs.css b/docs/zh_cn/_static/css/readthedocs.css new file mode 100755 index 0000000..cc61ab8 --- /dev/null +++ b/docs/zh_cn/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../image/mmdet3d-logo.png"); + background-size: 182.5px 40px; + height: 40px; + width: 182.5px; +} diff --git a/docs/zh_cn/advanced_guides/customize_dataset.md b/docs/zh_cn/advanced_guides/customize_dataset.md new file mode 100755 index 0000000..a481fa2 --- /dev/null +++ b/docs/zh_cn/advanced_guides/customize_dataset.md @@ -0,0 +1,500 @@ +# 自定义数据集 + +在本节中,您将了解如何使用自定义数据集训练和测试预定义模型。 + +基本步骤如下: + +1. 准备数据 +2. 准备配置文件 +3. 在自定义数据集上训练,测试和推理模型 + +## 数据准备 + +理想情况下我们可以重新组织自定义的原始数据并将标注格式转换成 KITTI 风格。但是,考虑到对于自定义数据集而言,KITTI 格式的校准文件和 3D 标注难以获得,因此我们在文档中介绍基本的数据格式。 + +### 基本数据格式 + +#### 点云格式 + +目前,我们只支持 `.bin` 格式的点云用于训练和推理。在训练自己的数据集之前,需要将其它格式的点云文件转换成 `.bin` 文件。常见的点云数据格式包括 `.pcd` 和 `.las`,我们列举了一些开源工具作为参考。 + +1. `.pcd` 转换成 `.bin`:https://github.com/DanielPollithy/pypcd + +- 您可以通过以下指令安装 `pypcd`: + + ```bash + pip install git+https://github.com/DanielPollithy/pypcd.git + ``` + +- 您可以使用以下脚本读取 `.pcd` 文件,并将其转换成 `.bin` 格式来保存: + + ```python + import numpy as np + from pypcd import pypcd + + pcd_data = pypcd.PointCloud.from_path('point_cloud_data.pcd') + points = np.zeros([pcd_data.width, 4], dtype=np.float32) + points[:, 0] = pcd_data.pc_data['x'].copy() + points[:, 1] = pcd_data.pc_data['y'].copy() + points[:, 2] = pcd_data.pc_data['z'].copy() + points[:, 3] = pcd_data.pc_data['intensity'].copy().astype(np.float32) + with open('point_cloud_data.bin', 'wb') as f: + f.write(points.tobytes()) + ``` + +2. `.las` 转换成 `.bin`:常见的转换流程为 `.las -> .pcd -> .bin`,`.las -> .pcd` 的转换可以用该[工具](https://github.com/Hitachi-Automotive-And-Industry-Lab/semantic-segmentation-editor)实现。 + +#### 标签格式 + +最基本的信息:每个场景的 3D 边界框和类别标签应该包含在 `.txt` 标注文件中。每一行代表特定场景的一个 3D 框,如下所示: + +``` +# 格式:[x, y, z, dx, dy, dz, yaw, category_name] +1.23 1.42 0.23 3.96 1.65 1.55 1.56 Car +3.51 2.15 0.42 1.05 0.87 1.86 1.23 Pedestrian +... 
+``` + +**注意**:对于自定义数据集的评估我们目前只支持 KITTI 评估方法。 + +3D 框应存储在统一的 3D 坐标系中。 + +#### 校准格式 + +对于每个激光雷达收集的点云数据,通常会进行融合并转换到特定的激光雷达坐标系。因此,校准信息文件中通常应该包含每个相机的内参矩阵和激光雷达到每个相机的外参转换矩阵,并保存在 `.txt` 校准文件中,其中 `Px` 表示 `camera_x` 的内参矩阵,`lidar2camx` 表示 `lidar` 到 `camera_x` 的外参转换矩阵。 + +``` +P0 +P1 +P2 +P3 +P4 +... +lidar2cam0 +lidar2cam1 +lidar2cam2 +lidar2cam3 +lidar2cam4 +... +``` + +### 原始数据结构 + +#### 基于激光雷达的 3D 检测 + +基于激光雷达的 3D 目标检测原始数据通常组织成如下格式,其中 `ImageSets` 包含划分文件,指明哪些文件属于训练/验证集,`points` 包含存储成 `.bin` 格式的点云数据,`labels` 包含 3D 检测的标签文件。 + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── custom +│ │ ├── ImageSets +│ │ │ ├── train.txt +│ │ │ ├── val.txt +│ │ ├── points +│ │ │ ├── 000000.bin +│ │ │ ├── 000001.bin +│ │ │ ├── ... +│ │ ├── labels +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +``` + +#### 基于视觉的 3D 检测 + +基于视觉的 3D 目标检测原始数据通常组织成如下格式,其中 `ImageSets` 包含划分文件,指明哪些文件属于训练/验证集,`images` 包含来自不同相机的图像,例如 `camera_x` 获得的图像应放在 `images/images_x` 下,`calibs` 包含校准信息文件,其中存储了每个相机的内参矩阵,`labels` 包含 3D 检测的标签文件。 + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── custom +│ │ ├── ImageSets +│ │ │ ├── train.txt +│ │ │ ├── val.txt +│ │ ├── calibs +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +│ │ ├── images +│ │ │ ├── images_0 +│ │ │ │ ├── 000000.png +│ │ │ │ ├── 000001.png +│ │ │ │ ├── ... +│ │ │ ├── images_1 +│ │ │ ├── images_2 +│ │ │ ├── ... +│ │ ├── labels +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +``` + +#### 多模态 3D 检测 + +多模态 3D 目标检测原始数据通常组织成如下格式。不同于基于视觉的 3D 目标检测,`calibs` 里的校准信息文件存储了每个相机的内参矩阵和外参矩阵。 + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── custom +│ │ ├── ImageSets +│ │ │ ├── train.txt +│ │ │ ├── val.txt +│ │ ├── calibs +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +│ │ ├── points +│ │ │ ├── 000000.bin +│ │ │ ├── 000001.bin +│ │ │ ├── ... +│ │ ├── images +│ │ │ ├── images_0 +│ │ │ │ ├── 000000.png +│ │ │ │ ├── 000001.png +│ │ │ │ ├── ... +│ │ │ ├── images_1 +│ │ │ ├── images_2 +│ │ │ ├── ... +│ │ ├── labels +│ │ │ ├── 000000.txt +│ │ │ ├── 000001.txt +│ │ │ ├── ... +``` + +#### 基于激光雷达的 3D 语义分割 + +基于激光雷达的 3D 语义分割原始数据通常组织成如下格式,其中 `ImageSets` 包含划分文件,指明哪些文件属于训练/验证集,`points` 包含点云数据,`semantic_mask` 包含逐点级标签。 + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── custom +│ │ ├── ImageSets +│ │ │ ├── train.txt +│ │ │ ├── val.txt +│ │ ├── points +│ │ │ ├── 000000.bin +│ │ │ ├── 000001.bin +│ │ │ ├── ... +│ │ ├── semantic_mask +│ │ │ ├── 000000.bin +│ │ │ ├── 000001.bin +│ │ │ ├── ... +``` + +### 数据转换 + +按照我们的说明准备好原始数据后,您可以直接使用以下命令生成训练/验证信息文件。 + +```bash +python tools/create_data.py custom --root-path ./data/custom --out-dir ./data/custom --extra-tag custom +``` + +## 自定义数据集示例 + +在完成数据准备后,我们可以在 `mmdet3d/datasets/my_dataset.py` 中创建一个新的数据集来加载数据。 + +```python +import mmengine + +from mmdet3d.registry import DATASETS +from .det3d_dataset import Det3DDataset + + +@DATASETS.register_module() +class MyDataset(Det3DDataset): + + # 替换成自定义 pkl 信息文件里的所有类别 + METAINFO = { + 'classes': ('Pedestrian', 'Cyclist', 'Car') + } + + def parse_ann_info(self, info): + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Data information of single data sample. + + Returns: + dict: Annotation information consists of the following keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): + 3D ground truth bboxes. + - gt_labels_3d (np.ndarray): Labels of ground truths. 
+ """ + ann_info = super().parse_ann_info(info) + if ann_info is None: + ann_info = dict() + # 空实例 + ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64) + + # 过滤掉没有在训练中使用的类别 + ann_info = self._remove_dontcare(ann_info) + gt_bboxes_3d = LiDARInstance3DBoxes(ann_info['gt_bboxes_3d']) + ann_info['gt_bboxes_3d'] = gt_bboxes_3d + return ann_info +``` + +数据预处理后,用户可以通过以下两个步骤来训练自定义数据集: + +1. 修改配置文件来使用自定义数据集。 +2. 验证自定义数据集标注的正确性。 + +这里我们以在自定义数据集上训练 PointPillars 为例: + +### 准备配置 + +这里我们演示一个纯点云训练的配置示例: + +#### 准备数据集配置 + +在 `configs/_base_/datasets/custom.py` 中: + +```python +# 数据集设置 +dataset_type = 'MyDataset' +data_root = 'data/custom/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] # 替换成您的数据集类别 +point_cloud_range = [0, -40, -3, 70.4, 40, 1] # 根据您的数据集进行调整 +input_modality = dict(use_lidar=True, use_camera=False) +metainfo = dict(classes=class_names) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, # 替换成您的点云数据维度 + use_dim=4), # 替换成在训练和推理时实际使用的维度 + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, # 替换成您的点云数据维度 + use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']) +] +# 为可视化阶段的数据和 GT 加载构造流水线 +eval_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']), +] +train_dataloader = dict( + batch_size=6, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='custom_infos_train.pkl', # 指定您的训练 pkl 信息 + data_prefix=dict(pts='points'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + box_type_3d='LiDAR'))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='points'), + ann_file='custom_infos_val.pkl', # 指定您的验证 pkl 信息 + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'custom_infos_val.pkl', # 指定您的验证 pkl 信息 + metric='bbox') +``` + +#### 准备模型配置 + +对于基于体素化的检测器如 SECOND,PointPillars 及 CenterPoint,点云范围(point cloud range)和体素大小(voxel size)应该根据您的数据集做调整。理论上,`voxel_size` 和 `point_cloud_range` 的设置是相关联的。设置较小的 `voxel_size` 将增加体素数以及相应的内存消耗。此外,需要注意以下问题: + +如果将 `point_cloud_range` 和 `voxel_size` 分别设置成 `[0, -40, -3, 70.4, 40, 1]` 和 `[0.05, 0.05, 0.1]`,那么中间特征图的形状应该为 `[(1-(-3))/0.1+1, (40-(-40))/0.05, (70.4-0)/0.05]=[41, 1600, 1408]`。更改 `point_cloud_range` 时,请记得依据 `voxel_size` 更改 `middle_encoder` 里中间特征图的形状。 + +关于 
`anchor_range` 的设置,一般需要根据数据集做调整。需要注意的是,`z` 值需要根据点云的位置做相应调整,具体请参考此 [issue](https://github.com/open-mmlab/mmdetection3d/issues/986)。 + +关于 `anchor_size` 的设置,通常需要计算整个训练集中目标的长、宽、高的平均值作为 `anchor_size`,以获得最好的结果。 + +在 `configs/_base_/models/pointpillars_hv_secfpn_custom.py` 中: + +```python +voxel_size = [0.16, 0.16, 4] # 根据您的数据集做调整 +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] # 根据您的数据集做调整 +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=32, + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000))), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range), + # `output_shape` 需要根据 `point_cloud_range` 和 `voxel_size` 做相应调整 + middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + assign_per_class=True, + # 根据您的数据集调整 `ranges` 和 `sizes` + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[ + [0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -1.78, 69.12, 39.68, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + # 模型训练和测试设置 + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) +``` + +#### 准备整体配置 + +我们将上述的所有配置组合在 `configs/pointpillars/pointpillars_hv_secfpn_8xb6_custom.py` 文件中: + +```python +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_custom.py', + '../_base_/datasets/custom.py', + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py' +] +``` + +#### 可视化数据集(可选) + +为了验证准备的数据和配置是否正确,我们建议在训练和验证前使用 `tools/misc/browse_dataset.py` 脚本可视化数据集和标注。更多细节请参考[可视化文档](https://mmdetection3d.readthedocs.io/zh_CN/dev-1.x/user_guides/visualization.html)。 + +## 评估 + +准备好数据和配置之后,您可以遵循我们的文档直接运行训练/测试脚本。 + +**注意**:我们为自定义数据集提供了 KITTI 风格的评估实现方法。在数据集配置中需要包含如下内容: + +```python +val_evaluator = dict( + type='KittiMetric', + 
ann_file=data_root + 'custom_infos_val.pkl', # 指定您的验证 pkl 信息 + metric='bbox') +``` diff --git a/docs/zh_cn/advanced_guides/customize_models.md b/docs/zh_cn/advanced_guides/customize_models.md new file mode 100755 index 0000000..677deca --- /dev/null +++ b/docs/zh_cn/advanced_guides/customize_models.md @@ -0,0 +1,619 @@ +# 自定义模型 + +我们通常把模型的各个组成成分分成 6 种类型: + +- 编码器(encoder):包括 voxel encoder 和 middle encoder 等进入 backbone 前所使用的基于体素的方法,如 `HardVFE` 和 `PointPillarsScatter`。 +- 骨干网络(backbone):通常采用 FCN 网络来提取特征图,如 `ResNet` 和 `SECOND`。 +- 颈部网络(neck):位于 backbones 和 heads 之间的组成模块,如 `FPN` 和 `SECONDFPN`。 +- 检测头(head):用于特定任务的组成模块,如`检测框的预测`和`掩码的预测`。 +- RoI 提取器(RoI extractor):用于从特征图中提取 RoI 特征的组成模块,如 `H3DRoIHead` 和 `PartAggregationROIHead`。 +- 损失函数(loss):heads 中用于计算损失函数的组成模块,如 `FocalLoss`、`L1Loss` 和 `GHMLoss`。 + +## 开发新的组成模块 + +### 添加新的编码器 + +接下来我们以 HardVFE 为例展示如何开发新的组成模块。 + +#### 1. 定义一个新的体素编码器(如 HardVFE:即 HV-SECOND 中使用的体素特征编码器) + +创建一个新文件 `mmdet3d/models/voxel_encoders/voxel_encoder.py`。 + +```python +import torch.nn as nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class HardVFE(nn.Module): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # 需要返回一个元组 + pass +``` + +#### 2. 导入该模块 + +您可以在 `mmdet3d/models/voxel_encoders/__init__.py` 中添加以下代码: + +```python +from .voxel_encoder import HardVFE +``` + +或者在配置文件中添加以下代码,从而避免修改源码: + +```python +custom_imports = dict( + imports=['mmdet3d.models.voxel_encoders.voxel_encoder'], + allow_failed_imports=False) +``` + +#### 3. 在配置文件中使用体素编码器 + +```python +model = dict( + ... + voxel_encoder=dict( + type='HardVFE', + arg1=xxx, + arg2=yyy), + ... +) +``` + +### 添加新的骨干网络 + +接下来我们以 [SECOND](https://www.mdpi.com/1424-8220/18/10/3337)(Sparsely Embedded Convolutional Detection)为例展示如何开发新的组成模块。 + +#### 1. 定义一个新的骨干网络(如 SECOND) + +创建一个新文件 `mmdet3d/models/backbones/second.py`。 + +```python +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class SECOND(BaseModule): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # 需要返回一个元组 + pass +``` + +#### 2. 导入该模块 + +您可以在 `mmdet3d/models/backbones/__init__.py` 中添加以下代码: + +```python +from .second import SECOND +``` + +或者在配置文件中添加以下代码,从而避免修改源码: + +```python +custom_imports = dict( + imports=['mmdet3d.models.backbones.second'], + allow_failed_imports=False) +``` + +#### 3. 在配置文件中使用骨干网络 + +```python +model = dict( + ... + backbone=dict( + type='SECOND', + arg1=xxx, + arg2=yyy), + ... +) +``` + +### 添加新的颈部网络 + +#### 1. 定义一个新的颈部网络(如 SECONDFPN) + +创建一个新文件 `mmdet3d/models/necks/second_fpn.py`。 + +```python +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class SECONDFPN(BaseModule): + + def __init__(self, + in_channels=[128, 128, 256], + out_channels=[256, 256, 256], + upsample_strides=[1, 2, 4], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + conv_cfg=dict(type='Conv2d', bias=False), + use_conv_for_no_stride=False, + init_cfg=None): + pass + + def forward(self, x): + # 具体实现忽略 + pass +``` + +#### 2. 导入该模块 + +您可以在 `mmdet3d/models/necks/__init__.py` 中添加以下代码: + +```python +from .second_fpn import SECONDFPN +``` + +或者在配置文件中添加以下代码,从而避免修改源码: + +```python +custom_imports = dict( + imports=['mmdet3d.models.necks.second_fpn'], + allow_failed_imports=False) +``` + +#### 3. 在配置文件中使用颈部网络 + +```python +model = dict( + ... 
+ neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + ... +) +``` + +### 添加新的检测头 + +接下来我们以 [PartA2 Head](https://arxiv.org/abs/1907.03670) 为例展示如何开发新的检测头。 + +**注意**:此处展示的 `PartA2 RoI Head` 将用于检测器的第二阶段。对于单阶段的检测头,请参考 `mmdet3d/models/dense_heads/` 中的例子。由于其简单高效,它们更常用于自动驾驶场景下的 3D 检测中。 + +首先,在 `mmdet3d/models/roi_heads/bbox_heads/parta2_bbox_head.py` 中添加新的 bbox head。`PartA2 RoI Head` 为目标检测实现了一个新的 bbox head。为了实现一个 bbox head,我们通常需要在新模块中实现如下两个函数。有时还需要实现其他相关函数,如 `loss` 和 `get_targets`。 + +```python +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class PartA2BboxHead(BaseModule): + """PartA2 RoI head.""" + + def __init__(self, + num_classes, + seg_in_channels, + part_in_channels, + seg_conv_channels=None, + part_conv_channels=None, + merge_conv_channels=None, + down_conv_channels=None, + shared_fc_channels=None, + cls_channels=None, + reg_channels=None, + dropout_ratio=0.1, + roi_feat_size=14, + with_corner_loss=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='none', + loss_weight=1.0), + init_cfg=None): + super(PartA2BboxHead, self).__init__(init_cfg=init_cfg) + + def forward(self, seg_feats, part_feats): + pass +``` + +其次,如果有必要的话需要实现一个新的 RoI Head。我们从 `Base3DRoIHead` 中继承得到新的 `PartAggregationROIHead`。我们可以发现 `Base3DRoIHead` 已经实现了如下函数。 + +```python +from mmdet.models.roi_heads import BaseRoIHead + +from mmdet3d.registry import MODELS, TASK_UTILS + + +class Base3DRoIHead(BaseRoIHead): + """Base class for 3d RoIHeads.""" + + def __init__(self, + bbox_head=None, + bbox_roi_extractor=None, + mask_head=None, + mask_roi_extractor=None, + train_cfg=None, + test_cfg=None, + init_cfg=None): + super(Base3DRoIHead, self).__init__( + bbox_head=bbox_head, + bbox_roi_extractor=bbox_roi_extractor, + mask_head=mask_head, + mask_roi_extractor=mask_roi_extractor, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + + def init_bbox_head(self, bbox_roi_extractor: dict, + bbox_head: dict) -> None: + """Initialize box head and box roi extractor. + + Args: + bbox_roi_extractor (dict or ConfigDict): Config of box + roi extractor. + bbox_head (dict or ConfigDict): Config of box in box head. 
+ """ + self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor) + self.bbox_head = MODELS.build(bbox_head) + + def init_assigner_sampler(self): + """Initialize assigner and sampler.""" + self.bbox_assigner = None + self.bbox_sampler = None + if self.train_cfg: + if isinstance(self.train_cfg.assigner, dict): + self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner) + elif isinstance(self.train_cfg.assigner, list): + self.bbox_assigner = [ + TASK_UTILS.build(res) for res in self.train_cfg.assigner + ] + self.bbox_sampler = TASK_UTILS.build(self.train_cfg.sampler) + + def init_mask_head(self): + """Initialize mask head, skip since ``PartAggregationROIHead`` does not + have one.""" + pass +``` + +接下来主要对 bbox_forward 的逻辑进行修改,同时其继承了来自 `Base3DRoIHead` 的其它逻辑。在 `mmdet3d/models/roi_heads/part_aggregation_roi_head.py` 中,我们实现了新的 RoI Head,如下所示: + +```python +from typing import Dict, List, Tuple + +from mmdet.models.task_modules import AssignResult, SamplingResult +from mmengine import ConfigDict +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.structures import bbox3d2roi +from mmdet3d.utils import InstanceList +from ...structures.det3d_data_sample import SampleList +from .base_3droi_head import Base3DRoIHead + + +@MODELS.register_module() +class PartAggregationROIHead(Base3DRoIHead): + """Part aggregation roi head for PartA2. + + Args: + semantic_head (ConfigDict): Config of semantic head. + num_classes (int): The number of classes. + seg_roi_extractor (ConfigDict): Config of seg_roi_extractor. + bbox_roi_extractor (ConfigDict): Config of part_roi_extractor. + bbox_head (ConfigDict): Config of bbox_head. + train_cfg (ConfigDict): Training config. + test_cfg (ConfigDict): Testing config. + """ + + def __init__(self, + semantic_head: dict, + num_classes: int = 3, + seg_roi_extractor: dict = None, + bbox_head: dict = None, + bbox_roi_extractor: dict = None, + train_cfg: dict = None, + test_cfg: dict = None, + init_cfg: dict = None) -> None: + super(PartAggregationROIHead, self).__init__( + bbox_head=bbox_head, + bbox_roi_extractor=bbox_roi_extractor, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + self.num_classes = num_classes + assert semantic_head is not None + self.init_seg_head(seg_roi_extractor, semantic_head) + + def init_seg_head(self, seg_roi_extractor: dict, + semantic_head: dict) -> None: + """Initialize semantic head and seg roi extractor. + + Args: + seg_roi_extractor (dict): Config of seg + roi extractor. + semantic_head (dict): Config of semantic head. + """ + self.semantic_head = MODELS.build(semantic_head) + self.seg_roi_extractor = MODELS.build(seg_roi_extractor) + + @property + def with_semantic(self): + """bool: whether the head has semantic branch""" + return hasattr(self, + 'semantic_head') and self.semantic_head is not None + + def predict(self, + feats_dict: Dict, + rpn_results_list: InstanceList, + batch_data_samples: SampleList, + rescale: bool = False, + **kwargs) -> InstanceList: + """Perform forward propagation of the roi head and predict detection + results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. 
+ rescale (bool): If True, return boxes in original image space. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + assert self.with_bbox, 'Bbox head must be implemented in PartA2.' + assert self.with_semantic, 'Semantic head must be implemented' \ + ' in PartA2.' + + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + voxels_dict = feats_dict.pop('voxels_dict') + # TODO: Split predict semantic and bbox + results_list = self.predict_bbox(feats_dict, voxels_dict, + batch_input_metas, rpn_results_list, + self.test_cfg) + return results_list + + def predict_bbox(self, feats_dict: Dict, voxel_dict: Dict, + batch_input_metas: List[dict], + rpn_results_list: InstanceList, + test_cfg: ConfigDict) -> InstanceList: + """Perform forward propagation of the bbox head and predict detection + results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + voxel_dict (dict): Contains information of voxels. + batch_input_metas (list[dict], Optional): Batch image meta info. + Defaults to None. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + test_cfg (Config): Test config. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + ... + + def loss(self, feats_dict: Dict, rpn_results_list: InstanceList, + batch_data_samples: SampleList, **kwargs) -> dict: + """Perform forward propagation and loss calculation of the detection + roi on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. 
+ + Returns: + dict[str, Tensor]: A dictionary of loss components + """ + assert len(rpn_results_list) == len(batch_data_samples) + losses = dict() + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + voxels_dict = feats_dict.pop('voxels_dict') + for data_sample in batch_data_samples: + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + if 'ignored_instances' in data_sample: + batch_gt_instances_ignore.append(data_sample.ignored_instances) + else: + batch_gt_instances_ignore.append(None) + if self.with_semantic: + semantic_results = self._semantic_forward_train( + feats_dict, voxels_dict, batch_gt_instances_3d) + losses.update(semantic_results.pop('loss_semantic')) + + sample_results = self._assign_and_sample(rpn_results_list, + batch_gt_instances_3d) + if self.with_bbox: + feats_dict.update(semantic_results) + bbox_results = self._bbox_forward_train(feats_dict, voxels_dict, + sample_results) + losses.update(bbox_results['loss_bbox']) + + return losses +``` + +此处我们省略了相关函数的更多细节。更多细节请参考[代码](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/models/roi_heads/part_aggregation_roi_head.py)。 + +最后,用户需要在 `mmdet3d/models/roi_heads/bbox_heads/__init__.py` 和 `mmdet3d/models/roi_heads/__init__.py` 添加模块,从而能被相应的注册器找到并加载。 + +此外,用户也可以在配置文件中添加以下代码以达到相同的目的。 + +```python +custom_imports=dict( + imports=['mmdet3d.models.roi_heads.part_aggregation_roi_head', 'mmdet3d.models.roi_heads.bbox_heads.parta2_bbox_head'], + allow_failed_imports=False) +``` + +`PartAggregationROIHead` 的配置文件如下所示: + +```python +model = dict( + ... + roi_head=dict( + type='PartAggregationROIHead', + num_classes=3, + semantic_head=dict( + type='PointwiseSemanticHead', + in_channels=16, + extra_width=0.2, + seg_score_thr=0.3, + num_classes=3, + loss_seg=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_part=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0)), + seg_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='max')), + bbox_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='avg')), + bbox_head=dict( + type='PartA2BboxHead', + num_classes=3, + seg_in_channels=16, + part_in_channels=4, + seg_conv_channels=[64, 64], + part_conv_channels=[64, 64], + merge_conv_channels=[128, 128], + down_conv_channels=[128, 256], + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + shared_fc_channels=[256, 512, 512, 512], + cls_channels=[256, 256], + reg_channels=[256, 256], + dropout_ratio=0.1, + roi_feat_size=14, + with_corner_loss=True, + loss_bbox=dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0))), + ... 
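+    # 注:省略号处为检测器的其余部分(例如 voxel_encoder、backbone、rpn_head 以及 train_cfg/test_cfg 等),
+    # 各字段需与所使用检测器的整体配置保持一致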
+) +``` + +MMDetection 2.0 开始支持配置文件之间的继承,因此用户可以关注配置文件的修改。PartA2 Head 的第二阶段主要使用了新的 `PartAggregationROIHead` 和 `PartA2BboxHead`,需要根据对应模块的 `__init__` 函数来设置参数。 + +### 添加新的损失函数 + +假设您想要为检测框的回归添加一个新的损失函数 `MyLoss`。为了添加一个新的损失函数,用户需要在 `mmdet3d/models/losses/my_loss.py` 中实现该函数。装饰器 `weighted_loss` 能够保证对每个元素的损失进行加权平均。 + +```python +import torch +import torch.nn as nn +from mmdet.models.losses.utils import weighted_loss + +from mmdet3d.registry import MODELS + + +@weighted_loss +def my_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + +@MODELS.register_module() +class MyLoss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(MyLoss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * my_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss_bbox +``` + +接下来,用户需要在 `mmdet3d/models/losses/__init__.py` 添加该函数。 + +```python +from .my_loss import MyLoss, my_loss +``` + +或者在配置文件中添加以下代码以达到相同的目的。 + +```python +custom_imports=dict( + imports=['mmdet3d.models.losses.my_loss'], + allow_failed_imports=False) +``` + +为了使用该函数,用户需要修改 `loss_xxx` 域。由于 `MyLoss` 是用于回归的,您需要修改 head 中的 `loss_bbox` 域。 + +```python +loss_bbox=dict(type='MyLoss', loss_weight=1.0) +``` diff --git a/docs/zh_cn/advanced_guides/customize_runtime.md b/docs/zh_cn/advanced_guides/customize_runtime.md new file mode 100755 index 0000000..9fea0d1 --- /dev/null +++ b/docs/zh_cn/advanced_guides/customize_runtime.md @@ -0,0 +1,382 @@ +# 自定义运行时配置 + +## 自定义优化器设置 + +优化器相关的配置是由 `optim_wrapper` 管理的,其通常有三个字段:`optimizer`,`paramwise_cfg`,`clip_grad`。更多细节请参考 [OptimWrapper](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/optim_wrapper.html)。如下所示,使用 `AdamW` 作为`优化器`,骨干网络的学习率降低 10 倍,并添加了梯度裁剪。 + +```python +optim_wrapper = dict( + type='OptimWrapper', + # 优化器 + optimizer=dict( + type='AdamW', + lr=0.0001, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + + # 参数级学习率及权重衰减系数设置 + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + }, + norm_decay_mult=0.0), + + # 梯度裁剪 + clip_grad=dict(max_norm=0.01, norm_type=2)) +``` + +### 自定义 PyTorch 支持的优化器 + +我们已经支持使用所有 PyTorch 实现的优化器,且唯一需要修改的地方就是改变配置文件中的 `optim_wrapper` 字段中的 `optimizer` 字段。例如,如果您想使用 `Adam`(注意这样可能会使性能大幅下降),您可以这样修改: + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='Adam', lr=0.0003, weight_decay=0.0001)) +``` + +为了修改模型的学习率,用户只需要修改 `optimizer` 中的 `lr` 字段。用户可以根据 PyTorch 的 [API 文档](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim)直接设置参数。 + +### 自定义并实现优化器 + +#### 1. 定义新的优化器 + +一个自定义优化器可以按照如下过程定义: + +假设您想要添加一个叫 `MyOptimizer` 的,拥有参数 `a`,`b` 和 `c` 的优化器,您需要创建一个叫做 `mmdet3d/engine/optimizers` 的目录。接下来,应该在目录下某个文件中实现新的优化器,比如 `mmdet3d/engine/optimizers/my_optimizer.py`: + +```python +from torch.optim import Optimizer + +from mmdet3d.registry import OPTIMIZERS + + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c): + pass +``` + +#### 2. 
将优化器添加到注册器 + +为了找到上述定义的优化器模块,该模块首先需要被引入主命名空间。有两种实现方法: + +- 修改 `mmdet3d/engine/optimizers/__init__.py` 导入该模块。 + + 新定义的模块应该在 `mmdet3d/engine/optimizers/__init__.py` 中被导入,从而被找到并且被添加到注册器中: + + ```python + from .my_optimizer import MyOptimizer + ``` + +- 在配置中使用 `custom_imports` 来人工导入新优化器。 + + ```python + custom_imports = dict(imports=['mmdet3d.engine.optimizers.my_optimizer'], allow_failed_imports=False) + ``` + + 模块 `mmdet3d.engine.optimizers.my_optimizer` 会在程序开始被导入,且 `MyOptimizer` 类在那时会自动被注册。注意到应该只有包含 `MyOptimizer` 类的包被导入。`mmdet3d.engine.optimizers.my_optimizer.MyOptimizer`**不能**被直接导入。 + + 事实上,用户可以在这种导入的方法中使用完全不同的文件目录结构,只要保证根目录能在 `PYTHONPATH` 中被定位。 + +#### 3. 在配置文件中指定优化器 + +接下来您可以在配置文件的 `optimizer` 字段中使用 `MyOptimizer`。在配置文件中,优化器在 `optimizer` 字段中以如下方式定义: + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)) +``` + +为了使用您自己的优化器,该字段可以改为: + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value)) +``` + +### 自定义优化器封装构造器 + +部分模型可能会拥有一些参数专属的优化器设置,比如 BatchNorm 层的权重衰减 (weight decay)。用户可以通过自定义优化器封装构造器来对那些细粒度的参数进行调优。 + +```python +from mmengine.optim import DefaultOptimWrapperConstructor + +from mmdet3d.registry import OPTIM_WRAPPER_CONSTRUCTORS +from .my_optimizer import MyOptimizer + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class MyOptimizerWrapperConstructor(DefaultOptimWrapperConstructor): + + def __init__(self, + optim_wrapper_cfg: dict, + paramwise_cfg: Optional[dict] = None): + pass + + def __call__(self, model: nn.Module) -> OptimWrapper: + + return optim_wrapper +``` + +默认优化器封装构造器在[这里](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/default_constructor.py#L18)实现。这部分代码也可以用作新优化器封装构造器的模板。 + +### 额外的设置 + +没有在优化器部分实现的技巧应该通过优化器封装构造器或者钩子来实现(比如逐参数的学习率设置)。我们列举了一些常用的可以稳定训练过程或者加速训练的设置。我们欢迎提供更多类似设置的 PR 和 issue。 + +- __使用梯度裁剪 (gradient clip) 来稳定训练过程__:一些模型依赖梯度裁剪技术来裁剪训练中的梯度,以稳定训练过程。举例如下: + + ```python + optim_wrapper = dict( + _delete_=True, clip_grad=dict(max_norm=35, norm_type=2)) + ``` + + 如果您的配置继承了一个已经设置了 `optim_wrapper` 的基础配置,那么您可能需要 `_delete_=True` 字段来覆盖基础配置中无用的设置。更多细节请参考[配置文档](https://mmdetection3d.readthedocs.io/zh_CN/dev-1.x/user_guides/config.html)。 + +- __使用动量调度器 (momentum scheduler) 来加速模型收敛__:我们支持用动量调度器来根据学习率更改模型的动量,这样可以使模型更快地收敛。动量调度器通常和学习率调度器一起使用,例如,如下配置文件在 [3D 检测](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/schedules/cyclic-20e.py)中被用于加速模型收敛。更多细节请参考 [CosineAnnealingLR](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/lr_scheduler.py#L43) 和 [CosineAnnealingMomentum](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/momentum_scheduler.py#L71) 的实现方法。 + + ```python + param_scheduler = [ + # 学习率调度器 + # 在前 8 个 epoch,学习率从 0 升到 lr * 10 + # 在接下来 12 个 epoch,学习率从 lr * 10 降到 lr * 1e-4 + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=lr * 10, + begin=0, + end=8, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=12, + eta_min=lr * 1e-4, + begin=8, + end=20, + by_epoch=True, + convert_to_iter_based=True), + # 动量调度器 + # 在前 8 个 epoch,动量从 0 升到 0.85 / 0.95 + # 在接下来 12 个 epoch,动量从 0.85 / 0.95 升到 1 + dict( + type='CosineAnnealingMomentum', + T_max=8, + eta_min=0.85 / 0.95, + begin=0, + end=8, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=12, + eta_min=1, + begin=8, + end=20, + by_epoch=True, + convert_to_iter_based=True) + ] + ``` + +## 自定义训练调度 + 
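+作为参考,下面先给出阶梯式学习率衰减调度(即下文所说的默认 "1x" 调度)在配置文件中的一种示意写法,其中训练 12 个 epoch、`milestones=[8, 11]` 等数值为假设的常见取值,请以实际使用的配置为准:
+
+```python
+param_scheduler = [
+    dict(
+        type='MultiStepLR',  # 阶梯式学习率衰减
+        begin=0,
+        end=12,
+        by_epoch=True,
+        milestones=[8, 11],  # 当训练到第 8 和第 11 个 epoch 时,将学习率乘以 gamma
+        gamma=0.1)
+]
+```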
+默认情况下我们使用阶梯式学习率衰减的 1 倍训练调度,这会调用 MMEngine 中的 [`MultiStepLR`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/lr_scheduler.py#L144)。我们在[这里](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/lr_scheduler.py)支持了很多其他学习率调度,比如`余弦退火`和`多项式衰减`调度。下面是一些样例: + +- 多项式衰减调度: + + ```python + param_scheduler = [ + dict( + type='PolyLR', + power=0.9, + eta_min=1e-4, + begin=0, + end=8, + by_epoch=True)] + ``` + +- 余弦退火调度: + + ```python + param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=lr * 1e-5, + begin=0, + end=8, + by_epoch=True)] + ``` + +## 自定义训练循环控制器 + +默认情况下,我们在 `train_cfg` 中使用 `EpochBasedTrainLoop`,并在每一个训练 epoch 完成后进行一次验证,如下所示: + +```python +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_begin=1, val_interval=1) +``` + +事实上,[`IterBasedTrainLoop`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L185) 和 [`EpochBasedTrainLoop`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/runner/loops.py#L18) 都支持动态间隔验证,如下所示: + +```python +# 在第 365001 次迭代之前,我们每隔 5000 次迭代验证一次。 +# 在第 365000 次迭代之后,我们每隔 368750 次迭代验证一次, +# 这意味着我们在训练结束后进行验证。 + +interval = 5000 +max_iters = 368750 +dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] +train_cfg = dict( + type='IterBasedTrainLoop', + max_iters=max_iters, + val_interval=interval, + dynamic_intervals=dynamic_intervals) +``` + +## 自定义钩子 + +### 自定义并实现钩子 + +#### 1. 实现一个新钩子 + +MMEngine 提供了一些实用的[钩子](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/hook.html),但有些场合用户可能需要实现一个新的钩子。在 v1.1.0rc0 之后,MMDetection3D 在训练时支持基于 MMEngine 自定义钩子。因此用户可以直接在 mmdet3d 或者基于 mmdet3d 的代码库中实现钩子并通过更改训练配置来使用钩子。这里我们给出一个在 mmdet3d 中创建并使用新钩子的例子。 + +```python +from mmengine.hooks import Hook + +from mmdet3d.registry import HOOKS + + +@HOOKS.register_module() +class MyHook(Hook): + + def __init__(self, a, b): + + def before_run(self, runner) -> None: + + def after_run(self, runner) -> None: + + def before_train(self, runner) -> None: + + def after_train(self, runner) -> None: + + def before_train_epoch(self, runner) -> None: + + def after_train_epoch(self, runner) -> None: + + def before_train_iter(self, + runner, + batch_idx: int, + data_batch: DATA_BATCH = None) -> None: + + def after_train_iter(self, + runner, + batch_idx: int, + data_batch: DATA_BATCH = None, + outputs: Optional[dict] = None) -> None: +``` + +用户需要根据钩子的功能指定钩子在每个训练阶段时的行为,具体包括如下阶段:`before_run`,`after_run`,`before_train`,`after_train`,`before_train_epoch`,`after_train_epoch`,`before_train_iter`,和 `after_train_iter`。有更多的位点可以插入钩子,详情可参考 [base hook class](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/hook.py#L9)。 + +#### 2. 注册新钩子 + +接下来我们需要导入 `MyHook`。假设新钩子位于文件 `mmdet3d/engine/hooks/my_hook.py` 中,有两种实现方法: + +- 修改 `mmdet3d/engine/hooks/__init__.py` 导入该模块。 + + 新定义的模块应该在 `mmdet3d/engine/hooks/__init__.py` 中被导入,从而被找到并且被添加到注册器中: + + ```python + from .my_hook import MyHook + ``` + +- 在配置中使用 `custom_imports` 来人为地导入新钩子。 + + ```python + custom_imports = dict(imports=['mmdet3d.engine.hooks.my_hook'], allow_failed_imports=False) + ``` + +#### 3. 
更改配置文件 + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value) +] +``` + +您可以将字段 `priority` 设置为 `'NORMAL'` 或者 `'HIGHEST'` 来设置钩子的优先级,如下所示: + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL') +] +``` + +默认情况下,注册阶段钩子的优先级为 `'NORMAL'`。 + +### 使用 MMDetection3D 中实现的钩子 + +如果 MMDetection3D 中已经实现了该钩子,您可以直接通过更改配置文件来使用该钩子。 + +#### 例子:`DisableObjectSampleHook` + +我们实现了一个名为 [DisableObjectSampleHook](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/engine/hooks/disable_object_sample_hook.py) 的自定义钩子在训练阶段达到指定 epoch 后禁用 `ObjectSample` 增强策略。 + +如果有需要的话我们可以在配置文件中设置它: + +```python +custom_hooks = [dict(type='DisableObjectSampleHook', disable_after_epoch=15)] +``` + +### 更改默认的运行时钩子 + +有一些常用的钩子通过 `default_hooks` 注册,它们是: + +- `IterTimerHook`:该钩子用来记录加载数据的时间 'data_time' 和模型训练一步的时间 'time'。 +- `LoggerHook`:该钩子用来从`执行器(Runner)`的不同组件收集日志并将其写入终端,json 文件,tensorboard 和 wandb 等。 +- `ParamSchedulerHook`:该钩子用来更新优化器中的一些超参数,例如学习率和动量。 +- `CheckpointHook`:该钩子用来定期地保存检查点。 +- `DistSamplerSeedHook`:该钩子用来设置采样和批采样的种子。 +- `Det3DVisualizationHook`:该钩子用来可视化验证和测试过程的预测结果。 + +`IterTimerHook`,`ParamSchedulerHook` 和 `DistSamplerSeedHook` 都很简单,通常不需要修改,因此此处我们将介绍如何使用 `LoggerHook`,`CheckpointHook` 和 `Det3DVisualizationHook`。 + +#### CheckpointHook + +除了定期地保存检查点,[`CheckpointHook`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/checkpoint_hook.py#L18) 提供了其它的可选项例如 `max_keep_ckpts`,`save_optimizer` 等。用户可以设置 `max_keep_ckpts` 只保存少量的检查点或者通过 `save_optimizer` 决定是否保存优化器的状态。参数的更多细节请参考[此处](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/checkpoint_hook.py#L18)。 + +```python +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=1, + max_keep_ckpts=3, + save_optimizer=True)) +``` + +#### LoggerHook + +`LoggerHook` 允许设置日志记录间隔。详细介绍可参考[文档](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/logger_hook.py#L19)。 + +```python +default_hooks = dict(logger=dict(type='LoggerHook', interval=50)) +``` + +#### Det3DVisualizationHook + +`Det3DVisualizationHook` 使用 `DetLocalVisualizer` 来可视化预测结果,`Det3DLocalVisualizer` 支持不同的后端,例如 `TensorboardVisBackend` 和 `WandbVisBackend`(更多细节请参考[文档](https://github.com/open-mmlab/mmengine/blob/main/mmengine/visualization/vis_backend.py))。用户可以添加多个后端来进行可视化,如下所示。 + +```python +default_hooks = dict( + visualization=dict(type='Det3DVisualizationHook', draw=True)) + +vis_backends = [dict(type='LocalVisBackend'), + dict(type='TensorboardVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') +``` diff --git a/docs/zh_cn/advanced_guides/datasets/index.rst b/docs/zh_cn/advanced_guides/datasets/index.rst new file mode 100755 index 0000000..1622f78 --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/index.rst @@ -0,0 +1,11 @@ +.. 
toctree:: + :maxdepth: 3 + + kitti_det.md + nuscenes_det.md + lyft_det.md + waymo_det.md + sunrgbd_det.md + scannet_det.md + scannet_sem_seg.md + s3dis_sem_seg.md diff --git a/docs/zh_cn/advanced_guides/datasets/kitti_det.md b/docs/zh_cn/advanced_guides/datasets/kitti_det.md new file mode 100755 index 0000000..7b5f6e3 --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/kitti_det.md @@ -0,0 +1,206 @@ +# 3D 目标检测 KITTI 数据集 + +本页提供了有关在 MMDetection3D 中使用 KITTI 数据集的具体教程。 + +## 数据准备 + +您可以在[这里](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)下载 KITTI 3D 检测数据并解压缩所有 zip 文件。此外,您可以在[这里](https://download.openmmlab.com/mmdetection3d/data/train_planes.zip)下载道路平面信息,其在训练过程中作为一个可选项,用来提高模型的性能。道路平面信息由 [AVOD](https://github.com/kujason/avod) 生成,更多细节请参考[此处](https://github.com/kujason/avod/issues/19)。 + +像准备数据集的一般方法一样,建议将数据集根目录链接到 `$MMDETECTION3D/data`。 + +在我们处理之前,文件夹结构应按如下方式组织: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── kitti +│ │ ├── ImageSets +│ │ ├── testing +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── velodyne +│ │ ├── training +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── label_2 +│ │ │ ├── velodyne +│ │ │ ├── planes (optional) +``` + +### 创建 KITTI 数据集 + +为了创建 KITTI 点云数据,首先需要加载原始的点云数据并生成相关的包含目标标签和标注框的数据标注文件,同时还需要为 KITTI 数据集生成每个单独的训练目标的点云数据,并将其存储在 `data/kitti/kitti_gt_database` 的 `.bin` 格式的文件中,此外,需要为训练数据或者验证数据生成 `.pkl` 格式的包含数据信息的文件。随后,通过运行下面的命令来创建最终的 KITTI 数据: + +```bash +mkdir ./data/kitti/ && mkdir ./data/kitti/ImageSets + +# 下载数据划分 +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/test.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/test.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/train.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/train.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/val.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/val.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/trainval.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/trainval.txt + +python tools/create_data.py kitti --root-path ./data/kitti --out-dir ./data/kitti --extra-tag kitti --with-plane +``` + +需要注意的是,如果您的本地磁盘没有充足的存储空间来存储转换后的数据,您可以通过改变 `--out-dir` 来指定其他任意的存储路径。如果您没有准备 `planes` 数据,您需要移除 `--with-plane` 标志。 + +处理后的文件夹结构应该如下: + +``` +kitti +├── ImageSets +│ ├── test.txt +│ ├── train.txt +│ ├── trainval.txt +│ ├── val.txt +├── testing +│ ├── calib +│ ├── image_2 +│ ├── velodyne +│ ├── velodyne_reduced +├── training +│ ├── calib +│ ├── image_2 +│ ├── label_2 +│ ├── velodyne +│ ├── velodyne_reduced +│ ├── planes (optional) +├── kitti_gt_database +│ ├── xxxxx.bin +├── kitti_infos_train.pkl +├── kitti_infos_val.pkl +├── kitti_dbinfos_train.pkl +├── kitti_infos_test.pkl +├── kitti_infos_trainval.pkl +``` + +- `kitti_gt_database/xxxxx.bin`:训练数据集中包含在 3D 标注框中的点云数据。 +- `kitti_infos_train.pkl`:训练数据集,该字典包含了两个键值:`metainfo` 和 `data_list`。`metainfo` 包含数据集的基本信息,例如 `categories`, `dataset` 和 `info_version`。`data_list` 是由字典组成的列表,每个字典(以下简称 `info`)包含了单个样本的所有详细信息。 + - info\['sample_idx'\]:该样本在整个数据集的索引。 + - info\['images'\]:多个相机捕获的图像信息。是一个字典,包含 5 个键值:`CAM0`, `CAM1`, `CAM2`, `CAM3`, `R0_rect`。 + - info\['images'\]\['R0_rect'\]:校准旋转矩阵,是一个 4x4 数组。 + - info\['images'\]\['CAM2'\]:包含 `CAM2` 相机传感器的信息。 + - info\['images'\]\['CAM2'\]\['img_path'\]:图像的文件名。 + - 
info\['images'\]\['CAM2'\]\['height'\]:图像的高。 + - info\['images'\]\['CAM2'\]\['width'\]:图像的宽。 + - info\['images'\]\['CAM2'\]\['cam2img'\]:相机到图像的变换矩阵,是一个 4x4 数组。 + - info\['images'\]\['CAM2'\]\['lidar2cam'\]:激光雷达到相机的变换矩阵,是一个 4x4 数组。 + - info\['images'\]\['CAM2'\]\['lidar2img'\]:激光雷达到图像的变换矩阵,是一个 4x4 数组。 + - info\['lidar_points'\]:是一个字典,包含了激光雷达点相关的信息。 + - info\['lidar_points'\]\['lidar_path'\]:激光雷达点云数据的文件名。 + - info\['lidar_points'\]\['num_pts_feats'\]:点的特征维度。 + - info\['lidar_points'\]\['Tr_velo_to_cam'\]:Velodyne 坐标到相机坐标的变换矩阵,是一个 4x4 数组。 + - info\['lidar_points'\]\['Tr_imu_to_velo'\]:IMU 坐标到 Velodyne 坐标的变换矩阵,是一个 4x4 数组。 + - info\['instances'\]:是一个字典组成的列表。每个字典包含单个实例的所有标注信息。对于其中的第 i 个实例,我们有: + - info\['instances'\]\[i\]\['bbox'\]:长度为 4 的列表,以 (x1, y1, x2, y2) 的顺序表示实例的 2D 边界框。 + - info\['instances'\]\[i\]\['bbox_3d'\]:长度为 7 的列表,以 (x, y, z, l, h, w, yaw) 的顺序表示实例的 3D 边界框。 + - info\['instances'\]\[i\]\['bbox_label'\]:是一个整数,表示实例的 2D 标签,-1 代表忽略。 + - info\['instances'\]\[i\]\['bbox_label_3d'\]:是一个整数,表示实例的 3D 标签,-1 代表忽略。 + - info\['instances'\]\[i\]\['depth'\]:3D 边界框投影到相关图像平面的中心点的深度。 + - info\['instances'\]\[i\]\['num_lidar_pts'\]:3D 边界框内的激光雷达点数。 + - info\['instances'\]\[i\]\['center_2d'\]:3D 边界框投影的 2D 中心。 + - info\['instances'\]\[i\]\['difficulty'\]:KITTI 官方定义的困难度,包括简单、适中、困难。 + - info\['instances'\]\[i\]\['truncated'\]:从 0(非截断)到 1(截断)的浮点数,其中截断指的是离开检测图像边界的检测目标。 + - info\['instances'\]\[i\]\['occluded'\]:整数 (0,1,2,3) 表示目标的遮挡状态:0 = 完全可见,1 = 部分遮挡,2 = 大面积遮挡,3 = 未知。 + - info\['instances'\]\[i\]\['group_ids'\]:用于多部分的物体。 + - info\['plane'\](可选):地平面信息。 + +更多细节请参考 [kitti_converter.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/kitti_converter.py) 和 [update_infos_to_v2.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/update_infos_to_v2.py)。 + +## 训练流程 + +下面展示了一个使用 KITTI 数据集进行 3D 目标检测的典型流程: + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, # x, y, z, intensity + use_dim=4), + dict( + type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +``` + +- 数据增强: + - `ObjectNoise`:对场景中的每个真实标注框目标添加噪音。 + - `RandomFlip3D`:对输入点云数据进行随机地水平翻转或者垂直翻转。 + - `GlobalRotScaleTrans`:对输入点云数据进行旋转。 + +## 评估 + +使用 8 个 GPU 以及 KITTI 指标评估的 PointPillars 的示例如下: + +```shell +bash tools/dist_test.sh configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py work_dirs/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class/latest.pth 8 +``` + +## 度量指标 + +KITTI 官方使用全类平均精度(mAP)和平均方向相似度(AOS)来评估 3D 目标检测的性能,更多细节请参考[官方网站](http://www.cvlibs.net/datasets/kitti/eval_3dobject.php)和[论文](http://www.cvlibs.net/publications/Geiger2012CVPR.pdf)。 + +MMDetection3D 采用相同的方法在 KITTI 数据集上进行评估,下面展示了一个评估结果的例子: + +``` +Car AP@0.70, 0.70, 0.70: +bbox AP:97.9252, 89.6183, 88.1564 +bev AP:90.4196, 87.9491, 85.1700 +3d AP:88.3891, 77.1624, 74.4654 +aos AP:97.70, 89.11, 87.38 +Car AP@0.70, 0.50, 0.50: +bbox 
AP:97.9252, 89.6183, 88.1564 +bev AP:98.3509, 90.2042, 89.6102 +3d AP:98.2800, 90.1480, 89.4736 +aos AP:97.70, 89.11, 87.38 +``` + +## 测试和提交 + +使用 8 个 GPU 在 KITTI 上测试 PointPillars 并生成对排行榜的提交的示例如下: + +- 首先,你需要在你的配置文件中修改 `test_dataloader` 和 `test_evaluator` 字典,如下所示: + + ```python + data_root = 'data/kitti/' + test_dataloader = dict( + dataset=dict( + ann_file='kitti_infos_test.pkl', + load_eval_anns=False, + data_prefix=dict(pts='testing/velodyne_reduced'))) + test_evaluator = dict( + ann_file=data_root + 'kitti_infos_test.pkl', + format_only=True, + pklfile_prefix='results/kitti-3class/kitti_results', + submission_prefix='results/kitti-3class/kitti_results') + ``` + +- 接下来,你可以运行如下测试脚本。 + + ```shell + ./tools/dist_test.sh configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py work_dirs/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class/latest.pth 8 + ``` + +在生成 `results/kitti-3class/kitti_results/xxxxx.txt` 后,您可以提交这些文件到 KITTI 官方网站进行基准测试,更多细节请参考 [KITTI 官方网站](http://www.cvlibs.net/datasets/kitti/index.php)。 diff --git a/docs/zh_cn/advanced_guides/datasets/lyft_det.md b/docs/zh_cn/advanced_guides/datasets/lyft_det.md new file mode 100755 index 0000000..e6a8230 --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/lyft_det.md @@ -0,0 +1,195 @@ +# 3D 目标检测 Lyft 数据集 + +本页提供了有关在 MMDetection3D 中使用 Lyft 数据集的具体教程。 + +## 准备之前 + +您可以在[这里](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data)下载 Lyft 3D 检测数据并解压缩所有 zip 文件。 + +像准备数据集的一般方法一样,建议将数据集根目录链接到 `$MMDETECTION3D/data`。 + +在进行处理之前,文件夹结构应按如下方式组织: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── lyft +│ │ ├── v1.01-train +│ │ │ ├── v1.01-train (train_data) +│ │ │ ├── lidar (train_lidar) +│ │ │ ├── images (train_images) +│ │ │ ├── maps (train_maps) +│ │ ├── v1.01-test +│ │ │ ├── v1.01-test (test_data) +│ │ │ ├── lidar (test_lidar) +│ │ │ ├── images (test_images) +│ │ │ ├── maps (test_maps) +│ │ ├── train.txt +│ │ ├── val.txt +│ │ ├── test.txt +│ │ ├── sample_submission.csv +``` + +其中 `v1.01-train` 和 `v1.01-test` 包含与 nuScenes 数据集相同的元文件,`.txt` 文件包含数据划分的信息。Lyft 不提供训练集和验证集的官方划分方案,因此 MMDetection3D 对不同场景下的不同类别的目标数量进行分析,并提供了一个数据集划分方案。`sample_submission.csv` 是用于提交到 Kaggle 评估服务器的基本文件。需要注意的是,我们遵循了 Lyft 最初的文件夹命名以实现更清楚的文件组织。请将下载下来的原始文件夹按照上述组织结构重新命名。 + +## 数据准备 + +组织 Lyft 数据集的方式和组织 nuScenes 的方式相同,首先会生成几乎具有相同结构的 `.pkl` 文件,接着需要重点关注这两个数据集之间的不同点,更多关于数据集信息文件结构的说明请参考 [nuScenes 教程](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/zh_cn/advanced_guides/datasets/nuscenes_det.md)。 + +请通过运行下面的命令来生成 Lyft 的数据集信息文件: + +```bash +python tools/create_data.py lyft --root-path ./data/lyft --out-dir ./data/lyft --extra-tag lyft --version v1.01 +python tools/data_converter/lyft_data_fixer.py --version v1.01 --root-folder ./data/lyft +``` + +请注意,上面的第二行命令用于修复损坏的 lidar 数据文件,更多细节请参考此处[讨论](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000)。 + +处理后的文件夹结构应该如下: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── lyft +│ │ ├── v1.01-train +│ │ │ ├── v1.01-train (train_data) +│ │ │ ├── lidar (train_lidar) +│ │ │ ├── images (train_images) +│ │ │ ├── maps (train_maps) +│ │ ├── v1.01-test +│ │ │ ├── v1.01-test (test_data) +│ │ │ ├── lidar (test_lidar) +│ │ │ ├── images (test_images) +│ │ │ ├── maps (test_maps) +│ │ ├── train.txt +│ │ ├── val.txt +│ │ ├── test.txt +│ │ ├── sample_submission.csv +│ │ ├── lyft_infos_train.pkl +│ │ ├── lyft_infos_val.pkl +│ │ ├── lyft_infos_test.pkl +``` + +- `lyft_infos_train.pkl`:训练数据集信息,该字典包含两个关键字:`metainfo` 和 
`data_list`。`metainfo` 包含数据集的基本信息,例如 `categories`, `dataset` 和 `info_version`。`data_list` 是由字典组成的列表,每个字典(以下简称 `info`)包含了单个样本的所有详细信息。 + - info\['sample_idx'\]:样本在整个数据集的索引。 + - info\['token'\]:样本数据标记。 + - info\['timestamp'\]:样本数据时间戳。 + - info\['lidar_points'\]:是一个字典,包含了所有与激光雷达点相关的信息。 + - info\['lidar_points'\]\['lidar_path'\]:激光雷达点云数据的文件名。 + - info\['lidar_points'\]\['num_pts_feats'\]:点的特征维度。 + - info\['lidar_points'\]\['lidar2ego'\]:该激光雷达传感器到自车的变换矩阵。(4x4 列表) + - info\['lidar_points'\]\['ego2global'\]:自车到全局坐标的变换矩阵。(4x4 列表) + - info\['lidar_sweeps'\]:是一个列表,包含了扫描信息(没有标注的中间帧)。 + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['data_path'\]:第 i 次扫描的激光雷达数据的文件路径。 + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\[lidar2ego''\]:当前激光雷达传感器到自车在第 i 次扫描的变换矩阵。(4x4 列表) + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['ego2global'\]:自车在第 i 次扫描到全局坐标的变换矩阵。(4x4 列表) + - info\['lidar_sweeps'\]\[i\]\['lidar2sensor'\]:从当前帧主激光雷达到第 i 帧扫描激光雷达的变换矩阵。(4x4 列表) + - info\['lidar_sweeps'\]\[i\]\['timestamp'\]:扫描数据的时间戳。 + - info\['lidar_sweeps'\]\[i\]\['sample_data_token'\]:扫描样本数据标记。 + - info\['images'\]:是一个字典,包含与每个相机对应的六个键值:`'CAM_FRONT'`, `'CAM_FRONT_RIGHT'`, `'CAM_FRONT_LEFT'`, `'CAM_BACK'`, `'CAM_BACK_LEFT'`, `'CAM_BACK_RIGHT'`。每个字典包含了对应相机的所有数据信息。 + - info\['images'\]\['CAM_XXX'\]\['img_path'\]:图像的文件名。 + - info\['images'\]\['CAM_XXX'\]\['cam2img'\]:当 3D 点投影到图像平面时需要的内参信息相关的变换矩阵。(3x3 列表) + - info\['images'\]\['CAM_XXX'\]\['sample_data_token'\]:图像样本数据标记。 + - info\['images'\]\['CAM_XXX'\]\['timestamp'\]:图像的时间戳。 + - info\['images'\]\['CAM_XXX'\]\['cam2ego'\]:该相机传感器到自车的变换矩阵。(4x4 列表) + - info\['images'\]\['CAM_XXX'\]\['lidar2cam'\]:激光雷达传感器到该相机的变换矩阵。(4x4 列表) + - info\['instances'\]:是一个字典组成的列表。每个字典包含单个实例的所有标注信息。对于其中的第 i 个实例,我们有: + - info\['instances'\]\[i\]\['bbox_3d'\]:长度为 7 的列表,以 (x, y, z, l, w, h, yaw) 的顺序表示实例在激光雷达坐标系下的 3D 边界框。 + - info\['instances'\]\[i\]\['bbox_label_3d'\]:整数从 0 开始表示实例的标签,其中 -1 代表忽略该类别。 + - info\['instances'\]\[i\]\['bbox_3d_isvalid'\]:每个包围框是否有效。一般情况下,我们只将包含至少一个激光雷达或雷达点的 3D 框作为有效框。 + +接下来将详细介绍 Lyft 数据集和 nuScenes 数据集之间的数据集信息文件中的不同点: + +- `lyft_database/xxxxx.bin` 文件不存在:由于真实标注框的采样对实验的影响可以忽略不计,在 Lyft 数据集中不会提取该目录和相关的 `.bin` 文件。 + +- `lyft_infos_train.pkl` + + - info\['instances'\]\[i\]\['velocity'\] 不存在:Lyft 数据集中不存在速度评估信息。 + - info\['instances'\]\[i\]\['num_lidar_pts'\] 及 info\['instances'\]\[i\]\['num_radar_pts'\] 不存在。 + +这里仅介绍存储在训练数据文件的数据记录信息。这同样适用于验证集和测试集(没有实例)。 + +更多关于 `lyft_infos_xxx.pkl` 的结构信息请参考 [lyft_converter.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/lyft_converter.py)。 + +## 训练流程 + +### 基于 LiDAR 的方法 + +Lyft 上基于 LiDAR 的 3D 检测(包括多模态方法)的训练流程与 nuScenes 几乎相同,如下所示: + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +``` + +与 nuScenes 相似,在 Lyft 上进行训练的模型也需要 `LoadPointsFromMultiSweeps` 步骤来从连续帧中加载点云数据。另外,考虑到 Lyft 中所收集的激光雷达点的强度是无效的,因此将 `LoadPointsFromMultiSweeps` 中的 `use_dim` 默认值设置为 `[0, 1, 2, 
4]`,其中前三个维度表示点的坐标,最后一个维度表示时间戳的差异。 + +## 评估 + +使用 8 个 GPU 以及 Lyft 指标评估的 PointPillars 的示例如下: + +```shell +bash ./tools/dist_test.sh configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py checkpoints/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d_20210517_202818-fc6904c3.pth 8 +``` + +## 度量指标 + +Lyft 提出了一个更加严格的用以评估所预测的 3D 检测框的度量指标。判断一个预测框是否是正类的基本评判标准和 KITTI 一样,如基于 3D 交并比进行评估,然而,Lyft 采用与 COCO 相似的方式来计算平均精度 -- 计算 3D 交并比在 0.5-0.95 之间的不同阈值下的平均精度。实际上,重叠部分大于 0.7 的 3D 交并比是一项对于 3D 检测方法比较严格的标准,因此整体的性能似乎会偏低。相比于其他数据集,Lyft 上不同类别的标注不平衡是导致最终结果偏低的另一个重要原因。更多关于度量指标的定义请参考[官方网址](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/overview/evaluation)。 + +这里将采用官方方法对 Lyft 进行评估,下面展示了一个评估结果的例子: + +``` ++mAPs@0.5:0.95------+--------------+ +| class | mAP@0.5:0.95 | ++-------------------+--------------+ +| animal | 0.0 | +| bicycle | 0.099 | +| bus | 0.177 | +| car | 0.422 | +| emergency_vehicle | 0.0 | +| motorcycle | 0.049 | +| other_vehicle | 0.359 | +| pedestrian | 0.066 | +| truck | 0.176 | +| Overall | 0.15 | ++-------------------+--------------+ +``` + +## 测试和提交 + +使用 8 个 GPU 在 Lyft 上测试 PointPillars 并生成对排行榜的提交的示例如下: + +```shell +./tools/dist_test.sh configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py work_dirs/pp-lyft/latest.pth 8 --cfg-options test_evaluator.jsonfile_prefix=work_dirs/pp-lyft/results_challenge test_evaluator.csv_savepath=results/pp-lyft/results_challenge.csv +``` + +在生成 `work_dirs/pp-lyft/results_challenge.csv`,您可以将生成的文件提交到 Kaggle 评估服务器,请参考[官方网址](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles)获取更多细节。 + +同时还可以使用可视化工具将预测结果进行可视化,更多细节请参考[可视化文档](https://mmdetection3d.readthedocs.io/zh_CN/latest/useful_tools.html#visualization)。 diff --git a/docs/zh_cn/advanced_guides/datasets/nuscenes_det.md b/docs/zh_cn/advanced_guides/datasets/nuscenes_det.md new file mode 100755 index 0000000..bd4527e --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/nuscenes_det.md @@ -0,0 +1,233 @@ +# 3D 目标检测 NuScenes 数据集 + +本页提供了有关在 MMDetection3D 中使用 nuScenes 数据集的具体教程。 + +## 准备之前 + +您可以在[这里](https://www.nuscenes.org/download)下载 nuScenes 3D 检测数据并解压缩所有 zip 文件。 + +像准备数据集的一般方法一样,建议将数据集根目录链接到 `$MMDETECTION3D/data`。 + +在我们处理之前,文件夹结构应按如下方式组织。 + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── nuscenes +│ │ ├── maps +│ │ ├── samples +│ │ ├── sweeps +│ │ ├── v1.0-test +| | ├── v1.0-trainval +``` + +## 数据准备 + +我们通常需要通过特定样式来使用 `.pkl` 文件组织有用的数据信息。要为 nuScenes 准备这些文件,请运行以下命令: + +```bash +python tools/create_data.py nuscenes --root-path ./data/nuscenes --out-dir ./data/nuscenes --extra-tag nuscenes +``` + +处理后的文件夹结构应该如下。 + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── nuscenes +│ │ ├── maps +│ │ ├── samples +│ │ ├── sweeps +│ │ ├── v1.0-test +| | ├── v1.0-trainval +│ │ ├── nuscenes_database +│ │ ├── nuscenes_infos_train.pkl +│ │ ├── nuscenes_infos_trainval.pkl +│ │ ├── nuscenes_infos_val.pkl +│ │ ├── nuscenes_infos_test.pkl +│ │ ├── nuscenes_dbinfos_train.pkl +``` + +- `nuscenes_database/xxxxx.bin`:训练数据集的每个 3D 包围框中包含的点云数据。 +- `nuscenes_infos_train.pkl`:训练数据集,该字典包含了两个键值:`metainfo` 和 `data_list`。`metainfo` 包含数据集的基本信息,例如 `categories`, `dataset` 和 `info_version`。`data_list` 是由字典组成的列表,每个字典(以下简称 `info`)包含了单个样本的所有详细信息。 + - info\['sample_idx'\]:样本在整个数据集的索引。 + - info\['token'\]:样本数据标记。 + - info\['timestamp'\]:样本数据时间戳。 + - info\['lidar_points'\]:是一个字典,包含了所有与激光雷达点相关的信息。 + - info\['lidar_points'\]\['lidar_path'\]:激光雷达点云数据的文件名。 + - info\['lidar_points'\]\['num_pts_feats'\]:点的特征维度。 + - 
info\['lidar_points'\]\['lidar2ego'\]:该激光雷达传感器到自车的变换矩阵。(4x4 列表) + - info\['lidar_points'\]\['ego2global'\]:自车到全局坐标的变换矩阵。(4x4 列表) + - info\['lidar_sweeps'\]:是一个列表,包含了扫描信息(没有标注的中间帧)。 + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['data_path'\]:第 i 次扫描的激光雷达数据的文件路径。 + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\[lidar2ego''\]:当前激光雷达传感器到自车的变换矩阵。(4x4 列表) + - info\['lidar_sweeps'\]\[i\]\['lidar_points'\]\['ego2global'\]:自车到全局坐标的变换矩阵。(4x4 列表) + - info\['lidar_sweeps'\]\[i\]\['lidar2sensor'\]:从主激光雷达传感器到当前传感器(用于收集扫描数据)的变换矩阵。(4x4 列表) + - info\['lidar_sweeps'\]\[i\]\['timestamp'\]:扫描数据的时间戳。 + - info\['lidar_sweeps'\]\[i\]\['sample_data_token'\]:扫描样本数据标记。 + - info\['images'\]:是一个字典,包含与每个相机对应的六个键值:`'CAM_FRONT'`, `'CAM_FRONT_RIGHT'`, `'CAM_FRONT_LEFT'`, `'CAM_BACK'`, `'CAM_BACK_LEFT'`, `'CAM_BACK_RIGHT'`。每个字典包含了对应相机的所有数据信息。 + - info\['images'\]\['CAM_XXX'\]\['img_path'\]:图像的文件名。 + - info\['images'\]\['CAM_XXX'\]\['cam2img'\]:当 3D 点投影到图像平面时需要的内参信息相关的变换矩阵。(3x3 列表) + - info\['images'\]\['CAM_XXX'\]\['sample_data_token'\]:图像样本数据标记。 + - info\['images'\]\['CAM_XXX'\]\['timestamp'\]:图像的时间戳。 + - info\['images'\]\['CAM_XXX'\]\['cam2ego'\]:该相机传感器到自车的变换矩阵。(4x4 列表) + - info\['images'\]\['CAM_XXX'\]\['lidar2cam'\]:激光雷达传感器到该相机的变换矩阵。(4x4 列表) + - info\['instances'\]:是一个字典组成的列表。每个字典包含单个实例的所有标注信息。对于其中的第 i 个实例,我们有: + - info\['instances'\]\[i\]\['bbox_3d'\]:长度为 7 的列表,以 (x, y, z, l, w, h, yaw) 的顺序表示实例的 3D 边界框。 + - info\['instances'\]\[i\]\['bbox_label_3d'\]:整数表示实例的标签,-1 代表忽略。 + - info\['instances'\]\[i\]\['velocity'\]:3D 边界框的速度(由于不正确,没有垂直测量),大小为 (2, ) 的列表。 + - info\['instances'\]\[i\]\['num_lidar_pts'\]:每个 3D 边界框内包含的激光雷达点数。 + - info\['instances'\]\[i\]\['num_radar_pts'\]:每个 3D 边界框内包含的雷达点数。 + - info\['instances'\]\[i\]\['bbox_3d_isvalid'\]:每个包围框是否有效。一般情况下,我们只将包含至少一个激光雷达或雷达点的 3D 框作为有效框。 + - info\['cam_instances'\]:是一个字典,包含以下键值:`'CAM_FRONT'`, `'CAM_FRONT_RIGHT'`, `'CAM_FRONT_LEFT'`, `'CAM_BACK'`, `'CAM_BACK_LEFT'`, `'CAM_BACK_RIGHT'`。对于基于视觉的 3D 目标检测任务,我们将整个场景的 3D 标注划分至它们所属于的相应相机中。对于其中的第 i 个实例,我们有: + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['bbox_label'\]:实例标签。 + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['bbox_label_3d'\]:实例标签。 + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['bbox'\]:2D 边界框标注(3D 框投影的矩形框),顺序为 \[x1, y1, x2, y2\] 的列表。 + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['center_2d'\]:3D 框投影到图像上的中心点,大小为 (2, ) 的列表。 + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['depth'\]:3D 框投影中心的深度。 + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['velocity'\]:3D 边界框的速度(由于不正确,没有垂直测量),大小为 (2, ) 的列表。 + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['attr_label'\]:实例的属性标签。我们为属性分类维护了一个属性集合和映射。 + - info\['cam_instances'\]\['CAM_XXX'\]\[i\]\['bbox_3d'\]:长度为 7 的列表,以 (x, y, z, l, h, w, yaw) 的顺序表示实例的 3D 边界框。 + +注意: + +1. `instances` 和 `cam_instances` 中 `bbox_3d` 的区别。`bbox_3d` 都被转换到 MMDet3D 定义的坐标系下,`instances` 中的 `bbox_3d` 是在激光雷达坐标系下,而 `cam_instances` 是在相机坐标系下。注意它们 3D 框中表示的不同('l, w, h' 和 'l, h, w')。 + +2. 
这里我们只解释训练信息文件中记录的数据。这同样适用于验证集和测试集(测试集的 `.pkl` 文件中不包含 `instances` 以及 `cam_instances`)。 + +获取 `nuscenes_infos_xxx.pkl` 的核心函数为 [\_fill_trainval_infos](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/nuscenes_converter.py#L146)。更多细节请参考 [nuscenes_converter.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/nuscenes_converter.py)。 + +## 训练流程 + +### 基于 LiDAR 的方法 + +nuScenes 上基于 LiDAR 的 3D 检测(包括多模态方法)的典型训练流程如下。 + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +``` + +与一般情况相比,nuScenes 有一个特定的 `'LoadPointsFromMultiSweeps'` 流水线来从连续帧加载点云。这是此设置中使用的常见做法。更多细节请参考 nuScenes [原始论文](https://arxiv.org/abs/1903.11027)。`'LoadPointsFromMultiSweeps'` 中的默认 `use_dim` 是 `[0, 1, 2, 4]`,其中前 3 个维度是指点坐标,最后一个是指时间戳差异。由于在拼接来自不同帧的点时使用点云的强度信息会产生噪声,因此默认情况下不使用点云的强度信息。 + +### 基于视觉的方法 + +nuScenes 上基于图像的 3D 检测的典型训练流水线如下。 + +```python +train_pipeline = [ + dict(type='LoadImageFromFileMono3D'), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=True, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='mmdet.Resize', img_scale=(1600, 900), keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'attr_labels', 'gt_bboxes_3d', + 'gt_labels_3d', 'centers_2d', 'depths' + ]), +] +``` + +它遵循 2D 检测的一般流水线,但在一些细节上有所不同: + +- 它使用单目流水线加载图像,其中包括额外的必需信息,如相机内参矩阵。 +- 它需要加载 3D 标注。 +- 一些数据增强技术需要调整,例如`RandomFlip3D`。目前我们不支持更多的增强方法,因为如何迁移和应用其他技术仍在探索中。 + +## 评估 + +使用 8 个 GPU 以及 nuScenes 指标评估的 PointPillars 的示例如下 + +```shell +bash ./tools/dist_test.sh configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py checkpoints/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405-2fa62f3d.pth 8 +``` + +## 指标 + +NuScenes 提出了一个综合指标,即 nuScenes 检测分数(NDS),以评估不同的方法并设置基准测试。它由平均精度(mAP)、平均平移误差(ATE)、平均尺度误差(ASE)、平均方向误差(AOE)、平均速度误差(AVE)和平均属性误差(AAE)组成。更多细节请参考其[官方网站](https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Any)。 + +我们也采用这种方法对 nuScenes 进行评估。打印的评估结果示例如下: + +``` +mAP: 0.3197 +mATE: 0.7595 +mASE: 0.2700 +mAOE: 0.4918 +mAVE: 1.3307 +mAAE: 0.1724 +NDS: 0.3905 +Eval time: 170.8s + +Per-class results: +Object Class AP ATE ASE AOE AVE AAE +car 0.503 0.577 0.152 0.111 2.096 0.136 +truck 0.223 0.857 0.224 0.220 1.389 0.179 +bus 0.294 0.855 0.204 0.190 2.689 0.283 +trailer 0.081 1.094 0.243 0.553 0.742 0.167 +construction_vehicle 0.058 1.017 0.450 1.019 0.137 0.341 +pedestrian 0.392 0.687 0.284 0.694 0.876 0.158 +motorcycle 0.317 0.737 0.265 0.580 2.033 0.104 +bicycle 0.308 0.704 0.299 0.892 0.683 0.010 +traffic_cone 0.555 0.486 0.309 nan nan nan +barrier 0.466 0.581 0.269 0.169 nan nan +``` + +## 测试和提交 + +使用 8 个 GPU 在 nuScenes 上测试 PointPillars 并生成对排行榜的提交的示例如下 + +你需要在对应的配置文件中的 `test_evaluator` 里修改 
`jsonfile_prefix`。举个例子,添加 `test_evaluator = dict(type='NuScenesMetric', jsonfile_prefix='work_dirs/pp-nus/results_eval.json')` 或在测试命令后使用 `--cfg-options "test_evaluator.jsonfile_prefix=work_dirs/pp-nus/results_eval.json)`。 + +```shell +./tools/dist_test.sh configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py work_dirs/pp-nus/latest.pth 8 --cfg-options 'test_evaluator.jsonfile_prefix=work_dirs/pp-nus/results_eval' +``` + +请注意,在[这里](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/datasets/nus-3d.py#L132)测试信息应更改为测试集而不是验证集。 + +生成 `work_dirs/pp-nus/results_eval.json` 后,您可以压缩并提交给 nuScenes 基准测试。更多信息请参考 [nuScenes 官方网站](https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Any)。 + +我们还可以使用我们开发的可视化工具将预测结果可视化。更多细节请参考[可视化文档](https://mmdetection3d.readthedocs.io/zh_CN/latest/useful_tools.html#id2)。 + +## 注意 + +### `NuScenesBox` 和我们的 `CameraInstanceBoxes` 之间的转换。 + +总的来说,`NuScenesBox` 和我们的 `CameraInstanceBoxes` 的主要区别主要体现在转向角(yaw)定义上。 `NuScenesBox` 定义了一个四元数或三个欧拉角的旋转,而我们的由于实际情况只定义了一个转向角(yaw),它需要我们在预处理和后处理中手动添加一些额外的旋转,例如[这里](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L673)。 + +另外,请注意,角点和位置的定义在 `NuScenesBox` 中是分离的。例如,在单目 3D 检测中,框位置的定义在其相机坐标中(有关汽车设置,请参阅其官方[插图](https://www.nuscenes.org/nuscenes#data-collection)),即与[我们的](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/cam_box3d.py)一致。相比之下,它的角点是通过[惯例](https://github.com/nutonomy/nuscenes-devkit/blob/02e9200218977193a1058dd7234f935834378319/python-sdk/nuscenes/utils/data_classes.py#L527) 定义的,“x 向前, y 向左, z 向上”。它导致了与我们的 `CameraInstanceBoxes` 不同的维度和旋转定义理念。一个移除相似冲突的例子是 PR [#744](https://github.com/open-mmlab/mmdetection3d/pull/744)。同样的问题也存在于 LiDAR 系统中。为了解决它们,我们通常会在预处理和后处理中添加一些转换,以保证在整个训练和推理过程中框都在我们的坐标系系统里。 diff --git a/docs/zh_cn/advanced_guides/datasets/s3dis_sem_seg.md b/docs/zh_cn/advanced_guides/datasets/s3dis_sem_seg.md new file mode 100755 index 0000000..62dd2ba --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/s3dis_sem_seg.md @@ -0,0 +1,271 @@ +# 3D 语义分割 S3DIS 数据集 + +## 数据集的准备 + +对于数据集准备的整体流程,请参考 S3DIS 的[指南](https://github.com/open-mmlab/mmdetection3d/blob/master/data/s3dis/README.md/)。 + +### 提取 S3DIS 数据 + +通过从原始数据中提取 S3DIS 数据,我们将点云数据读取并保存下相关的标注信息,例如语义分割标签和实例分割标签。 + +数据提取前的目录结构应该如下所示: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── s3dis +│ │ ├── meta_data +│ │ ├── Stanford3dDataset_v1.2_Aligned_Version +│ │ │ ├── Area_1 +│ │ │ │ ├── conferenceRoom_1 +│ │ │ │ ├── office_1 +│ │ │ │ ├── ... 
+│ │ │ ├── Area_2 +│ │ │ ├── Area_3 +│ │ │ ├── Area_4 +│ │ │ ├── Area_5 +│ │ │ ├── Area_6 +│ │ ├── indoor3d_util.py +│ │ ├── collect_indoor3d_data.py +│ │ ├── README.md +``` + +在 `Stanford3dDataset_v1.2_Aligned_Version` 目录下,所有房间依据所属区域被分为 6 组。 +我们通常使用 5 个区域进行训练,然后在余下 1 个区域上进行测试 (被余下的 1 个区域通常为区域 5)。 +在每个区域的目录下包含有多个房间的文件夹,每个文件夹是一个房间的原始点云数据和相关的标注信息。 +例如,在 `Area_1/office_1` 目录下的文件如下所示: + +- `office_1.txt`:一个 txt 文件存储着原始点云数据每个点的坐标和颜色信息。 + +- `Annotations/`:这个文件夹里包含有此房间中实例物体的信息 (以 txt 文件的形式存储)。每个 txt 文件表示一个实例,例如: + + - `chair_1.txt`:存储有该房间中一把椅子的点云数据。 + + 如果我们将 `Annotations/` 下的所有 txt 文件合并起来,得到的点云就和 `office_1.txt` 中的点云是一致的。 + +你可以通过 `python collect_indoor3d_data.py` 指令进行 S3DIS 数据的提取。 +主要步骤包括: + +- 从原始 txt 文件中读取点云数据、语义分割标签和实例分割标签。 +- 将点云数据和相关标注文件存储下来。 + +这其中的核心函数 `indoor3d_util.py` 中的 `export` 函数实现如下: + +```python +def export(anno_path, out_filename): + """将原始数据集的文件转化为点云、语义分割标签和实例分割掩码文件。 + 我们将同一房间中所有实例的点进行聚合。 + + 参数列表: + anno_path (str): 标注信息的路径,例如 Area_1/office_2/Annotations/ + out_filename (str): 保存点云和标签的路径 + file_format (str): txt 或 numpy,指定保存的文件格式 + + 注意: + 点云在处理过程中被整体移动了,保存下的点最小位于原点 (即没有负数坐标值) + """ + points_list = [] + ins_idx = 1 # 实例标签从 1 开始,因此最终实例标签为 0 的点就是无标注的点 + + # `anno_path` 的一个例子:Area_1/office_1/Annotations + # 其中以 txt 文件存储有该房间中所有实例物体的点云 + for f in glob.glob(osp.join(anno_path, '*.txt')): + # get class name of this instance + one_class = osp.basename(f).split('_')[0] + if one_class not in class_names: # 某些房间有 'staris' 类物体 + one_class = 'clutter' + points = np.loadtxt(f) + labels = np.ones((points.shape[0], 1)) * class2label[one_class] + ins_labels = np.ones((points.shape[0], 1)) * ins_idx + ins_idx += 1 + points_list.append(np.concatenate([points, labels, ins_labels], 1)) + + data_label = np.concatenate(points_list, 0) # [N, 8], (pts, rgb, sem, ins) + # 将点云对齐到原点 + xyz_min = np.amin(data_label, axis=0)[0:3] + data_label[:, 0:3] -= xyz_min + + np.save(f'{out_filename}_point.npy', data_label[:, :6].astype(np.float32)) + np.save(f'{out_filename}_sem_label.npy', data_label[:, 6].astype(np.int64)) + np.save(f'{out_filename}_ins_label.npy', data_label[:, 7].astype(np.int64)) + +``` + +上述代码中,我们读取 `Annotations/` 下的所有点云实例,将其合并得到整体房屋的点云,同时生成语义/实例分割的标签。 +在提取完每个房间的数据后,点云、语义分割和实例分割的标签文件应以 `.npy` 的格式被保存下来。 + +### 创建数据集 + +```shell +python tools/create_data.py s3dis --root-path ./data/s3dis \ +--out-dir ./data/s3dis --extra-tag s3dis +``` + +上述指令首先读取以 `.npy` 格式存储的点云、语义分割和实例分割标签文件,然后进一步将它们以 `.bin` 格式保存。 +同时,每个区域 `.pkl` 格式的信息文件也会被保存下来。 + +数据预处理后的目录结构如下所示: + +``` +s3dis +├── meta_data +├── indoor3d_util.py +├── collect_indoor3d_data.py +├── README.md +├── Stanford3dDataset_v1.2_Aligned_Version +├── s3dis_data +├── points +│ ├── xxxxx.bin +├── instance_mask +│ ├── xxxxx.bin +├── semantic_mask +│ ├── xxxxx.bin +├── seg_info +│ ├── Area_1_label_weight.npy +│ ├── Area_1_resampled_scene_idxs.npy +│ ├── Area_2_label_weight.npy +│ ├── Area_2_resampled_scene_idxs.npy +│ ├── Area_3_label_weight.npy +│ ├── Area_3_resampled_scene_idxs.npy +│ ├── Area_4_label_weight.npy +│ ├── Area_4_resampled_scene_idxs.npy +│ ├── Area_5_label_weight.npy +│ ├── Area_5_resampled_scene_idxs.npy +│ ├── Area_6_label_weight.npy +│ ├── Area_6_resampled_scene_idxs.npy +├── s3dis_infos_Area_1.pkl +├── s3dis_infos_Area_2.pkl +├── s3dis_infos_Area_3.pkl +├── s3dis_infos_Area_4.pkl +├── s3dis_infos_Area_5.pkl +├── s3dis_infos_Area_6.pkl +``` + +- `points/xxxxx.bin`:提取的点云数据。 +- `instance_mask/xxxxx.bin`:每个点云的实例标签,取值范围为 \[0, ${实例个数}\],其中 0 代表未标注的点。 +- `semantic_mask/xxxxx.bin`:每个点云的语义标签,取值范围为 \[0, 12\]。 +- `s3dis_infos_Area_1.pkl`:区域 1 
的数据信息,每个房间的详细信息如下: + - info\['point_cloud'\]: {'num_features': 6, 'lidar_idx': sample_idx}. + - info\['pts_path'\]: `points/xxxxx.bin` 点云的路径。 + - info\['pts_instance_mask_path'\]: `instance_mask/xxxxx.bin` 实例标签的路径。 + - info\['pts_semantic_mask_path'\]: `semantic_mask/xxxxx.bin` 语义标签的路径。 +- `seg_info`:为支持语义分割任务所生成的信息文件。 + - `Area_1_label_weight.npy`:每一语义类别的权重系数。因为 S3DIS 中属于不同类的点的数量相差很大,一个常见的操作是在计算损失时对不同类别进行加权 (label re-weighting) 以得到更好的分割性能。 + - `Area_1_resampled_scene_idxs.npy`:每一个场景 (房间) 的重采样标签。在训练过程中,我们依据每个场景的点的数量,会对其进行不同次数的重采样,以保证训练数据均衡。 + +## 训练流程 + +S3DIS 上 3D 语义分割的一种典型数据载入流程如下所示: + +```python +class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter') +num_points = 4096 +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.0, + ignore_index=None, + use_normalized_coord=True, + enlarge_size=None, + min_unique_num=num_points // 4, + eps=0.0), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='GlobalRotScaleTrans', + rot_range=[-3.141592653589793, 3.141592653589793], # [-pi, pi] + scale_ratio_range=[0.8, 1.2], + translation_std=[0, 0, 0]), + dict( + type='RandomJitterPoints', + jitter_std=[0.01, 0.01, 0.01], + clip_range=[-0.05, 0.05]), + dict(type='RandomDropPointsColor', drop_ratio=0.2), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +``` + +- `PointSegClassMapping`:在训练过程中,只有被使用的类别的序号会被映射到类似 \[0, 13) 范围内的类别标签。其余的类别序号会被转换为 `ignore_index` 所制定的忽略标签,在本例中是 `13`。 +- `IndoorPatchPointSample`:从输入点云中裁剪一个含有固定数量点的小块 (patch)。`block_size` 指定了裁剪块的边长,在 S3DIS 上这个数值一般设置为 `1.0`。 +- `NormalizePointsColor`:将输入点的颜色信息归一化,通过将 RGB 值除以 `255` 来实现。 +- 数据增广: + - `GlobalRotScaleTrans`:对输入点云进行随机旋转和放缩变换。 + - `RandomJitterPoints`:通过对每一个点施加不同的噪声向量以实现对点云的随机扰动。 + - `RandomDropPointsColor`:以 `drop_ratio` 的概率随机将点云的颜色值全部置零。 + +## 度量指标 + +通常我们使用平均交并比 (mean Intersection over Union, mIoU) 作为 ScanNet 语义分割任务的度量指标。 +具体而言,我们先计算所有类别的 IoU,然后取平均值作为 mIoU。 +更多实现细节请参考 [seg_eval.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/seg_eval.py)。 + +正如在 `提取 S3DIS 数据` 一节中所提及的,S3DIS 通常在 5 个区域上进行训练,然后在余下的 1 个区域上进行测试。但是在其他论文中,也有不同的划分方式。 +为了便于灵活划分训练和测试的子集,我们首先定义子数据集 (sub-dataset) 来表示每一个区域,然后根据区域划分对其进行合并,以得到完整的训练集。 +以下是在区域 1、2、3、4、6 上训练并在区域 5 上测试的一个配置文件例子: + +```python +dataset_type = 'S3DISSegDataset' +data_root = './data/s3dis/' +class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter') +train_area = [1, 2, 3, 4, 6] +test_area = 5 +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area], + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=train_pipeline, + modality=input_modality, + ignore_index=len(class_names), + scene_idxs=[ + f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area + ], + test_mode=False)) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + 
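+    # 注:测试时通常以完整场景(房间)为单位依次进行评估,因此 batch_size 设为 1 且不打乱数据顺序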
sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_files=f's3dis_infos_Area_{test_area}.pkl', + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=test_pipeline, + modality=input_modality, + ignore_index=len(class_names), + scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy', + test_mode=True)) +val_dataloader = test_dataloader +``` + +可以看到,我们通过将多个相应路径构成的列表 (list) 输入 `ann_files` 和 `scene_idxs` 以实现训练测试集的划分。 +如果修改训练测试区域的划分,只需要简单修改 `train_area` 和 `test_area` 即可。 diff --git a/docs/zh_cn/advanced_guides/datasets/scannet_det.md b/docs/zh_cn/advanced_guides/datasets/scannet_det.md new file mode 100755 index 0000000..16cac3c --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/scannet_det.md @@ -0,0 +1,293 @@ +# 3D 目标检测 ScanNet 数据集 + +## 数据集准备 + +请参考 ScanNet 的[指南](https://github.com/open-mmlab/mmdetection3d/blob/master/data/scannet/README.md)以查看总体流程。 + +### 提取 ScanNet 点云数据 + +通过提取 ScanNet 数据,我们加载原始点云文件,并生成包括语义标签、实例标签和真实物体包围框在内的相关标注。 + +```shell +python batch_load_scannet_data.py +``` + +数据处理之前的文件目录结构如下: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── scannet +│ │ ├── meta_data +│ │ ├── scans +│ │ │ ├── scenexxxx_xx +│ │ ├── batch_load_scannet_data.py +│ │ ├── load_scannet_data.py +│ │ ├── scannet_utils.py +│ │ ├── README.md +``` + +在 `scans` 文件夹下总共有 1201 个训练样本文件夹和 312 个验证样本文件夹,其中存有未处理的点云数据和相关的标注。比如说,在文件夹 `scene0001_01` 下文件是这样组织的: + +- `scene0001_01_vh_clean_2.ply`:存有每个顶点坐标和颜色的网格文件。网格的顶点被直接用作未处理的点云数据。 +- `scene0001_01.aggregation.json`:包含物体 ID、分割部分 ID、标签的标注文件。 +- `scene0001_01_vh_clean_2.0.010000.segs.json`:包含分割部分 ID 和顶点的分割标注文件。 +- `scene0001_01.txt`:包括对齐矩阵等的元文件。 +- `scene0001_01_vh_clean_2.labels.ply`:包含每个顶点类别的标注文件。 + +通过运行 `python batch_load_scannet_data.py` 来提取 ScanNet 数据。主要步骤包括: + +- 从原始文件中提取出点云、实例标签、语义标签和包围框标签文件。 +- 下采样原始点云并过滤掉不合法的类别。 +- 保存处理后的点云数据和相关的标注文件。 + +`load_scannet_data.py` 中的核心函数 `export` 如下: + +```python +def export(mesh_file, + agg_file, + seg_file, + meta_file, + label_map_file, + output_file=None, + test_mode=False): + + # 标签映射文件:./data/scannet/meta_data/scannetv2-labels.combined.tsv + # 该标签映射文件中有多种标签标准,比如 'nyu40id' + label_map = scannet_utils.read_label_mapping( + label_map_file, label_from='raw_category', label_to='nyu40id') + # 加载原始点云数据,特征包括6维:XYZRGB + mesh_vertices = scannet_utils.read_mesh_vertices_rgb(mesh_file) + + # 加载场景坐标轴对齐矩阵:一个 4x4 的变换矩阵 + # 将传感器坐标系下的原始点转化到另一个坐标系下 + # 该坐标系与房屋的两边平行(也就是与坐标轴平行) + lines = open(meta_file).readlines() + # 测试集的数据没有对齐矩阵 + axis_align_matrix = np.eye(4) + for line in lines: + if 'axisAlignment' in line: + axis_align_matrix = [ + float(x) + for x in line.rstrip().strip('axisAlignment = ').split(' ') + ] + break + axis_align_matrix = np.array(axis_align_matrix).reshape((4, 4)) + + # 对网格顶点进行全局的对齐 + pts = np.ones((mesh_vertices.shape[0], 4)) + # 同种类坐标下的原始点云,每一行的数据是 [x, y, z, 1] + pts[:, 0:3] = mesh_vertices[:, 0:3] + # 将原始网格顶点转换为对齐后的顶点 + pts = np.dot(pts, axis_align_matrix.transpose()) # Nx4 + aligned_mesh_vertices = np.concatenate([pts[:, 0:3], mesh_vertices[:, 3:]], + axis=1) + + # 加载语义与实例标签 + if not test_mode: + # 每个物体都有一个语义标签,并且包含几个分割部分 + object_id_to_segs, label_to_segs = read_aggregation(agg_file) + # 很多点属于同一分割部分 + seg_to_verts, num_verts = read_segmentation(seg_file) + label_ids = np.zeros(shape=(num_verts), dtype=np.uint32) + object_id_to_label_id = {} + for label, segs in label_to_segs.items(): + label_id = label_map[label] + for seg in segs: + verts = seg_to_verts[seg] + # 每个点都有一个语义标签 + label_ids[verts] = label_id + 
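+        # 下面根据 object_id 与分割部分(segment)的对应关系,为每个点写入其所属实例的编号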
instance_ids = np.zeros( + shape=(num_verts), dtype=np.uint32) # 0:未标注的 + for object_id, segs in object_id_to_segs.items(): + for seg in segs: + verts = seg_to_verts[seg] + # object_id 从 1 开始计数,比如 1,2,3,.,,,.NUM_INSTANCES + # 每个点都属于一个物体 + instance_ids[verts] = object_id + if object_id not in object_id_to_label_id: + object_id_to_label_id[object_id] = label_ids[verts][0] + # 包围框格式为 [x, y, z, dx, dy, dz, label_id] + # [x, y, z] 是包围框的重力中心, [dx, dy, dz] 是与坐标轴平行的 + # [label_id] 是 'nyu40id' 标准下的语义标签 + # 注意:因为三维包围框是与坐标轴平行的,所以旋转角是 0 + unaligned_bboxes = extract_bbox(mesh_vertices, object_id_to_segs, + object_id_to_label_id, instance_ids) + aligned_bboxes = extract_bbox(aligned_mesh_vertices, object_id_to_segs, + object_id_to_label_id, instance_ids) + ... + + return mesh_vertices, label_ids, instance_ids, unaligned_bboxes, \ + aligned_bboxes, object_id_to_label_id, axis_align_matrix + +``` + +在从每个场景的扫描文件提取数据后,如果原始点云点数过多,可以将其下采样(比如到 50000 个点),但在三维语义分割任务中,点云不会被下采样。此外,在 `nyu40id` 标准之外的不合法语义标签或者可选的 `DONOT CARE` 类别标签应被过滤。最终,点云文件、语义标签、实例标签和真实物体的集合应被存储于 `.npy` 文件中。 + +### 提取 ScanNet RGB 色彩数据(可选的) + +通过提取 ScanNet RGB 色彩数据,对于每个场景我们加载 RGB 图像与配套 4x4 位姿矩阵、单个 4x4 相机内参矩阵的集合。请注意,这一步是可选的,除非要运行多视图物体检测,否则可以略去这步。 + +```shell +python extract_posed_images.py +``` + +1201 个训练样本,312 个验证样本和 100 个测试样本中的每一个都包含一个单独的 `.sens` 文件。比如说,对于场景 `0001_01` 我们有 `data/scannet/scans/scene0001_01/0001_01.sens`。对于这个场景所有图像和位姿数据都被提取至 `data/scannet/posed_images/scene0001_01`。具体来说,该文件夹下会有 300 个 xxxxx.jpg 格式的图像数据,300 个 xxxxx.txt 格式的相机位姿数据和一个单独的 `intrinsic.txt` 内参文件。通常来说,一个场景包含数千张图像。默认情况下,我们只会提取其中的 300 张,从而只占用少于 100 Gb 的空间。要想提取更多图像,请使用 `--max-images-per-scene` 参数。 + +### 创建数据集 + +```shell +python tools/create_data.py scannet --root-path ./data/scannet \ +--out-dir ./data/scannet --extra-tag scannet +``` + +上述提取的点云文件,语义类别标注文件,和物体实例标注文件被进一步以 `.bin` 格式保存。与此同时 `.pkl` 格式的文件被生成并用于训练和验证。获取数据信息的核心函数 `process_single_scene` 如下: + +```python +def process_single_scene(sample_idx): + + # 分别以 .bin 格式保存点云文件,语义类别标注文件和物体实例标注文件 + # 获取 info['pts_path'],info['pts_instance_mask_path'] 和 info['pts_semantic_mask_path'] + ... 
+ + # 获取标注 + if has_label: + annotations = {} + # 包围框的形状为 [k, 6 + class] + aligned_box_label = self.get_aligned_box_label(sample_idx) + unaligned_box_label = self.get_unaligned_box_label(sample_idx) + annotations['gt_num'] = aligned_box_label.shape[0] + if annotations['gt_num'] != 0: + aligned_box = aligned_box_label[:, :-1] # k, 6 + unaligned_box = unaligned_box_label[:, :-1] + classes = aligned_box_label[:, -1] # k + annotations['name'] = np.array([ + self.label2cat[self.cat_ids2class[classes[i]]] + for i in range(annotations['gt_num']) + ]) + # 为了向后兼容,默认的参数名赋予了与坐标轴平行的包围框 + # 我们同时保存了对应的与坐标轴不平行的包围框的信息 + annotations['location'] = aligned_box[:, :3] + annotations['dimensions'] = aligned_box[:, 3:6] + annotations['gt_boxes_upright_depth'] = aligned_box + annotations['unaligned_location'] = unaligned_box[:, :3] + annotations['unaligned_dimensions'] = unaligned_box[:, 3:6] + annotations[ + 'unaligned_gt_boxes_upright_depth'] = unaligned_box + annotations['index'] = np.arange( + annotations['gt_num'], dtype=np.int32) + annotations['class'] = np.array([ + self.cat_ids2class[classes[i]] + for i in range(annotations['gt_num']) + ]) + axis_align_matrix = self.get_axis_align_matrix(sample_idx) + annotations['axis_align_matrix'] = axis_align_matrix # 4x4 + info['annos'] = annotations + return info +``` + +如上数据处理后,文件目录结构应如下: + +``` +scannet +├── meta_data +├── batch_load_scannet_data.py +├── load_scannet_data.py +├── scannet_utils.py +├── README.md +├── scans +├── scans_test +├── scannet_instance_data +├── points +│ ├── xxxxx.bin +├── instance_mask +│ ├── xxxxx.bin +├── semantic_mask +│ ├── xxxxx.bin +├── seg_info +│ ├── train_label_weight.npy +│ ├── train_resampled_scene_idxs.npy +│ ├── val_label_weight.npy +│ ├── val_resampled_scene_idxs.npy +├── posed_images +│ ├── scenexxxx_xx +│ │ ├── xxxxxx.txt +│ │ ├── xxxxxx.jpg +│ │ ├── intrinsic.txt +├── scannet_infos_train.pkl +├── scannet_infos_val.pkl +├── scannet_infos_test.pkl +``` + +- `points/xxxxx.bin`:下采样后,未与坐标轴平行(即没有对齐)的点云。因为 ScanNet 3D 检测任务将与坐标轴平行的点云作为输入,而 ScanNet 3D 语义分割任务将对齐前的点云作为输入,我们选择存储对齐前的点云和它们的对齐矩阵。请注意:在 3D 检测的预处理流程 [`GlobalAlignment`](https://github.com/open-mmlab/mmdetection3d/blob/9f0b01caf6aefed861ef4c3eb197c09362d26b32/mmdet3d/datasets/pipelines/transforms_3d.py#L423) 后,点云就都是与坐标轴平行的了。 +- `instance_mask/xxxxx.bin`:每个点的实例标签,值的范围为:\[0, NUM_INSTANCES\],其中 0 表示没有标注。 +- `semantic_mask/xxxxx.bin`:每个点的语义标签,值的范围为:\[1, 40\], 也就是 `nyu40id` 的标准。请注意:在训练流程 `PointSegClassMapping` 中,`nyu40id` 的 ID 会被映射到训练 ID。 +- `posed_images/scenexxxx_xx`:`.jpg` 图像的集合,还包含 `.txt` 格式的 4x4 相机姿态和单个 `.txt` 格式的相机内参矩阵文件。 +- `scannet_infos_train.pkl`:训练集的数据信息,每个场景的具体信息如下: + - info\['lidar_points'\]:字典包含与激光雷达点相关的信息。 + - info\['lidar_points'\]\['lidar_path'\]:激光雷达点云数据的文件名。 + - info\['lidar_points'\]\['num_pts_feats'\]:点的特征维度。 + - info\['lidar_points'\]\['axis_align_matrix'\]:用于对齐坐标轴的变换矩阵。 + - info\['pts_semantic_mask_path'\]:语义分割标注的文件名。 + - info\['pts_instance_mask_path'\]:实例分割标注的文件名。 + - info\['instances'\]:字典组成的列表,每个字典包含一个实例的所有标注信息。对于其中的第 i 个实例,我们有: + - info\['instances'\]\[i\]\['bbox_3d'\]:长度为 6 的列表,以 (x, y, z, l, w, h) 的顺序表示深度坐标系下与坐标轴平行的 3D 边界框。 + - info\[instances\]\[i\]\['bbox_label_3d'\]:3D 边界框的标签。 +- `scannet_infos_val.pkl`:验证集上的数据信息,与 `scannet_infos_train.pkl` 格式完全一致。 +- `scannet_infos_test.pkl`:测试集上的数据信息,与 `scannet_infos_train.pkl` 格式几乎完全一致,除了缺少标注。 + +## 训练流程 + +ScanNet 上 3D 物体检测的典型流程如下: + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict( + 
type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=40000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0], + shift_height=True), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +``` + +- `GlobalAlignment`:输入的点云在施加了坐标轴平行的矩阵后应被转换为与坐标轴平行的形式。 +- `PointSegClassMapping`:训练中,只有合法的类别 ID 才会被映射到类别标签,比如 \[0, 18)。 +- 数据增强: + - `PointSample`:下采样输入点云。 + - `RandomFlip3D`:随机左右或前后翻转点云。 + - `GlobalRotScaleTrans`:旋转输入点云,对于 ScanNet 角度通常落入 \[-5, 5\](度)的范围;并放缩输入点云,对于 ScanNet 比例通常为 1.0(即不做缩放);最后平移输入点云,对于 ScanNet 通常位移量为 0(即不做位移)。 + +## 评估指标 + +通常使用 mAP(全类平均精度)来评估 ScanNet 的检测任务的性能,比如 `mAP@0.25` 和 `mAP@0.5`。具体来说,评估时调用一个通用的计算 3D 物体检测多个类别的精度和召回率的函数。更多细节请参考 [indoor_eval](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/indoor_eval.py)。 + +与在章节`提取 ScanNet 数据`中介绍的那样,所有真实物体的三维包围框是与坐标轴平行的,也就是说旋转角为 0。因此,预测包围框的网络接受的包围框旋转角监督也是 0,且在后处理阶段我们使用适用于与坐标轴平行的包围框的非极大值抑制(NMS),该过程不会考虑包围框的旋转。 diff --git a/docs/zh_cn/advanced_guides/datasets/scannet_sem_seg.md b/docs/zh_cn/advanced_guides/datasets/scannet_sem_seg.md new file mode 100755 index 0000000..f1f63b1 --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/scannet_sem_seg.md @@ -0,0 +1,133 @@ +# 3D 语义分割 ScanNet 数据集 + +## 数据集的准备 + +ScanNet 3D 语义分割数据集的准备和 3D 检测任务的准备很相似,请查看[此文档](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/zh_cn/advanced_guides/datasets/scannet_det.md#%E6%95%B0%E6%8D%AE%E9%9B%86%E5%87%86%E5%A4%87)以获取更多细节。 +以下我们只罗列部分 3D 语义分割特有的处理步骤和数据信息。 + +### 提取 ScanNet 数据 + +因为 ScanNet 测试集对 3D 语义分割任务提供在线评测的基准,我们也需要下载其测试集并置于 `scannet` 目录下。 +数据预处理前的文件目录结构应如下所示: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── scannet +│ │ ├── meta_data +│ │ ├── scans +│ │ │ ├── scenexxxx_xx +│ │ ├── scans_test +│ │ │ ├── scenexxxx_xx +│ │ ├── batch_load_scannet_data.py +│ │ ├── load_scannet_data.py +│ │ ├── scannet_utils.py +│ │ ├── README.md +``` + +在 `scans_test` 目录下有 100 个测试集 scan 的文件夹,每个文件夹仅包含了原始点云数据和基础的数据元文件。 +例如,在 `scene0707_00` 这一目录下的文件如下所示: + +- `scene0707_00_vh_clean_2.ply`:原始网格文件,存储有每个顶点的坐标和颜色。网格的顶点会被选取作为处理后点云中的点。 +- `scene0707_00.txt`:数据的元文件,包含数据采集传感器的参数等信息。注意,与 `scans` 目录下的数据 (训练集和验证集) 不同,测试集 scan 并没有提供用于和坐标轴对齐的变换矩阵 (`axis-aligned matrix`)。 + +用户可以通过运行 `python batch_load_scannet_data.py` 指令来从原始文件中提取 ScanNet 数据。 +注意,测试集只会保存下点云数据,因为没有提供标注信息。 + +### 创建数据集 + +与 3D 检测任务类似,我们通过运行 `python tools/create_data.py scannet --root-path ./data/scannet --out-dir ./data/scannet --extra-tag scannet` 指令即可创建 ScanNet 数据集。 +预处理后的数据目录结构如下所示: + +``` +scannet +├── scannet_utils.py +├── batch_load_scannet_data.py +├── load_scannet_data.py +├── scannet_utils.py +├── README.md +├── scans +├── scans_test +├── scannet_instance_data +├── points +│ ├── xxxxx.bin +├── instance_mask +│ ├── xxxxx.bin +├── semantic_mask +│ ├── xxxxx.bin +├── seg_info +│ ├── train_label_weight.npy +│ ├── train_resampled_scene_idxs.npy +│ ├── val_label_weight.npy +│ ├── val_resampled_scene_idxs.npy +├── scannet_infos_train.pkl +├── scannet_infos_val.pkl +├── scannet_infos_test.pkl +``` + +- `seg_info`:为支持语义分割任务所生成的信息文件。 + - `train_label_weight.npy`:每一语义类别的权重系数。因为 ScanNet 中属于不同类的点的数量相差很大,一个常见的操作是在计算损失时对不同类别进行加权 (label 
re-weighting) 以得到更好的分割性能。 + - `train_resampled_scene_idxs.npy`:每一个场景 (房间) 的重采样标签。在训练过程中,我们依据每个场景的点的数量,会对其进行不同次数的重采样,以保证训练数据均衡。 + +## 训练流程 + +ScanNet 上 3D 语义分割的一种典型数据载入流程如下所示: + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.5, + ignore_index=len(class_names), + use_normalized_coord=False, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) +] +``` + +- `PointSegClassMapping`:在训练过程中,只有被使用的类别的序号会被映射到类似 \[0, 20) 范围内的类别标签。其余的类别序号会被转换为 `ignore_index` 所制定的忽略标签,在本例中是 `20`。 +- `IndoorPatchPointSample`:从输入点云中裁剪一个含有固定数量点的小块 (patch)。`block_size` 指定了裁剪块的边长,在 ScanNet 上这个数值一般设置为 `1.5`。 +- `NormalizePointsColor`:将输入点的颜色信息归一化,通过将 RGB 值除以 `255` 来实现。 + +## 度量指标 + +通常我们使用平均交并比 (mean Intersection over Union, mIoU) 作为 ScanNet 语义分割任务的度量指标。 +具体而言,我们先计算所有类别的 IoU,然后取平均值作为 mIoU。 +更多实现细节请参考 [seg_eval.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/seg_eval.py)。 + +## 在测试集上测试并提交结果 + +默认情况下,MMDet3D 的代码是在训练集上进行模型训练,然后在验证集上进行模型测试。 +如果你也想在在线基准上测试模型的性能,请在测试命令中加上 `--format-only` 的标记,同时也要将 ScanNet 数据集[配置文件](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/datasets/scannet_seg-3d-20class.py#L126)中的 `ann_file=data_root + 'scannet_infos_val.pkl'` 改成 `ann_file=data_root + 'scannet_infos_test.pkl'`。 +请记得通过 `txt_prefix` 来指定想要保存测试结果的文件夹名称。 + +以 PointNet++ (SSG) 在 ScanNet 上的测试为例,你可以运行以下命令来完成测试结果的保存: + +``` +./tools/dist_test.sh configs/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class.py \ + work_dirs/pointnet2_ssg/latest.pth --format-only \ + --eval-options txt_prefix=work_dirs/pointnet2_ssg/test_submission +``` + +在保存测试结果后,你可以将该文件夹压缩,然后提交到 [ScanNet 在线测试服务器](http://kaldir.vc.in.tum.de/scannet_benchmark/semantic_label_3d)上进行验证。 diff --git a/docs/zh_cn/advanced_guides/datasets/sunrgbd_det.md b/docs/zh_cn/advanced_guides/datasets/sunrgbd_det.md new file mode 100755 index 0000000..26ea769 --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/sunrgbd_det.md @@ -0,0 +1,250 @@ +# 3D 目标检测 SUN RGB-D 数据集 + +## 数据集的准备 + +对于数据集准备的整体流程,请参考 SUN RGB-D 的[指南](https://github.com/open-mmlab/mmdetection3d/blob/master/data/sunrgbd/README.md)。 + +### 下载 SUN RGB-D 数据与工具包 + +在[这里](http://rgbd.cs.princeton.edu/data/)下载 SUN RGB-D 的数据。接下来,将 `SUNRGBD.zip`、`SUNRGBDMeta2DBB_v2.mat`、`SUNRGBDMeta3DBB_v2.mat` 和 `SUNRGBDtoolbox.zip` 移动到 `OFFICIAL_SUNRGBD` 文件夹,并解压文件。 + +下载完成后,数据处理之前的文件目录结构如下: + +``` +sunrgbd +├── README.md +├── matlab +│ ├── extract_rgbd_data_v1.m +│ ├── extract_rgbd_data_v2.m +│ ├── extract_split.m +├── OFFICIAL_SUNRGBD +│ ├── SUNRGBD +│ ├── SUNRGBDMeta2DBB_v2.mat +│ ├── SUNRGBDMeta3DBB_v2.mat +│ ├── SUNRGBDtoolbox +``` + +### 从原始数据中提取 3D 检测所需数据与标注 + +通过运行如下指令从原始文件中提取出 SUN RGB-D 的标注(这需要您的机器中安装了 MATLAB): + +```bash +matlab -nosplash -nodesktop -r 'extract_split;quit;' +matlab -nosplash -nodesktop -r 'extract_rgbd_data_v2;quit;' +matlab -nosplash -nodesktop -r 'extract_rgbd_data_v1;quit;' +``` + +主要的步骤包括: + +- 提取出训练集和验证集的索引文件; +- 从原始数据中提取出 3D 检测所需要的数据; +- 从原始的标注数据中提取并组织检测任务使用的标注数据。 + +用于从深度图中提取点云数据的 `extract_rgbd_data_v2.m` 的主要部分如下: + +```matlab +data = SUNRGBDMeta(imageId); 
+data.depthpath(1:16) = ''; +data.depthpath = strcat('../OFFICIAL_SUNRGBD', data.depthpath); +data.rgbpath(1:16) = ''; +data.rgbpath = strcat('../OFFICIAL_SUNRGBD', data.rgbpath); + +% 从深度图获取点云 +[rgb,points3d,depthInpaint,imsize]=read3dPoints(data); +rgb(isnan(points3d(:,1)),:) = []; +points3d(isnan(points3d(:,1)),:) = []; +points3d_rgb = [points3d, rgb]; + +% MAT 文件比 TXT 文件小三倍。在 Python 中我们可以使用 +% scipy.io.loadmat('xxx.mat')['points3d_rgb'] 来加载数据 +mat_filename = strcat(num2str(imageId,'%06d'), '.mat'); +txt_filename = strcat(num2str(imageId,'%06d'), '.txt'); +% 保存点云数据 +parsave(strcat(depth_folder, mat_filename), points3d_rgb); +``` + +用于提取并组织检测任务标注的 `extract_rgbd_data_v1.m` 的主要部分如下: + +```matlab +% 输出 2D 和 3D 包围框 +data2d = data; +fid = fopen(strcat(det_label_folder, txt_filename), 'w'); +for j = 1:length(data.groundtruth3DBB) + centroid = data.groundtruth3DBB(j).centroid; % 3D 包围框中心 + classname = data.groundtruth3DBB(j).classname; % 类名 + orientation = data.groundtruth3DBB(j).orientation; % 3D 包围框方向 + coeffs = abs(data.groundtruth3DBB(j).coeffs); % 3D 包围框大小 + box2d = data2d.groundtruth2DBB(j).gtBb2D; % 2D 包围框 + fprintf(fid, '%s %d %d %d %d %f %f %f %f %f %f %f %f\n', classname, box2d(1), box2d(2), box2d(3), box2d(4), centroid(1), centroid(2), centroid(3), coeffs(1), coeffs(2), coeffs(3), orientation(1), orientation(2)); +end +fclose(fid); +``` + +上面的两个脚本调用了 SUN RGB-D 提供的[工具包](https://rgbd.cs.princeton.edu/data/SUNRGBDtoolbox.zip)中的一些函数,如 `read3dPoints`。 + +使用上述脚本提取数据后,文件目录结构应如下: + +``` +sunrgbd +├── README.md +├── matlab +│ ├── extract_rgbd_data_v1.m +│ ├── extract_rgbd_data_v2.m +│ ├── extract_split.m +├── OFFICIAL_SUNRGBD +│ ├── SUNRGBD +│ ├── SUNRGBDMeta2DBB_v2.mat +│ ├── SUNRGBDMeta3DBB_v2.mat +│ ├── SUNRGBDtoolbox +├── sunrgbd_trainval +│ ├── calib +│ ├── depth +│ ├── image +│ ├── label +│ ├── label_v1 +│ ├── seg_label +│ ├── train_data_idx.txt +│ ├── val_data_idx.txt +``` + +在如下每个文件夹下,都有总计 5285 个训练集样本和 5050 个验证集样本: + +- `calib`:`.txt` 后缀的相机标定文件。 +- `depth`:`.mat` 后缀的点云文件,包含 xyz 坐标和 rgb 色彩值。 +- `image`:`.jpg` 后缀的二维图像文件。 +- `label`:`.txt` 后缀的用于检测任务的标注数据(版本二)。 +- `label_v1`:`.txt` 后缀的用于检测任务的标注数据(版本一)。 +- `seg_label`:`.txt` 后缀的用于分割任务的标注数据。 + +目前,我们使用版本一的数据用于训练与测试,因此版本二的标注并未使用。 + +### 创建数据集 + +请运行如下指令创建数据集: + +```shell +python tools/create_data.py sunrgbd --root-path ./data/sunrgbd \ +--out-dir ./data/sunrgbd --extra-tag sunrgbd +``` + +或者,如果使用 slurm,可以使用如下指令替代: + +``` +bash tools/create_data.sh sunrgbd +``` + +之前提到的点云数据就会被处理并以 `.bin` 格式重新存储。与此同时,`.pkl` 文件也被生成,用于存储数据标注和元信息。 + +如上数据处理后,文件目录结构应如下: + +``` +sunrgbd +├── README.md +├── matlab +│ ├── ... +├── OFFICIAL_SUNRGBD +│ ├── ... +├── sunrgbd_trainval +│ ├── ... 
+├── points +├── sunrgbd_infos_train.pkl +├── sunrgbd_infos_val.pkl +``` + +- `points/xxxxxx.bin`:降采样后的点云数据。 +- `sunrgbd_infos_train.pkl`:训练集数据信息(标注与元信息),每个场景所含数据信息具体如下: + - info\['lidar_points'\]:字典包含了与激光雷达点相关的信息。 + - info\['lidar_points'\]\['num_pts_feats'\]:点的特征维度。 + - info\['lidar_points'\]\['lidar_path'\]:激光雷达点云数据的文件名。 + - info\['images'\]:字典包含了与图像数据相关的信息。 + - info\['images'\]\['CAM0'\]\['img_path'\]:图像的文件名。 + - info\['images'\]\['CAM0'\]\['depth2img'\]:深度到图像的变换矩阵,形状为 (4, 4)。 + - info\['images'\]\['CAM0'\]\['height'\]:图像的高。 + - info\['images'\]\['CAM0'\]\['width'\]:图像的宽。 + - info\['instances'\]:由字典组成的列表,包含了该帧的所有标注信息。每个字典与单个实例的标注相关。对于其中的第 i 个实例,我们有: + - info\['instances'\]\[i\]\['bbox_3d'\]:长度为 7 的列表,表示深度坐标系下的 3D 边界框。 + - info\['instances'\]\[i\]\['bbox'\]:长度为 4 的列表,以 (x1, y1, x2, y2) 的顺序表示实例的 2D 边界框。 + - info\['instances'\]\[i\]\['bbox_label_3d'\]:整数表示实例的 3D 标签,-1 表示忽略该类别。 + - info\['instances'\]\[i\]\['bbox_label'\]:整数表示实例的 2D 标签,-1 表示忽略该类别。 +- `sunrgbd_infos_val.pkl`:验证集上的数据信息,与 `sunrgbd_infos_train.pkl` 格式完全一致。 + +## 训练流程 + +SUN RGB-D 上纯点云 3D 物体检测的典型流程如下: + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='LoadAnnotations3D'), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + shift_height=True), + dict(type='PointSample', num_points=20000), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +``` + +点云上的数据增强 + +- `RandomFlip3D`:随机左右或前后翻转输入点云。 +- `GlobalRotScaleTrans`:旋转输入点云,对于 SUN RGB-D 角度通常落入 \[-30, 30\](度)的范围;并放缩输入点云,对于 SUN RGB-D 比例通常落入 \[0.85, 1.15\] 的范围;最后平移输入点云,对于 SUN RGB-D 通常位移量为 0(即不做位移)。 +- `PointSample`:降采样输入点云。 + +SUN RGB-D 上多模态(点云和图像)3D 物体检测的典型流程如下: + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations3D'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', scale=(1333, 600), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.0), + dict(type='Pad', size_divisor=32), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + shift_height=True), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d','img', 'gt_bboxes', 'gt_bboxes_labels']) +] +``` + +图像上的数据增强 + +- `Resize`:改变输入图像的大小,`keep_ratio=True` 意味着图像的比例不改变。 +- `RandomFlip`:随机地翻折图像。 + +图像增强的实现取自 [MMDetection](https://github.com/open-mmlab/mmdetection/tree/dev-3.x/mmdet/datasets/transforms)。 + +## 度量指标 + +与 ScanNet 一样,通常使用 mAP(全类平均精度)来评估 SUN RGB-D 的检测任务的性能,比如 `mAP@0.25` 和 `mAP@0.5`。具体来说,评估时调用一个通用的计算 3D 物体检测多个类别的精度和召回率的函数。更多细节请参考 [`indoor_eval.py`](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/indoor_eval.py)。 + +因为 SUN RGB-D 包含有图像数据,所以图像上的物体检测也是可行的。举个例子,在 ImVoteNet 中,我们首先训练了一个图像检测器,并且也使用 mAP 指标,如 `mAP@0.5`,来评估其表现。我们使用 [MMDetection](https://github.com/open-mmlab/mmdetection) 库中的 `eval_map` 函数来计算 mAP。 diff --git a/docs/zh_cn/advanced_guides/datasets/waymo_det.md b/docs/zh_cn/advanced_guides/datasets/waymo_det.md new file mode 100755 index 0000000..577ec15 --- /dev/null +++ b/docs/zh_cn/advanced_guides/datasets/waymo_det.md @@ -0,0 +1,168 @@ +# Waymo 数据集 + 
+本文档页包含了关于 MMDetection3D 中 Waymo 数据集用法的教程。 + +## 数据集准备 + +在准备 Waymo 数据集之前,如果您之前只安装了 `requirements/build.txt` 和 `requirements/runtime.txt` 中的依赖,请通过运行如下指令额外安装 Waymo 数据集所依赖的官方包: + +``` +# tf 2.1.0. +pip install waymo-open-dataset-tf-2-1-0==1.2.0 +# tf 2.0.0 +# pip install waymo-open-dataset-tf-2-0-0==1.2.0 +# tf 1.15.0 +# pip install waymo-open-dataset-tf-1-15-0==1.2.0 +``` + +或者 + +``` +pip install -r requirements/optional.txt +``` + +和准备数据集的通用方法一致,我们推荐将数据集根目录软链接至 `$MMDETECTION3D/data`。 +由于原始 Waymo 数据的格式基于 `tfrecord`,我们需要将原始数据进行预处理,以便于训练和测试时使用。我们的方法是将它们转换为 KITTI 格式。 + +处理之前,文件目录结构组织如下: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── waymo +│ │ ├── waymo_format +│ │ │ ├── training +│ │ │ ├── validation +│ │ │ ├── testing +│ │ │ ├── gt.bin +│ │ ├── kitti_format +│ │ │ ├── ImageSets + +``` + +您可以在[这里](https://waymo.com/open/download/)下载 1.2 版本的 Waymo 公开数据集,并在[这里](https://drive.google.com/drive/folders/18BVuF_RYJF0NjZpt8SnfzANiakoRMf0o?usp=sharing)下载其训练/验证/测试集拆分文件。接下来,请将 `tfrecord` 文件放入 `data/waymo/waymo_format/` 下的对应文件夹,并将 txt 格式的数据集拆分文件放入 `data/waymo/kitti_format/ImageSets`。在[这里](https://console.cloud.google.com/storage/browser/waymo_open_dataset_v_1_2_0/validation/ground_truth_objects)下载验证集使用的 bin 格式真实标注 (Ground Truth) 文件并放入 `data/waymo/waymo_format/`。小窍门:您可以使用 `gsutil` 来在命令行下载大规模数据集。您可以将该[工具](https://github.com/RalphMao/Waymo-Dataset-Tool) 作为一个例子来查看更多细节。之后,通过运行如下指令准备 Waymo 数据: + +```bash +python tools/create_data.py waymo --root-path ./data/waymo/ --out-dir ./data/waymo/ --workers 128 --extra-tag waymo +``` + +请注意,如果您的本地磁盘没有足够空间保存转换后的数据,您可以将 `--out-dir` 改为其他目录;只要在创建文件夹、准备数据并转换格式后,将数据文件链接到 `data/waymo/kitti_format` 即可。 + +在数据转换后,文件目录结构应组织如下: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── waymo +│ │ ├── waymo_format +│ │ │ ├── training +│ │ │ ├── validation +│ │ │ ├── testing +│ │ │ ├── gt.bin +│ │ ├── kitti_format +│ │ │ ├── ImageSets +│ │ │ ├── training +│ │ │ │ ├── calib +│ │ │ │ ├── image_0 +│ │ │ │ ├── image_1 +│ │ │ │ ├── image_2 +│ │ │ │ ├── image_3 +│ │ │ │ ├── image_4 +│ │ │ │ ├── label_0 +│ │ │ │ ├── label_1 +│ │ │ │ ├── label_2 +│ │ │ │ ├── label_3 +│ │ │ │ ├── label_4 +│ │ │ │ ├── label_all +│ │ │ │ ├── pose +│ │ │ │ ├── velodyne +│ │ │ ├── testing +│ │ │ │ ├── (the same as training) +│ │ │ ├── waymo_gt_database +│ │ │ ├── waymo_infos_trainval.pkl +│ │ │ ├── waymo_infos_train.pkl +│ │ │ ├── waymo_infos_val.pkl +│ │ │ ├── waymo_infos_test.pkl +│ │ │ ├── waymo_dbinfos_train.pkl + +``` + +因为 Waymo 数据的来源包含数个相机,这里我们将每个相机对应的图像和标签文件分别存储,并将相机位姿 (pose) 文件存储下来以供后续处理连续多帧的点云。我们使用 `{a}{bbb}{ccc}` 的名称编码方式为每帧数据命名,其中 `a` 是不同数据拆分的前缀(`0` 指代训练集,`1` 指代验证集,`2` 指代测试集),`bbb` 是分割部分 (segment) 的索引,而 `ccc` 是帧索引。您可以轻而易举地按照如上命名规则定位到所需的帧。我们将训练和验证所需数据按 KITTI 的方式集合在一起,然后将训练集/验证集/测试集的索引存储在 `ImageSet` 下的文件中。 + +## 训练 + +考虑到原始数据集中的数据有很多相似的帧,我们基本上可以主要使用一个子集来训练我们的模型。在我们初步的基线中,我们在每五帧图片中加载一帧。得益于我们的超参数设置和数据增强方案,我们得到了比 Waymo [原论文](https://arxiv.org/pdf/1912.04838.pdf)中更好的性能。请移步 `configs/pointpillars/` 下的 README.md 以查看更多配置和性能相关的细节。我们会尽快发布一个更完整的 Waymo 基准榜单 (benchmark)。 + +## 评估 + +为了在 Waymo 数据集上进行检测性能评估,请按照[此处指示](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md)构建用于计算评估指标的二进制文件 `compute_detection_metrics_main`,并将它置于 `mmdet3d/core/evaluation/waymo_utils/` 下。您基本上可以按照下方命令安装 `bazel`,然后构建二进制文件: + +```shell +# download the code and enter the base directory +git clone https://github.com/waymo-research/waymo-open-dataset.git waymo-od +# git clone https://github.com/Abyssaledge/waymo-open-dataset-master waymo-od # if you want to use faster 
multi-thread version. +cd waymo-od +git checkout remotes/origin/master + +# use the Bazel build system +sudo apt-get install --assume-yes pkg-config zip g++ zlib1g-dev unzip python3 python3-pip +BAZEL_VERSION=3.1.0 +wget https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh +sudo bash bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh +sudo apt install build-essential + +# configure .bazelrc +./configure.sh +# delete previous bazel outputs and reset internal caches +bazel clean + +bazel build waymo_open_dataset/metrics/tools/compute_detection_metrics_main +cp bazel-bin/waymo_open_dataset/metrics/tools/compute_detection_metrics_main ../mmdetection3d/mmdet3d/evaluation/functional/waymo_utils/ +``` + +接下来,您就可以在 Waymo 上评估您的模型了。如下示例是使用 8 个图形处理器 (GPU) 在 Waymo 上用 Waymo 评价指标评估 PointPillars 模型的情景: + +```shell +./tools/dist_test.sh configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth +``` + +如果需要生成 bin 文件,需要在配置文件的 `test_evaluator` 中指定 `pklfile_prefix`,因此你可以在命令后添加 `--cfg-options "test_evaluator.pklfile_prefix=xxxx"`。 + +**注意**: + +1. 有时用 `bazel` 构建 `compute_detection_metrics_main` 的过程中会出现如下错误:`'round' 不是 'std' 的成员` (`'round' is not a member of 'std'`)。我们只需要移除该文件中,`round` 前的 `std::`。 + +2. 考虑到 Waymo 上评估一次耗时不短,我们建议只在模型训练结束时进行评估。 + +3. 为了在 CUDA 9 环境使用 TensorFlow,我们建议通过编译 TensorFlow 源码的方式使用。除了官方教程之外,您还可以参考该[链接](https://github.com/SmileTM/Tensorflow2.X-GPU-CUDA9.0)以寻找可能合适的预编译包以及编译源码的实用攻略。 + +## 测试并提交到官方服务器 + +如下是一个使用 8 个图形处理器在 Waymo 上测试 PointPillars,生成 bin 文件并提交结果到官方榜单的例子: + +如果你想生成 bin 文件并提交到服务器中,在运行测试指令前你需要在配置文件的 `test_evaluator` 中指定 `submission_prefix`。 + +在生成 bin 文件后,您可以简单地构建二进制文件 `create_submission`,并按照[指示](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md/)创建一个提交文件。下面是一些示例: + +```shell +cd ../waymo-od/ +bazel build waymo_open_dataset/metrics/tools/create_submission +cp bazel-bin/waymo_open_dataset/metrics/tools/create_submission ../mmdetection3d/mmdet3d/core/evaluation/waymo_utils/ +vim waymo_open_dataset/metrics/tools/submission.txtpb # set the metadata information +cp waymo_open_dataset/metrics/tools/submission.txtpb ../mmdetection3d/mmdet3d/evaluation/functional/waymo_utils/ + +cd ../mmdetection3d +# suppose the result bin is in `results/waymo-car/submission` +mmdet3d/core/evaluation/waymo_utils/create_submission --input_filenames='results/waymo-car/kitti_results_test.bin' --output_filename='results/waymo-car/submission/model' --submission_filename='mmdet3d/evaluation/functional/waymo_utils/submission.txtpb' + +tar cvf results/waymo-car/submission/my_model.tar results/waymo-car/submission/my_model/ +gzip results/waymo-car/submission/my_model.tar +``` + +如果想用官方评估服务器评估您在验证集上的结果,您可以使用同样的方法生成提交文件,只需确保您在运行如上指令前更改 `submission.txtpb` 中的字段值即可。 diff --git a/docs/zh_cn/advanced_guides/index.rst b/docs/zh_cn/advanced_guides/index.rst new file mode 100755 index 0000000..1faa4c5 --- /dev/null +++ b/docs/zh_cn/advanced_guides/index.rst @@ -0,0 +1,27 @@ +Datasets +************** + +.. toctree:: + :maxdepth: 1 + + datasets/index.rst + + +Supported Tasks +************** + +.. toctree:: + :maxdepth: 2 + + supported_tasks/index.rst + + +Customization +************** + +.. 
toctree:: + :maxdepth: 2 + + customize_dataset.md + customize_models.md + customize_runtime.md diff --git a/docs/zh_cn/advanced_guides/supported_tasks/index.rst b/docs/zh_cn/advanced_guides/supported_tasks/index.rst new file mode 100755 index 0000000..c818f22 --- /dev/null +++ b/docs/zh_cn/advanced_guides/supported_tasks/index.rst @@ -0,0 +1,6 @@ +.. toctree:: + :maxdepth: 3 + + lidar_det3d.md + vision_det3d.md + lidar_sem_seg3d.md diff --git a/docs/zh_cn/advanced_guides/supported_tasks/lidar_det3d.md b/docs/zh_cn/advanced_guides/supported_tasks/lidar_det3d.md new file mode 100755 index 0000000..1277e31 --- /dev/null +++ b/docs/zh_cn/advanced_guides/supported_tasks/lidar_det3d.md @@ -0,0 +1,83 @@ +# 基于激光雷达的 3D 检测 + +基于激光雷达的 3D 检测是 MMDetection3D 支持的最基础的任务之一。它期望给定的模型以激光雷达采集的任意数量的特征点为输入,并为每一个感兴趣的目标预测 3D 框及类别标签。接下来,我们以 KITTI 数据集上的 PointPillars 为例,展示如何准备数据,在标准的 3D 检测基准上训练并测试模型,以及可视化并验证结果。 + +## 数据准备 + +首先,我们需要下载原始数据并按照[数据准备文档](https://mmdetection3d.readthedocs.io/zh_CN/dev-1.x/user_guides/dataset_prepare.html)中提供的标准方式重新组织数据。 + +由于不同数据集的原始数据有不同的组织方式,我们通常需要用 `.pkl` 文件收集有用的数据信息。因此,在准备好所有的原始数据之后,我们需要运行 `create_data.py` 中提供的脚本来为不同的数据集生成数据集信息。例如,对于 KITTI,我们需要运行如下命令: + +```shell +python tools/create_data.py kitti --root-path ./data/kitti --out-dir ./data/kitti --extra-tag kitti +``` + +随后,相关的目录结构将如下所示: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── kitti +│ │ ├── ImageSets +│ │ ├── testing +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── velodyne +│ │ │ ├── velodyne_reduced +│ │ ├── training +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── label_2 +│ │ │ ├── velodyne +│ │ │ ├── velodyne_reduced +│ │ ├── kitti_gt_database +│ │ ├── kitti_infos_train.pkl +│ │ ├── kitti_infos_trainval.pkl +│ │ ├── kitti_infos_val.pkl +│ │ ├── kitti_infos_test.pkl +│ │ ├── kitti_dbinfos_train.pkl +``` + +## 训练 + +接着,我们将使用提供的配置文件训练 PointPillars。当您使用不同的 GPU 设置进行训练时,您可以按照这个[教程](https://mmdetection3d.readthedocs.io/en/dev-1.x/user_guides/train_test.html)的示例。假设我们在一台具有 8 块 GPU 的机器上使用分布式训练: + +```shell +./tools/dist_train.sh configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py 8 +``` + +注意,配置文件名中的 `8xb6` 是指训练用了 8 块 GPU,每块 GPU 上有 6 个数据样本。如果您的自定义设置不同于此,那么有时候您需要相应地调整学习率。基本规则可以参考[此处](https://arxiv.org/abs/1706.02677)。我们已经支持了使用 `--auto-scale-lr` 来自动缩放学习率。 + +## 定量评估 + +在训练期间,模型权重文件将会根据配置文件中的 `train_cfg = dict(val_interval=xxx)` 设置被周期性地评估。我们支持不同数据集的官方评估方案。对于 KITTI,将对 3 个类别使用交并比(IoU)阈值分别为 0.5/0.7 的平均精度(mAP)来评估模型。评估结果将会被打印到终端中,如下所示: + +``` +Car AP@0.70, 0.70, 0.70: +bbox AP:98.1839, 89.7606, 88.7837 +bev AP:89.6905, 87.4570, 85.4865 +3d AP:87.4561, 76.7569, 74.1302 +aos AP:97.70, 88.73, 87.34 +Car AP@0.70, 0.50, 0.50: +bbox AP:98.1839, 89.7606, 88.7837 +bev AP:98.4400, 90.1218, 89.6270 +3d AP:98.3329, 90.0209, 89.4035 +aos AP:97.70, 88.73, 87.34 +``` + +此外,在训练完成后您也可以评估特定的模型权重文件。您可以简单地执行以下脚本: + +```shell +./tools/dist_test.sh configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py work_dirs/pointpillars/latest.pth 8 +``` + +## 测试与提交 + +如果您只想在在线基准上进行推理或测试模型性能,您需要在相应的评估器中指定 `submission_prefix`,例如,在配置文件中添加 `test_evaluator = dict(type='KittiMetric', ann_file=data_root + 'kitti_infos_test.pkl', format_only=True, pklfile_prefix='results/kitti-3class/kitti_results', submission_prefix='results/kitti-3class/kitti_results')`,然后可以得到结果文件。请确保配置文件中的[测试信息](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/datasets/kitti-3d-3class.py#L117)的 `data_prefix` 和 `ann_file` 由验证集相应地改为测试集。在生成结果后,您可以压缩文件夹并上传至 KITTI 评估服务器上。 + +## 定性评估 + +MMDetection3D 
还提供了通用的可视化工具,以便于我们可以对训练好的模型预测的检测结果有一个直观的感受。您也可以在评估阶段通过设置 `--show` 来在线可视化检测结果,或者使用 `tools/misc/visualize_results.py` 来离线地进行可视化。此外,我们还提供了脚本 `tools/misc/browse_dataset.py` 用于可视化数据集而不做推理。更多的细节请参考[可视化文档](https://mmdetection3d.readthedocs.io/zh_CN/dev-1.x/user_guides/visualization.html)。 diff --git a/docs/zh_cn/advanced_guides/supported_tasks/lidar_sem_seg3d.md b/docs/zh_cn/advanced_guides/supported_tasks/lidar_sem_seg3d.md new file mode 100755 index 0000000..0e24b55 --- /dev/null +++ b/docs/zh_cn/advanced_guides/supported_tasks/lidar_sem_seg3d.md @@ -0,0 +1,78 @@ +# 基于激光雷达的 3D 语义分割 + +基于激光雷达的 3D 语义分割是 MMDetection3D 支持的最基础的任务之一。它期望给定的模型以激光雷达采集的任意数量的特征点为输入,并预测每个输入点的语义标签。接下来,我们以 ScanNet 数据集上的 PointNet++ (SSG) 为例,展示如何准备数据,在标准的 3D 语义分割基准上训练并测试模型,以及可视化并验证结果。 + +## 数据准备 + +首先,我们需要从 ScanNet [官方网站](http://kaldir.vc.in.tum.de/scannet_benchmark/documentation)下载原始数据。 + +由于不同数据集的原始数据有不同的组织方式,我们通常需要用 pkl 或 json 文件收集有用的数据信息。 + +因此,在准备好所有的原始数据之后,我们可以遵循 [ScanNet 文档](https://github.com/open-mmlab/mmdetection3d/blob/master/data/scannet/README.md/)中的说明生成数据信息。 + +随后,相关的目录结构将如下所示: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── scannet +│ │ ├── scannet_utils.py +│ │ ├── batch_load_scannet_data.py +│ │ ├── load_scannet_data.py +│ │ ├── scannet_utils.py +│ │ ├── README.md +│ │ ├── scans +│ │ ├── scans_test +│ │ ├── scannet_instance_data +│ │ ├── points +│ │ ├── instance_mask +│ │ ├── semantic_mask +│ │ ├── seg_info +│ │ │ ├── train_label_weight.npy +│ │ │ ├── train_resampled_scene_idxs.npy +│ │ │ ├── val_label_weight.npy +│ │ │ ├── val_resampled_scene_idxs.npy +│ │ ├── scannet_infos_train.pkl +│ │ ├── scannet_infos_val.pkl +│ │ ├── scannet_infos_test.pkl +``` + +## 训练 + +接着,我们将使用提供的配置文件训练 PointNet++ (SSG) 模型。当你使用不同的 GPU 设置进行训练时,你基本上可以按照这个[教程](https://mmdetection3d.readthedocs.io/zh_CN/latest/1_exist_data_model.html#inference-with-existing-models)的示例脚本。假设我们在一台具有 2 块 GPU 的机器上使用分布式训练: + +``` +./tools/dist_train.sh configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py 2 +``` + +注意,配置文件名中的 `16x2` 是指训练时用了 2 块 GPU,每块 GPU 上有 16 个样本。如果你的自定义设置不同于此,那么有时候你需要相应的调整学习率。基本规则可以参考[此处](https://arxiv.org/abs/1706.02677)。 + +## 定量评估 + +在训练期间,模型权重将会根据配置文件中的 `train_cfg = dict(val_interval=xxx)` 设置被周期性地评估。我们支持不同数据集的官方评估方案。对于 ScanNet,将使用 20 个类别的平均交并比 (mIoU) 对模型进行评估。评估结果将会被打印到终端中,如下所示: + +``` ++---------+--------+--------+---------+--------+--------+--------+--------+--------+--------+-----------+---------+---------+--------+---------+--------------+----------------+--------+--------+---------+----------------+--------+--------+---------+ +| classes | wall | floor | cabinet | bed | chair | sofa | table | door | window | bookshelf | picture | counter | desk | curtain | refrigerator | showercurtrain | toilet | sink | bathtub | otherfurniture | miou | acc | acc_cls | ++---------+--------+--------+---------+--------+--------+--------+--------+--------+--------+-----------+---------+---------+--------+---------+--------------+----------------+--------+--------+---------+----------------+--------+--------+---------+ +| results | 0.7257 | 0.9373 | 0.4625 | 0.6613 | 0.7707 | 0.5562 | 0.5864 | 0.4010 | 0.4558 | 0.7011 | 0.2500 | 0.4645 | 0.4540 | 0.5399 | 0.2802 | 0.3488 | 0.7359 | 0.4971 | 0.6922 | 0.3681 | 0.5444 | 0.8118 | 0.6695 | ++---------+--------+--------+---------+--------+--------+--------+--------+--------+--------+-----------+---------+---------+--------+---------+--------------+----------------+--------+--------+---------+----------------+--------+--------+---------+ +``` + 
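+mIoU 的计算过程可以用下面的简化示例来说明:先统计混淆矩阵,再由对角线元素计算每个类别的 IoU,最后取各类别的平均值。该示例仅为示意(其中的 `mean_iou` 函数与随机输入均为假设的演示代码),实际评估逻辑请以 [seg_eval.py](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/functional/seg_eval.py) 的实现为准:
+
+```python
+import numpy as np
+
+
+def mean_iou(pred, gt, num_classes, ignore_index):
+    """基于混淆矩阵计算 mIoU 的简化示例。"""
+    # 过滤掉被标记为忽略标签的点
+    mask = gt != ignore_index
+    pred, gt = pred[mask], gt[mask]
+    # 统计 num_classes x num_classes 的混淆矩阵
+    hist = np.bincount(
+        gt * num_classes + pred,
+        minlength=num_classes**2).reshape(num_classes, num_classes)
+    # 每个类别的 IoU = 交集 / (预测点数 + 真值点数 - 交集)
+    iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-6)
+    return float(np.nanmean(iou))
+
+
+# 随机生成的假设数据,仅用于演示接口,20 为 ScanNet 语义分割的类别数
+pred = np.random.randint(0, 20, 100000)
+gt = np.random.randint(0, 20, 100000)
+print(mean_iou(pred, gt, num_classes=20, ignore_index=20))
+```
+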
+此外,在训练完成后你也可以评估特定的模型权重文件。你可以简单地执行以下脚本: + +``` +./tools/dist_test.sh configs/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class.py work_dirs/pointnet2_ssg/latest.pth 8 +``` + +## 测试与提交 + +如果你只想在在线基准上进行推理或测试模型性能,你需要在配置文件中的 `test_evalutor` 字段增加 `submission_prefix`, 例如配置文件增加 `test_evaluator = dict(type='SegMetric',submission_prefix=work_dirs/pointnet2_ssg/test_submission`)。 +并将 ScanNet 数据集[配置文件](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/datasets/scannet_seg-3d-20class.py#L129)中的 `ann_file=scannet_infos_val.pkl` 变成 `ann_file=scannet_infos_test.pkl`。在生成结果后,你可以压缩文件夹并上传至 [ScanNet 评估服务器](http://kaldir.vc.in.tum.de/scannet_benchmark/semantic_label_3d)上。 + +## 定性评估 + +MMDetection3D 还提供了通用的可视化工具,以便于我们可以对训练好的模型预测的分割结果有一个直观的感受。你也可以在评估阶段通过设置 `--eval-options 'show=True' 'out_dir=${SHOW_DIR}'` 来在线可视化分割结果,或者使用 `tools/misc/visualize_results.py` 来离线地进行可视化。此外,我们还提供了脚本 `tools/misc/browse_dataset.py` 用于可视化数据集而不做推理。更多的细节请参考[可视化文档](https://mmdetection3d.readthedocs.io/zh_CN/latest/useful_tools.html#visualization)。 diff --git a/docs/zh_cn/advanced_guides/supported_tasks/vision_det3d.md b/docs/zh_cn/advanced_guides/supported_tasks/vision_det3d.md new file mode 100755 index 0000000..18c546e --- /dev/null +++ b/docs/zh_cn/advanced_guides/supported_tasks/vision_det3d.md @@ -0,0 +1,114 @@ +# 基于视觉的 3D 检测 + +基于视觉的 3D 检测是指基于纯视觉输入的 3D 检测方法,例如基于单目、双目和多视图图像的 3D 检测。目前,我们只支持单目和多视图的 3D 检测方法。其他方法也应该与我们的框架兼容,并在将来得到支持。 + +它期望给定的模型以任意数量的图像作为输入,并为每一个感兴趣的目标预测 3D 框及类别标签。以 nuScenes 数据集 FCOS3D 为例,我们将展示如何准备数据,在标准的 3D 检测基准上训练并测试模型,以及可视化并验证结果。 + +## 数据准备 + +首先,我们需要下载原始数据并按照[数据准备文档](https://mmdetection3d.readthedocs.io/zh_CN/latest/data_preparation.html)中提供的标准方式重新组织数据。 + +由于不同数据集的原始数据有不同的组织方式,我们通常需要用 pkl 或 json 文件收集有用的数据信息。因此,在准备好所有的原始数据之后,我们需要运行 `create_data.py` 中提供的脚本来为不同的数据集生成数据信息。例如,对于 nuScenes,我们需要运行如下命令: + +``` +python tools/create_data.py nuscenes --root-path ./data/nuscenes --out-dir ./data/nuscenes --extra-tag nuscenes +``` + +随后,相关的目录结构将如下所示: + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── nuscenes +│ │ ├── maps +│ │ ├── samples +│ │ ├── sweeps +│ │ ├── v1.0-test +| | ├── v1.0-trainval +│ │ ├── nuscenes_database +│ │ ├── nuscenes_infos_train.pkl +│ │ ├── nuscenes_infos_trainval.pkl +│ │ ├── nuscenes_infos_val.pkl +│ │ ├── nuscenes_infos_test.pkl +│ │ ├── nuscenes_dbinfos_train.pkl +│ │ ├── nuscenes_infos_train_mono3d.coco.json +│ │ ├── nuscenes_infos_trainval_mono3d.coco.json +│ │ ├── nuscenes_infos_val_mono3d.coco.json +│ │ ├── nuscenes_infos_test_mono3d.coco.json +``` + +注意,此处的 pkl 文件主要用于使用 LiDAR 数据的方法,json 文件用于 2D 检测/纯视觉的 3D 检测。在 v0.13.0 支持单目 3D 检测之前,json 文件只包含 2D 检测的信息,因此如果你需要最新的信息,请切换到 v0.13.0 之后的分支。 + +## 训练 + +接着,我们将使用提供的配置文件训练 FCOS3D。基本的脚本与其他模型一样。当你使用不同的 GPU 设置进行训练时,你基本上可以按照这个[教程](https://mmdetection3d.readthedocs.io/zh_CN/latest/1_exist_data_model.html#inference-with-existing-models)的示例。假设我们在一台具有 8 块 GPU 的机器上使用分布式训练: + +``` +./tools/dist_train.sh configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d.py 8 +``` + +注意,配置文件名中的 `2x8` 是指训练时用了 8 块 GPU,每块 GPU 上有 2 个数据样本。如果你的自定义设置不同于此,那么有时候你需要相应的调整学习率。基本规则可以参考[此处](https://arxiv.org/abs/1706.02677)。 + +我们也可以通过运行以下命令微调 FCOS3D,从而达到更好的性能: + +``` +./tools/dist_train.sh fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune.py 8 +``` + +通过先前的脚本训练好一个基准模型后,请记得相应的修改[此处](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune.py#L8)的路径。 + +## 定量评估 + +在训练期间,模型权重文件将会根据配置文件中的 `evaluation = dict(interval=xxx)` 设置被周期性地评估。 + 
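+在 dev-1.x 版本的配置中,评估间隔通常由 `train_cfg` 中的 `val_interval` 字段控制(与基于激光雷达的 3D 检测文档中的写法一致)。下面是一个假设性的配置示意,表示每 2 个 epoch 在验证集上评估一次,具体字段请以所使用版本的配置文件为准:
+
+```python
+# 仅为示意:训练 12 个 epoch,每 2 个 epoch 在验证集上评估一次
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=2)
+val_cfg = dict(type='ValLoop')
+```
+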
+我们支持不同数据集的官方评估方案。由于输出格式与基于其他模态的 3D 检测相同,因此评估方法也是一样的。 + +对于 nuScenes,将使用基于距离的平均精度(mAP)以及 nuScenes 检测分数(NDS)分别对 10 个类别进行评估。评估结果将会被打印到终端中,如下所示: + +``` +mAP: 0.3197 +mATE: 0.7595 +mASE: 0.2700 +mAOE: 0.4918 +mAVE: 1.3307 +mAAE: 0.1724 +NDS: 0.3905 +Eval time: 170.8s + +Per-class results: +Object Class AP ATE ASE AOE AVE AAE +car 0.503 0.577 0.152 0.111 2.096 0.136 +truck 0.223 0.857 0.224 0.220 1.389 0.179 +bus 0.294 0.855 0.204 0.190 2.689 0.283 +trailer 0.081 1.094 0.243 0.553 0.742 0.167 +construction_vehicle 0.058 1.017 0.450 1.019 0.137 0.341 +pedestrian 0.392 0.687 0.284 0.694 0.876 0.158 +motorcycle 0.317 0.737 0.265 0.580 2.033 0.104 +bicycle 0.308 0.704 0.299 0.892 0.683 0.010 +traffic_cone 0.555 0.486 0.309 nan nan nan +barrier 0.466 0.581 0.269 0.169 nan nan +``` + +此外,在训练完成后你也可以评估特定的模型权重文件。你可以简单地执行以下脚本: + +``` +./tools/dist_test.sh configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d.py \ + work_dirs/fcos3d/latest.pth --eval mAP +``` + +## 测试与提交 + +如果你只想在在线基准上进行推理或测试模型性能,你需要将之前评估脚本中的 `--eval mAP` 替换成 `--format-only`,并在需要的情况下指定 `jsonfile_prefix`,例如,添加选项 `--eval-options jsonfile_prefix=work_dirs/fcos3d/test_submission`。请确保配置文件中的[测试信息](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/datasets/nus-mono3d.py#L93)由验证集相应地改为测试集。 + +在生成结果后,你可以压缩文件夹并上传至 nuScenes 3D 检测挑战的 evalAI 评估服务器上。 + +## 定性评估 + +MMDetection3D 还提供了通用的可视化工具,以便于我们可以对训练好的模型预测的检测结果有一个直观的感受。你也可以在评估阶段通过设置 `--eval-options 'show=True' 'out_dir=${SHOW_DIR}'` 来在线可视化检测结果,或者使用 `tools/misc/visualize_results.py` 来离线地进行可视化。 + +此外,我们还提供了脚本 `tools/misc/browse_dataset.py` 用于可视化数据集而不做推理。更多的细节请参考[可视化文档](https://mmdetection3d.readthedocs.io/zh_CN/latest/useful_tools.html#visualization)。 + +注意,目前我们仅支持纯视觉方法在图像上的可视化。将来我们将集成在前景图以及鸟瞰图(BEV)中的可视化。 diff --git a/docs/zh_cn/api.rst b/docs/zh_cn/api.rst new file mode 100755 index 0000000..777e6f4 --- /dev/null +++ b/docs/zh_cn/api.rst @@ -0,0 +1,154 @@ +mmdet3d.apis +-------------- +.. automodule:: mmdet3d.apis + :members: + +mmdet3d.datasets +-------------- + +datasets +^^^^^^^^^^ +.. automodule:: mmdet3d.datasets + :members: + +transforms +^^^^^^^^^^^^ +.. automodule:: mmdet3d.datasets.transforms + :members: + +mmdet3d.engine +-------------- + +hooks +^^^^^^^^^^ +.. automodule:: mmdet3d.engine.hooks + :members: + +mmdet3d.evaluation +-------------------- + +functional +^^^^^^^^^^^^^^^^^ +.. automodule:: mmdet3d.evaluation.functional + :members: + +metrics +^^^^^^^^^^ +.. automodule:: mmdet3d.evaluation.metrics + :members: + +mmdet3d.models +-------------- + +backbones +^^^^^^^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.backbones + :members: + +data_preprocessors +^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.data_preprocessors + :members: + +decode_heads +^^^^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.decode_heads + :members: + +dense_heads +^^^^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.dense_heads + :members: + +detectors +^^^^^^^^^^ +.. automodule:: mmdet3d.models.detectors + :members: + +layers +^^^^^^^^^^ +.. automodule:: mmdet3d.models.layers + :members: + +losses +^^^^^^^^^^ +.. automodule:: mmdet3d.models.losses + :members: + +middle_encoders +^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.middle_encoders + :members: + +necks +^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.necks + :members: + +roi_heads +^^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.roi_heads + :members: + +segmentors +^^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.segmentors + :members: + +task_modules +^^^^^^^^^^^^^ +.. 
automodule:: mmdet3d.models.task_modules + :members: + +test_time_augs +^^^^^^^^^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.test_time_augs + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmdet3d.models.utils + :members: + +voxel_encoders +^^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.voxel_encoders + :members: + +mmdet3d.structures +-------------------- + +structures +^^^^^^^^^^^^^^^^^ +.. automodule:: mmdet3d.structures + :members: + +bbox_3d +^^^^^^^^^^ +.. automodule:: mmdet3d.structures.bbox_3d + :members: + +ops +^^^^^^^^^^ +.. automodule:: mmdet3d.structures.ops + :members: + +points +^^^^^^^^^^ +.. automodule:: mmdet3d.structures.points + :members: + +mmdet3d.testing +---------------- +.. automodule:: mmdet3d.testing + :members: + +mmdet3d.visualization +-------------------- +.. automodule:: mmdet3d.visualization + :members: + +mmdet3d.utils +-------------- +.. automodule:: mmdet3d.utils + :members: diff --git a/docs/zh_cn/conf.py b/docs/zh_cn/conf.py new file mode 100755 index 0000000..348059d --- /dev/null +++ b/docs/zh_cn/conf.py @@ -0,0 +1,161 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme +from m2r import MdInclude +from recommonmark.transform import AutoStructify +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'MMDetection3D' +copyright = '2020-2023, OpenMMLab' +author = 'MMDetection3D Authors' + +version_file = '../../mmdet3d/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_markdown_tables', + 'sphinx.ext.autosectionlabel', + 'sphinx_copybutton', +] + +autodoc_mock_imports = [ + 'matplotlib', 'nuscenes', 'PIL', 'pycocotools', 'pyquaternion', + 'terminaltables', 'mmdet3d.version', 'mmdet3d.ops', 'mmcv.ops' +] +autosectionlabel_prefix_document = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. 
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# html_theme = 'sphinx_rtd_theme' +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +html_theme_options = { + # 'logo_url': 'https://mmocr.readthedocs.io/en/latest/', + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmdetection3d' + }, + { + 'name': + '上游库', + 'children': [ + { + 'name': 'MMCV', + 'url': 'https://github.com/open-mmlab/mmcv', + 'description': '基础视觉库' + }, + { + 'name': 'MMDetection', + 'url': 'https://github.com/open-mmlab/mmdetection', + 'description': '目标检测工具箱' + }, + ] + }, + ], + # Specify the language of shared menu + 'menu_lang': + 'cn', +} + +language = 'zh_CN' + +master_doc = 'index' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +latex_documents = [ + (master_doc, 'mmcv.tex', 'mmcv Documentation', 'MMCV Contributors', + 'manual'), +] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] +# Enable ::: for my_st +myst_enable_extensions = ['colon_fence'] +myst_heading_anchors = 3 + +language = 'zh_CN' + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) + app.add_config_value('no_underscore_emphasis', False, 'env') + app.add_config_value('m2r_parse_relative_links', False, 'env') + app.add_config_value('m2r_anonymous_references', False, 'env') + app.add_config_value('m2r_disable_inline_math', False, 'env') + app.add_directive('mdinclude', MdInclude) + app.add_config_value('recommonmark_config', { + 'auto_toc_tree_section': 'Contents', + 'enable_eval_rst': True, + }, True) + app.add_transform(AutoStructify) diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md new file mode 100755 index 0000000..5b3dbf5 --- /dev/null +++ b/docs/zh_cn/get_started.md @@ -0,0 +1,291 @@ +## 开始你的第一步 + +## 依赖 + +在本节中,我们将展示如何使用 PyTorch 准备环境。 + +MMDetection3D 支持在 Linux,Windows(实验性支持),MacOS 上运行,它需要 Python 3.7 以上,CUDA 9.2 以上和 PyTorch 1.6 以上。 + +```{note} +如果您对 PyTorch 有经验并且已经安装了它,您可以直接跳转到[下一小节](#安装流程)。否则,您可以按照下述步骤进行准备。 +``` + +**步骤 0.** 从[官方网站](https://docs.conda.io/en/latest/miniconda.html)下载并安装 Miniconda。 + +**步骤 1.** 创建并激活一个 conda 环境。 + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**步骤 2.** 基于 [PyTorch 官方说明](https://pytorch.org/get-started/locally/)安装 PyTorch,例如: + +在 GPU 平台上: + +```shell +conda install pytorch torchvision -c pytorch +``` + +在 CPU 平台上: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +## 安装流程 + +我们推荐用户参照我们的最佳实践安装 MMDetection3D。不过,整个过程也是可定制化的,更多信息请参考[自定义安装](#自定义安装)章节。 + +### 最佳实践 + +**步骤 0.** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMEngine](https://github.com/open-mmlab/mmengine),[MMCV](https://github.com/open-mmlab/mmcv) 和 [MMDetection](https://github.com/open-mmlab/mmdetection)。 + +```shell +pip install -U openmim +mim install mmengine +mim install 'mmcv>=2.0.0rc4' +mim install 'mmdet>=3.0.0' +``` + +**注意**:在 MMCV-v2.x 
中,`mmcv-full` 改名为 `mmcv`,如果您想安装不包含 CUDA 算子的 `mmcv`,您可以使用 `mim install "mmcv-lite>=2.0.0rc4"` 安装精简版。 + +**步骤 1.** 安装 MMDetection3D。 + +方案 a:如果您开发并直接运行 mmdet3d,从源码安装它: + +```shell +git clone https://github.com/open-mmlab/mmdetection3d.git -b dev-1.x +# "-b dev-1.x" 表示切换到 `dev-1.x` 分支。 +cd mmdetection3d +pip install -v -e . +# "-v" 指详细说明,或更多的输出 +# "-e" 表示在可编辑模式下安装项目,因此对代码所做的任何本地修改都会生效,从而无需重新安装。 +``` + +方案 b:如果您将 mmdet3d 作为依赖或第三方 Python 包使用,使用 MIM 安装: + +```shell +mim install "mmdet3d>=1.1.0rc0" +``` + +注意: + +1. 如果您希望使用 `opencv-python-headless` 而不是 `opencv-python`,您可以在安装 MMCV 之前安装它。 + +2. 一些安装依赖是可选的。简单地运行 `pip install -v -e .` 将会安装最低运行要求的版本。如果想要使用一些可选依赖项,例如 `albumentations` 和 `imagecorruptions`,可以使用 `pip install -r requirements/optional.txt` 进行手动安装,或者在使用 `pip` 时指定所需的附加功能(例如 `pip install -v -e .[optional]`),支持附加功能的有效键值包括 `all`、`tests`、`build` 以及 `optional`。 + + 我们已经支持 `spconv 2.0`。如果用户已经安装 `spconv 2.0`,代码会默认使用 `spconv 2.0`,它会比原生 `mmcv spconv` 使用更少的 GPU 内存。用户可以使用下列的命令来安装 `spconv 2.0`: + + ```shell + pip install cumm-cuxxx + pip install spconv-cuxxx + ``` + + `xxx` 表示环境中的 CUDA 版本。 + + 例如,使用 CUDA 10.2,对应命令是 `pip install cumm-cu102 && pip install spconv-cu102`。 + + 支持的 CUDA 版本包括 10.2,11.1,11.3 和 11.4。用户也可以通过源码编译来安装。更多细节请参考[spconv v2.x](https://github.com/traveller59/spconv)。 + + 我们也支持 `Minkowski Engine` 作为稀疏卷积的后端。如果需要,请参考[安装指南](https://github.com/NVIDIA/MinkowskiEngine#installation) 或者使用 `pip` 来安装: + + ```shell + conda install openblas-devel -c anaconda + pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps --install-option="--blas_include_dirs=/opt/conda/include" --install-option="--blas=openblas" + ``` + + 我们还支持 `Torchsparse` 作为稀疏卷积的后端。如果需要,请参考[安装指南](https://github.com/mit-han-lab/torchsparse#installation) 或者使用 `pip` 来安装: + + ```shell + sudo apt install libsparsehash-dev + pip install --upgrade git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0 + ``` + + 或者通过以下安装绕过sudo权限 + + ```shell + conda install -c bioconda sparsehash + export CPLUS_INCLUDE_PATH=CPLUS_INCLUDE_PATH:${YOUR_CONDA_ENVS_DIR}/include + pip install --upgrade git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0 + ``` + +3. 我们的代码目前不能在只有 CPU 的环境(CUDA 不可用)下编译。 + +### 验证安装 + +为了验证 MMDetection3D 是否安装正确,我们提供了一些示例代码来执行模型推理。 + +**步骤 1.** 我们需要下载配置文件和模型权重文件。 + +```shell +mim download mmdet3d --config pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car --dest . 
+``` + +下载将需要几秒钟或更长时间,这取决于您的网络环境。完成后,您会在当前文件夹中发现两个文件 `pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py` 和 `hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth`。 + +**步骤 2.** 推理验证。 + +方案 a:如果您从源码安装 MMDetection3D,那么直接运行以下命令进行验证: + +```shell +python demo/pcd_demo.py demo/data/kitti/000008.bin pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth --show +``` + +您会看到一个带有点云的可视化界面,其中包含有在汽车上绘制的检测框。 + +**注意**: + +如果您想输入一个 `.ply` 文件,您可以使用如下函数将它转换成 `.bin` 格式。然后您可以使用转化的 `.bin` 文件来运行样例。请注意在使用此脚本之前,您需要安装 `pandas` 和 `plyfile`。这个函数也可以用于训练 `ply 数据`时作为数据预处理来使用。 + +```python +import numpy as np +import pandas as pd +from plyfile import PlyData + +def convert_ply(input_path, output_path): + plydata = PlyData.read(input_path) # 读取文件 + data = plydata.elements[0].data # 读取数据 + data_pd = pd.DataFrame(data) # 转换成 DataFrame + data_np = np.zeros(data_pd.shape, dtype=np.float) # 初始化数组来存储数据 + property_names = data[0].dtype.names # 读取属性名称 + for i, name in enumerate( + property_names): # 通过属性读取数据 + data_np[:, i] = data_pd[name] + data_np.astype(np.float32).tofile(output_path) +``` + +例如: + +```python +convert_ply('./test.ply', './test.bin') +``` + +如果您有其他格式的点云数据(`.off`,`.obj` 等),您可以使用 `trimesh` 将它们转化成 `.ply`。 + +```python +import trimesh + +def to_ply(input_path, output_path, original_type): + mesh = trimesh.load(input_path, file_type=original_type) # 读取文件 + mesh.export(output_path, file_type='ply') # 转换成 ply +``` + +例如: + +```python +to_ply('./test.obj', './test.ply', 'obj') +``` + +方案 b:如果您使用 MIM 安装 MMDetection3D,那么可以打开您的 Python 解析器,复制并粘贴以下代码: + +```python +from mmdet3d.apis import init_model, inference_detector + +config_file = 'pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py' +checkpoint_file = 'hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth' +model = init_model(config_file, checkpoint_file) +inference_detector(model, 'demo/data/kitti/000008.bin') +``` + +您将会看到一个包含 `Det3DDataSample` 的列表,预测结果在 `pred_instances_3d` 里面,包含有检测框,类别和得分。 + +### 自定义安装 + +#### CUDA 版本 + +在安装 PyTorch 时,您需要指定 CUDA 的版本。如果您不清楚应该选择哪一个,请遵循我们的建议: + +- 对于 Ampere 架构的 NVIDIA GPU,例如 GeForce 30 系列以及 NVIDIA A100,CUDA 11 是必需的。 +- 对于更早的 NVIDIA GPU,CUDA 11 是向后兼容的,但 CUDA 10.2 提供更好的兼容性,并且更轻量。 + +请确保 GPU 驱动版本满足最低的版本需求。更多信息请参考此[表格](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions)。 + +```{note} +如果您遵循我们的最佳实践,您只需要安装 CUDA 运行库,这是因为不需要在本地编译 CUDA 代码。但如果您希望从源码编译 MMCV,或者开发其他 CUDA 算子,那么您需要从 NVIDIA 的[官网](https://developer.nvidia.com/cuda-downloads)安装完整的 CUDA 工具链,并且该版本应该与 PyTorch 的 CUDA 版本相匹配,比如在 `conda install` 指令里指定 cudatoolkit 版本。 +``` + +#### 不通过 MIM 安装 MMEngine + +如果想要使用 pip 而不是 MIM 安装 MMEngine,请参考 [MMEngine 安装指南](https://mmengine.readthedocs.io/zh_CN/latest/get_started/installation.html)。 + +例如,您可以通过以下指令安装 MMEngine: + +```shell +pip install mmengine +``` + +#### 不通过 MIM 安装 MMCV + +MMCV 包含 C++ 和 CUDA 拓展,因此其对 PyTorch 的依赖更复杂。MIM 会自动解决此类依赖关系并使安装更容易。但这不是必需的。 + +如果想要使用 pip 而不是 MIM 安装 MMCV,请参考 [MMCV 安装指南](https://mmcv.readthedocs.io/zh_CN/2.x/get_started/installation.html)。这需要用指定 url 的形式手动指定对应的 PyTorch 和 CUDA 版本。 + +例如,下述指令将会安装基于 PyTorch 1.12.x 和 CUDA 11.6 编译的 MMCV: + +```shell +pip install "mmcv>=2.0.0rc4" -f https://download.openmmlab.com/mmcv/dist/cu116/torch1.12.0/index.html +``` + +#### 在 Google Colab 中安装 + +[Google Colab](https://colab.research.google.com/) 通常已经安装了 PyTorch,因此我们只需要用如下命令安装 MMEngine,MMCV,MMDetection 和 MMDetection3D 即可。 + +**步骤 1.** 使用 
[MIM](https://github.com/open-mmlab/mim) 安装 [MMEngine](https://github.com/open-mmlab/mmengine),[MMCV](https://github.com/open-mmlab/mmcv) 和 [MMDetection](https://github.com/open-mmlab/mmdetection)。 + +```shell +!pip3 install openmim +!mim install mmengine +!mim install "mmcv>=2.0.0rc4,<2.1.0" +!mim install "mmdet>=3.0.0,<3.1.0" +``` + +**步骤 2.** 从源码安装 MMDetection3D。 + +```shell +!git clone https://github.com/open-mmlab/mmdetection3d.git -b dev-1.x +%cd mmdetection3d +!pip install -e . +``` + +**步骤 3.** 验证安装是否成功。 + +```python +import mmdet3d +print(mmdet3d.__version__) +# 预期输出:1.1.0rc0 或其它版本号。 +``` + +```{note} +在 Jupyter Notebook 中,感叹号 `!` 用于执行外部命令,而 `%cd` 是一个[魔术命令](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd),用于切换 Python 的工作路径。 +``` + +#### 通过 Docker 使用 MMDetection3D + +我们提供了 [Dockerfile](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docker/Dockerfile) 来构建一个镜像。请确保您的 [docker 版本](https://docs.docker.com/engine/install/) >= 19.03。 + +```shell +# 基于 PyTorch 1.9,CUDA 11.1 构建镜像 +# 如果您想要其他版本,只需要修改 Dockerfile +docker build -t mmdetection3d docker/ +``` + +用以下命令运行 Docker 镜像: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmdetection3d/data mmdetection3d +``` + +### 故障排除 + +如果您在安装过程中遇到一些问题,请先参考 [FAQ](notes/faq.md) 页面。如果没有找到对应的解决方案,您也可以在 GitHub [提一个问题](https://github.com/open-mmlab/mmdetection3d/issues/new/choose)。 + +### 使用多个 MMDetection3D 版本进行开发 + +训练和测试的脚本已经在 `PYTHONPATH` 中进行了修改,以确保脚本使用当前目录中的 MMDetection3D。 + +要使环境中安装默认版本的 MMDetection3D 而不是当前正在使用的,可以删除出现在相关脚本中的代码: + +```shell +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +``` diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst new file mode 100755 index 0000000..ece2461 --- /dev/null +++ b/docs/zh_cn/index.rst @@ -0,0 +1,56 @@ +欢迎来到 MMDetection3D 文档! +========================================== + +.. toctree:: + :maxdepth: 1 + :caption: 开始你的第一步 + + overview.md + get_started.md + +.. toctree:: + :maxdepth: 2 + :caption: 使用指南 + + user_guides/index.rst + +.. toctree:: + :maxdepth: 2 + :caption: 进阶教程 + + advanced_guides/index.rst + +.. toctree:: + :maxdepth: 1 + :caption: 迁移版本 + + migration.md + +.. toctree:: + :maxdepth: 1 + :caption: 接口文档(英文) + + api.rst + +.. toctree:: + :maxdepth: 1 + :caption: 模型仓库 + + model_zoo.md + +.. toctree:: + :maxdepth: 1 + :caption: 说明 + + notes/index.rst + +.. 
toctree:: + :caption: 语言切换 + + switch_language.md + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/zh_cn/model_zoo.md b/docs/zh_cn/model_zoo.md new file mode 100755 index 0000000..2a443f3 --- /dev/null +++ b/docs/zh_cn/model_zoo.md @@ -0,0 +1,113 @@ +# 模型库 + +## 通用设置 + +- 使用分布式训练; +- 为了和其他代码库做公平对比,本文展示的是使用 `torch.cuda.max_memory_allocated()` 在 8 个 GPUs 上得到的最大 GPU 显存占用值,需要注意的是,这些显存占用值通常小于 `nvidia-smi` 显示出来的显存占用值; +- 在模型库中所展示的推理时间是包括网络前向传播和后处理所需的总时间,不包括数据加载所需的时间,模型库中所展示的结果均由 [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) 脚本文件在 2000 张图像上所计算的平均时间。 + +## 基准结果 + +### SECOND + +请参考 [SECOND](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/second) 获取更多的细节,我们在 KITTI 和 Waymo 数据集上都给出了相应的基准结果。 + +### PointPillars + +请参考 [PointPillars](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars) 获取更多细节,我们在 KITTI 、nuScenes 、Lyft 、Waymo 数据集上给出了相应的基准结果。 + +### Part-A2 + +请参考 [Part-A2](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/parta2) 获取更多细节。 + +### VoteNet + +请参考 [VoteNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/votenet) 获取更多细节,我们在 ScanNet 和 SUNRGBD 数据集上给出了相应的基准结果。 + +### Dynamic Voxelization + +请参考 [Dynamic Voxelization](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/dynamic_voxelization) 获取更多细节。 + +### MVXNet + +请参考 [MVXNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/mvxnet) 获取更多细节。 + +### RegNetX + +请参考 [RegNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/regnet) 获取更多细节,我们将 pointpillars 的主干网络替换成 RegNetX,并在 nuScenes 和 Lyft 数据集上给出了相应的基准结果。 + +### nuImages + +我们在 [nuImages 数据集](https://www.nuscenes.org/nuimages) 上也提供基准模型,请参考 [nuImages](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/nuimages) 获取更多细节,我们在该数据集上提供 Mask R-CNN , Cascade Mask R-CNN 和 HTC 的结果。 + +### H3DNet + +请参考 [H3DNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/h3dnet) 获取更多细节。 + +### 3DSSD + +请参考 [3DSSD](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/3dssd) 获取更多细节。 + +### CenterPoint + +请参考 [CenterPoint](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/centerpoint) 获取更多细节。 + +### SSN + +请参考 [SSN](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/ssn) 获取更多细节,我们将 pointpillars 中的检测头替换成 SSN 模型中所使用的 ‘shape-aware grouping heads’,并在 nuScenes 和 Lyft 数据集上给出了相应的基准结果。 + +### ImVoteNet + +请参考 [ImVoteNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/imvotenet) 获取更多细节,我们在 SUNRGBD 数据集上给出了相应的结果。 + +### FCOS3D + +请参考 [FCOS3D](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/fcos3d) 获取更多细节,我们在 nuScenes 数据集上给出了相应的结果。 + +### PointNet++ + +请参考 [PointNet++](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointnet2) 获取更多细节,我们在 ScanNet 和 S3DIS 数据集上给出了相应的结果。 + +### Group-Free-3D + +请参考 [Group-Free-3D](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/groupfree3d) 获取更多细节,我们在 ScanNet 数据集上给出了相应的结果。 + +### ImVoxelNet + +请参考 [ImVoxelNet](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/imvoxelnet) 获取更多细节,我们在 KITTI 数据集上给出了相应的结果。 + +### PAConv + +请参考 [PAConv](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/paconv) 获取更多细节,我们在 S3DIS 数据集上给出了相应的结果。 + +### DGCNN + +请参考 [DGCNN](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/dgcnn) 获取更多细节,我们在 S3DIS 数据集上给出了相应的结果。 + +### SMOKE + +请参考 
[SMOKE](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/smoke) 获取更多细节,我们在 KITTI 数据集上给出了相应的结果。 + +### PGD + +请参考 [PGD](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pgd) 获取更多细节,我们在 KITTI 和 nuScenes 数据集上给出了相应的结果。 + +### PointRCNN + +请参考 [PointRCNN](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/point_rcnn) 获取更多细节,我们在 KITTI 数据集上给出了相应的结果。 + +### MonoFlex + +请参考 [MonoFlex](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/monoflex) 获取更多细节,我们在 KITTI 数据集上给出了相应的结果。 + +### SA-SSD + +请参考 [SA-SSD](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/sassd) 获取更多的细节,我们在 KITTI 数据集上给出了相应的基准结果。 + +### FCAF3D + +请参考 [FCAF3D](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/fcaf3d) 获取更多的细节,我们在 ScanNet, S3DIS 和 SUN RGB-D 数据集上给出了相应的基准结果。 + +### Mixed Precision (FP16) Training + +细节请参考 [Mixed Precision (FP16) Training 在 PointPillars 训练的样例](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d.py)。 diff --git a/docs/zh_cn/notes/benchmarks.md b/docs/zh_cn/notes/benchmarks.md new file mode 100755 index 0000000..b3b5c53 --- /dev/null +++ b/docs/zh_cn/notes/benchmarks.md @@ -0,0 +1,285 @@ +# 基准测试 + +这里我们对 MMDetection3D 和其他开源 3D 目标检测代码库中模型的训练速度和测试速度进行了基准测试。 + +## 配置 + +- 硬件:8 NVIDIA Tesla V100 (32G) GPUs, Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz +- 软件:Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.3, numba 0.48.0. +- 模型:由于不同代码库所实现的模型种类有所不同,在基准测试中我们选择了 SECOND、PointPillars、Part-A2 和 VoteNet 几种模型,分别与其他代码库中的相应模型实现进行了对比。 +- 度量方法:我们使用整个训练过程中的平均吞吐量作为度量方法,并跳过每个 epoch 的前 50 次迭代以消除训练预热的影响。 + +## 主要结果 + +对于模型的训练速度(样本/秒),我们将 MMDetection3D 与其他实现了相同模型的代码库进行了对比。结果如下所示,表格内的数字越大,代表模型的训练速度越快。代码库中不支持的模型使用 `×` 进行标识。 + +| 模型 | MMDetection3D | OpenPCDet | votenet | Det3D | +| :-----------------: | :-----------: | :-------: | :-----: | :---: | +| VoteNet | 358 | × | 77 | × | +| PointPillars-car | 141 | × | × | 140 | +| PointPillars-3class | 107 | 44 | × | × | +| SECOND | 40 | 30 | × | × | +| Part-A2 | 17 | 14 | × | × | + +## 测试细节 + +### 为了计算速度所做的修改 + +- __MMDetection3D__:我们尝试使用与其他代码库中尽可能相同的配置,具体配置细节见 [基准测试配置](https://github.com/open-mmlab/MMDetection3D/blob/master/configs/benchmark)。 + +- __Det3D__:为了与 Det3D 进行比较,我们使用了 commit [519251e](https://github.com/poodarchu/Det3D/tree/519251e72a5c1fdd58972eabeac67808676b9bb7) 所对应的代码版本。 + +- __OpenPCDet__:为了与 OpenPCDet 进行比较,我们使用了 commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2) 所对应的代码版本。 + + 为了计算训练速度,我们在 `./tools/train_utils/train_utils.py` 文件中添加了用于记录运行时间的代码。我们对每个 epoch 的训练速度进行计算,并报告所有 epoch 的平均速度。 + +
    + + (为了使用相同方法进行测试所做的具体修改 - 点击展开) + + + ```diff + diff --git a/tools/train_utils/train_utils.py b/tools/train_utils/train_utils.py + index 91f21dd..021359d 100644 + --- a/tools/train_utils/train_utils.py + +++ b/tools/train_utils/train_utils.py + @@ -2,6 +2,7 @@ import torch + import os + import glob + import tqdm + +import datetime + from torch.nn.utils import clip_grad_norm_ + + + @@ -13,7 +14,10 @@ def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, ac + if rank == 0: + pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True) + + + start_time = None + for cur_it in range(total_it_each_epoch): + + if cur_it > 49 and start_time is None: + + start_time = datetime.datetime.now() + try: + batch = next(dataloader_iter) + except StopIteration: + @@ -55,9 +59,11 @@ def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, ac + tb_log.add_scalar('learning_rate', cur_lr, accumulated_iter) + for key, val in tb_dict.items(): + tb_log.add_scalar('train_' + key, val, accumulated_iter) + + endtime = datetime.datetime.now() + + speed = (endtime - start_time).seconds / (total_it_each_epoch - 50) + if rank == 0: + pbar.close() + - return accumulated_iter + + return accumulated_iter, speed + + + def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg, + @@ -65,6 +71,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_ + lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50, + merge_all_iters_to_one_epoch=False): + accumulated_iter = start_iter + + speeds = [] + with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar: + total_it_each_epoch = len(train_loader) + if merge_all_iters_to_one_epoch: + @@ -82,7 +89,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_ + cur_scheduler = lr_warmup_scheduler + else: + cur_scheduler = lr_scheduler + - accumulated_iter = train_one_epoch( + + accumulated_iter, speed = train_one_epoch( + model, optimizer, train_loader, model_func, + lr_scheduler=cur_scheduler, + accumulated_iter=accumulated_iter, optim_cfg=optim_cfg, + @@ -91,7 +98,7 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_ + total_it_each_epoch=total_it_each_epoch, + dataloader_iter=dataloader_iter + ) + - + + speeds.append(speed) + # save trained model + trained_epoch = cur_epoch + 1 + if trained_epoch % ckpt_save_interval == 0 and rank == 0: + @@ -107,6 +114,8 @@ def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_ + save_checkpoint( + checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name, + ) + + print(speed) + + print(f'*******{sum(speeds) / len(speeds)}******') + + + def model_state_to_cpu(model_state): + ``` + +
    + +### VoteNet + +- __MMDetection3D__:在 v0.1.0 版本下, 执行如下命令: + + ```bash + ./tools/dist_train.sh configs/votenet/votenet_8xb16_sunrgbd-3d.py 8 --no-validate + ``` + +- __votenet__:在 commit [2f6d6d3](https://github.com/facebookresearch/votenet/tree/2f6d6d36ff98d96901182e935afe48ccee82d566) 版本下,执行如下命令: + + ```bash + python train.py --dataset sunrgbd --batch_size 16 + ``` + + 然后执行如下命令,对测试速度进行评估: + + ```bash + python eval.py --dataset sunrgbd --checkpoint_path log_sunrgbd/checkpoint.tar --batch_size 1 --dump_dir eval_sunrgbd --cluster_sampling seed_fps --use_3d_nms --use_cls_nms --per_class_proposal + ``` + + 注意,为了计算推理速度,我们对 `eval.py` 进行了修改。 + +
    + + (为了对相同模型进行测试所做的具体修改 - 点击展开) + + + ```diff + diff --git a/eval.py b/eval.py + index c0b2886..04921e9 100644 + --- a/eval.py + +++ b/eval.py + @@ -10,6 +10,7 @@ import os + import sys + import numpy as np + from datetime import datetime + +import time + import argparse + import importlib + import torch + @@ -28,7 +29,7 @@ parser.add_argument('--checkpoint_path', default=None, help='Model checkpoint pa + parser.add_argument('--dump_dir', default=None, help='Dump dir to save sample outputs [default: None]') + parser.add_argument('--num_point', type=int, default=20000, help='Point Number [default: 20000]') + parser.add_argument('--num_target', type=int, default=256, help='Point Number [default: 256]') + -parser.add_argument('--batch_size', type=int, default=8, help='Batch Size during training [default: 8]') + +parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 8]') + parser.add_argument('--vote_factor', type=int, default=1, help='Number of votes generated from each seed [default: 1]') + parser.add_argument('--cluster_sampling', default='vote_fps', help='Sampling strategy for vote clusters: vote_fps, seed_fps, random [default: vote_fps]') + parser.add_argument('--ap_iou_thresholds', default='0.25,0.5', help='A list of AP IoU thresholds [default: 0.25,0.5]') + @@ -132,6 +133,7 @@ CONFIG_DICT = {'remove_empty_box': (not FLAGS.faster_eval), 'use_3d_nms': FLAGS. + # ------------------------------------------------------------------------- GLOBAL CONFIG END + + def evaluate_one_epoch(): + + time_list = list() + stat_dict = {} + ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \ + for iou_thresh in AP_IOU_THRESHOLDS] + @@ -144,6 +146,8 @@ def evaluate_one_epoch(): + + # Forward pass + inputs = {'point_clouds': batch_data_label['point_clouds']} + + torch.cuda.synchronize() + + start_time = time.perf_counter() + with torch.no_grad(): + end_points = net(inputs) + + @@ -161,6 +165,12 @@ def evaluate_one_epoch(): + + batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT) + batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT) + + torch.cuda.synchronize() + + elapsed = time.perf_counter() - start_time + + time_list.append(elapsed) + + + + if len(time_list==200): + + print("average inference time: %4f"%(sum(time_list[5:])/len(time_list[5:]))) + for ap_calculator in ap_calculator_list: + ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls) + + ``` + +### PointPillars-car + +- __MMDetection3D__:在 v0.1.0 版本下, 执行如下命令: + + ```bash + ./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py 8 --no-validate + ``` + +- __Det3D__:在 commit [519251e](https://github.com/poodarchu/Det3D/tree/519251e72a5c1fdd58972eabeac67808676b9bb7) 版本下,使用 `kitti_point_pillars_mghead_syncbn.py` 并执行如下命令: + + ```bash + ./tools/scripts/train.sh --launcher=slurm --gpus=8 + ``` + + 注意,为了训练 PointPillars,我们对 `train.sh` 进行了修改。 + +
    + + (为了对相同模型进行测试所做的具体修改 - 点击展开) + + + ```diff + diff --git a/tools/scripts/train.sh b/tools/scripts/train.sh + index 3a93f95..461e0ea 100755 + --- a/tools/scripts/train.sh + +++ b/tools/scripts/train.sh + @@ -16,9 +16,9 @@ then + fi + + # Voxelnet + -python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py examples/second/configs/ kitti_car_vfev3_spmiddlefhd_rpn1_mghead_syncbn.py --work_dir=$SECOND_WORK_DIR + +# python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py examples/second/configs/ kitti_car_vfev3_spmiddlefhd_rpn1_mghead_syncbn.py --work_dir=$SECOND_WORK_DIR + # python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py examples/cbgs/configs/ nusc_all_vfev3_spmiddleresnetfhd_rpn2_mghead_syncbn.py --work_dir=$NUSC_CBGS_WORK_DIR + # python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py examples/second/configs/ lyft_all_vfev3_spmiddleresnetfhd_rpn2_mghead_syncbn.py --work_dir=$LYFT_CBGS_WORK_DIR + + # PointPillars + -# python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py ./examples/point_pillars/configs/ original_pp_mghead_syncbn_kitti.py --work_dir=$PP_WORK_DIR + +python -m torch.distributed.launch --nproc_per_node=8 ./tools/train.py ./examples/point_pillars/configs/ kitti_point_pillars_mghead_syncbn.py + ``` + +
    + +### PointPillars-3class + +- __MMDetection3D__:在 v0.1.0 版本下, 执行如下命令: + + ```bash + ./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py 8 --no-validate + ``` + +- __OpenPCDet__:在 commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2) 版本下,执行如下命令: + + ```bash + cd tools + sh scripts/slurm_train.sh ${PARTITION} ${JOB_NAME} 8 --cfg_file ./cfgs/kitti_models/pointpillar.yaml --batch_size 32 --workers 32 --epochs 80 + ``` + +### SECOND + +基准测试中的 SECOND 指在 [second.Pytorch](https://github.com/traveller59/second.pytorch) 首次被实现的 [SECONDv1.5](https://github.com/traveller59/second.pytorch/blob/master/second/configs/all.fhd.config)。Det3D 实现的 SECOND 中,使用了自己实现的 Multi-Group Head,因此无法将它的速度与其他代码库进行对比。 + +- __MMDetection3D__:在 v0.1.0 版本下, 执行如下命令: + + ```bash + ./tools/dist_train.sh configs/benchmark/hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py 8 --no-validate + ``` + +- __OpenPCDet__:在 commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2) 版本下,执行如下命令: + + ```bash + cd tools + sh ./scripts/slurm_train.sh ${PARTITION} ${JOB_NAME} 8 --cfg_file ./cfgs/kitti_models/second.yaml --batch_size 32 --workers 32 --epochs 80 + ``` + +### Part-A2 + +- __MMDetection3D__:在 v0.1.0 版本下, 执行如下命令: + + ```bash + ./tools/dist_train.sh configs/benchmark/hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py 8 --no-validate + ``` + +- __OpenPCDet__:在 commit [b32fbddb](https://github.com/open-mmlab/OpenPCDet/tree/b32fbddbe06183507bad433ed99b407cbc2175c2) 版本下,执行如下命令以进行模型训练: + + ```bash + cd tools + sh ./scripts/slurm_train.sh ${PARTITION} ${JOB_NAME} 8 --cfg_file ./cfgs/kitti_models/PartA2.yaml --batch_size 32 --workers 32 --epochs 80 + ``` diff --git a/docs/zh_cn/notes/changelog.md b/docs/zh_cn/notes/changelog.md new file mode 100755 index 0000000..258cba0 --- /dev/null +++ b/docs/zh_cn/notes/changelog.md @@ -0,0 +1 @@ +# v1.1 变更日志 diff --git a/docs/zh_cn/notes/changelog_v1.0.x.md b/docs/zh_cn/notes/changelog_v1.0.x.md new file mode 100755 index 0000000..d7916ef --- /dev/null +++ b/docs/zh_cn/notes/changelog_v1.0.x.md @@ -0,0 +1 @@ +# v1.0.x 变更日志 diff --git a/docs/zh_cn/notes/compatibility.md b/docs/zh_cn/notes/compatibility.md new file mode 100755 index 0000000..97144d1 --- /dev/null +++ b/docs/zh_cn/notes/compatibility.md @@ -0,0 +1 @@ +# 兼容性 diff --git a/docs/zh_cn/notes/faq.md b/docs/zh_cn/notes/faq.md new file mode 100755 index 0000000..6b5a989 --- /dev/null +++ b/docs/zh_cn/notes/faq.md @@ -0,0 +1,58 @@ +# 常见问题解答 + +我们列出了一些用户和开发者在开发过程中会遇到的常见问题以及对应的解决方案,如果您发现了任何频繁出现的问题,请随时扩充本列表,非常欢迎您提出的任何解决方案。如果您在环境配置、模型训练等工作中遇到任何的问题,请使用[问题模板](https://github.com/open-mmlab/mmdetection3d/blob/master/.github/ISSUE_TEMPLATE/error-report.md)来创建相应的 issue,并将所需的所有信息填入到问题模板中,我们会尽快解决您的问题。 + +## MMEngine/MMCV/MMDet/MMDet3D 安装 + +- 跟 MMEngine, MMCV, MMDetection 和 MMDetection3D 相关的编译问题; "ConvWS is already registered in conv layer"; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." 
+ +- MMDetection3D 需要的 MMEngine, MMCV 和 MMDetection 的版本列在了下面。请安装正确版本的 MMEngine、MMCV 和 MMDetection 以避免相关的安装问题。 + + | MMDetection3D 版本 | MMEngine 版本 | MMCV 版本 | MMDetection 版本 | + | ------------------ | :----------------------: | :---------------------: | :----------------------: | + | dev-1.x | mmengine>=0.7.1, \<1.0.0 | mmcv>=2.0.0rc4, \<2.1.0 | mmdet>=3.0.0, \<3.1.0 | + | main | mmengine>=0.7.1, \<1.0.0 | mmcv>=2.0.0rc4, \<2.1.0 | mmdet>=3.0.0, \<3.1.0 | + | v1.1.0rc3 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc3, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 | + | v1.1.0rc2 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc3, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 | + | v1.1.0rc1 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 | + | v1.1.0rc0 | mmengine>=0.1.0, \<1.0.0 | mmcv>=2.0.0rc0, \<2.1.0 | mmdet>=3.0.0rc0, \<3.1.0 | + + **注意**:如果你想安装 mmdet3d-v1.0.0rcx,可以在[此处](https://mmdetection3d.readthedocs.io/en/latest/faq.html#mmcv-mmdet-mmdet3d-installation)找到 MMDetection,MMSegmentation 和 MMCV 的兼容版本。请选择正确版本的 MMCV、MMDetection 和 MMSegmentation 以避免安装问题。 + +- 如果您在 `import open3d` 时遇到下面的问题: + + `OSError: /lib/x86_64-linux-gnu/libm.so.6: version 'GLIBC_2.27' not found` + + 请将 open3d 的版本降级至 0.9.0.0,因为最新版 open3d 需要 'GLIBC_2.27' 文件的支持, Ubuntu 16.04 系统中缺失该文件,且该文件仅存在于 Ubuntu 18.04 及之后的系统中。 + +- 如果您在 `import pycocotools` 时遇到版本错误的问题,这是由于 nuscenes-devkit 需要安装 pycocotools,然而 mmdet 依赖于 mmpycocotools,当前的解决方案如下所示,我们将会在之后全面支持 pycocotools : + + ```shell + pip uninstall pycocotools mmpycocotools + pip install mmpycocotools + ``` + + **注意**: 我们已经在 0.13.0 及之后的版本中全面支持 pycocotools。 + +- 如果您在导入 pycocotools 相关包时遇到下面的问题: + + `ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject` + + 请将 pycocotools 的版本降级至 2.0.1,这是由于最新版本的 pycocotools 与 numpy \< 1.20.0 不兼容。或者通过下面的方式从源码进行编译来安装最新版本的 pycocotools : + + `pip install -e "git+https://github.com/cocodataset/cocoapi#egg=pycocotools&subdirectory=PythonAPI"` + + 或者 + + `pip install -e "git+https://github.com/ppwwyyxx/cocoapi#egg=pycocotools&subdirectory=PythonAPI"` + +- 如果您使用 cuda-9.0 的环境并遇到关于 numba 的错误, 您应该检查下 numba 的版本。在 cuda-9.0 环境中,高版本的 numba 是不支持的,我们建议安装 numba==0.53.0. + +## 如何标注点云? + +MMDetection3D 不支持点云标注。我们提供一些开源的标注工具供参考: + +- [SUSTechPOINTS](https://github.com/naurril/SUSTechPOINTS) +- [LATTE](https://github.com/bernwang/latte) + +此外,我们改进了 [LATTE](https://github.com/bernwang/latte) 以便更方便的标注。更多的细节请参考[这里](https://arxiv.org/abs/2011.10174)。 diff --git a/docs/zh_cn/notes/index.rst b/docs/zh_cn/notes/index.rst new file mode 100755 index 0000000..609f0e0 --- /dev/null +++ b/docs/zh_cn/notes/index.rst @@ -0,0 +1,8 @@ +.. 
toctree:: + :maxdepth: 3 + + benchmarks.md + changelog_v1.0.x.md + changelog.md + compatibility.md + faq.md diff --git a/docs/zh_cn/stat.py b/docs/zh_cn/stat.py new file mode 100755 index 0000000..b5f10a8 --- /dev/null +++ b/docs/zh_cn/stat.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +import functools as func +import glob +import re +from os import path as osp + +import numpy as np + +url_prefix = 'https://github.com/open-mmlab/mmdetection3d/blob/master/' + +files = sorted(glob.glob('../configs/*/README.md')) + +stats = [] +titles = [] +num_ckpts = 0 + +for f in files: + url = osp.dirname(f.replace('../', url_prefix)) + + with open(f, 'r') as content_file: + content = content_file.read() + + title = content.split('\n')[0].replace('#', '').strip() + ckpts = set(x.lower().strip() + for x in re.findall(r'https?://download.*\.pth', content) + if 'mmdetection3d' in x) + if len(ckpts) == 0: + continue + + _papertype = [x for x in re.findall(r'', content)] + assert len(_papertype) > 0 + papertype = _papertype[0] + + paper = set([(papertype, title)]) + + titles.append(title) + num_ckpts += len(ckpts) + statsmsg = f""" +\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) +""" + stats.append((paper, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) +msglist = '\n'.join(x for _, _, x in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +\n## Model Zoo Statistics + +* Number of papers: {len(set(titles))} +{countstr} + +* Number of checkpoints: {num_ckpts} +{msglist} +""" + +with open('model_zoo.md', 'a') as f: + f.write(modelzoo) diff --git a/docs/zh_cn/switch_language.md b/docs/zh_cn/switch_language.md new file mode 100755 index 0000000..d33d080 --- /dev/null +++ b/docs/zh_cn/switch_language.md @@ -0,0 +1,3 @@ +## English + +## 简体中文 diff --git a/docs/zh_cn/user_guides/2_new_data_model.md b/docs/zh_cn/user_guides/2_new_data_model.md new file mode 100755 index 0000000..4752c47 --- /dev/null +++ b/docs/zh_cn/user_guides/2_new_data_model.md @@ -0,0 +1,102 @@ +# 2: 在自定义数据集上进行训练 + +本文将主要介绍如何使用自定义数据集来进行模型的训练和测试,以 Waymo 数据集作为示例来说明整个流程。 + +基本步骤如下所示: + +1. 准备自定义数据集; +2. 准备配置文件; +3. 在自定义数据集上进行模型的训练、测试和推理。 + +## 准备自定义数据集 + +在 MMDetection3D 中有三种方式来自定义一个新的数据集: + +1. 将新数据集的数据格式重新组织成已支持的数据集格式; +2. 将新数据集的数据格式重新组织成已支持的一种中间格式; +3. 
从头开始创建一个新的数据集。 + +由于前两种方式比第三种方式更加容易,我们更加建议采用前两种方式来自定义数据集。 + +在本文中,我们给出示例将数据转换成 KITTI 数据集的数据格式,你可以参考此处将你的数据集重新组织成 KITTI 格式。关于标准格式的数据集,你可以参考[自定义数据集文档](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/zh_cn/advanced_guides/customize_dataset.md)。 + +**注意**:考虑到 Waymo 数据集的格式与现有的其他数据集的格式的差别较大,因此本文以该数据集为例来讲解如何自定义数据集,从而方便理解数据集自定义的过程。若需要创建的新数据集与现有的数据集的组织格式较为相似,如 Lyft 数据集和 nuScenes 数据集,采用对数据集的中间格式进行转换的方式(第二种方式)相比于采用对数据格式进行转换的方式(第一种方式)会更加简单易行。 + +### KITTI 数据集格式 + +应用于 3D 目标检测的 KITTI 原始数据集的组织方式通常如下所示,其中 `ImageSets` 包含数据集划分文件,用以划分训练集/验证集/测试集,`calib` 包含对于每个数据样本的标定信息,`image_2` 和 `velodyne` 分别包含图像数据和点云数据,`label_2` 包含与 3D 目标检测相关的标注文件。 + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── kitti +│ │ ├── ImageSets +│ │ ├── testing +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── velodyne +│ │ ├── training +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── label_2 +│ │ │ ├── velodyne +``` + +KITTI 官方提供的目标检测开发[工具包](https://s3.eu-central-1.amazonaws.com/avg-kitti/devkit_object.zip)详细描述了 KITTI 数据集的标注格式,例如,KITTI 标注格式包含了以下的标注信息: + +``` +# 值 名称 描述 +---------------------------------------------------------------------------- + 1 类型 描述检测目标的类型:'Car','Van','Truck', + 'Pedestrian','Person_sitting','Cyclist','Tram', + 'Misc' 或 'DontCare' + 1 截断程度  从 0(非截断)到 1(截断)的浮点数,其中截断指的是离开检测图像边界的检测目标 + 1 遮挡程度  用来表示遮挡状态的四种整数(0,1,2,3): + 0 = 可见,1 = 部分遮挡 + 2 = 大面积遮挡,3 = 未知 + 1 观测角 观测目标的角度,取值范围为 [-pi..pi] + 4 标注框 检测目标在图像中的二维标注框(以0为初始下标):包括每个检测目标的左上角和右下角的坐标 + 3 维度  检测目标的三维维度:高度、宽度、长度(以米为单位) + 3 位置  相机坐标系下的三维位置 x,y,z(以米为单位) + 1 y 旋转  相机坐标系下检测目标绕着Y轴的旋转角,取值范围为 [-pi..pi] + 1 得分  仅在计算结果时使用,检测中表示置信度的浮点数,用于生成 p/r 曲线,在p/r 图中,越高的曲线表示结果越好。 +``` + +假定我们使用 Waymo 数据集。 + +在下载好数据集后,我们需要实现一个函数用来将输入数据和标注文件转换成 KITTI 风格。然后我们可以通过继承 `KittiDataset` 实现 `WaymoDataset`,用来加载数据以及训练模型,通过继承 `KittiMetric` 实现 `WaymoMetric` 来做模型的评估。 + +具体来说,首先使用[数据转换器](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/tools/dataset_converters/waymo_converter.py)将 Waymo 数据集转换成 KITTI 数据集的格式,并定义 [Waymo 类](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/datasets/waymo_dataset.py)对转换的数据进行处理。此外需要添加 waymo [评估类](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/evaluation/metrics/waymo_metric.py)来评估结果。因为我们将 Waymo 原始数据集进行预处理并重新组织成 KITTI 数据集的格式,因此可以比较容易通过继承 KittiDataset 类来实现 WaymoDataset 类。需要注意的是,由于 Waymo 数据集有相应的官方评估方法,我们需要进一步实现新的 Waymo 评估方法,更多关于评估方法参考[评估文档](https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/metric_and_evaluator.md)。最后,用户可以成功地转换数据并使用 `WaymoDataset` 训练以及 `WaymoMetric` 评估模型。 + +更多关于 Waymo 数据集预处理的中间结果的细节,请参照对应的[说明文档](https://mmdetection3d.readthedocs.io/zh_CN/latest/datasets/waymo_det.html)。 + +## 准备配置文件 + +第二步是准备配置文件来帮助数据集的读取和使用,另外,为了在 3D 检测中获得不错的性能,调整超参数通常是必要的。 + +假设我们想要使用 PointPillars 模型在 Waymo 数据集上实现三类的 3D 目标检测:vehicle、cyclist、pedestrian,参照 KITTI 
数据集[配置文件](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/datasets/kitti-3d-3class.py)、模型[配置文件](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/models/pointpillars_hv_secfpn_kitti.py)和[整体配置文件](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py),我们需要准备[数据集配置文件](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/datasets/waymoD5-3d-3class.py)、[模型配置文件](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/_base_/models/pointpillars_hv_secfpn_waymo.py),并将这两种文件进行结合得到[整体配置文件](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py)。 + +## 训练一个新的模型 + +为了使用一个新的配置文件来训练模型,可以通过下面的命令来实现: + +```shell +python tools/train.py configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py +``` + +更多的使用细节,请参考[案例 1](https://mmdetection3d.readthedocs.io/zh_CN/latest/1_exist_data_model.html)。 + +## 测试和推理 + +为了测试已经训练好的模型的性能,可以通过下面的命令来实现: + +```shell +python tools/test.py configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py work_dirs/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class/latest.pth +``` + +**注意**:为了使用 Waymo 数据集的评估方法,需要参考[说明文档](https://mmdetection3d.readthedocs.io/zh_CN/latest/datasets/waymo_det.html)并按照官方指导来准备与评估相关联的文件。 + +更多有关测试和推理的使用细节,请参考[案例 1](https://mmdetection3d.readthedocs.io/zh_CN/latest/1_exist_data_model.html) 。 diff --git a/docs/zh_cn/user_guides/backends_support.md b/docs/zh_cn/user_guides/backends_support.md new file mode 100755 index 0000000..80c535d --- /dev/null +++ b/docs/zh_cn/user_guides/backends_support.md @@ -0,0 +1,154 @@ +# 后端支持 + +我们支持不同的文件客户端后端:磁盘、Ceph 和 LMDB 等。下面是修改配置使之从 Ceph 加载和保存数据的示例。 + +## 从 Ceph 读取数据和标注文件 + +我们支持从 Ceph 加载数据和生成的标注信息文件(pkl 和 json): + +```python +# set file client backends as Ceph +backend_args = dict( + backend='petrel', + path_mapping=dict({ + './data/nuscenes/': + 's3://openmmlab/datasets/detection3d/nuscenes/', # replace the path with your data path on Ceph + 'data/nuscenes/': + 's3://openmmlab/datasets/detection3d/nuscenes/' # replace the path with your data path on Ceph + })) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + sample_groups=dict(Car=15), + classes=class_names, + # set file client for points loader to load training data + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + # set file client for data base sampler to load db info file + backend_args=backend_args) + +train_pipeline = [ + # set file client for loading training data + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, backend_args=backend_args), + # set file client for loading training data annotations + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, backend_args=backend_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[0.25, 0.25, 0.25], + global_rot_range=[0.0, 0.0], + rot_range=[-0.15707963267, 0.15707963267]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), 
+ dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + # set file client for loading validation/testing data + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4, backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] + +data = dict( + # set file client for loading training info files (.pkl) + train=dict( + type='RepeatDataset', + times=2, + dataset=dict(pipeline=train_pipeline, classes=class_names, backend_args=backend_args)), + # set file client for loading validation info files (.pkl) + val=dict(pipeline=test_pipeline, classes=class_names,backend_args=backend_args), + # set file client for loading testing info files (.pkl) + test=dict(pipeline=test_pipeline, classes=class_names, backend_args=backend_args)) +``` + +## 从 Ceph 读取预训练模型 + +```python +model = dict( + pts_backbone=dict( + _delete_=True, + type='NoStemRegNet', + arch='regnetx_1.6gf', + init_cfg=dict( + type='Pretrained', checkpoint='s3://openmmlab/checkpoints/mmdetection3d/regnetx_1.6gf'), # replace the path with your pretrained model path on Ceph + ... +``` + +## 从 Ceph 读取模型权重文件 + +```python +# replace the path with your checkpoint path on Ceph +load_from = 's3://openmmlab/checkpoints/mmdetection3d/v0.1.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20200620_230614-77663cd6.pth' +resume_from = None +workflow = [('train', 1)] +``` + +## 保存模型权重文件至 Ceph + +```python +# checkpoint saving +# replace the path with your checkpoint saving path on Ceph +checkpoint_config = dict(interval=1, max_keep_ckpts=2, out_dir='s3://openmmlab/mmdetection3d') +``` + +## EvalHook 保存最优模型权重文件至 Ceph + +```python +# replace the path with your checkpoint saving path on Ceph +evaluation = dict(interval=1, save_best='bbox', out_dir='s3://openmmlab/mmdetection3d') +``` + +## 训练日志保存至 Ceph + +训练后的训练日志会备份到指定的 Ceph 路径。 + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', out_dir='s3://openmmlab/mmdetection3d'), + ]) +``` + +您还可以通过设置 `keep_local = False` 备份到指定的 Ceph 路径后删除本地训练日志。 + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', out_dir='s3://openmmlab/mmdetection3d', keep_local=False), + ]) +``` diff --git a/docs/zh_cn/user_guides/config.md b/docs/zh_cn/user_guides/config.md new file mode 100755 index 0000000..f971a10 --- /dev/null +++ b/docs/zh_cn/user_guides/config.md @@ -0,0 +1,558 @@ +# 学习配置文件 + +MMDetection3D 和其他 OpenMMLab 仓库使用 [MMEngine 的配置文件系统](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/config.html)。它具有模块化和继承性设计,以便于进行各种实验。 + +## 配置文件的内容 + +MMDetection3D 采用模块化设计,所有功能的模块可以通过配置文件进行配置。以 PointPillars 为例,我们将根据不同的功能模块介绍配置文件的各个字段。 + +### 模型配置 + +在 MMDetection3D 的配置中,我们使用 `model` 字段来配置检测算法的组件。除了 `voxel_encoder`,`backbone` 等神经网络组件外,还需要 `data_preprocessor`,`train_cfg` 和 `test_cfg`。`data_preprocessor` 负责对数据加载器(dataloader)输出的每一批数据进行预处理。模型配置中的 
`train_cfg` 和 `test_cfg` 用于设置训练和测试组件的超参数。 + +```python +model = dict( + type='VoxelNet', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_layer=dict( + max_num_points=32, + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1], + voxel_size=[0.16, 0.16, 4], + max_voxels=(16000, 40000))), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=[0.16, 0.16, 4], + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1]), + middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + assign_per_class=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -0.6, 69.12, 39.68, -0.6], + [0, -39.68, -1.78, 69.12, 39.68, -1.78]], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', + beta=0.1111111111111111, + loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=False, + loss_weight=0.2)), + train_cfg=dict( + assigner=[ + dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1) + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) +``` + +### 数据集和评测器配置 + +在使用[执行器(Runner)](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/runner.html)进行训练、测试和验证时,我们需要配置[数据加载器](https://pytorch.org/docs/stable/data.html?highlight=data%20loader#torch.utils.data.DataLoader)。构建数据加载器需要设置数据集和数据处理流程。由于这部分的配置较为复杂,我们使用中间变量来简化数据加载器配置的编写。 + +```python +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] +input_modality = dict(use_lidar=True, use_camera=False) +metainfo = dict(classes=class_names) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=15, Cyclist=15), + points_loader=dict( + type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4)) + +train_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='LoadAnnotations3D', with_bbox_3d=True, 
with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler, use_ground_plane=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +eval_pipeline = [ + dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=6, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='kitti_infos_train.pkl', + data_prefix=dict(pts='training/velodyne_reduced'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + box_type_3d='LiDAR'))) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne_reduced'), + ann_file='kitti_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +``` + +[评测器](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/evaluation.html)用于计算训练模型在验证和测试数据集上的指标。评测器的配置由一个或一组评价指标配置组成: + +```python +val_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_val.pkl', + metric='bbox') +test_evaluator = val_evaluator +``` + +由于测试数据集没有标注文件,因此 MMDetection3D 中的 test_dataloader 和 test_evaluator 配置通常等于 val。如果您想要保存在测试数据集上的检测结果,则可以像这样编写配置: + +```python +# 在测试集上推理, +# 并将检测结果转换格式以用于提交结果 +test_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='testing/velodyne_reduced'), + ann_file='kitti_infos_test.pkl', + load_eval_anns=False, + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR')) +test_evaluator = dict( + type='KittiMetric', + ann_file=data_root + 'kitti_infos_test.pkl', + metric='bbox', + format_only=True, + submission_prefix='results/kitti-3class/kitti_results') +``` + +### 
训练和测试配置 + +MMEngine 的执行器使用循环(Loop)来控制训练,验证和测试过程。用户可以使用这些字段设置最大训练轮次和验证间隔: + +```python +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=80, + val_interval=2) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +``` + +### 优化配置 + +`optim_wrapper` 是配置优化相关设置的字段。优化器封装不仅提供了优化器的功能,还支持梯度裁剪、混合精度训练等功能。更多内容请看[优化器封装教程](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/optim_wrapper.html)。 + +```python +optim_wrapper = dict( # 优化器封装配置 + type='OptimWrapper', # 优化器封装类型,切换到 AmpOptimWrapper 启动混合精度训练 + optimizer=dict( # 优化器配置。支持 PyTorch 的各种优化器,请参考 https://pytorch.org/docs/stable/optim.html#algorithms + type='AdamW', lr=0.001, betas=(0.95, 0.99), weight_decay=0.01), + clip_grad=dict(max_norm=35, norm_type=2)) # 梯度裁剪选项。设置为 None 禁用梯度裁剪。使用方法请见 https://mmengine.readthedocs.io/zh_CN/latest/tutorials/optim_wrapper.html +``` + +`param_scheduler` 是配置调整优化器超参数(例如学习率和动量)的字段。用户可以组合多个调度器来创建所需要的参数调整策略。更多信息请参考[参数调度器教程](https://mmengine.readthedocs.io/zh_CN/latest/tutorials/param_scheduler.html)和[参数调度器 API 文档](https://mmengine.readthedocs.io/zh_CN/latest/api/optim.html#scheduler)。 + +```python +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=32, + eta_min=0.01, + begin=0, + end=32, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=48, + eta_min=1.0000000000000001e-07, + begin=32, + end=80, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=32, + eta_min=0.8947368421052632, + begin=0, + end=32, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=48, + eta_min=1, + begin=32, + end=80, + by_epoch=True, + convert_to_iter_based=True), +] +``` + +### 钩子配置 + +用户可以在训练、验证和测试循环上添加钩子,从而在运行期间插入一些操作。有两种不同的钩子字段,一种是 `default_hooks`,另一种是 `custom_hooks`。 + +`default_hooks` 是一个钩子配置字典,并且这些钩子是运行时所需要的。它们具有默认优先级,是不需要修改的。如果未设置,执行器将使用默认值。如果要禁用默认钩子,用户可以将其配置设置为 `None`。 + +```python +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=-1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='Det3DVisualizationHook')) +``` + +`custom_hooks` 是一个由其他钩子配置组成的列表。用户可以开发自己的钩子并将其插入到该字段中。 + +```python +custom_hooks = [] +``` + +### 运行配置 + +```python +default_scope = 'mmdet3d' # 寻找模块的默认注册器域。请参考 https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/registry.html + +env_cfg = dict( + cudnn_benchmark=False, # 是否启用 cudnn benchmark + mp_cfg=dict( # 多进程配置 + mp_start_method='fork', # 使用 fork 来启动多进程。'fork' 通常比 'spawn' 更快,但可能不安全。请参考 https://github.com/pytorch/pytorch/issues/1355 + opencv_num_threads=0), # 关闭 opencv 的多进程以避免系统超负荷 + dist_cfg=dict(backend='nccl')) # 分布式配置 + +vis_backends = [dict(type='LocalVisBackend')] # 可视化后端。请参考 https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/visualization.html +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +log_processor = dict( + type='LogProcessor', # 日志处理器用于处理运行时日志 + window_size=50, # 日志数值的平滑窗口 + by_epoch=True) # 是否使用 epoch 格式的日志。需要与训练循环的类型保持一致 + +log_level = 'INFO' # 日志等级 +load_from = None # 从给定路径加载模型检查点作为预训练模型。这不会恢复训练。 +resume = False # 是否从 `load_from` 中定义的检查点恢复。如果 `load_from` 为 None,它将恢复 `work_dir` 中的最近检查点。 +``` + +## 配置文件继承 + +在 `configs/_base_` 文件夹下有 4 个基本组件类型,分别是:数据集(dataset),模型(model),训练策略(schedule)和运行时的默认设置(default runtime)。许多方法,如 SECOND、PointPillars、PartA2、VoteNet 都能够很容易地构建出来。由 `_base_` 
下的组件组成的配置,被我们称为 _原始配置(primitive)_。 + +对于同一个文件夹下的所有配置,推荐**只有一个**对应的 _原始配置_ 文件。所有其他的配置文件都应该继承自这个 _原始配置_ 文件。这样就能保证配置文件的最大继承深度为 3。 + +为了便于理解,我们建议贡献者继承现有方法。例如,如果在 PointPillars 的基础上做了一些修改,用户可以首先通过指定 `_base_ = '../pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py'` 来继承基础的 PointPillars 结构,然后修改配置文件中的必要参数以完成继承。 + +如果您在构建一个与任何现有方法都不共享的全新方法,那么可以在 `configs` 文件夹下创建一个新的例如 `xxx_rcnn` 文件夹。 + +更多细节请参考 [MMEngine 配置文件教程](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/config.html)。 + +通过设置 `_base_` 字段,我们可以设置当前配置文件继承自哪些文件。 + +当 `_base_` 为文件路径字符串时,表示继承一个配置文件的内容。 + +```python +_base_ = './pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py' +``` + +当 `_base_` 是多个文件路径组成的列表式,表示继承多个文件。 + +```python +_base_ = [ + '../_base_/models/pointpillars_hv_secfpn_kitti.py', + '../_base_/datasets/kitti-3d-3class.py', + '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py' +] +``` + +如果需要检测配置文件,可以通过运行 `python tools/misc/print_config.py /PATH/TO/CONFIG` 来查看完整的配置。 + +### 忽略基础配置文件里的部分字段 + +有时,您也许会设置 `_delete_=True` 去忽略基础配置文件里的一些字段。您可以参考 [MMEngine 配置文件教程](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/config.html) 来获得一些简单的指导。 + +在 MMDetection3D 里,例如,修改以下 PointPillars 配置中的颈部网络: + +```python +model = dict( + type='MVXFasterRCNN', + data_preprocessor=dict(voxel_layer=dict(...)), + pts_voxel_encoder=dict(...), + pts_middle_encoder=dict(...), + pts_backbone=dict(...), + pts_neck=dict( + type='FPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + act_cfg=dict(type='ReLU'), + in_channels=[64, 128, 256], + out_channels=256, + start_level=0, + num_outs=3), + pts_bbox_head=dict(...)) +``` + +`FPN` 和 `SECONDFPN` 使用不同的关键字来构建: + +```python +_base_ = '../_base_/models/pointpillars_hv_fpn_nus.py' +model = dict( + pts_neck=dict( + _delete_=True, + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict(...)) +``` + +`_delete_=True` 将使用新的键去替换 `pts_neck` 字段内所有旧的键。 + +### 在配置文件里使用中间变量 + +配置文件里会使用一些中间变量,例如数据集里的 `train_pipeline`/`test_pipeline`。需要注意的是,当修改子配置文件中的中间变量时,用户需要再次将中间变量传递到对应的字段中。例如,我们想使用多尺度策略训练并测试 PointPillars,`train_pipeline`/`test_pipeline` 是我们想要修改的中间变量。 + +```python +_base_ = './nus-3d.py' +train_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=[0.95, 1.0, 1.05], + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + 
dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +``` + +我们首先定义新的 `train_pipeline`/`test_pipeline`,然后传递到数据加载器字段中。 + +### 复用 \_base\_ 文件中的变量 + +如果用户希望复用 base 文件中的变量,则可以通过使用 `{{_base_.xxx}}` 获取对应变量的拷贝。例如: + +```python +_base_ = './pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py' + +a = {{_base_.model}} # 变量 `a` 等于 `_base_` 中定义的 `model` +``` + +## 通过脚本参数修改配置 + +当使用 `tools/train.py` 或者 `tools/test.py` 提交工作时,您可以通过指定 `--cfg-options` 来修改配置文件。 + +- 更新配置字典的键值 + + 可以按照原始配置文件中字典的键值顺序指定配置选项。例如,使用 `--cfg-options model.backbone.norm_eval=False` 将模型主干网络中的所有 BN 模块都改为 `train` 模式。 + +- 更新配置列表中的键值 + + 在配置文件里,一些配置字典被包含在列表中,例如,训练流程 `train_dataloader.dataset.pipeline` 通常是一个列表,例如 `[dict(type='LoadPointsFromFile'), ...]`。如果您想要将训练流程中的 `'LoadPointsFromFile'` 改成 `'LoadPointsFromDict'`,您需要指定 `--cfg-options data.train.pipeline.0.type=LoadPointsFromDict`。 + +- 更新列表/元组的值 + + 如果要更新的值是列表或元组。例如,配置文件通常设置 `model.data_preprocessor.mean=[123.675, 116.28, 103.53]`。如果您想要改变这个均值,您需要指定 `--cfg-options model.data_preprocessor.mean="[127,127,127]"`。注意,引号 `"` 是支持列表/元组数据类型所必需的,并且在指定值的引号内**不允许**有空格。 + +## 配置文件名称风格 + +我们遵循以下样式来命名配置文件。建议贡献者遵循相同的风格。 + +``` +{algorithm name}_{model component names [component1]_[component2]_[...]}_{training settings}_{training dataset information}_{testing dataset information}.py +``` + +文件名分为五个部分。所有部分和组件用 `_` 连接,每个部分或组件内的单词应该用 `-` 连接。 + +- `{algorithm name}`:算法的名称。它可以是检测器的名称,例如 `pointpillars`、`fcos3d` 等。 +- `{model component names}`:算法中使用的组件名称,如 voxel_encoder、backbone、neck 等。例如 `second_secfpn_head-dcn-circlenms` 表示使用 SECOND 的 SparseEncoder,SECONDFPN,以及带有 DCN 和 circle NMS 的检测头。 +- `{training settings}`:训练设置的信息,例如批量大小,数据增强,损失函数策略,调度器以及训练轮次/迭代。例如 `8xb4-tta-cyclic-20e` 表示使用 8 个 gpu,每个 gpu 有 4 个数据样本,测试增强,余弦退火学习率,训练 20 个 epoch。缩写介绍: + - `{gpu x batch_per_gpu}`:GPU 数和每个 GPU 的样本数。`bN` 表示每个 GPU 上的批量大小为 N。例如 `4xb4` 是 4 个 GPU,每个 GPU 有 4 个样本数的缩写。 + - `{schedule}`:训练方案,可选项为 `schedule-2x`、`schedule-3x`、`cyclic-20e` 等。`schedule-2x` 和 `schedule-3x` 分别代表 24 epoch 和 36 epoch。`cyclic-20e` 表示 20 epoch。 +- `{training dataset information}`:训练数据集名,例如 `kitti-3d-3class`,`nus-3d`,`s3dis-seg`,`scannet-seg`,`waymoD5-3d-car`。这里 `3d` 表示数据集用于 3D 目标检测,`seg` 表示数据集用于点云分割。 +- `{testing dataset information}`(可选):当模型在一个数据集上训练,在另一个数据集上测试时的测试数据集名。如果没有注明,则表示训练和测试的数据集类型相同。 diff --git a/docs/zh_cn/user_guides/coord_sys_tutorial.md b/docs/zh_cn/user_guides/coord_sys_tutorial.md new file mode 100755 index 0000000..eb87788 --- /dev/null +++ b/docs/zh_cn/user_guides/coord_sys_tutorial.md @@ -0,0 +1,245 @@ +# 坐标系 + +## 概述 + +MMDetection3D 使用 3 种不同的坐标系。3D 目标检测领域中不同坐标系的存在是非常有必要的,因为对于各种 3D 数据采集设备来说,如激光雷达、深度相机等,使用的坐标系是不一致的,不同的 3D 数据集也遵循不同的数据格式。早期的工作,比如 SECOND、VoteNet 将原始数据转换为另一种格式,形成了一些后续工作也遵循的约定,使得不同坐标系之间的转换变得更加复杂。 + +尽管数据集和采集设备多种多样,但是通过总结 3D 目标检测的工作线,我们可以将坐标系大致分为三类: + +- 相机坐标系 -- 大多数相机的坐标系,在该坐标系中 y 轴正方向指向地面,x 轴正方向指向右侧,z 轴正方向指向前方。 + + ``` + 上 z 前 + | ^ + | / + | / + | / + |/ + 左 ------ 0 ------> x 右 + | + | + | + | + v + y 下 + ``` + +- 激光雷达坐标系 -- 众多激光雷达的坐标系,在该坐标系中 z 轴负方向指向地面,x 轴正方向指向前方,y 轴正方向指向左侧。 + + ``` + z 上 x 前 + ^ ^ + | / + | / + | / + |/ + y 左 <------ 0 ------ 右 + ``` + +- 深度坐标系 -- VoteNet、H3DNet 等模型使用的坐标系,在该坐标系中 z 轴负方向指向地面,x 轴正方向指向右侧,y 轴正方向指向前方。 + + ``` + z 上 y 前 + ^ ^ + | / + | / + | / + |/ + 左 ------ 0 ------> x 右 + ``` + +该教程中的坐标系定义实际上**不仅仅是定义三个轴**。对于形如 `` $$`(x, y, z, dx, dy, dz, 
r)`$$ `` 的框来说,我们的坐标系也定义了如何解释框的尺寸 `` $$`(dx, dy, dz)`$$ `` 和转向角 (yaw) 角度 `` $$`r`$$ ``。 + +三个坐标系的图示如下: + +![](https://raw.githubusercontent.com/open-mmlab/mmdetection3d/master/resources/coord_sys_all.png) + +上面三张图是 3D 坐标系,下面三张图是鸟瞰图。 + +以后我们将坚持使用本教程中定义的三个坐标系。 + +## 转向角 (yaw) 的定义 + +请参考[维基百科](https://en.wikipedia.org/wiki/Euler_angles#Tait%E2%80%93Bryan_angles)了解转向角的标准定义。在目标检测中,我们选择一个轴作为重力轴,并在垂直于重力轴的平面 `` $$`\Pi`$$ `` 上选取一个参考方向,那么参考方向的转向角为 0,在 `` $$`\Pi`$$ `` 上的其他方向有非零的转向角,其角度取决于其与参考方向的角度。 + +目前,对于所有支持的数据集,标注不包括俯仰角 (pitch) 和滚动角 (roll),这意味着我们在预测框和计算框之间的重叠时只需考虑转向角 (yaw)。 + +在 MMDetection3D 中,所有坐标系都是右手坐标系,这意味着如果从重力轴的负方向(轴的正方向指向人眼)看,转向角 (yaw) 沿着逆时针方向增加。 + +下图显示,在右手坐标系中,如果我们设定 x 轴正方向为参考方向,那么 y 轴正方向的转向角 (yaw) 为 `` $$`\frac{\pi}{2}`$$ ``。 + +``` + z 上 y 前 (yaw=0.5*pi) + ^ ^ + | / + | / + | / + |/ +左 (yaw=pi) ------ 0 ------> x 右 (yaw=0) +``` + +对于一个框来说,其转向角 (yaw) 的值等于其方向减去一个参考方向。在 MMDetection3D 的所有三个坐标系中,参考方向总是 x 轴的正方向,而如果一个框的转向角 (yaw) 为 0,则其方向被定义为与 x 轴平行。框的转向角 (yaw) 的定义如下图所示。 + +``` + y 前 + ^ 框的方向 (yaw=0.5*pi) + /|\ ^ + | /|\ + | ____|____ + | | | | + | | | | +__|____|____|____|______\ x 右 + | | | | / + | | | | + | |____|____| + | +``` + +## 框尺寸的定义 + +框尺寸的定义与转向角 (yaw) 的定义是分不开的。在上一节中,我们提到如果一个框的转向角 (yaw) 为 0,它的方向就被定义为与 x 轴平行。那么自然地,一个框对应于 x 轴的尺寸应该是 `` $$`dx`$$ ``。但是,这在某些数据集中并非总是如此(我们稍后会解决这个问题)。 + +下图展示了 x 轴和 `` $$`dx`$$ ``,y 轴和 `` $$`dy`$$ `` 对应的含义。 + +``` +y 前 + ^ 框的方向 (yaw=0.5*pi) + /|\ ^ + | /|\ + | ____|____ + | | | | + | | | | dx +__|____|____|____|______\ x 右 + | | | | / + | | | | + | |____|____| + | dy +``` + +注意框的方向总是和 `` $$`dx`$$ `` 边平行。 + +``` +y 前 + ^ _________ + /|\ | | | + | | | | + | | | | dy + | |____|____|____\ 框的方向 (yaw=0) + | | | | / +__|____|____|____|_________\ x 右 + | | | | / + | |____|____| + | dx + | +``` + +## 与支持的数据集的原始坐标系的关系 + +### KITTI + +KITTI 数据集的原始标注是在相机坐标系下的,详见 [get_label_anno](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/data_converter/kitti_data_utils.py)。在 MMDetection3D 中,为了在 KITTI 数据集上训练基于激光雷达的模型,首先将数据从相机坐标系转换到激光雷达坐标,详见 [get_ann_info](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/kitti_dataset.py)。对于训练基于视觉的模型,数据保持在相机坐标系不变。 + +在 SECOND 中,框的激光雷达坐标系定义如下(鸟瞰图): + +![](https://raw.githubusercontent.com/traveller59/second.pytorch/master/images/kittibox.png) + +对于每个框来说,尺寸为 `` $$`(w, l, h)`$$ ``,转向角 (yaw) 的参考方向为 y 轴正方向。更多细节请参考[代码库](https://github.com/traveller59/second.pytorch#concepts)。 + +我们的激光雷达坐标系有两处改变: + +- 转向角 (yaw) 被定义为右手而非左手,从而保持一致性; +- 框的尺寸为 `` $$`(l, w, h)`$$ `` 而非 `` $$`(w, l, h)`$$ ``,由于在 KITTI 数据集中 `` $$`w`$$ `` 对应 `` $$`dy`$$ ``,`` $$`l`$$ `` 对应 `` $$`dx`$$ ``。 + +### Waymo + +我们使用 Waymo 数据集的 KITTI 格式数据。因此,在我们的实现中 KITTI 和 Waymo 也共用相同的坐标系。 + +### NuScenes + +NuScenes 提供了一个评估工具包,其中每个框都被包装成一个 `Box` 实例。`Box` 的坐标系不同于我们的激光雷达坐标系,在 `Box` 坐标系中,前两个表示框尺寸的元素分别对应 `` $$`(dy, dx)`$$ `` 或者 `` $$`(w, l)`$$ ``,和我们的表示方法相反。更多细节请参考 NuScenes [教程](https://github.com/open-mmlab/mmdetection3d/blob/master/docs/zh_cn/datasets/nuscenes_det.md#notes)。 + +读者可以参考 [NuScenes 开发工具](https://github.com/nutonomy/nuscenes-devkit/tree/master/python-sdk/nuscenes/eval/detection),了解 [NuScenes 框](https://github.com/nutonomy/nuscenes-devkit/blob/2c6a752319f23910d5f55cc995abc547a9e54142/python-sdk/nuscenes/utils/data_classes.py#L457) 的定义和 [NuScenes 评估](https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/eval/detection/evaluate.py)的过程。 + +### Lyft + +就涉及坐标系而言,Lyft 和 NuScenes 共用相同的数据格式。 + +请参考[官方网站](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data)获取更多信息。 + +### ScanNet + +ScanNet 
的原始数据不是点云而是网格,需要在我们的深度坐标系下进行采样得到点云数据。对于 ScanNet 检测任务,框的标注是轴对齐的,并且转向角 (yaw) 始终是 0。因此,我们的深度坐标系中转向角 (yaw) 的方向对 ScanNet 没有影响。 + +### SUN RGB-D + +SUN RGB-D 的原始数据不是点云而是 RGB-D 图像。我们通过反投影,可以得到每张图像对应的点云,其在我们的深度坐标系下。但是,数据集的标注并不在我们的系统中,所以需要进行转换。 + +将原始标注转换为我们的深度坐标系下的标注的转换过程请参考 [sunrgbd_data_utils.py](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/data_converter/sunrgbd_data_utils.py)。 + +### S3DIS + +在我们的实现中,S3DIS 与 ScanNet 共用相同的坐标系。然而 S3DIS 是一个仅限于分割任务的数据集,因此没有标注是坐标系敏感的。 + +## 例子 + +### 框(在不同坐标系间)的转换 + +以相机坐标系和激光雷达坐标系间的转换为例: + +首先,对于点和框的中心点,坐标转换前后满足下列关系: + +- `` $$`x_{LiDAR}=z_{camera}`$$ `` +- `` $$`y_{LiDAR}=-x_{camera}`$$ `` +- `` $$`z_{LiDAR}=-y_{camera}`$$ `` + +然后,框的尺寸转换前后满足下列关系: + +- `` $$`dx_{LiDAR}=dx_{camera}`$$ `` +- `` $$`dy_{LiDAR}=dz_{camera}`$$ `` +- `` $$`dz_{LiDAR}=dy_{camera}`$$ `` + +最后,转向角 (yaw) 也应该被转换: + +- `` $$`r_{LiDAR}=-\frac{\pi}{2}-r_{camera}`$$ `` + +详见[此处](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/box_3d_mode.py)代码了解更多细节。 + +### 鸟瞰图 + +如果 3D 框是 `` $$`(x, y, z, dx, dy, dz, r)`$$ ``,相机坐标系下框的鸟瞰图是 `` $$`(x, z, dx, dz, -r)`$$ ``。转向角 (yaw) 符号取反是因为相机坐标系重力轴的正方向指向地面。 + +详见[此处](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/cam_box3d.py)代码了解更多细节。 + +### 框的旋转 + +我们将各种框的旋转设定为绕着重力轴逆时针旋转。因此,为了旋转一个 3D 框,我们首先需要计算新的框的中心,然后将旋转角度添加到转向角 (yaw)。 + +详见[此处](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/bbox/structures/cam_box3d.py)代码了解更多细节。 + +## 常见问题 + +#### Q1: 与框相关的算子是否适用于所有坐标系类型? + +否。例如,[用于 RoI-Aware Pooling 的算子](https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/roiaware_pool3d.py)只适用于深度坐标系和激光雷达坐标系下的框。由于如果从上方看,旋转是顺时针的,所以 KITTI 数据集[这里](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/core/evaluation/kitti_utils.py)的评估函数仅适用于相机坐标系下的框。 + +对于每个和框相关的算子,我们注明了其所适用的框类型。 + +#### Q2: 在每个坐标系中,三个轴是否分别准确地指向右侧、前方和地面? + +否。例如在 KITTI 中,从相机坐标系转换为激光雷达坐标系时,我们需要一个校准矩阵。 + +#### Q3: 框中转向角 (yaw) `` $$`2\pi`$$ `` 的相位差如何影响评估? + +对于交并比 (IoU) 计算,转向角 (yaw) 有 `` $$`2\pi`$$ `` 的相位差的两个框是相同的,所以不会影响评估。 + +对于角度预测评估,例如 NuScenes 中的 NDS 指标和 KITTI 中的 AOS 指标,会先对预测框的角度进行标准化,因此 `` $$`2\pi`$$ `` 的相位差不会改变结果。 + +#### Q4: 框中转向角 (yaw) `` $$`\pi`$$ `` 的相位差如何影响评估? 
+ +对于交并比 (IoU) 计算,转向角 (yaw) 有 `` $$`\pi`$$ `` 的相位差的两个框是相同的,所以不会影响评估。 + +然而,对于角度预测评估,这会导致完全相反的方向。 + +考虑一辆汽车,转向角 (yaw) 是汽车前部方向与 x 轴正方向之间的夹角。如果我们将该角度增加 `` $$`\pi`$$ ``,车前部将变成车后部。 + +对于某些类别,例如障碍物,前后没有区别,因此 `` $$`\pi`$$ `` 的相位差不会对角度预测分数产生影响。 diff --git a/docs/zh_cn/user_guides/data_pipeline.md b/docs/zh_cn/user_guides/data_pipeline.md new file mode 100755 index 0000000..cf50d70 --- /dev/null +++ b/docs/zh_cn/user_guides/data_pipeline.md @@ -0,0 +1,191 @@ +# 自定义数据预处理流程 + +## 数据预处理流程的设计 + +遵循一般惯例,我们使用 `Dataset` 和 `DataLoader` 来调用多个进程进行数据的加载。`Dataset` 将会返回与模型前向传播的参数所对应的数据项构成的字典。因为目标检测中的数据的尺寸可能无法保持一致(如点云中点的数量、真实标注框的尺寸等),我们在 MMCV 中引入一个 `DataContainer` 类型,来帮助收集和分发不同尺寸的数据。请参考[此处](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py)获取更多细节。 + +数据预处理流程和数据集之间是互相分离的两个部分,通常数据集定义了如何处理标注信息,而数据预处理流程定义了准备数据项字典的所有步骤。数据集预处理流程包含一系列的操作,每个操作将一个字典作为输入,并输出应用于下一个转换的一个新的字典。 + +我们将在下图中展示一个最经典的数据集预处理流程,其中蓝色框表示预处理流程中的各项操作。随着预处理的进行,每一个操作都会添加新的键值(图中标记为绿色)到输出字典中,或者更新当前存在的键值(图中标记为橙色)。 + +![](../../../resources/data_pipeline.png) + +预处理流程中的各项操作主要分为数据加载、预处理、格式化、测试时的数据增强。 + +接下来将展示一个用于 PointPillars 模型的数据集预处理流程的例子。 + +```python +train_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + pts_scale_ratio=1.0, + flip=False, + pcd_horizontal_flip=False, + pcd_vertical_flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +``` + +对于每项操作,我们将列出相关的被添加/更新/移除的字典项。 + +### 数据加载 + +`LoadPointsFromFile` + +- 添加:points + +`LoadPointsFromMultiSweeps` + +- 更新:points + +`LoadAnnotations3D` + +- 添加:gt_bboxes_3d, gt_labels_3d, gt_bboxes, gt_labels, pts_instance_mask, pts_semantic_mask, bbox3d_fields, pts_mask_fields, pts_seg_fields + +### 预处理 + +`GlobalRotScaleTrans` + +- 添加:pcd_trans, pcd_rotation, pcd_scale_factor +- 更新:points, \*bbox3d_fields + +`RandomFlip3D` + +- 添加:flip, pcd_horizontal_flip, pcd_vertical_flip +- 更新:points, \*bbox3d_fields + +`PointsRangeFilter` + +- 更新:points + +`ObjectRangeFilter` + +- 更新:gt_bboxes_3d, gt_labels_3d + +`ObjectNameFilter` + +- 更新:gt_bboxes_3d, gt_labels_3d + +`PointShuffle` + +- 更新:points + +`PointsRangeFilter` + +- 更新:points + +### 格式化 + +`DefaultFormatBundle3D` + +- 更新:points, gt_bboxes_3d, gt_labels_3d, gt_bboxes, gt_labels + 
+`Collect3D` + +- 添加:img_meta (由 `meta_keys` 指定的键值构成的 img_meta) +- 移除:所有除 `keys` 指定的键值以外的其他键值 + +### 测试时的数据增强 + +`MultiScaleFlipAug` + +- 更新: scale, pcd_scale_factor, flip, flip_direction, pcd_horizontal_flip, pcd_vertical_flip (与这些指定的参数对应的增强后的数据列表) + +## 扩展并使用自定义数据集预处理方法 + +1. 在任意文件中写入新的数据集预处理方法,如 `my_pipeline.py`,该预处理方法的输入和输出均为字典 + + ```python + from mmdet.datasets import PIPELINES + + @PIPELINES.register_module() + class MyTransform: + + def __call__(self, results): + results['dummy'] = True + return results + ``` + +2. 导入新的预处理方法类 + + ```python + from .my_pipeline import MyTransform + ``` + +3. 在配置文件中使用该数据集预处理方法 + + ```python + train_pipeline = [ + dict( + type='LoadPointsFromFile', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + backend_args=backend_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='MyTransform'), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + ] + ``` diff --git a/docs/zh_cn/user_guides/dataset_prepare.md b/docs/zh_cn/user_guides/dataset_prepare.md new file mode 100755 index 0000000..094cd01 --- /dev/null +++ b/docs/zh_cn/user_guides/dataset_prepare.md @@ -0,0 +1,179 @@ +# 数据预处理 + +## 在数据预处理前 + +我们推荐用户将数据集的路径软链接到 `$MMDETECTION3D/data`。如果你的文件夹结构和以下所展示的结构不一致,你可能需要改变配置文件中相应的数据路径。 + +``` +mmdetection3d +├── mmdet3d +├── tools +├── configs +├── data +│ ├── nuscenes +│ │ ├── maps +│ │ ├── samples +│ │ ├── sweeps +│ │ ├── v1.0-test +| | ├── v1.0-trainval +│ ├── kitti +│ │ ├── ImageSets +│ │ ├── testing +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── velodyne +│ │ ├── training +│ │ │ ├── calib +│ │ │ ├── image_2 +│ │ │ ├── label_2 +│ │ │ ├── velodyne +│ ├── waymo +│ │ ├── waymo_format +│ │ │ ├── training +│ │ │ ├── validation +│ │ │ ├── testing +│ │ │ ├── gt.bin +│ │ ├── kitti_format +│ │ │ ├── ImageSets +│ ├── lyft +│ │ ├── v1.01-train +│ │ │ ├── v1.01-train (训练数据) +│ │ │ ├── lidar (训练激光雷达) +│ │ │ ├── images (训练图片) +│ │ │ ├── maps (训练地图) +│ │ ├── v1.01-test +│ │ │ ├── v1.01-test (测试数据) +│ │ │ ├── lidar (测试激光雷达) +│ │ │ ├── images (测试图片) +│ │ │ ├── maps (测试地图) +│ │ ├── train.txt +│ │ ├── val.txt +│ │ ├── test.txt +│ │ ├── sample_submission.csv +│ ├── s3dis +│ │ ├── meta_data +│ │ ├── Stanford3dDataset_v1.2_Aligned_Version +│ │ ├── collect_indoor3d_data.py +│ │ ├── indoor3d_util.py +│ │ ├── README.md +│ ├── scannet +│ │ ├── meta_data +│ │ ├── scans +│ │ ├── scans_test +│ │ ├── batch_load_scannet_data.py +│ │ ├── load_scannet_data.py +│ │ ├── scannet_utils.py +│ │ ├── README.md +│ ├── sunrgbd +│ │ ├── OFFICIAL_SUNRGBD +│ │ ├── matlab +│ │ ├── sunrgbd_data.py +│ │ ├── sunrgbd_utils.py +│ │ ├── README.md + +``` + +## 数据下载和预处理 + +### KITTI + +在[这里](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)下载 KITTI 的 3D 检测数据。通过运行以下指令对 KITTI 数据进行预处理: + +```bash +mkdir ./data/kitti/ && mkdir ./data/kitti/ImageSets + +# 下载数据划分文件 +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/test.txt --no-check-certificate 
--content-disposition -O ./data/kitti/ImageSets/test.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/train.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/train.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/val.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/val.txt +wget -c https://raw.githubusercontent.com/traveller59/second.pytorch/master/second/data/ImageSets/trainval.txt --no-check-certificate --content-disposition -O ./data/kitti/ImageSets/trainval.txt +``` + +然后通过运行以下指令生成信息文件: + +```bash +python tools/create_data.py kitti --root-path ./data/kitti --out-dir ./data/kitti --extra-tag kitti +``` + +在使用 slurm 的环境下,用户需要使用下面的指令: + +```bash +sh tools/create_data.sh kitti +``` + +### Waymo + +在[这里](https://waymo.com/open/download/)下载 Waymo 公开数据集 1.2 版本,在[这里](https://drive.google.com/drive/folders/18BVuF_RYJF0NjZpt8SnfzANiakoRMf0o?usp=sharing)下载其数据划分文件。然后,将 `.tfrecord` 文件置于 `data/waymo/waymo_format/` 目录下的相应位置,并将数据划分的 `.txt` 文件置于 `data/waymo/kitti_format/ImageSets` 目录下。在[这里](https://console.cloud.google.com/storage/browser/waymo_open_dataset_v_1_2_0/validation/ground_truth_objects)下载验证集的真实标签(`.bin` 文件)并将其置于 `data/waymo/waymo_format/`。提示:你可以使用 `gsutil` 来用命令下载大规模的数据集。更多细节请参考此[工具](https://github.com/RalphMao/Waymo-Dataset-Tool)。完成以上各步后,可以通过运行以下指令对 Waymo 数据进行预处理: + +```bash +python tools/create_data.py waymo --root-path ./data/waymo/ --out-dir ./data/waymo/ --workers 128 --extra-tag waymo +``` + +注意: + +- 如果你的硬盘空间大小不足以存储转换后的数据,你可以将 `--out-dir` 参数设定为别的路径。你只需要记得在那个路径下创建文件夹并下载数据,然后在数据预处理完成后将其链接回 `data/waymo/kitti_format` 即可。 + +- 如果你想在 Waymo 上进行更快的评估,你可以下载已经预处理好的[元信息文件](https://download.openmmlab.com/mmdetection3d/data/waymo/idx2metainfo.pkl)并将其放置在 `data/waymo/waymo_format/` 目录下。接着,你可以按照以下来修改数据集的配置: + + ```python + val_evaluator = dict( + type='WaymoMetric', + ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl', + waymo_bin_file='./data/waymo/waymo_format/gt.bin', + data_root='./data/waymo/waymo_format', + backend_args=backend_args, + convert_kitti_format=True, + idx2metainfo='data/waymo/waymo_format/idx2metainfo.pkl' + ) + ``` + + 目前这种方式仅限于纯点云检测任务。 + +### NuScenes + +在[这里](https://www.nuscenes.org/download)下载 nuScenes 数据集 1.0 版本的完整数据文件。通过运行以下指令对 nuScenes 数据进行预处理: + +```bash +python tools/create_data.py nuscenes --root-path ./data/nuscenes --out-dir ./data/nuscenes --extra-tag nuscenes +``` + +### Lyft + +在[这里](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data)下载 Lyft 3D 检测数据。通过运行以下指令对 Lyft 数据进行预处理: + +```bash +python tools/create_data.py lyft --root-path ./data/lyft --out-dir ./data/lyft --extra-tag lyft --version v1.01 +python tools/data_converter/lyft_data_fixer.py --version v1.01 --root-folder ./data/lyft +``` + +注意,为了文件结构的清晰性,我们遵从了 Lyft 数据原先的文件夹名称。请按照上面展示出的文件结构对原始文件夹进行重命名。同样值得注意的是,第二行命令的目的是为了修复一个损坏的激光雷达数据文件。更多细节请参考[该讨论](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000)。 + +### S3DIS、ScanNet 和 SUN RGB-D + +请参考 S3DIS [README](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/data/s3dis/README.md) 文件以对其进行数据预处理。 + +请参考 ScanNet [README](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/data/scannet/README.md) 文件以对其进行数据预处理。 + +请参考 SUN RGB-D [README](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/data/sunrgbd/README.md) 文件以对其进行数据预处理。 + +### 自定义数据集 + 
+关于如何使用自定义数据集,请参考[自定义数据集](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/zh_cn/advanced_guides/customize_dataset.md)。 + +### 更新数据信息 + +如果你之前已经使用 v1.0.0rc1-v1.0.0rc4 版的 mmdetection3d 创建数据信息,现在你想使用最新的 v1.1.0 版 mmdetection3d,你需要更新数据信息文件。 + +```bash +python tools/dataset_converters/update_infos_to_v2.py --dataset ${DATA_SET} --pkl-path ${PKL_PATH} --out-dir ${OUT_DIR} +``` + +- `--dataset`:数据集名。 +- `--pkl-path`:指定数据信息 pkl 文件路径。 +- `--out-dir`:输出数据信息 pkl 文件目录。 + +例如: + +```bash +python tools/dataset_converters/update_infos_to_v2.py --dataset kitti --pkl-path ./data/kitti/kitti_infos_trainval.pkl --out-dir ./data/kitti +``` diff --git a/docs/zh_cn/user_guides/index.rst b/docs/zh_cn/user_guides/index.rst new file mode 100755 index 0000000..cd11405 --- /dev/null +++ b/docs/zh_cn/user_guides/index.rst @@ -0,0 +1,15 @@ +.. toctree:: + :maxdepth: 3 + + 2_new_data_model.md + backends_support.md + config.md + coord_sys_tutorial.md + data_pipeline.md + dataset_prepare.md + inference.md + index.rst + model_deployment.md + train_test.md + useful_tools.md + visualization.md diff --git a/docs/zh_cn/user_guides/inference.md b/docs/zh_cn/user_guides/inference.md new file mode 100755 index 0000000..10bec99 --- /dev/null +++ b/docs/zh_cn/user_guides/inference.md @@ -0,0 +1,89 @@ +# 推理 + +## 介绍 + +我们提供了多模态/单模态(基于激光雷达/图像)、室内/室外场景的 3D 检测和 3D 语义分割样例的脚本,预训练模型可以从 [Model Zoo](https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/docs/zh_cn/model_zoo.md) 下载。我们也提供了 KITTI、SUN RGB-D、nuScenes 和 ScanNet 数据集的预处理样本数据,你可以根据我们的预处理步骤使用任何其它数据。 + +## 测试 + +### 3D 检测 + +#### 单模态样例 + +在点云数据上测试 3D 检测器,运行: + +```shell +python demo/pcd_demo.py ${PCD_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--score-thr ${SCORE_THR}] [--out-dir ${OUT_DIR}] [--show] +``` + +点云和预测 3D 框的可视化结果会被保存在 `${OUT_DIR}/PCD_NAME`,它可以使用 [MeshLab](http://www.meshlab.net/) 打开。注意如果你设置了 `--show`,通过 [Open3D](http://www.open3d.org/) 可以在线显示预测结果。 + +在 KITTI 数据上测试 [SECOND](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/second) 模型: + +```shell +python demo/pcd_demo.py demo/data/kitti/000008.bin configs/second/second_hv-secfpn_8xb6-80e_kitti-3d-car.py checkpoints/second_hv-secfpn_8xb6-80e_kitti-3d-car_20200620_230238-393f000c.pth +``` + +在 SUN RGB-D 数据上测试 [VoteNet](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/votenet) 模型: + +```shell +python demo/pcd_demo.py demo/data/sunrgbd/sunrgbd_000017.bin configs/votenet/votenet_8xb16_sunrgbd-3d.py checkpoints/votenet_8xb16_sunrgbd-3d_20200620_230238-4483c0c0.pth +``` + +如果你正在使用的 mmdetection3d 版本 >= 0.6.0,记住转换 VoteNet 的模型权重文件,查看 [README](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/votenet/README.md/) 来获取转换模型权重文件的详细说明。 + +#### 多模态样例 + +在多模态数据(通常是点云和图像)上测试 3D 检测器,运行: + +```shell +python demo/multi_modality_demo.py ${PCD_FILE} ${IMAGE_FILE} ${ANNOTATION_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--score-thr ${SCORE_THR}] [--out-dir ${OUT_DIR}] [--show] +``` + +`ANNOTATION_FILE` 需要提供 3D 到 2D 的仿射矩阵,可视化结果会被保存在 `${OUT_DIR}/PCD_NAME`,其中包括点云、图像、预测的 3D 框以及它们在图像上的投影。 + +在 KITTI 数据上测试 [MVX-Net](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/mvxnet) 模型: + +```shell +python demo/multi_modality_demo.py demo/data/kitti/000008.bin demo/data/kitti/000008.png demo/data/kitti/000008.pkl configs/mvxnet/mvx_fpn-dv-second-secfpn_8xb2-80e_kitti-3d-3class.py checkpoints/mvx_fpn-dv-second-secfpn_8xb2-80e_kitti-3d-3class_20200621_003904-10140f2d.pth +``` + +在 SUN RGB-D 数据上测试 
[ImVoteNet](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/imvotenet) 模型: + +```shell +python demo/multi_modality_demo.py demo/data/sunrgbd/sunrgbd_000017.bin demo/data/sunrgbd/sunrgbd_000017.jpg demo/data/sunrgbd/sunrgbd_000017_infos.pkl configs/imvotenet/imvotenet_stage2_8xb16_sunrgbd.py checkpoints/imvotenet_stage2_8xb16_sunrgbd_20210323_184021-d44dcb66.pth +``` + +### 单目 3D 检测 + +在图像数据上测试单目 3D 检测器,运行: + +```shell +python demo/mono_det_demo.py ${IMAGE_FILE} ${ANNOTATION_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--out-dir ${OUT_DIR}] [--show] +``` + +`ANNOTATION_FILE` 需要提供 3D 到 2D 的仿射矩阵(相机内参矩阵),可视化结果会被保存在 `${OUT_DIR}/PCD_NAME`,其中包括图像以及预测 3D 框在图像上的投影。 + +在 nuScenes 数据上测试 [FCOS3D](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/fcos3d) 模型: + +```shell +python demo/mono_det_demo.py demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.pkl configs/fcos3d/fcos3d_r101-caffe-dcn-fpn-head-gn_8xb2-1x_nus-mono3d_finetune.py checkpoints/fcos3d_r101-caffe-dcn-fpn-head-gn_8xb2-1x_nus-mono3d_finetune_20210717_095645-8d806dc2.pth +``` + +注意当对翻转图像可视化单目 3D 检测结果是,相机内参矩阵也应该相应修改。在 PR [#744](https://github.com/open-mmlab/mmdetection3d/pull/744) 中可以了解更多细节和示例。 + +### 3D 分割 + +在点云数据上测试 3D 分割器,运行: + +```shell +python demo/pc_seg_demo.py ${PCD_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--out-dir ${OUT_DIR}] [--show] +``` + +可视化结果会被保存在 `${OUT_DIR}/PCD_NAME`,其中包括点云以及预测的 3D 分割掩码。 + +在 ScanNet 数据上测试 [PointNet++ (SSG)](https://github.com/open-mmlab/mmdetection3d/tree/master/configs/pointnet2) 模型: + +```shell +python demo/pc_seg_demo.py demo/data/scannet/scene0000_00.bin configs/pointnet2/pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py checkpoints/pointnet2_ssg_2xb16-cosine-200e_scannet-seg_20210514_143644-ee73704a.pth +``` diff --git a/docs/zh_cn/user_guides/model_deployment.md b/docs/zh_cn/user_guides/model_deployment.md new file mode 100755 index 0000000..f66172c --- /dev/null +++ b/docs/zh_cn/user_guides/model_deployment.md @@ -0,0 +1,4 @@ +# 模型部署(待更新) + +MMDet3D 1.1 完全基于 [MMDeploy](https://mmdeploy.readthedocs.io/) 來部署模型。 +我们将在下一个版本完善这个文档。 diff --git a/docs/zh_cn/user_guides/train_test.md b/docs/zh_cn/user_guides/train_test.md new file mode 100755 index 0000000..73d908c --- /dev/null +++ b/docs/zh_cn/user_guides/train_test.md @@ -0,0 +1,260 @@ +# 使用已有模型在标准数据集上进行推理和训练 + +## 使用已有模型进行推理 + +这里我们提供了评测 SUNRGBD、ScanNet、KITTI 等多个数据集的测试脚本。 + +请参考[开始](https://mmdetection3d.readthedocs.io/zh_CN/dev-1.x/inference.html)下的验证/样例来获取更容易集成到其它项目和基本样例的高级接口。 + +### 在标准数据集上测试已有模型 + +- 单显卡 +- CPU +- 单节点多显卡 +- 多节点 + +你可以通过以下命令来测试数据集: + +```shell +# 单块显卡测试 +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] [--show] [--show-dir ${SHOW_DIR}] + +# CPU:禁用显卡并运行单块 CPU 测试脚本(实验性) +export CUDA_VISIBLE_DEVICES=-1 +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] [--show] [--show-dir ${SHOW_DIR}] + +# 多块显卡测试 +./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] +``` + +**注意**: + +目前我们只支持 SMOKE 的 CPU 推理测试。 + +可选参数: + +- `--show`:如果被指定,检测结果会在静默模式下被保存,用于调试和可视化,但只在单块 GPU 测试的情况下生效,和 `--show-dir` 搭配使用。 +- `--show-dir`:如果被指定,检测结果会被保存在指定文件夹下的 `***_points.obj` 和 `***_pred.obj` 文件中,用于调试和可视化,但只在单块 GPU 测试的情况下生效,对于这个选项,图形化界面在你的环境中不是必需的。 + +所有和评估相关的参数在相应的数据集配置的 `test_evaluator` 中设置。例如 `test_evaluator = dict(type='KittiMetric', 
ann_file=data_root + 'kitti_infos_val.pkl', pklfile_prefix=None, submission_prefix=None)` + +参数: + +- `type`:相对应的评价指标名,通常和数据集相关联。 +- `ann_file`:标注文件路径。 +- `pklfile_prefix`:可选参数。输出结果保存成 pickle 格式的文件名。如果没有指定,结果将不会保存成文件。 +- `submission_prefix`:可选参数。结果将被保存到文件中,然后你可以将它上传到官方评估服务器中。 + +示例: + +假定你已经把模型权重文件下载到 `checkpoints/` 文件夹下, + +1. 在 ScanNet 数据集上测试 VoteNet,保存模型,可视化预测结果 + + ```shell + python tools/test.py configs/votenet/votenet_8xb8_scannet-3d.py \ + checkpoints/votenet_8x8_scannet-3d-18class_20200620_230238-2cea9c3a.pth \ + --show --show-dir ./data/scannet/show_results + ``` + +2. 在 ScanNet 数据集上测试 VoteNet,保存模型,可视化预测结果,可视化真实标签,计算 mAP + + ```shell + python tools/test.py configs/votenet/votenet_8xb8_scannet-3d.py \ + checkpoints/votenet_8x8_scannet-3d-18class_20200620_230238-2cea9c3a.pth \ + --show --show-dir ./data/scannet/show_results + ``` + +3. 在 ScanNet 数据集上测试 VoteNet(不保存测试结果),计算 mAP + + ```shell + python tools/test.py configs/votenet/votenet_8xb8_scannet-3d.py \ + checkpoints/votenet_8x8_scannet-3d-18class_20200620_230238-2cea9c3a.pth + ``` + +4. 使用 8 块显卡在 KITTI 数据集上测试 SECOND,计算 mAP + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py \ + checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20200620_230238-9208083a.pth + ``` + +5. 使用 8 块显卡在 nuScenes 数据集上测试 PointPillars,生成提交给官方评测服务器的 json 文件 + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py \ + checkpoints/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405-2fa62f3d.pth \ + --cfg-options 'test_evaluator.jsonfile_prefix=./pointpillars_nuscenes_results' + ``` + + 生成的结果会保存在 `./pointpillars_nuscenes_results` 目录。 + +6. 使用 8 块显卡在 KITTI 数据集上测试 SECOND,生成提交给官方评测服务器的 txt 文件 + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py \ + checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20200620_230238-9208083a.pth \ + --cfg-options 'test_evaluator.pklfile_prefix=./second_kitti_results' 'submission_prefix=./second_kitti_results' + ``` + + 生成的结果会保存在 `./second_kitti_results` 目录。 + +7. 使用 8 块显卡在 Lyft 数据集上测试 PointPillars,生成提交给排行榜的 pkl 文件 + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/hv_pointpillars_fpn_sbn-2x8_2x_lyft-3d.py \ + checkpoints/hv_pointpillars_fpn_sbn-2x8_2x_lyft-3d_latest.pth \ + --cfg-options 'test_evaluator.jsonfile_prefix=results/pp_lyft/results_challenge' \ + 'test_evaluator.csv_savepath=results/pp_lyft/results_challenge.csv' \ + 'test_evaluator.pklfile_prefix=results/pp_lyft/results_challenge.pkl' + ``` + + **注意**:为了生成 Lyft 数据集的提交结果,`--eval-options` 必须指定 `csv_savepath`。生成 csv 文件后,你可以使用[网站](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/submit)上给出的 kaggle 命令提交结果。 + + 注意在 [Lyft 数据集的配置文件](../../configs/_base_/datasets/lyft-3d.py),`test` 中的 `ann_file` 值为 `lyft_infos_test.pkl`,是没有标注的 Lyft 官方测试集。要在验证数据集上测试,请把它改为 `lyft_infos_val.pkl`。 + +8. 
使用 8 块显卡在 waymo 数据集上测试 PointPillars,使用 waymo 度量方法计算 mAP + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py \ + checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth \ + --cfg-options 'test_evaluator.pklfile_prefix=results/waymo-car/kitti_results' \ + 'test_evaluator.submission_prefix=results/waymo-car/kitti_results' + ``` + + **注意**:对于 waymo 数据集上的评估,请根据[说明](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md/)构建二进制文件 `compute_detection_metrics_main` 来做度量计算,并把它放在 `mmdet3d/core/evaluation/waymo_utils/`。(在使用 bazel 构建 `compute_detection_metrics_main` 时,有时会出现 `'round' is not a member of 'std'` 的错误,我们只需要把那个文件中 `round` 前的 `std::` 去掉。)二进制文件生成时需要在 `--eval-options` 中给定 `pklfile_prefix`。对于度量方法,`waymo` 是推荐的官方评估策略,目前 `kitti` 评估是依照 KITTI 而来的,每个难度的结果和 KITTI 的定义并不完全一致。目前大多数物体都被标记为0难度,会在未来修复。它的不稳定原因包括评估的计算大、转换后的数据缺乏遮挡和截断、难度的定义不同以及平均精度的计算方法不同。 + +9. 使用 8 块显卡在 waymo 数据集上测试 PointPillars,生成 bin 文件并提交到排行榜 + + ```shell + ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py \ + checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth \ + --cfg-options 'test_evaluator.pklfile_prefix=results/waymo-car/kitti_results' \ + 'test_evaluator.submission_prefix=results/waymo-car/kitti_results' + ``` + + **注意**:生成 bin 文件后,你可以简单地构建二进制文件 `create_submission`,并根据[说明](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md/)创建提交的文件。要在验证服务器上评测验证数据集,你也可以用同样的方式生成提交的文件。 + +## 在标准数据集上训练预定义模型 + +MMDetection3D 分别用 `MMDistributedDataParallel` and `MMDataParallel` 实现了分布式训练和非分布式训练。 + +所有的输出(日志文件和模型权重文件)都会被保存到工作目录下,通过配置文件里的 `work_dir` 指定。 + +默认我们每过一个周期都在验证数据集上评测模型,你可以通过在训练配置里添加间隔参数来改变评测的时间间隔: + +```python +train_cfg = dict(type='EpochBasedTrainLoop', val_interval=1) # 每12个周期评估一次模型 +``` + +**重要**:配置文件中的默认学习率对应 8 块显卡,配置文件名里有具体的批量大小,比如 '2xb8' 表示一共 8 块显卡,每块显卡 2 个样本。 +根据 [Linear Scaling Rule](https://arxiv.org/abs/1706.02677),当你使用不同数量的显卡或每块显卡有不同数量的图像时,需要依批量大小按比例调整学习率。如果用 4 块显卡、每块显卡 2 幅图像时学习率为 0.01,那么用 16 块显卡、每块显卡 4 幅图像时学习率应设为 0.08。然而,由于大多数模型使用 ADAM 而不是 SGD 进行优化,上述规则可能并不适用,用户需要自己调整学习率。 + +### 使用单块显卡进行训练 + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +如果你想在命令中指定工作目录,添加参数 `--work-dir ${YOUR_WORK_DIR}`。 + +### 使用 CPU 进行训练 (实验性) + +在 CPU 上训练的过程与单 GPU 训练一致。 我们只需要在训练过程之前禁用显卡。 + +```shell +export CUDA_VISIBLE_DEVICES=-1 +``` + +之后运行单显卡训练脚本即可。 + +**注意**: + +目前,大多数点云相关算法都依赖于 3D CUDA 算子,无法在 CPU 上进行训练。 一些单目 3D 物体检测算法,例如 FCOS3D、SMOKE 可以在 CPU 上进行训练。我们不推荐用户使用 CPU 进行训练,这太过缓慢。我们支持这个功能是为了方便用户在没有显卡的机器上调试某些特定的方法。 + +### 使用多块显卡进行训练 + +```shell +./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments] +``` + +可选参数: + +- `--cfg-options 'Key=value'`:覆盖使用的配置中的一些设定。 + +### 使用多个机器进行训练 + +如果要在 [slurm](https://slurm.schedmd.com/) 管理的集群上运行 MMDectection3D,你可以使用 `slurm_train.sh` 脚本(该脚本也支持单机训练) + +```shell +[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} +``` + +下面是一个使用 16 块显卡在 dev 分区上训练 Mask R-CNN 的示例: + +```shell +GPUS=16 ./tools/slurm_train.sh dev pp_kitti_3class configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py /nfs/xxxx/pp_kitti_3class +``` + +你可以查看 [slurm_train.sh](https://github.com/open-mmlab/mmdetection/blob/master/tools/slurm_train.sh) 来获取所有的参数和环境变量。 + +如果您想使用由 ethernet 连接起来的多台机器, 您可以使用以下命令: + +在第一台机器上: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR ./tools/dist_train.sh $CONFIG $GPUS 
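+# 注:NNODES 为机器总数,NODE_RANK 为当前机器的序号(第一台机器为 0);
+# MASTER_ADDR 需设为第一台机器的 IP,MASTER_PORT 为任一空闲端口,且所有机器上取值须保持一致。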
+``` + +在第二台机器上: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR ./tools/dist_train.sh $CONFIG $GPUS +``` + +但是,如果您不使用高速网路连接这几台机器的话,训练将会非常慢。 + +### 在单个机器上启动多个任务 + +如果你在单个机器上启动多个任务,比如,在具有8块显卡的机器上进行2个4块显卡训练的任务,你需要为每个任务指定不同的端口(默认为29500)以避免通信冲突。 + +如果你使用 `dist_train.sh` 启动训练任务,可以在命令中设置端口: + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +如果你使用 Slurm 启动训练任务,有两种方式指定端口: + +1. 通过 `--cfg-options` 设置端口,这是更推荐的,因为它不改变原来的配置 + + ```shell + CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} --cfg-options 'env_cfg.dist_cfg.port=29500' + CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} --cfg-options 'env_cfg.dist_cfg.port=29501' + ``` + +2. 修改配置文件(通常在配置文件的倒数第6行)来设置不同的通信端口 + + 在 `config1.py` 中, + + ```python + env_cfg = dict( + dist_cfg=dict(backend='nccl', port=29500) + ) + ``` + + 在 `config2.py` 中, + + ```python + env_cfg = dict( + dist_cfg=dict(backend='nccl', port=29501) + ) + ``` + + 然后,你可以使用 `config1.py` and `config2.py` 启动两个任务 + + ```shell + CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} + CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} + ``` diff --git a/docs/zh_cn/user_guides/useful_tools.md b/docs/zh_cn/user_guides/useful_tools.md new file mode 100755 index 0000000..770360d --- /dev/null +++ b/docs/zh_cn/user_guides/useful_tools.md @@ -0,0 +1,213 @@ +# 有用的工具 + +我们在 `tools/` 文件夹路径下提供了许多有用的工具。 + +## 日志分析 + +给定一个训练的日志文件,您可以绘制出 loss/mAP 曲线。首先需要运行 `pip install seaborn` 安装依赖包。 + +![loss曲线图](../../../resources/loss_curve.png) + +```shell +python tools/analysis_tools/analyze_logs.py plot_curve [--keys ${KEYS}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] [--mode ${MODE}] [--interval ${INTERVAL}] +``` + +**注意**: 如果您想绘制的指标是在验证阶段计算得到的,您需要添加一个标志 `--mode eval` ,如果您每经过一个 `${INTERVAL}` 的间隔进行评估,您需要增加一个参数 `--interval ${INTERVAL}`。 + +示例: + +- 绘制出某次运行的分类 loss。 + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls --legend loss_cls + ``` + +- 绘制出某次运行的分类和回归 loss,并且保存图片为 pdf 格式。 + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls loss_bbox --out losses.pdf + ``` + +- 在同一张图片中比较两次运行的 bbox mAP。 + + ```shell + # 根据 Car_3D_moderate_strict 在 KITTI 上评估 PartA2 和 second。 + python tools/analysis_tools/analyze_logs.py plot_curve tools/logs/PartA2.log.json tools/logs/second.log.json --keys KITTI/Car_3D_moderate_strict --legend PartA2 second --mode eval --interval 1 + # 根据 Car_3D_moderate_strict 在 KITTI 上分别对车和 3 类评估 PointPillars。 + python tools/analysis_tools/analyze_logs.py plot_curve tools/logs/pp-3class.log.json tools/logs/pp.log.json --keys KITTI/Car_3D_moderate_strict --legend pp-3class pp --mode eval --interval 2 + ``` + +您也能计算平均训练速度。 + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time log.json [--include-outliers] +``` + +预期输出应该如下所示。 + +``` +-----Analyze train time of work_dirs/some_exp/20190611_192040.log.json----- +slowest epoch 11, average time is 1.2024 +fastest epoch 1, average time is 1.1909 +time std over epochs is 0.0028 +average iter time: 1.1959 s/iter +``` + +  + +## 模型部署 + +**注意**:此工具仍然处于试验阶段,目前只有 SECOND 支持用 [`TorchServe`](https://pytorch.org/serve/) 部署,我们将会在未来支持更多的模型。 
+ +为了使用 [`TorchServe`](https://pytorch.org/serve/) 部署 `MMDetection3D` 模型,您可以遵循以下步骤: + +### 1. 将模型从 MMDetection3D 转换到 TorchServe + +```shell +python tools/deployment/mmdet3d2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +**Note**: ${MODEL_STORE} 需要为文件夹的绝对路径。 + +### 2. 构建 `mmdet3d-serve` 镜像 + +```shell +docker build -t mmdet3d-serve:latest docker/serve/ +``` + +### 3. 运行 `mmdet3d-serve` + +查看官网文档来 [使用 docker 运行 TorchServe](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment)。 + +为了在 GPU 上运行,您需要安装 [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)。您可以忽略 `--gpus` 参数,从而在 CPU 上运行。 + +例子: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=$MODEL_STORE,target=/home/model-server/model-store \ +mmdet3d-serve:latest +``` + +[阅读文档](https://github.com/pytorch/serve/blob/072f5d088cce9bb64b2a18af065886c9b01b317b/docs/rest_api.md/) 关于 Inference (8080), Management (8081) and Metrics (8082) 接口。 + +### 4. 测试部署 + +您可以使用 `test_torchserver.py` 进行部署, 同时比较 torchserver 和 pytorch 的结果。 + +```shell +python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] [--score-thr ${SCORE_THR}] +``` + +例子: + +```shell +python tools/deployment/test_torchserver.py demo/data/kitti/kitti_000008.bin configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-car_20200620_230238-393f000c.pth second +``` + +  + +# 模型复杂度 + +您可以使用 MMDetection 中的 `tools/analysis_tools/get_flops.py` 这个脚本文件,基于 [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) 计算一个给定模型的计算量 (FLOPS) 和参数量 (params)。 + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +您将会得到如下的结果: + +```text +============================== +Input shape: (4000, 4) +Flops: 5.78 GFLOPs +Params: 953.83 k +============================== +``` + +**注意**:此工具仍然处于试验阶段,我们不能保证数值是绝对正确的。您可以将结果用于简单的比较,但在写技术文档报告或者论文之前您需要再次确认一下。 + +1. 计算量 (FLOPs) 和输入形状有关,但是参数量 (params) 则和输入形状无关。默认的输入形状为 (1, 40000, 4)。 +2. 一些运算操作不计入计算量 (FLOPs),比如说像GN和定制的运算操作,详细细节请参考 [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py)。 +3. 我们现在仅仅支持单模态输入(点云或者图片)的单阶段模型的计算量 (FLOPs) 计算,我们将会在未来支持两阶段和多模态模型的计算。 + +  + +## 模型转换 + +### RegNet 模型转换到 MMDetection + +`tools/model_converters/regnet2mmdet.py` 将 pycls 预训练 RegNet 模型中的键转换为 MMDetection 风格。 + +```shell +python tools/model_converters/regnet2mmdet.py ${SRC} ${DST} [-h] +``` + +### Detectron ResNet 转换到 Pytorch + +MMDetection 中的 `tools/detectron2pytorch.py` 能够把原始的 detectron 中预训练的 ResNet 模型的键转换为 PyTorch 风格。 + +```shell +python tools/detectron2pytorch.py ${SRC} ${DST} ${DEPTH} [-h] +``` + +### 准备要发布的模型 + +`tools/model_converters/publish_model.py` 帮助用户准备他们用于发布的模型。 + +在您上传一个模型到云服务器 (AWS) 之前,您需要做以下几步: + +1. 将模型权重转换为 CPU 张量 +2. 删除记录优化器状态 (optimizer states) 的相关信息 +3. 
计算检查点 (checkpoint) 文件的哈希编码 (hash id) 并且把哈希编码加到文件名里 + +```shell +python tools/model_converters/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +例如, + +```shell +python tools/model_converters/publish_model.py work_dirs/faster_rcnn/latest.pth faster_rcnn_r50_fpn_1x_20190801.pth +``` + +最终的输出文件名将会是 `faster_rcnn_r50_fpn_1x_20190801-{hash id}.pth`。 + +  + +# 数据集转换 + +`tools/dataset_converters/` 包含转换数据集为其他格式的一些工具。其中大多数转换数据集为基于 pickle 的信息文件,比如 KITTI,nuscense 和 lyft。Waymo 转换器被用来重新组织 waymo 原始数据为 KITTI 风格。用户能够参考它们了解我们转换数据格式的方法。将它们修改为 nuImages 转换器等脚本也很方便。 + +为了转换 nuImages 数据集为 COCO 格式,请使用下面的指令: + +```shell +python -u tools/dataset_converters/nuimage_converter.py --data-root ${DATA_ROOT} --version ${VERSIONS} \ + --out-dir ${OUT_DIR} --nproc ${NUM_WORKERS} --extra-tag ${TAG} +``` + +- `--data-root`: 数据集的根目录,默认为 `./data/nuimages`。 +- `--version`: 数据集的版本,默认为 `v1.0-mini`。要获取完整数据集,请使用 `--version v1.0-train v1.0-val v1.0-mini`。 +- `--out-dir`: 注释和语义掩码的输出目录,默认为 `./data/nuimages/annotations/`。 +- `--nproc`: 数据准备的进程数,默认为 `4`。由于图片是并行处理的,更大的进程数目能够减少准备时间。 +- `--extra-tag`: 注释的额外标签,默认为 `nuimages`。这可用于将不同时间处理的不同注释分开以供研究。 + +更多的数据准备细节参考 [doc](https://mmdetection3d.readthedocs.io/zh_CN/latest/data_preparation.html),nuImages 数据集的细节参考 [README](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/nuimages/README.md/)。 + +  + +# 其他内容 + +## 打印完整的配置文件 + +`tools/misc/print_config.py` 逐字打印整个配置文件,展开所有的导入。 + +```shell +python tools/misc/print_config.py ${CONFIG} [-h] [--options ${OPTIONS [OPTIONS...]}] +``` diff --git a/docs/zh_cn/user_guides/visualization.md b/docs/zh_cn/user_guides/visualization.md new file mode 100755 index 0000000..40de7f0 --- /dev/null +++ b/docs/zh_cn/user_guides/visualization.md @@ -0,0 +1,204 @@ +# 可视化 + +MMDetection3D 提供了 `Det3DLocalVisualizer` 用来在训练及测试阶段可视化和存储模型的状态以及结果,其具有以下特性: + +1. 支持多模态数据和多任务的基本绘图界面。 +2. 支持多个后端(如 local,TensorBoard),将训练状态(如 `loss`,`lr`)或模型评估指标写入指定的一个或多个后端中。 +3. 
支持多模态数据真实标签的可视化,3D 检测结果的跨模态可视化。 + +## 基本绘制界面 + +继承自 `DetLocalVisualizer`,`Det3DLocalVisualizer` 提供了在 2D 图像上绘制常见目标的界面,例如绘制检测框、点、文本、线、圆、多边形、二进制掩码等。关于 2D 绘制的更多细节,请参考 MMDetection 中的[可视化文档](https://mmengine.readthedocs.io/zh_CN/latest/advanced_tutorials/visualization.html)。这里我们介绍 3D 绘制界面。 + +### 在图像上绘制点云 + +通过使用 `draw_points_on_image`,我们支持在图像上绘制点云。 + +```python +import mmcv +import numpy as np +from mmengine import load + +from mmdet3d.visualization import Det3DLocalVisualizer + +info_file = load('demo/data/kitti/000008.pkl') +points = np.fromfile('demo/data/kitti/000008.bin', dtype=np.float32) +points = points.reshape(-1, 4)[:, :3] +lidar2img = np.array(info_file['data_list'][0]['images']['CAM2']['lidar2img'], dtype=np.float32) + +visualizer = Det3DLocalVisualizer() +img = mmcv.imread('demo/data/kitti/000008.png') +img = mmcv.imconvert(img, 'bgr', 'rgb') +visualizer.set_image(img) +visualizer.draw_points_on_image(points, lidar2img) +visualizer.show() +``` + +![points_on_image](../../../resources/points_on_image.png) + +### 在点云上绘制 3D 框 + +通过使用 `draw_bboxes_3d`,我们支持在点云上绘制 3D 框。 + +```python +import torch + +from mmdet3d.visualization import Det3DLocalVisualizer +from mmdet3d.structures import LiDARInstance3DBoxes + +points = np.fromfile('tests/data/kitti/training/velodyne/000000.bin', dtype=np.float32) +points = points.reshape(-1, 4) +visualizer = Det3DLocalVisualizer() +# set point cloud in visualizer +visualizer.set_points(points) +bboxes_3d = LiDARInstance3DBoxes(torch.tensor( + [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, + -1.5808]])), +# Draw 3D bboxes +visualizer.draw_bboxes_3d(bboxes_3d) +visualizer.show() +``` + +![mono3d](../../../resources/pcd.png) + +### 在图像上绘制投影的 3D 框 + +通过使用 `draw_proj_bboxes_3d`,我们支持在图像上绘制投影的 3D 框。 + +```python +import mmcv +import numpy as np +from mmengine import load + +from mmdet3d.visualization import Det3DLocalVisualizer +from mmdet3d.structures import CameraInstance3DBoxes + +info_file = load('demo/data/kitti/000008.pkl') +cam2img = np.array(info_file['data_list'][0]['images']['CAM2']['cam2img'], dtype=np.float32) +bboxes_3d = [] +for instance in info_file['data_list'][0]['instances']: + bboxes_3d.append(instance['bbox_3d']) +gt_bboxes_3d = np.array(bboxes_3d, dtype=np.float32) +gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d) +input_meta = {'cam2img': cam2img} + +visualizer = Det3DLocalVisualizer() + +img = mmcv.imread('demo/data/kitti/000008.png') +img = mmcv.imconvert(img, 'bgr', 'rgb') +visualizer.set_image(img) +# project 3D bboxes to image +visualizer.draw_proj_bboxes_3d(gt_bboxes_3d, input_meta) +visualizer.show() +``` + +![mono3d](../../../resources/mono3d.png) + +### 绘制 BEV 视角的框 + +通过使用 `draw_bev_bboxes`,我们支持绘制 BEV 视角下的框。 + +```python +import numpy as np +from mmengine import load + +from mmdet3d.visualization import Det3DLocalVisualizer +from mmdet3d.structures import CameraInstance3DBoxes + +info_file = load('demo/data/kitti/000008.pkl') +bboxes_3d = [] +for instance in info_file['data_list'][0]['instances']: + bboxes_3d.append(instance['bbox_3d']) +gt_bboxes_3d = np.array(bboxes_3d, dtype=np.float32) +gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d) + +visualizer = Det3DLocalVisualizer() +# set bev image in visualizer +visualizer.set_bev_image() +# draw bev bboxes +visualizer.draw_bev_bboxes(gt_bboxes_3d, edge_colors='orange') +visualizer.show() +``` + + + +### 绘制 3D 分割掩码 + +通过使用 `draw_seg_mask`,我们支持通过逐点着色来绘制分割掩码。 + +```python +import torch + +from mmdet3d.visualization import Det3DLocalVisualizer + +points = 
np.fromfile('tests/data/s3dis/points/Area_1_office_2.bin', dtype=np.float32) +points = points.reshape(-1, 3) +visualizer = Det3DLocalVisualizer() +mask = np.random.rand(points.shape[0], 3) +points_with_mask = np.concatenate((points, mask), axis=-1) +# Draw 3D points with mask +visualizer.draw_seg_mask(points_with_mask) +visualizer.show() +``` + +## 结果 + +如果想要可视化训练模型的预测结果,你可以运行如下指令: + +```bash +python tools/test.py ${CONFIG_FILE} ${CKPT_PATH} --show --show-dir ${SHOW_DIR} +``` + +运行该指令后,绘制的结果(包括输入数据和网络输出在输入上的可视化)将会被保存在 `${SHOW_DIR}` 中。 + +运行该指令后,你将在 `${SHOW_DIR}` 中获得输入数据,网络输出和真是标签在输入上的可视化(如在多模态检测任务和基于视觉的检测任务中的 `***_gt.png` 和 `***_pred.png`)。当启用 `show` 时,[Open3D](http://www.open3d.org/) 将会用于在线可视化结果。如果你是在没有 GUI 的远程服务器上测试时,在线可视化是不被支持的。你可以从远程服务器中下载 `results.pkl`,并在本地机器上离线可视化预测结果。 + +使用 `Open3D` 后端离线可视化结果,你可以运行如下指令: + +```bash +python tools/misc/visualize_results.py ${CONFIG_FILE} --result ${RESULTS_PATH} --show-dir ${SHOW_DIR} +``` + +![](../../../resources/open3d_visual.gif) + +这需要在远程服务器中能够推理并生成结果,然后用户在主机中使用 GUI 打开。 + +## 数据集 + +我们也提供了脚本来可视化数据集而无需推理。你可以使用 `tools/misc/browse_dataset.py` 来在线可视化加载的数据的真实标签,并保存在硬盘中。目前我们支持所有数据集的单模态 3D 检测和 3D 分割,KITTI 和 SUN RGB-D 的多模态 3D 检测,以及 nuScenes 的单目 3D 检测。如果想要浏览 KITTI 数据集,你可以运行如下指令: + +```shell +python tools/misc/browse_dataset.py configs/_base_/datasets/kitti-3d-3class.py --task lidar_det --output-dir ${OUTPUT_DIR} +``` + +**注意**:一旦指定了 `--output-dir`,当在 open3d 窗口中按下 `_ESC_` 时,用户指定的视图图像将会被保存下来。 + +为了验证数据的一致性和数据增强的效果,你可以加上 `--aug` 来可视化数据增强后的数据,指令如下所示: + +```shell +python tools/misc/browse_dataset.py configs/_base_/datasets/kitti-3d-3class.py --task det --aug --output-dir ${OUTPUT_DIR} +``` + +如果你想显示带有投影的 3D 边界框的 2D 图像,你需要一个支持多模态数据加载的配置文件,并将 `--task` 参数改为 `multi-modality_det`。示例如下: + +```shell +python tools/misc/browse_dataset.py configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py --task multi-modality_det --output-dir ${OUTPUT_DIR} +``` + +![](../../../resources/browse_dataset_multi_modality.png) + +你可以使用不同的配置浏览不同的数据集,例如在 3D 语义分割任务中可视化 ScanNet 数据集: + +```shell +python tools/misc/browse_dataset.py configs/_base_/datasets/scannet_seg-3d-20class.py --task lidar_seg --output-dir ${OUTPUT_DIR} --online +``` + +![](../../../resources/browse_dataset_seg.png) + +在单目 3D 检测任务中浏览 nuScenes 数据集: + +```shell +python tools/misc/browse_dataset.py configs/_base_/datasets/nus-mono3d.py --task mono_det --output-dir ${OUTPUT_DIR} --online +``` + +![](../../../resources/browse_dataset_mono.png) diff --git a/mmdet3d/__init__.py b/mmdet3d/__init__.py new file mode 100755 index 0000000..f054179 --- /dev/null +++ b/mmdet3d/__init__.py @@ -0,0 +1,38 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import mmdet +import mmengine +from mmengine.utils import digit_version + +from .version import __version__, version_info + +mmcv_minimum_version = '2.0.0rc4' +mmcv_maximum_version = '2.1.0' +mmcv_version = digit_version(mmcv.__version__) + +mmengine_minimum_version = '0.7.1' +mmengine_maximum_version = '1.0.0' +mmengine_version = digit_version(mmengine.__version__) + +mmdet_minimum_version = '2.24.0' +mmdet_maximum_version = '3.1.0' +mmdet_version = digit_version(mmdet.__version__) + +assert (mmcv_version >= digit_version(mmcv_minimum_version) + and mmcv_version < digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.' 
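+# The mmengine and mmdet versions are validated below with the same kind of
+# half-open version window (minimum inclusive, maximum exclusive).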
+ +assert (mmengine_version >= digit_version(mmengine_minimum_version) + and mmengine_version < digit_version(mmengine_maximum_version)), \ + f'MMEngine=={mmengine.__version__} is used but incompatible. ' \ + f'Please install mmengine>={mmengine_minimum_version}, ' \ + f'<{mmengine_maximum_version}.' + +assert (mmdet_version >= digit_version(mmdet_minimum_version) + and mmdet_version < digit_version(mmdet_maximum_version)), \ + f'MMDET=={mmdet.__version__} is used but incompatible. ' \ + f'Please install mmdet>={mmdet_minimum_version}, ' \ + f'<{mmdet_maximum_version}.' + +__all__ = ['__version__', 'version_info', 'digit_version'] diff --git a/mmdet3d/apis/__init__.py b/mmdet3d/apis/__init__.py new file mode 100755 index 0000000..57f732f --- /dev/null +++ b/mmdet3d/apis/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .inference import (convert_SyncBN, inference_detector, + inference_mono_3d_detector, + inference_multi_modality_detector, inference_segmentor, + init_model) +from .inferencers import (Base3DInferencer, LidarDet3DInferencer, + LidarSeg3DInferencer, MonoDet3DInferencer, + MultiModalityDet3DInferencer) + +__all__ = [ + 'inference_detector', 'init_model', 'inference_mono_3d_detector', + 'convert_SyncBN', 'inference_multi_modality_detector', + 'inference_segmentor', 'Base3DInferencer', 'MonoDet3DInferencer', + 'LidarDet3DInferencer', 'LidarSeg3DInferencer', + 'MultiModalityDet3DInferencer' +] diff --git a/mmdet3d/apis/inference.py b/mmdet3d/apis/inference.py new file mode 100755 index 0000000..98b4373 --- /dev/null +++ b/mmdet3d/apis/inference.py @@ -0,0 +1,383 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from copy import deepcopy +from os import path as osp +from pathlib import Path +from typing import Optional, Sequence, Union + +import mmengine +import numpy as np +import torch +import torch.nn as nn +from mmengine.config import Config +from mmengine.dataset import Compose, pseudo_collate +from mmengine.registry import init_default_scope +from mmengine.runner import load_checkpoint + +from mmdet3d.registry import MODELS +from mmdet3d.structures import Box3DMode, Det3DDataSample, get_box_type +from mmdet3d.structures.det3d_data_sample import SampleList + + +def convert_SyncBN(config): + """Convert config's naiveSyncBN to BN. + + Args: + config (str or :obj:`mmengine.Config`): Config file path or the config + object. + """ + if isinstance(config, dict): + for item in config: + if item == 'norm_cfg': + config[item]['type'] = config[item]['type']. \ + replace('naiveSyncBN', 'BN') + else: + convert_SyncBN(config[item]) + + +def init_model(config: Union[str, Path, Config], + checkpoint: Optional[str] = None, + device: str = 'cuda:0', + cfg_options: Optional[dict] = None): + """Initialize a model from config file, which could be a 3D detector or a + 3D segmentor. + + Args: + config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, + :obj:`Path`, or the config object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. + device (str): Device to use. + cfg_options (dict, optional): Options to override some settings in + the used config. + + Returns: + nn.Module: The constructed detector. 
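+
+    Examples:
+        A minimal sketch (the checkpoint path is a placeholder; pair any
+        config in this repo with its matching checkpoint):
+
+        >>> from mmdet3d.apis import init_model
+        >>> model = init_model(
+        ...     'configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py',
+        ...     'checkpoints/pointpillars_kitti-3class.pth',
+        ...     device='cuda:0')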
+ """ + if isinstance(config, (str, Path)): + config = Config.fromfile(config) + elif not isinstance(config, Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if cfg_options is not None: + config.merge_from_dict(cfg_options) + + convert_SyncBN(config.model) + config.model.train_cfg = None + init_default_scope(config.get('default_scope', 'mmdet3d')) + model = MODELS.build(config.model) + + if checkpoint is not None: + checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') + # save the dataset_meta in the model for convenience + if 'dataset_meta' in checkpoint.get('meta', {}): + # mmdet3d 1.x + model.dataset_meta = checkpoint['meta']['dataset_meta'] + elif 'CLASSES' in checkpoint.get('meta', {}): + # < mmdet3d 1.x + classes = checkpoint['meta']['CLASSES'] + model.dataset_meta = {'classes': classes} + + if 'PALETTE' in checkpoint.get('meta', {}): # 3D Segmentor + model.dataset_meta['palette'] = checkpoint['meta']['PALETTE'] + else: + # < mmdet3d 1.x + model.dataset_meta = {'classes': config.class_names} + + if 'PALETTE' in checkpoint.get('meta', {}): # 3D Segmentor + model.dataset_meta['palette'] = checkpoint['meta']['PALETTE'] + + model.cfg = config # save the config in the model for convenience + if device != 'cpu': + torch.cuda.set_device(device) + else: + warnings.warn('Don\'t suggest using CPU device. ' + 'Some functions are not supported for now.') + + model.to(device) + model.eval() + return model + + +PointsType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]] +ImagesType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]] + + +def inference_detector(model: nn.Module, + pcds: PointsType) -> Union[Det3DDataSample, SampleList]: + """Inference point cloud with the detector. + + Args: + model (nn.Module): The loaded detector. + pcds (str, ndarray, Sequence[str/ndarray]): + Either point cloud files or loaded point cloud. + + Returns: + :obj:`Det3DDataSample` or list[:obj:`Det3DDataSample`]: + If pcds is a list or tuple, the same length list type results + will be returned, otherwise return the detection results directly. 
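+
+    Examples:
+        A minimal sketch, assuming ``model`` was built with ``init_model``
+        and using the KITTI demo point cloud shipped with this repo. For a
+        single input, the prediction and the processed data dict are
+        returned as a pair:
+
+        >>> result, data = inference_detector(
+        ...     model, 'demo/data/kitti/000008.bin')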
+ """ + if isinstance(pcds, (list, tuple)): + is_batch = True + else: + pcds = [pcds] + is_batch = False + + cfg = model.cfg + + if not isinstance(pcds[0], str): + cfg = cfg.copy() + # set loading pipeline type + cfg.test_dataloader.dataset.pipeline[0].type = 'LoadPointsFromDict' + + # build the data pipeline + test_pipeline = deepcopy(cfg.test_dataloader.dataset.pipeline) + test_pipeline = Compose(test_pipeline) + box_type_3d, box_mode_3d = \ + get_box_type(cfg.test_dataloader.dataset.box_type_3d) + + data = [] + for pcd in pcds: + # prepare data + if isinstance(pcd, str): + # load from point cloud file + data_ = dict( + lidar_points=dict(lidar_path=pcd), + timestamp=1, + # for ScanNet demo we need axis_align_matrix + axis_align_matrix=np.eye(4), + box_type_3d=box_type_3d, + box_mode_3d=box_mode_3d) + else: + # directly use loaded point cloud + data_ = dict( + points=pcd, + timestamp=1, + # for ScanNet demo we need axis_align_matrix + axis_align_matrix=np.eye(4), + box_type_3d=box_type_3d, + box_mode_3d=box_mode_3d) + data_ = test_pipeline(data_) + data.append(data_) + + collate_data = pseudo_collate(data) + + # forward the model + with torch.no_grad(): + results = model.test_step(collate_data) + + if not is_batch: + return results[0], data[0] + else: + return results, data + + +def inference_multi_modality_detector(model: nn.Module, + pcds: Union[str, Sequence[str]], + imgs: Union[str, Sequence[str]], + ann_file: Union[str, Sequence[str]], + cam_type: str = 'CAM2'): + """Inference point cloud with the multi-modality detector. Now we only + support multi-modality detector for KITTI dataset since the multi-view + image loading is not supported yet in this inference function. + + Args: + model (nn.Module): The loaded detector. + pcds (str, Sequence[str]): + Either point cloud files or loaded point cloud. + imgs (str, Sequence[str]): + Either image files or loaded images. + ann_file (str, Sequence[str]): Annotation files. + cam_type (str): Image of Camera chose to infer. + For kitti dataset, it should be 'CAM2', + and for nuscenes dataset, it should be + 'CAM_FRONT'. Defaults to 'CAM_FRONT'. + + Returns: + :obj:`Det3DDataSample` or list[:obj:`Det3DDataSample`]: + If pcds is a list or tuple, the same length list type results + will be returned, otherwise return the detection results directly. 
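+
+    Examples:
+        A minimal sketch using the KITTI demo files shipped with this repo;
+        ``model`` is assumed to be a multi-modality detector built with
+        ``init_model``:
+
+        >>> result, data = inference_multi_modality_detector(
+        ...     model,
+        ...     'demo/data/kitti/000008.bin',
+        ...     'demo/data/kitti/000008.png',
+        ...     'demo/data/kitti/000008.pkl',
+        ...     cam_type='CAM2')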
+ """ + + # TODO: We will support + if isinstance(pcds, (list, tuple)): + is_batch = True + assert isinstance(imgs, (list, tuple)) + assert len(pcds) == len(imgs) + else: + pcds = [pcds] + imgs = [imgs] + is_batch = False + + cfg = model.cfg + + # build the data pipeline + test_pipeline = deepcopy(cfg.test_dataloader.dataset.pipeline) + test_pipeline = Compose(test_pipeline) + box_type_3d, box_mode_3d = \ + get_box_type(cfg.test_dataloader.dataset.box_type_3d) + + data_list = mmengine.load(ann_file)['data_list'] + + data = [] + for index, pcd in enumerate(pcds): + # get data info containing calib + img = imgs[index] + data_info = data_list[index] + img_path = data_info['images'][cam_type]['img_path'] + + if osp.basename(img_path) != osp.basename(img): + raise ValueError(f'the info file of {img_path} is not provided.') + + data_info['images'][cam_type]['img_path'] = img + cam2img = np.array(data_info['images'][cam_type]['cam2img']) + + # TODO: check the name consistency of + # image file and point cloud file + # TODO: support multi-view image loading + data_ = dict( + lidar_points=dict(lidar_path=pcd), + img_path=img, + box_type_3d=box_type_3d, + box_mode_3d=box_mode_3d, + cam2img=cam2img) + + # LiDAR to image conversion for KITTI dataset + if box_mode_3d == Box3DMode.LIDAR: + data_['lidar2img'] = np.array( + data_info['images'][cam_type]['lidar2img']) + # Depth to image conversion for SUNRGBD dataset + elif box_mode_3d == Box3DMode.DEPTH: + data_['depth2img'] = np.array( + data_info['images'][cam_type]['depth2img']) + + data_ = test_pipeline(data_) + data.append(data_) + + collate_data = pseudo_collate(data) + + # forward the model + with torch.no_grad(): + results = model.test_step(collate_data) + + if not is_batch: + return results[0], data[0] + else: + return results, data + + +def inference_mono_3d_detector(model: nn.Module, + imgs: ImagesType, + ann_file: Union[str, Sequence[str]], + cam_type: str = 'CAM_FRONT'): + """Inference image with the monocular 3D detector. + + Args: + model (nn.Module): The loaded detector. + imgs (str, Sequence[str]): + Either image files or loaded images. + ann_files (str, Sequence[str]): Annotation files. + cam_type (str): Image of Camera chose to infer. + For kitti dataset, it should be 'CAM_2', + and for nuscenes dataset, it should be + 'CAM_FRONT'. Defaults to 'CAM_FRONT'. + + Returns: + :obj:`Det3DDataSample` or list[:obj:`Det3DDataSample`]: + If pcds is a list or tuple, the same length list type results + will be returned, otherwise return the detection results directly. 
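+
+    Examples:
+        A minimal sketch using the nuScenes demo files shipped with this
+        repo; ``model`` is assumed to be a monocular detector built with
+        ``init_model``, and ``cam_type`` is assumed to match the camera
+        recorded in the annotation file:
+
+        >>> result = inference_mono_3d_detector(
+        ...     model,
+        ...     'demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg',
+        ...     'demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.pkl',
+        ...     cam_type='CAM_BACK')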
+ """ + if isinstance(imgs, (list, tuple)): + is_batch = True + else: + imgs = [imgs] + is_batch = False + + cfg = model.cfg + + # build the data pipeline + test_pipeline = deepcopy(cfg.test_dataloader.dataset.pipeline) + test_pipeline = Compose(test_pipeline) + box_type_3d, box_mode_3d = \ + get_box_type(cfg.test_dataloader.dataset.box_type_3d) + + data_list = mmengine.load(ann_file)['data_list'] + assert len(imgs) == len(data_list) + + data = [] + for index, img in enumerate(imgs): + # get data info containing calib + data_info = data_list[index] + img_path = data_info['images'][cam_type]['img_path'] + if osp.basename(img_path) != osp.basename(img): + raise ValueError(f'the info file of {img_path} is not provided.') + + # replace the img_path in data_info with img + data_info['images'][cam_type]['img_path'] = img + data_ = dict( + images=data_info['images'], + box_type_3d=box_type_3d, + box_mode_3d=box_mode_3d) + + data_ = test_pipeline(data_) + data.append(data_) + + collate_data = pseudo_collate(data) + + # forward the model + with torch.no_grad(): + results = model.test_step(collate_data) + + if not is_batch: + return results[0] + else: + return results + + +def inference_segmentor(model: nn.Module, pcds: PointsType): + """Inference point cloud with the segmentor. + + Args: + model (nn.Module): The loaded segmentor. + pcds (str, Sequence[str]): + Either point cloud files or loaded point cloud. + + Returns: + :obj:`Det3DDataSample` or list[:obj:`Det3DDataSample`]: + If pcds is a list or tuple, the same length list type results + will be returned, otherwise return the detection results directly. + """ + if isinstance(pcds, (list, tuple)): + is_batch = True + else: + pcds = [pcds] + is_batch = False + + cfg = model.cfg + + # build the data pipeline + test_pipeline = deepcopy(cfg.test_dataloader.dataset.pipeline) + + new_test_pipeline = [] + for pipeline in test_pipeline: + if pipeline['type'] != 'LoadAnnotations3D': + new_test_pipeline.append(pipeline) + test_pipeline = Compose(new_test_pipeline) + + data = [] + # TODO: support load points array + for pcd in pcds: + data_ = dict(lidar_points=dict(lidar_path=pcd)) + data_ = test_pipeline(data_) + data.append(data_) + + collate_data = pseudo_collate(data) + + # forward the model + with torch.no_grad(): + results = model.test_step(collate_data) + + if not is_batch: + return results[0], data[0] + else: + return results, data diff --git a/mmdet3d/apis/inferencers/__init__.py b/mmdet3d/apis/inferencers/__init__.py new file mode 100755 index 0000000..0da7b52 --- /dev/null +++ b/mmdet3d/apis/inferencers/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_3d_inferencer import Base3DInferencer +from .lidar_det3d_inferencer import LidarDet3DInferencer +from .lidar_seg3d_inferencer import LidarSeg3DInferencer +from .mono_det3d_inferencer import MonoDet3DInferencer +from .multi_modality_det3d_inferencer import MultiModalityDet3DInferencer + +__all__ = [ + 'Base3DInferencer', 'MonoDet3DInferencer', 'LidarDet3DInferencer', + 'LidarSeg3DInferencer', 'MultiModalityDet3DInferencer' +] diff --git a/mmdet3d/apis/inferencers/base_3d_inferencer.py b/mmdet3d/apis/inferencers/base_3d_inferencer.py new file mode 100755 index 0000000..5bf411a --- /dev/null +++ b/mmdet3d/apis/inferencers/base_3d_inferencer.py @@ -0,0 +1,312 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmengine +import numpy as np +import torch.nn as nn +from mmengine.fileio import (get_file_backend, isdir, join_path, + list_dir_or_file) +from mmengine.infer.infer import BaseInferencer, ModelType +from mmengine.registry import init_default_scope +from mmengine.runner import load_checkpoint +from mmengine.structures import InstanceData +from mmengine.visualization import Visualizer + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +class Base3DInferencer(BaseInferencer): + """Base 3D model inferencer. + + Args: + model (str, optional): Path to the config file or the model name + defined in metafile. For example, it could be + "pgd-kitti" or + "configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py". + If model is not specified, user must provide the + `weights` saved by MMEngine which contains the config string. + Defaults to None. + weights (str, optional): Path to the checkpoint. If it is not specified + and model is a model name of metafile, the weights will be loaded + from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + scope (str): The scope of the model. Defaults to 'mmdet3d'. + palette (str): Color palette used for visualization. The order of + priority is palette -> config -> checkpoint. Defaults to 'none'. + """ + + preprocess_kwargs: set = set() + forward_kwargs: set = set() + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_pred', 'pred_score_thr', + 'img_out_dir' + } + postprocess_kwargs: set = { + 'print_result', 'pred_out_file', 'return_datasample' + } + + def __init__(self, + model: Union[ModelType, str, None] = None, + weights: Optional[str] = None, + device: Optional[str] = None, + scope: str = 'mmdet3d', + palette: str = 'none') -> None: + self.palette = palette + init_default_scope(scope) + super().__init__( + model=model, weights=weights, device=device, scope=scope) + + def _convert_syncbn(self, cfg: ConfigType): + """Convert config's naiveSyncBN to BN. + + Args: + config (str or :obj:`mmengine.Config`): Config file path + or the config object. + """ + if isinstance(cfg, dict): + for item in cfg: + if item == 'norm_cfg': + cfg[item]['type'] = cfg[item]['type']. 
\ + replace('naiveSyncBN', 'BN') + else: + self._convert_syncbn(cfg[item]) + + def _init_model( + self, + cfg: ConfigType, + weights: str, + device: str = 'cpu', + ) -> nn.Module: + self._convert_syncbn(cfg.model) + cfg.model.train_cfg = None + model = MODELS.build(cfg.model) + + checkpoint = load_checkpoint(model, weights, map_location='cpu') + if 'dataset_meta' in checkpoint.get('meta', {}): + # mmdet3d 1.x + model.dataset_meta = checkpoint['meta']['dataset_meta'] + elif 'CLASSES' in checkpoint.get('meta', {}): + # < mmdet3d 1.x + classes = checkpoint['meta']['CLASSES'] + model.dataset_meta = {'classes': classes} + + if 'PALETTE' in checkpoint.get('meta', {}): # 3D Segmentor + model.dataset_meta['palette'] = checkpoint['meta']['PALETTE'] + else: + # < mmdet3d 1.x + model.dataset_meta = {'classes': cfg.class_names} + + if 'PALETTE' in checkpoint.get('meta', {}): # 3D Segmentor + model.dataset_meta['palette'] = checkpoint['meta']['PALETTE'] + + model.cfg = cfg # save the config in the model for convenience + model.to(device) + model.eval() + return model + + def _inputs_to_list( + self, + inputs: Union[dict, list], + modality_key: Union[str, List[str]] = 'points') -> list: + """Preprocess the inputs to a list. + + Preprocess inputs to a list according to its type: + + - list or tuple: return inputs + - dict: the value of key 'points'/`img` is + - Directory path: return all files in the directory + - other cases: return a list containing the string. The string + could be a path to file, a url or other types of string according + to the task. + + Args: + inputs (Union[dict, list]): Inputs for the inferencer. + modality_key (Union[str, List[str]]): The key of the modality. + Defaults to 'points'. + + Returns: + list: List of input for the :meth:`preprocess`. + """ + if isinstance(modality_key, str): + modality_key = [modality_key] + assert set(modality_key).issubset({'points', 'img'}) + + for key in modality_key: + if isinstance(inputs, dict) and isinstance(inputs[key], str): + img = inputs[key] + backend = get_file_backend(img) + if hasattr(backend, 'isdir') and isdir(img): + # Backends like HttpsBackend do not implement `isdir`, so + # only those backends that implement `isdir` could accept + # the inputs as a directory + filename_list = list_dir_or_file(img, list_dir=False) + inputs = [{ + f'{key}': join_path(img, filename) + } for filename in filename_list] + + if not isinstance(inputs, (list, tuple)): + inputs = [inputs] + + return list(inputs) + + def _get_transform_idx(self, pipeline_cfg: ConfigType, name: str) -> int: + """Returns the index of the transform in a pipeline. + + If the transform is not found, returns -1. + """ + for i, transform in enumerate(pipeline_cfg): + if transform['type'] == name: + return i + return -1 + + def _init_visualizer(self, cfg: ConfigType) -> Optional[Visualizer]: + visualizer = super()._init_visualizer(cfg) + visualizer.dataset_meta = self.model.dataset_meta + return visualizer + + def __call__(self, + inputs: InputsType, + return_datasamples: bool = False, + batch_size: int = 1, + return_vis: bool = False, + show: bool = False, + wait_time: int = 0, + draw_pred: bool = True, + pred_score_thr: float = 0.3, + img_out_dir: str = '', + print_result: bool = False, + pred_out_file: str = '', + **kwargs) -> dict: + """Call the inferencer. + + Args: + inputs (InputsType): Inputs for the inferencer. + return_datasamples (bool): Whether to return results as + :obj:`BaseDataElement`. Defaults to False. + batch_size (int): Inference batch size. Defaults to 1. 
+ return_vis (bool): Whether to return the visualization result. + Defaults to False. + show (bool): Whether to display the visualization results in a + popup window. Defaults to False. + wait_time (float): The interval of show (s). Defaults to 0. + draw_pred (bool): Whether to draw predicted bounding boxes. + Defaults to True. + pred_score_thr (float): Minimum score of bboxes to draw. + Defaults to 0.3. + img_out_dir (str): Output directory of visualization results. + If left as empty, no file will be saved. Defaults to ''. + print_result (bool): Whether to print the inference result w/o + visualization to the console. Defaults to False. + pred_out_file (str): File to save the inference results w/o + visualization. If left as empty, no file will be saved. + Defaults to ''. + **kwargs: Other keyword arguments passed to :meth:`preprocess`, + :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. + Each key in kwargs should be in the corresponding set of + ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs`` + and ``postprocess_kwargs``. + + Returns: + dict: Inference and visualization results. + """ + return super().__call__( + inputs, + return_datasamples, + batch_size, + return_vis=return_vis, + show=show, + wait_time=wait_time, + draw_pred=draw_pred, + pred_score_thr=pred_score_thr, + img_out_dir=img_out_dir, + print_result=print_result, + pred_out_file=pred_out_file, + **kwargs) + + def postprocess( + self, + preds: PredType, + visualization: Optional[List[np.ndarray]] = None, + return_datasample: bool = False, + print_result: bool = False, + pred_out_file: str = '', + ) -> Union[ResType, Tuple[ResType, np.ndarray]]: + """Process the predictions and visualization results from ``forward`` + and ``visualize``. + + This method should be responsible for the following tasks: + + 1. Convert datasamples into a json-serializable dict if needed. + 2. Pack the predictions and visualization results and return them. + 3. Dump or log the predictions. + + Args: + preds (List[Dict]): Predictions of the model. + visualization (np.ndarray, optional): Visualized predictions. + Defaults to None. + return_datasample (bool): Whether to use Datasample to store + inference results. If False, dict will be used. + Defaults to False. + print_result (bool): Whether to print the inference result w/o + visualization to the console. Defaults to False. + pred_out_file (str): File to save the inference results w/o + visualization. If left as empty, no file will be saved. + Defaults to ''. + + Returns: + dict: Inference and visualization results with key ``predictions`` + and ``visualization``. + + - ``visualization`` (Any): Returned by :meth:`visualize`. + - ``predictions`` (dict or DataSample): Returned by + :meth:`forward` and processed in :meth:`postprocess`. + If ``return_datasample=False``, it usually should be a + json-serializable dict containing only basic data elements such + as strings and numbers. + """ + result_dict = {} + results = preds + if not return_datasample: + results = [] + for pred in preds: + result = self.pred2dict(pred) + results.append(result) + result_dict['predictions'] = results + if print_result: + print(result_dict) + if pred_out_file != '': + mmengine.dump(result_dict, pred_out_file) + result_dict['visualization'] = visualization + return result_dict + + def pred2dict(self, data_sample: InstanceData) -> Dict: + """Extract elements necessary to represent a prediction into a + dictionary. 
+ + It's better to contain only basic data elements such as strings and + numbers in order to guarantee it's json-serializable. + """ + result = {} + if 'pred_instances_3d' in data_sample: + pred_instances_3d = data_sample.pred_instances_3d.numpy() + result = { + 'bboxes_3d': pred_instances_3d.bboxes_3d.tensor.cpu().tolist(), + 'labels_3d': pred_instances_3d.labels_3d.tolist(), + 'scores_3d': pred_instances_3d.scores_3d.tolist() + } + + if 'pred_pts_seg' in data_sample: + pred_pts_seg = data_sample.pred_pts_seg.numpy() + result['pts_semantic_mask'] = \ + pred_pts_seg.pts_semantic_mask.tolist() + + return result diff --git a/mmdet3d/apis/inferencers/lidar_det3d_inferencer.py b/mmdet3d/apis/inferencers/lidar_det3d_inferencer.py new file mode 100755 index 0000000..a3fdc47 --- /dev/null +++ b/mmdet3d/apis/inferencers/lidar_det3d_inferencer.py @@ -0,0 +1,187 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Dict, List, Optional, Sequence, Union + +import mmengine +import numpy as np +from mmengine.dataset import Compose +from mmengine.infer.infer import ModelType +from mmengine.structures import InstanceData + +from mmdet3d.registry import INFERENCERS +from mmdet3d.utils import ConfigType +from .base_3d_inferencer import Base3DInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module(name='det3d-lidar') +@INFERENCERS.register_module() +class LidarDet3DInferencer(Base3DInferencer): + """The inferencer of LiDAR-based detection. + + Args: + model (str, optional): Path to the config file or the model name + defined in metafile. For example, it could be + "pointpillars_kitti-3class" or + "configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py". # noqa: E501 + If model is not specified, user must provide the + `weights` saved by MMEngine which contains the config string. + Defaults to None. + weights (str, optional): Path to the checkpoint. If it is not specified + and model is a model name of metafile, the weights will be loaded + from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + scope (str): The scope of the model. Defaults to 'mmdet3d'. + palette (str): Color palette used for visualization. The order of + priority is palette -> config -> checkpoint. Defaults to 'none'. + """ + + preprocess_kwargs: set = set() + forward_kwargs: set = set() + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_pred', 'pred_score_thr', + 'img_out_dir' + } + postprocess_kwargs: set = { + 'print_result', 'pred_out_file', 'return_datasample' + } + + def __init__(self, + model: Union[ModelType, str, None] = None, + weights: Optional[str] = None, + device: Optional[str] = None, + scope: str = 'mmdet3d', + palette: str = 'none') -> None: + # A global counter tracking the number of frames processed, for + # naming of the output results + self.num_visualized_frames = 0 + super(LidarDet3DInferencer, self).__init__( + model=model, + weights=weights, + device=device, + scope=scope, + palette=palette) + + def _inputs_to_list(self, inputs: Union[dict, list]) -> list: + """Preprocess the inputs to a list. 
+ + Preprocess inputs to a list according to its type: + + - list or tuple: return inputs + - dict: the value with key 'points' is + - Directory path: return all files in the directory + - other cases: return a list containing the string. The string + could be a path to file, a url or other types of string according + to the task. + + Args: + inputs (Union[dict, list]): Inputs for the inferencer. + + Returns: + list: List of input for the :meth:`preprocess`. + """ + return super()._inputs_to_list(inputs, modality_key='points') + + def _init_pipeline(self, cfg: ConfigType) -> Compose: + """Initialize the test pipeline.""" + pipeline_cfg = cfg.test_dataloader.dataset.pipeline + + load_point_idx = self._get_transform_idx(pipeline_cfg, + 'LoadPointsFromFile') + if load_point_idx == -1: + raise ValueError( + 'LoadPointsFromFile is not found in the test pipeline') + + load_cfg = pipeline_cfg[load_point_idx] + self.coord_type, self.load_dim = load_cfg['coord_type'], load_cfg[ + 'load_dim'] + self.use_dim = list(range(load_cfg['use_dim'])) if isinstance( + load_cfg['use_dim'], int) else load_cfg['use_dim'] + + pipeline_cfg[load_point_idx]['type'] = 'LidarDet3DInferencerLoader' + return Compose(pipeline_cfg) + + def visualize(self, + inputs: InputsType, + preds: PredType, + return_vis: bool = False, + show: bool = False, + wait_time: int = 0, + draw_pred: bool = True, + pred_score_thr: float = 0.3, + img_out_dir: str = '') -> Union[List[np.ndarray], None]: + """Visualize predictions. + + Args: + inputs (InputsType): Inputs for the inferencer. + preds (PredType): Predictions of the model. + return_vis (bool): Whether to return the visualization result. + Defaults to False. + show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (s). Defaults to 0. + draw_pred (bool): Whether to draw predicted bounding boxes. + Defaults to True. + pred_score_thr (float): Minimum score of bboxes to draw. + Defaults to 0.3. + img_out_dir (str): Output directory of visualization results. + If left as empty, no file will be saved. Defaults to ''. + + Returns: + List[np.ndarray] or None: Returns visualization results only if + applicable. 
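+
+        Examples:
+            ``visualize`` is usually driven through :meth:`__call__` rather
+            than called directly. A minimal sketch (the metafile alias and
+            the ``.bin`` path below are illustrative, not fixed choices):
+
+            >>> inferencer = LidarDet3DInferencer(
+            ...     model='pointpillars_kitti-3class')
+            >>> inferencer(dict(points='demo/data/kitti/000008.bin'),
+            ...            show=True, pred_score_thr=0.3)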
+ """ + if self.visualizer is None or (not show and img_out_dir == '' + and not return_vis): + return None + + if getattr(self, 'visualizer') is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + results = [] + + for single_input, pred in zip(inputs, preds): + single_input = single_input['points'] + if isinstance(single_input, str): + pts_bytes = mmengine.fileio.get(single_input) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, self.load_dim) + points = points[:, self.use_dim] + pc_name = osp.basename(single_input).split('.bin')[0] + pc_name = f'{pc_name}.png' + elif isinstance(single_input, np.ndarray): + points = single_input.copy() + pc_num = str(self.num_visualized_frames).zfill(8) + pc_name = f'pc_{pc_num}.png' + else: + raise ValueError('Unsupported input type: ' + f'{type(single_input)}') + + o3d_save_path = osp.join(img_out_dir, pc_name) \ + if img_out_dir != '' else None + + data_input = dict(points=points) + self.visualizer.add_datasample( + pc_name, + data_input, + pred, + show=show, + wait_time=wait_time, + draw_gt=False, + draw_pred=draw_pred, + pred_score_thr=pred_score_thr, + o3d_save_path=o3d_save_path, + vis_task='lidar_det', + ) + results.append(points) + self.num_visualized_frames += 1 + + return results diff --git a/mmdet3d/apis/inferencers/lidar_seg3d_inferencer.py b/mmdet3d/apis/inferencers/lidar_seg3d_inferencer.py new file mode 100755 index 0000000..0286a07 --- /dev/null +++ b/mmdet3d/apis/inferencers/lidar_seg3d_inferencer.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Dict, List, Optional, Sequence, Union + +import mmengine +import numpy as np +from mmengine.dataset import Compose +from mmengine.infer.infer import ModelType +from mmengine.structures import InstanceData + +from mmdet3d.registry import INFERENCERS +from mmdet3d.utils import ConfigType +from .base_3d_inferencer import Base3DInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module(name='seg3d-lidar') +@INFERENCERS.register_module() +class LidarSeg3DInferencer(Base3DInferencer): + """The inferencer of LiDAR-based segmentation. + + Args: + model (str, optional): Path to the config file or the model name + defined in metafile. For example, it could be + "pointnet2-ssg_s3dis-seg" or + "configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py". + If model is not specified, user must provide the + `weights` saved by MMEngine which contains the config string. + Defaults to None. + weights (str, optional): Path to the checkpoint. If it is not specified + and model is a model name of metafile, the weights will be loaded + from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + scope (str): The scope of the model. Defaults to 'mmdet3d'. + palette (str): Color palette used for visualization. The order of + priority is palette -> config -> checkpoint. Defaults to 'none'. 
+ """ + + preprocess_kwargs: set = set() + forward_kwargs: set = set() + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_pred', 'pred_score_thr', + 'img_out_dir' + } + postprocess_kwargs: set = { + 'print_result', 'pred_out_file', 'return_datasample' + } + + def __init__(self, + model: Union[ModelType, str, None] = None, + weights: Optional[str] = None, + device: Optional[str] = None, + scope: str = 'mmdet3d', + palette: str = 'none') -> None: + # A global counter tracking the number of frames processed, for + # naming of the output results + self.num_visualized_frames = 0 + super(LidarSeg3DInferencer, self).__init__( + model=model, + weights=weights, + device=device, + scope=scope, + palette=palette) + + def _inputs_to_list(self, inputs: Union[dict, list]) -> list: + """Preprocess the inputs to a list. + + Preprocess inputs to a list according to its type: + + - list or tuple: return inputs + - dict: the value with key 'points' is + - Directory path: return all files in the directory + - other cases: return a list containing the string. The string + could be a path to file, a url or other types of string according + to the task. + + Args: + inputs (Union[dict, list]): Inputs for the inferencer. + + Returns: + list: List of input for the :meth:`preprocess`. + """ + return super()._inputs_to_list(inputs, modality_key='points') + + def _init_pipeline(self, cfg: ConfigType) -> Compose: + """Initialize the test pipeline.""" + pipeline_cfg = cfg.test_dataloader.dataset.pipeline + # Load annotation is also not applicable + idx = self._get_transform_idx(pipeline_cfg, 'LoadAnnotations3D') + if idx != -1: + del pipeline_cfg[idx] + + idx = self._get_transform_idx(pipeline_cfg, 'PointSegClassMapping') + if idx != -1: + del pipeline_cfg[idx] + + load_point_idx = self._get_transform_idx(pipeline_cfg, + 'LoadPointsFromFile') + if load_point_idx == -1: + raise ValueError( + 'LoadPointsFromFile is not found in the test pipeline') + + load_cfg = pipeline_cfg[load_point_idx] + self.coord_type, self.load_dim = load_cfg['coord_type'], load_cfg[ + 'load_dim'] + self.use_dim = list(range(load_cfg['use_dim'])) if isinstance( + load_cfg['use_dim'], int) else load_cfg['use_dim'] + + pipeline_cfg[load_point_idx]['type'] = 'LidarDet3DInferencerLoader' + return Compose(pipeline_cfg) + + def visualize(self, + inputs: InputsType, + preds: PredType, + return_vis: bool = False, + show: bool = False, + wait_time: int = 0, + draw_pred: bool = True, + pred_score_thr: float = 0.3, + img_out_dir: str = '') -> Union[List[np.ndarray], None]: + """Visualize predictions. + + Args: + inputs (InputsType): Inputs for the inferencer. + preds (PredType): Predictions of the model. + return_vis (bool): Whether to return the visualization result. + Defaults to False. + show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (s). Defaults to 0. + draw_pred (bool): Whether to draw predicted bounding boxes. + Defaults to True. + pred_score_thr (float): Minimum score of bboxes to draw. + Defaults to 0.3. + img_out_dir (str): Output directory of visualization results. + If left as empty, no file will be saved. Defaults to ''. + + Returns: + List[np.ndarray] or None: Returns visualization results only if + applicable. 
+ """ + if self.visualizer is None or (not show and img_out_dir == '' + and not return_vis): + return None + + if getattr(self, 'visualizer') is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + results = [] + + for single_input, pred in zip(inputs, preds): + single_input = single_input['points'] + if isinstance(single_input, str): + pts_bytes = mmengine.fileio.get(single_input) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, self.load_dim) + points = points[:, self.use_dim] + pc_name = osp.basename(single_input).split('.bin')[0] + pc_name = f'{pc_name}.png' + elif isinstance(single_input, np.ndarray): + points = single_input.copy() + pc_num = str(self.num_visualized_frames).zfill(8) + pc_name = f'pc_{pc_num}.png' + else: + raise ValueError('Unsupported input type: ' + f'{type(single_input)}') + + o3d_save_path = osp.join(img_out_dir, pc_name) \ + if img_out_dir != '' else None + + data_input = dict(points=points) + self.visualizer.add_datasample( + pc_name, + data_input, + pred, + show=show, + wait_time=wait_time, + draw_gt=False, + draw_pred=draw_pred, + pred_score_thr=pred_score_thr, + o3d_save_path=o3d_save_path, + vis_task='lidar_seg', + ) + results.append(points) + self.num_visualized_frames += 1 + + return results diff --git a/mmdet3d/apis/inferencers/mono_det3d_inferencer.py b/mmdet3d/apis/inferencers/mono_det3d_inferencer.py new file mode 100755 index 0000000..a0ebfe5 --- /dev/null +++ b/mmdet3d/apis/inferencers/mono_det3d_inferencer.py @@ -0,0 +1,178 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Dict, List, Optional, Sequence, Union + +import mmcv +import mmengine +import numpy as np +from mmengine.dataset import Compose +from mmengine.infer.infer import ModelType +from mmengine.structures import InstanceData + +from mmdet3d.registry import INFERENCERS +from mmdet3d.utils import ConfigType +from .base_3d_inferencer import Base3DInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module(name='det3d-mono') +@INFERENCERS.register_module() +class MonoDet3DInferencer(Base3DInferencer): + """MMDet3D Monocular 3D object detection inferencer. + + Args: + model (str, optional): Path to the config file or the model name + defined in metafile. For example, it could be + "pgd_kitti" or + "configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py". + If model is not specified, user must provide the + `weights` saved by MMEngine which contains the config string. + Defaults to None. + weights (str, optional): Path to the checkpoint. If it is not specified + and model is a model name of metafile, the weights will be loaded + from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + scope (str): The scope of the model. Defaults to 'mmdet3d'. + palette (str): Color palette used for visualization. The order of + priority is palette -> config -> checkpoint. Defaults to 'none'. 
+ """ + + preprocess_kwargs: set = set() + forward_kwargs: set = set() + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_pred', 'pred_score_thr', + 'img_out_dir' + } + postprocess_kwargs: set = { + 'print_result', 'pred_out_file', 'return_datasample' + } + + def __init__(self, + model: Union[ModelType, str, None] = None, + weights: Optional[str] = None, + device: Optional[str] = None, + scope: str = 'mmdet3d', + palette: str = 'none') -> None: + # A global counter tracking the number of images processed, for + # naming of the output images + self.num_visualized_imgs = 0 + super(MonoDet3DInferencer, self).__init__( + model=model, + weights=weights, + device=device, + scope=scope, + palette=palette) + + def _inputs_to_list(self, inputs: Union[dict, list]) -> list: + """Preprocess the inputs to a list. + + Preprocess inputs to a list according to its type: + + - list or tuple: return inputs + - dict: the value with key 'img' is + - Directory path: return all files in the directory + - other cases: return a list containing the string. The string + could be a path to file, a url or other types of string according + to the task. + + Args: + inputs (Union[dict, list]): Inputs for the inferencer. + + Returns: + list: List of input for the :meth:`preprocess`. + """ + return super()._inputs_to_list(inputs, modality_key='img') + + def _init_pipeline(self, cfg: ConfigType) -> Compose: + """Initialize the test pipeline.""" + pipeline_cfg = cfg.test_dataloader.dataset.pipeline + + load_img_idx = self._get_transform_idx(pipeline_cfg, + 'LoadImageFromFileMono3D') + if load_img_idx == -1: + raise ValueError( + 'LoadImageFromFileMono3D is not found in the test pipeline') + pipeline_cfg[load_img_idx]['type'] = 'MonoDet3DInferencerLoader' + return Compose(pipeline_cfg) + + def visualize(self, + inputs: InputsType, + preds: PredType, + return_vis: bool = False, + show: bool = False, + wait_time: int = 0, + draw_pred: bool = True, + pred_score_thr: float = 0.3, + img_out_dir: str = '') -> Union[List[np.ndarray], None]: + """Visualize predictions. + + Args: + inputs (List[Dict]): Inputs for the inferencer. + preds (List[Dict]): Predictions of the model. + return_vis (bool): Whether to return the visualization result. + Defaults to False. + show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (s). Defaults to 0. + draw_pred (bool): Whether to draw predicted bounding boxes. + Defaults to True. + pred_score_thr (float): Minimum score of bboxes to draw. + Defaults to 0.3. + img_out_dir (str): Output directory of visualization results. + If left as empty, no file will be saved. Defaults to ''. + + Returns: + List[np.ndarray] or None: Returns visualization results only if + applicable. 
+ """ + if self.visualizer is None or (not show and img_out_dir == '' + and not return_vis): + return None + + if getattr(self, 'visualizer') is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + results = [] + + for single_input, pred in zip(inputs, preds): + if isinstance(single_input['img'], str): + img_bytes = mmengine.fileio.get(single_input['img']) + img = mmcv.imfrombytes(img_bytes) + img = img[:, :, ::-1] + img_name = osp.basename(single_input['img']) + elif isinstance(single_input['img'], np.ndarray): + img = single_input['img'].copy() + img_num = str(self.num_visualized_imgs).zfill(8) + img_name = f'{img_num}.jpg' + else: + raise ValueError('Unsupported input type: ' + f"{type(single_input['img'])}") + + out_file = osp.join(img_out_dir, img_name) if img_out_dir != '' \ + else None + + data_input = dict(img=img) + self.visualizer.add_datasample( + img_name, + data_input, + pred, + show=show, + wait_time=wait_time, + draw_gt=False, + draw_pred=draw_pred, + pred_score_thr=pred_score_thr, + out_file=out_file, + vis_task='mono_det', + ) + results.append(img) + self.num_visualized_imgs += 1 + + return results diff --git a/mmdet3d/apis/inferencers/multi_modality_det3d_inferencer.py b/mmdet3d/apis/inferencers/multi_modality_det3d_inferencer.py new file mode 100755 index 0000000..ab02e06 --- /dev/null +++ b/mmdet3d/apis/inferencers/multi_modality_det3d_inferencer.py @@ -0,0 +1,233 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings +from typing import Dict, List, Optional, Sequence, Union + +import mmcv +import mmengine +import numpy as np +from mmengine.dataset import Compose +from mmengine.infer.infer import ModelType +from mmengine.structures import InstanceData + +from mmdet3d.registry import INFERENCERS +from mmdet3d.utils import ConfigType +from .base_3d_inferencer import Base3DInferencer + +InstanceList = List[InstanceData] +InputType = Union[str, np.ndarray] +InputsType = Union[InputType, Sequence[InputType]] +PredType = Union[InstanceData, InstanceList] +ImgType = Union[np.ndarray, Sequence[np.ndarray]] +ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] + + +@INFERENCERS.register_module(name='det3d-multi_modality') +@INFERENCERS.register_module() +class MultiModalityDet3DInferencer(Base3DInferencer): + """The inferencer of multi-modality detection. + + Args: + model (str, optional): Path to the config file or the model name + defined in metafile. For example, it could be + "pointpillars_kitti-3class" or + "configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py". # noqa: E501 + If model is not specified, user must provide the + `weights` saved by MMEngine which contains the config string. + Defaults to None. + weights (str, optional): Path to the checkpoint. If it is not specified + and model is a model name of metafile, the weights will be loaded + from metafile. Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + scope (str): The scope of registry. Defaults to 'mmdet3d'. + palette (str): The palette of visualization. Defaults to 'none'. 
+ """ + + preprocess_kwargs: set = set() + forward_kwargs: set = set() + visualize_kwargs: set = { + 'return_vis', 'show', 'wait_time', 'draw_pred', 'pred_score_thr', + 'img_out_dir' + } + postprocess_kwargs: set = { + 'print_result', 'pred_out_file', 'return_datasample' + } + + def __init__(self, + model: Union[ModelType, str, None] = None, + weights: Optional[str] = None, + device: Optional[str] = None, + scope: str = 'mmdet3d', + palette: str = 'none') -> None: + # A global counter tracking the number of frames processed, for + # naming of the output results + self.num_visualized_frames = 0 + super(MultiModalityDet3DInferencer, self).__init__( + model=model, + weights=weights, + device=device, + scope=scope, + palette=palette) + + def _inputs_to_list(self, inputs: Union[dict, list]) -> list: + """Preprocess the inputs to a list. + + Preprocess inputs to a list according to its type: + + - list or tuple: return inputs + - dict: the value with key 'points' is + - Directory path: return all files in the directory + - other cases: return a list containing the string. The string + could be a path to file, a url or other types of string according + to the task. + + Args: + inputs (Union[dict, list]): Inputs for the inferencer. + + Returns: + list: List of input for the :meth:`preprocess`. + """ + return super()._inputs_to_list(inputs, modality_key=['points', 'img']) + + def _init_pipeline(self, cfg: ConfigType) -> Compose: + """Initialize the test pipeline.""" + pipeline_cfg = cfg.test_dataloader.dataset.pipeline + + load_point_idx = self._get_transform_idx(pipeline_cfg, + 'LoadPointsFromFile') + load_mv_img_idx = self._get_transform_idx( + pipeline_cfg, 'LoadMultiViewImageFromFiles') + if load_mv_img_idx != -1: + warnings.warn( + 'LoadMultiViewImageFromFiles is not supported yet in the ' + 'multi-modality inferencer. Please remove it') + # Now, we only support ``LoadImageFromFile`` as the image loader in the + # original piepline. `LoadMultiViewImageFromFiles` is not supported + # yet. + load_img_idx = self._get_transform_idx(pipeline_cfg, + 'LoadImageFromFile') + + if load_point_idx == -1 or load_img_idx == -1: + raise ValueError( + 'Both LoadPointsFromFile and LoadImageFromFile must ' + 'be specified the pipeline, but LoadPointsFromFile is ' + f'{load_point_idx == -1} and LoadImageFromFile is ' + f'{load_img_idx}') + + load_cfg = pipeline_cfg[load_point_idx] + self.coord_type, self.load_dim = load_cfg['coord_type'], load_cfg[ + 'load_dim'] + self.use_dim = list(range(load_cfg['use_dim'])) if isinstance( + load_cfg['use_dim'], int) else load_cfg['use_dim'] + + load_point_args = pipeline_cfg[load_point_idx] + load_point_args.pop('type') + load_img_args = pipeline_cfg[load_img_idx] + load_img_args.pop('type') + + load_idx = min(load_point_idx, load_img_idx) + pipeline_cfg.pop(max(load_point_idx, load_img_idx)) + + pipeline_cfg[load_idx] = dict( + type='MultiModalityDet3DInferencerLoader', + load_point_args=load_point_args, + load_img_args=load_img_args) + + return Compose(pipeline_cfg) + + def visualize(self, + inputs: InputsType, + preds: PredType, + return_vis: bool = False, + show: bool = False, + wait_time: int = 0, + draw_pred: bool = True, + pred_score_thr: float = 0.3, + img_out_dir: str = '') -> Union[List[np.ndarray], None]: + """Visualize predictions. + + Args: + inputs (InputsType): Inputs for the inferencer. + preds (PredType): Predictions of the model. + return_vis (bool): Whether to return the visualization result. + Defaults to False. 
+ show (bool): Whether to display the image in a popup window. + Defaults to False. + wait_time (float): The interval of show (s). Defaults to 0. + draw_pred (bool): Whether to draw predicted bounding boxes. + Defaults to True. + pred_score_thr (float): Minimum score of bboxes to draw. + Defaults to 0.3. + img_out_dir (str): Output directory of visualization results. + If left as empty, no file will be saved. Defaults to ''. + + Returns: + List[np.ndarray] or None: Returns visualization results only if + applicable. + """ + if self.visualizer is None or (not show and img_out_dir == '' + and not return_vis): + return None + + if getattr(self, 'visualizer') is None: + raise ValueError('Visualization needs the "visualizer" term' + 'defined in the config, but got None.') + + results = [] + + for single_input, pred in zip(inputs, preds): + points_input = single_input['points'] + if isinstance(points_input, str): + pts_bytes = mmengine.fileio.get(points_input) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, self.load_dim) + points = points[:, self.use_dim] + pc_name = osp.basename(points_input).split('.bin')[0] + pc_name = f'{pc_name}.png' + elif isinstance(points_input, np.ndarray): + points = points_input.copy() + pc_num = str(self.num_visualized_frames).zfill(8) + pc_name = f'pc_{pc_num}.png' + else: + raise ValueError('Unsupported input type: ' + f'{type(points_input)}') + + o3d_save_path = osp.join(img_out_dir, pc_name) \ + if img_out_dir != '' else None + + img_input = single_input['img'] + if isinstance(single_input['img'], str): + img_bytes = mmengine.fileio.get(img_input) + img = mmcv.imfrombytes(img_bytes) + img = img[:, :, ::-1] + img_name = osp.basename(img_input) + elif isinstance(img_input, np.ndarray): + img = img_input.copy() + img_num = str(self.num_visualized_frames).zfill(8) + img_name = f'{img_num}.jpg' + else: + raise ValueError('Unsupported input type: ' + f'{type(img_input)}') + + out_file = osp.join(img_out_dir, img_name) if img_out_dir != '' \ + else None + + data_input = dict(points=points, img=img) + self.visualizer.add_datasample( + pc_name, + data_input, + pred, + show=show, + wait_time=wait_time, + draw_gt=False, + draw_pred=draw_pred, + pred_score_thr=pred_score_thr, + o3d_save_path=o3d_save_path, + out_file=out_file, + vis_task='multi-modality_det', + ) + results.append(points) + self.num_visualized_frames += 1 + + return results diff --git a/mmdet3d/datasets/__init__.py b/mmdet3d/datasets/__init__.py new file mode 100755 index 0000000..d573ca4 --- /dev/null +++ b/mmdet3d/datasets/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .dataset_wrappers import CBGSDataset +from .det3d_dataset import Det3DDataset +from .kitti_dataset import KittiDataset +from .lyft_dataset import LyftDataset +from .nuscenes_dataset import NuScenesDataset +# yapf: enable +from .s3dis_dataset import S3DISDataset, S3DISSegDataset +from .scannet_dataset import (ScanNetDataset, ScanNetInstanceSegDataset, + ScanNetSegDataset) +from .seg3d_dataset import Seg3DDataset +from .semantickitti_dataset import SemanticKittiDataset +from .sunrgbd_dataset import SUNRGBDDataset +# yapf: disable +from .transforms import (AffineResize, BackgroundPointsFilter, GlobalAlignment, + GlobalRotScaleTrans, IndoorPatchPointSample, + IndoorPointSample, LoadAnnotations3D, + LoadPointsFromDict, LoadPointsFromFile, + LoadPointsFromMultiSweeps, NormalizePointsColor, + ObjectNameFilter, ObjectNoise, ObjectRangeFilter, + ObjectSample, PointSample, PointShuffle, + PointsRangeFilter, RandomDropPointsColor, + RandomFlip3D, RandomJitterPoints, RandomResize3D, + RandomShiftScale, Resize3D, VoxelBasedPointSampler) +from .utils import get_loading_pipeline +from .waymo_dataset import WaymoDataset + +__all__ = [ + 'KittiDataset', 'CBGSDataset', 'NuScenesDataset', 'LyftDataset', + 'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScaleTrans', + 'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', + 'LoadPointsFromFile', 'S3DISSegDataset', 'S3DISDataset', + 'NormalizePointsColor', 'IndoorPatchPointSample', 'IndoorPointSample', + 'PointSample', 'LoadAnnotations3D', 'GlobalAlignment', 'SUNRGBDDataset', + 'ScanNetDataset', 'ScanNetSegDataset', 'ScanNetInstanceSegDataset', + 'SemanticKittiDataset', 'Det3DDataset', 'Seg3DDataset', + 'LoadPointsFromMultiSweeps', 'WaymoDataset', 'BackgroundPointsFilter', + 'VoxelBasedPointSampler', 'get_loading_pipeline', 'RandomDropPointsColor', + 'RandomJitterPoints', 'ObjectNameFilter', 'AffineResize', + 'RandomShiftScale', 'LoadPointsFromDict', 'Resize3D', 'RandomResize3D', +] diff --git a/mmdet3d/datasets/convert_utils.py b/mmdet3d/datasets/convert_utils.py new file mode 100755 index 0000000..2a8da62 --- /dev/null +++ b/mmdet3d/datasets/convert_utils.py @@ -0,0 +1,421 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
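+# A worked sketch of the canvas-clipping helper defined below (values are
+# illustrative): box corners partly outside a 1600x900 image are clipped to
+# the image canvas.
+#   post_process_coords(
+#       [[-10, 50], [200, 50], [200, 400], [-10, 400]], imsize=(1600, 900))
+#   returns (0.0, 50.0, 200.0, 400.0)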
+import copy +from typing import List, Optional, Tuple, Union + +import numpy as np +from nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from pyquaternion import Quaternion +from shapely.geometry import MultiPoint, box + +from mmdet3d.structures import Box3DMode, CameraInstance3DBoxes, points_cam2img +from mmdet3d.structures.ops import box_np_ops + +kitti_categories = ('Pedestrian', 'Cyclist', 'Car', 'Van', 'Truck', + 'Person_sitting', 'Tram', 'Misc') + +waymo_categories = ('Car', 'Pedestrian', 'Cyclist') + +nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', + 'barrier') + +nus_attributes = ('cycle.with_rider', 'cycle.without_rider', + 'pedestrian.moving', 'pedestrian.standing', + 'pedestrian.sitting_lying_down', 'vehicle.moving', + 'vehicle.parked', 'vehicle.stopped', 'None') +NuScenesNameMapping = { + 'movable_object.barrier': 'barrier', + 'vehicle.bicycle': 'bicycle', + 'vehicle.bus.bendy': 'bus', + 'vehicle.bus.rigid': 'bus', + 'vehicle.car': 'car', + 'vehicle.construction': 'construction_vehicle', + 'vehicle.motorcycle': 'motorcycle', + 'human.pedestrian.adult': 'pedestrian', + 'human.pedestrian.child': 'pedestrian', + 'human.pedestrian.construction_worker': 'pedestrian', + 'human.pedestrian.police_officer': 'pedestrian', + 'movable_object.trafficcone': 'traffic_cone', + 'vehicle.trailer': 'trailer', + 'vehicle.truck': 'truck' +} +LyftNameMapping = { + 'bicycle': 'bicycle', + 'bus': 'bus', + 'car': 'car', + 'emergency_vehicle': 'emergency_vehicle', + 'motorcycle': 'motorcycle', + 'other_vehicle': 'other_vehicle', + 'pedestrian': 'pedestrian', + 'truck': 'truck', + 'animal': 'animal' +} + + +def get_nuscenes_2d_boxes(nusc: NuScenes, sample_data_token: str, + visibilities: List[str]) -> List[dict]: + """Get the 2d / mono3d annotation records for a given `sample_data_token` + of nuscenes dataset. + + Args: + nusc (:obj:`NuScenes`): NuScenes class. + sample_data_token (str): Sample data token belonging to a camera + keyframe. + visibilities (List[str]): Visibility filter. + + Return: + List[dict]: List of 2d annotation record that belongs to the input + `sample_data_token`. + """ + + # Get the sample data and the sample corresponding to that sample data. + sd_rec = nusc.get('sample_data', sample_data_token) + + assert sd_rec[ + 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ + ' for camera sample_data!' + if not sd_rec['is_key_frame']: + raise ValueError( + 'The 2D re-projections are available only for keyframes.') + + s_rec = nusc.get('sample', sd_rec['sample_token']) + + # Get the calibrated sensor and ego pose + # record to get the transformation matrices. + cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) + pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) + camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + + # Get all the annotation with the specified visibilties. + ann_recs = [ + nusc.get('sample_annotation', token) for token in s_rec['anns'] + ] + ann_recs = [ + ann_rec for ann_rec in ann_recs + if (ann_rec['visibility_token'] in visibilities) + ] + + repro_recs = [] + + for ann_rec in ann_recs: + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = ann_rec['token'] + ann_rec['sample_data_token'] = sample_data_token + + # Get the box in global coordinates. + box = nusc.get_box(ann_rec['token']) + + # Move them to the ego-pose frame. 
+ box.translate(-np.array(pose_rec['translation'])) + box.rotate(Quaternion(pose_rec['rotation']).inverse) + + # Move them to the calibrated sensor frame. + box.translate(-np.array(cs_rec['translation'])) + box.rotate(Quaternion(cs_rec['rotation']).inverse) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box.corners() + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords(corner_coords) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. + repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + 'nuscenes') + + # if repro_rec is None, we do not append it into repre_recs + if repro_rec is not None: + loc = box.center.tolist() + + dim = box.wlh + dim[[0, 1, 2]] = dim[[1, 2, 0]] # convert wlh to our lhw + dim = dim.tolist() + + rot = box.orientation.yaw_pitch_roll[0] + rot = [-rot] # convert the rot to our cam coordinate + + global_velo2d = nusc.box_velocity(box.token)[:2] + global_velo3d = np.array([*global_velo2d, 0.0]) + e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix + c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix + cam_velo3d = global_velo3d @ np.linalg.inv( + e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + velo = cam_velo3d[0::2].tolist() + + repro_rec['bbox_3d'] = loc + dim + rot + repro_rec['velocity'] = velo + + center_3d = np.array(loc).reshape([1, 3]) + center_2d_with_depth = points_cam2img( + center_3d, camera_intrinsic, with_depth=True) + center_2d_with_depth = center_2d_with_depth.squeeze().tolist() + repro_rec['center_2d'] = center_2d_with_depth[:2] + repro_rec['depth'] = center_2d_with_depth[2] + # normalized center2D + depth + # if samples with depth < 0 will be removed + if repro_rec['depth'] <= 0: + continue + + ann_token = nusc.get('sample_annotation', + box.token)['attribute_tokens'] + if len(ann_token) == 0: + attr_name = 'None' + else: + attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_id = nus_attributes.index(attr_name) + # repro_rec['attribute_name'] = attr_name + repro_rec['attr_label'] = attr_id + + repro_recs.append(repro_rec) + + return repro_recs + + +def get_kitti_style_2d_boxes(info: dict, + cam_idx: int = 2, + occluded: Tuple[int] = (0, 1, 2, 3), + annos: Optional[dict] = None, + mono3d: bool = True, + dataset: str = 'kitti') -> List[dict]: + """Get the 2d / mono3d annotation records for a given info. + + This function is used to get 2D/Mono3D annotations when loading annotations + from a kitti-style dataset class, such as KITTI and Waymo dataset. + + Args: + info (dict): Information of the given sample data. + cam_idx (int): Camera id which the 2d / mono3d annotations to obtain + belong to. In KITTI, typically only CAM 2 will be used, + and in Waymo, multi cameras could be used. + Defaults to 2. + occluded (Tuple[int]): Integer (0, 1, 2, 3) indicating occlusion state: + 0 = fully visible, 1 = partly occluded, 2 = largely occluded, + 3 = unknown, -1 = DontCare. + Defaults to (0, 1, 2, 3). + annos (dict, optional): Original annotations. Defaults to None. + mono3d (bool): Whether to get boxes with mono3d annotation. 
+ Defaults to True. + dataset (str): Dataset name of getting 2d bboxes. + Defaults to 'kitti'. + + Return: + List[dict]: List of 2d / mono3d annotation record that + belongs to the input camera id. + """ + # Get calibration information + camera_intrinsic = info['calib'][f'P{cam_idx}'] + + repro_recs = [] + # if no annotations in info (test dataset), then return + if annos is None: + return repro_recs + + # Get all the annotation with the specified visibilties. + # filter the annotation bboxes by occluded attributes + ann_dicts = annos + mask = [(ocld in occluded) for ocld in ann_dicts['occluded']] + for k in ann_dicts.keys(): + ann_dicts[k] = ann_dicts[k][mask] + + # convert dict of list to list of dict + ann_recs = [] + for i in range(len(ann_dicts['occluded'])): + ann_rec = {} + for k in ann_dicts.keys(): + ann_rec[k] = ann_dicts[k][i] + ann_recs.append(ann_rec) + + for ann_idx, ann_rec in enumerate(ann_recs): + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = \ + f"{info['image']['image_idx']}.{ann_idx}" + ann_rec['sample_data_token'] = info['image']['image_idx'] + + loc = ann_rec['location'][np.newaxis, :] + dim = ann_rec['dimensions'][np.newaxis, :] + rot = ann_rec['rotation_y'][np.newaxis, np.newaxis] + + # transform the center from [0.5, 1.0, 0.5] to [0.5, 0.5, 0.5] + dst = np.array([0.5, 0.5, 0.5]) + src = np.array([0.5, 1.0, 0.5]) + # gravity center + loc_center = loc + dim * (dst - src) + gt_bbox_3d = np.concatenate([loc_center, dim, rot], + axis=1).astype(np.float32) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box_np_ops.center_to_corner_box3d( + gt_bbox_3d[:, :3], + gt_bbox_3d[:, 3:6], + gt_bbox_3d[:, 6], (0.5, 0.5, 0.5), + axis=1) + corners_3d = corners_3d[0].T # (1, 8, 3) -> (3, 8) + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords( + corner_coords, + imsize=(info['image']['image_shape'][1], + info['image']['image_shape'][0])) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. 
+ repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + dataset) + + # If mono3d=True, add 3D annotations in camera coordinates + if mono3d and (repro_rec is not None): + # use bottom center to represent the bbox_3d + repro_rec['bbox_3d'] = np.concatenate( + [loc, dim, rot], axis=1).astype(np.float32).squeeze().tolist() + repro_rec['velocity'] = -1 # no velocity in KITTI + + center_3d = np.array(loc_center).reshape([1, 3]) + center_2d_with_depth = points_cam2img( + center_3d, camera_intrinsic, with_depth=True) + center_2d_with_depth = center_2d_with_depth.squeeze().tolist() + + repro_rec['center_2d'] = center_2d_with_depth[:2] + repro_rec['depth'] = center_2d_with_depth[2] + # normalized center2D + depth + # samples with depth < 0 will be removed + if repro_rec['depth'] <= 0: + continue + repro_recs.append(repro_rec) + + return repro_recs + + +def convert_annos(info: dict, cam_idx: int) -> dict: + """Convert front-cam anns to i-th camera (KITTI-style info).""" + rect = info['calib']['R0_rect'].astype(np.float32) + lidar2cam0 = info['calib']['Tr_velo_to_cam'].astype(np.float32) + lidar2cami = info['calib'][f'Tr_velo_to_cam{cam_idx}'].astype(np.float32) + annos = info['annos'] + converted_annos = copy.deepcopy(annos) + loc = annos['location'] + dims = annos['dimensions'] + rots = annos['rotation_y'] + gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1).astype(np.float32) + # convert gt_bboxes_3d to velodyne coordinates + gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to( + Box3DMode.LIDAR, np.linalg.inv(rect @ lidar2cam0), correct_yaw=True) + # convert gt_bboxes_3d to cam coordinates + gt_bboxes_3d = gt_bboxes_3d.convert_to( + Box3DMode.CAM, rect @ lidar2cami, correct_yaw=True).tensor.numpy() + converted_annos['location'] = gt_bboxes_3d[:, :3] + converted_annos['dimensions'] = gt_bboxes_3d[:, 3:6] + converted_annos['rotation_y'] = gt_bboxes_3d[:, 6] + return converted_annos + + +def post_process_coords( + corner_coords: List[int], imsize: Tuple[int] = (1600, 900) +) -> Union[Tuple[float], None]: + """Get the intersection of the convex hull of the reprojected bbox corners + and the image canvas, return None if no intersection. + + Args: + corner_coords (List[int]): Corner coordinates of reprojected + bounding box. + imsize (Tuple[int]): Size of the image canvas. + Defaults to (1600, 900). + + Return: + Tuple[float] or None: Intersection of the convex hull of the 2D box + corners and the image canvas. + """ + polygon_from_2d_box = MultiPoint(corner_coords).convex_hull + img_canvas = box(0, 0, imsize[0], imsize[1]) + + if polygon_from_2d_box.intersects(img_canvas): + img_intersection = polygon_from_2d_box.intersection(img_canvas) + intersection_coords = np.array( + [coord for coord in img_intersection.exterior.coords]) + + min_x = min(intersection_coords[:, 0]) + min_y = min(intersection_coords[:, 1]) + max_x = max(intersection_coords[:, 0]) + max_y = max(intersection_coords[:, 1]) + + return min_x, min_y, max_x, max_y + else: + return None + + +def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, + dataset: str) -> Union[dict, None]: + """Generate one 2D annotation record given various information on top of + the 2D bounding box coordinates. + + Args: + ann_rec (dict): Original 3d annotation record. + x1 (float): Minimum value of the x coordinate. + y1 (float): Minimum value of the y coordinate. + x2 (float): Maximum value of the x coordinate. + y2 (float): Maximum value of the y coordinate. 
+ dataset (str): Name of dataset. + + Returns: + dict or None: A sample 2d annotation record. + + - bbox_label (int): 2d box label id + - bbox_label_3d (int): 3d box label id + - bbox (List[float]): left x, top y, right x, bottom y of 2d box + - bbox_3d_isvalid (bool): whether the box is valid + """ + + if dataset == 'nuscenes': + cat_name = ann_rec['category_name'] + if cat_name not in NuScenesNameMapping: + return None + else: + cat_name = NuScenesNameMapping[cat_name] + categories = nus_categories + else: + if dataset == 'kitti': + categories = kitti_categories + elif dataset == 'waymo': + categories = waymo_categories + else: + raise NotImplementedError('Unsupported dataset!') + + cat_name = ann_rec['name'] + if cat_name not in categories: + return None + + rec = dict() + rec['bbox_label'] = categories.index(cat_name) + rec['bbox_label_3d'] = rec['bbox_label'] + rec['bbox'] = [x1, y1, x2, y2] + rec['bbox_3d_isvalid'] = True + + return rec diff --git a/mmdet3d/datasets/dataset_wrappers.py b/mmdet3d/datasets/dataset_wrappers.py new file mode 100755 index 0000000..398f854 --- /dev/null +++ b/mmdet3d/datasets/dataset_wrappers.py @@ -0,0 +1,182 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings +from typing import List, Set, Union + +import numpy as np +from mmengine.dataset import BaseDataset, force_full_init + +from mmdet3d.registry import DATASETS + + +@DATASETS.register_module() +class CBGSDataset: + """A wrapper of class sampled dataset with ann_file path. Implementation of + paper `Class-balanced Grouping and Sampling for Point Cloud 3D Object + Detection `_. + + Balance the number of scenes under different classes. + + Args: + dataset (:obj:`BaseDataset` or dict): The dataset to be class sampled. + lazy_init (bool): Whether to load annotation during instantiation. + Defaults to False. + """ + + def __init__(self, + dataset: Union[BaseDataset, dict], + lazy_init: bool = False) -> None: + self.dataset: BaseDataset + if isinstance(dataset, dict): + self.dataset = DATASETS.build(dataset) + elif isinstance(dataset, BaseDataset): + self.dataset = dataset + else: + raise TypeError( + 'elements in datasets sequence should be config or ' + f'`BaseDataset` instance, but got {type(dataset)}') + self._metainfo = self.dataset.metainfo + + self._fully_initialized = False + if not lazy_init: + self.full_init() + + @property + def metainfo(self) -> dict: + """Get the meta information of the repeated dataset. + + Returns: + dict: The meta information of repeated dataset. + """ + return copy.deepcopy(self._metainfo) + + def full_init(self) -> None: + """Loop to ``full_init`` each dataset.""" + if self._fully_initialized: + return + + self.dataset.full_init() + # Get sample_indices + self.sample_indices = self._get_sample_indices(self.dataset) + + self._fully_initialized = True + + def _get_sample_indices(self, dataset: BaseDataset) -> List[int]: + """Load sample indices according to ann_file. + + Args: + dataset (:obj:`BaseDataset`): The dataset. + + Returns: + List[dict]: List of indices after class sampling. + """ + classes = self.metainfo['classes'] + cat2id = {name: i for i, name in enumerate(classes)} + class_sample_idxs = {cat_id: [] for cat_id in cat2id.values()} + for idx in range(len(dataset)): + sample_cat_ids = dataset.get_cat_ids(idx) + for cat_id in sample_cat_ids: + if cat_id != -1: + # Filter categories that do not need to be cared. + # -1 indicates dontcare in MMDet3D. 
+ class_sample_idxs[cat_id].append(idx) + duplicated_samples = sum( + [len(v) for _, v in class_sample_idxs.items()]) + class_distribution = { + k: len(v) / duplicated_samples + for k, v in class_sample_idxs.items() + } + + sample_indices = [] + + frac = 1.0 / len(classes) + ratios = [frac / v for v in class_distribution.values()] + for cls_inds, ratio in zip(list(class_sample_idxs.values()), ratios): + sample_indices += np.random.choice(cls_inds, + int(len(cls_inds) * + ratio)).tolist() + return sample_indices + + @force_full_init + def _get_ori_dataset_idx(self, idx: int) -> int: + """Convert global index to local index. + + Args: + idx (int): Global index of ``CBGSDataset``. + + Returns: + int: Local index of data. + """ + return self.sample_indices[idx] + + @force_full_init + def get_cat_ids(self, idx: int) -> Set[int]: + """Get category ids of class balanced dataset by index. + + Args: + idx (int): Index of data. + + Returns: + Set[int]: All categories in the sample of specified index. + """ + sample_idx = self._get_ori_dataset_idx(idx) + return self.dataset.get_cat_ids(sample_idx) + + @force_full_init + def get_data_info(self, idx: int) -> dict: + """Get annotation by index. + + Args: + idx (int): Global index of ``CBGSDataset``. + + Returns: + dict: The idx-th annotation of the dataset. + """ + sample_idx = self._get_ori_dataset_idx(idx) + return self.dataset.get_data_info(sample_idx) + + def __getitem__(self, idx: int) -> dict: + """Get item from infos according to the given index. + + Args: + idx (int): The index of self.sample_indices. + + Returns: + dict: Data dictionary of the corresponding index. + """ + if not self._fully_initialized: + warnings.warn('Please call `full_init` method manually to ' + 'accelerate the speed.') + self.full_init() + + ori_index = self._get_ori_dataset_idx(idx) + return self.dataset[ori_index] + + @force_full_init + def __len__(self) -> int: + """Return the length of data infos. + + Returns: + int: Length of data infos. + """ + return len(self.sample_indices) + + def get_subset_(self, indices: Union[List[int], int]) -> None: + """Not supported in ``CBGSDataset`` for the ambiguous meaning of sub- + dataset.""" + raise NotImplementedError( + '`CBGSDataset` does not support `get_subset` and ' + '`get_subset_` interfaces because this will lead to ambiguous ' + 'implementation of some methods. If you want to use `get_subset` ' + 'or `get_subset_` interfaces, please use them in the wrapped ' + 'dataset first and then use `CBGSDataset`.') + + def get_subset(self, indices: Union[List[int], int]) -> BaseDataset: + """Not supported in ``CBGSDataset`` for the ambiguous meaning of sub- + dataset.""" + raise NotImplementedError( + '`CBGSDataset` does not support `get_subset` and ' + '`get_subset_` interfaces because this will lead to ambiguous ' + 'implementation of some methods. If you want to use `get_subset` ' + 'or `get_subset_` interfaces, please use them in the wrapped ' + 'dataset first and then use `CBGSDataset`.') diff --git a/mmdet3d/datasets/det3d_dataset.py b/mmdet3d/datasets/det3d_dataset.py new file mode 100755 index 0000000..f8b431d --- /dev/null +++ b/mmdet3d/datasets/det3d_dataset.py @@ -0,0 +1,425 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import os +from os import path as osp +from typing import Callable, List, Optional, Set, Union + +import numpy as np +import torch +from mmengine.dataset import BaseDataset +from mmengine.logging import print_log +from terminaltables import AsciiTable + +from mmdet3d.registry import DATASETS +from mmdet3d.structures import get_box_type + + +@DATASETS.register_module() +class Det3DDataset(BaseDataset): + """Base Class of 3D dataset. + + This is the base dataset of SUNRGB-D, ScanNet, nuScenes, and KITTI + dataset. + # TODO: doc link here for the standard data format + + Args: + data_root (str, optional): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to None. + ann_file (str): Annotation file path. Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_prefix (dict): Prefix for training data. Defaults to + dict(pts='velodyne', img=''). + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input, + it usually has following keys: + + - use_camera: bool + - use_lidar: bool + Defaults to dict(use_lidar=True, use_camera=False). + default_cam_key (str, optional): The default camera name adopted. + Defaults to None. + box_type_3d (str): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR' in this dataset. Available options includes: + + - 'LiDAR': Box in LiDAR coordinates, usually for + outdoor point cloud 3d detection. + - 'Depth': Box in depth coordinates, usually for + indoor point cloud 3d detection. + - 'Camera': Box in camera coordinates, usually + for vision-based 3d detection. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + load_eval_anns (bool): Whether to load annotations in test_mode, + the annotation will be save in `eval_ann_infos`, which can be + used in Evaluator. Defaults to True. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + show_ins_var (bool): For debug purpose. Whether to show variation + of the number of instances before and after through pipeline. + Defaults to False. 
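+
+    Examples:
+        ``Det3DDataset`` is not used directly; subclasses such as
+        ``KittiDataset`` are built from configs. A sketch with illustrative
+        paths and a minimal pipeline:
+
+        >>> from mmdet3d.registry import DATASETS
+        >>> dataset_cfg = dict(
+        ...     type='KittiDataset',
+        ...     data_root='data/kitti/',
+        ...     ann_file='kitti_infos_train.pkl',
+        ...     data_prefix=dict(pts='training/velodyne'),
+        ...     pipeline=[
+        ...         dict(type='LoadPointsFromFile', coord_type='LIDAR',
+        ...              load_dim=4, use_dim=4)
+        ...     ],
+        ...     modality=dict(use_lidar=True, use_camera=False),
+        ...     box_type_3d='LiDAR')
+        >>> dataset = DATASETS.build(dataset_cfg)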
+ """ + + def __init__(self, + data_root: Optional[str] = None, + ann_file: str = '', + metainfo: Optional[dict] = None, + data_prefix: dict = dict(pts='velodyne', img=''), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True, use_camera=False), + default_cam_key: str = None, + box_type_3d: dict = 'LiDAR', + filter_empty_gt: bool = True, + test_mode: bool = False, + load_eval_anns: bool = True, + backend_args: Optional[dict] = None, + show_ins_var: bool = False, + load_interval: int = 1, + **kwargs) -> None: + self.backend_args = backend_args + self.filter_empty_gt = filter_empty_gt + self.load_eval_anns = load_eval_anns + _default_modality_keys = ('use_lidar', 'use_camera') + if modality is None: + modality = dict() + + # Defaults to False if not specify + for key in _default_modality_keys: + if key not in modality: + modality[key] = False + self.modality = modality + self.default_cam_key = default_cam_key + assert self.modality['use_lidar'] or self.modality['use_camera'], ( + 'Please specify the `modality` (`use_lidar` ' + f', `use_camera`) for {self.__class__.__name__}') + + self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d) + + if metainfo is not None and 'classes' in metainfo: + # we allow to train on subset of self.METAINFO['classes'] + # map unselected labels to -1 + self.label_mapping = { + i: -1 + for i in range(len(self.METAINFO['classes'])) + } + self.label_mapping[-1] = -1 + for label_idx, name in enumerate(metainfo['classes']): + ori_label = self.METAINFO['classes'].index(name) + self.label_mapping[ori_label] = label_idx + + self.num_ins_per_cat = {name: 0 for name in metainfo['classes']} + else: + self.label_mapping = { + i: i + for i in range(len(self.METAINFO['classes'])) + } + self.label_mapping[-1] = -1 + + self.num_ins_per_cat = { + name: 0 + for name in self.METAINFO['classes'] + } + + super().__init__( + ann_file=ann_file, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + pipeline=pipeline, + test_mode=test_mode, + load_interval=load_interval, + **kwargs) + + # can be accessed by other component in runner + self.metainfo['box_type_3d'] = box_type_3d + self.metainfo['label_mapping'] = self.label_mapping + + # used for showing variation of the number of instances before and + # after through the pipeline + self.show_ins_var = show_ins_var + + # show statistics of this dataset + print_log('-' * 30, 'current') + print_log(f'The length of the dataset: {len(self)}', 'current') + content_show = [['category', 'number']] + for cat_name, num in self.num_ins_per_cat.items(): + content_show.append([cat_name, num]) + table = AsciiTable(content_show) + print_log( + f'The number of instances per category in the dataset:\n{table.table}', # noqa: E501 + 'current') + + def _remove_dontcare(self, ann_info: dict) -> dict: + """Remove annotations that do not need to be cared. + + -1 indicates dontcare in MMDet3d. + + Args: + ann_info (dict): Dict of annotation infos. The + instance with label `-1` will be removed. + + Returns: + dict: Annotations after filtering. + """ + img_filtered_annotations = {} + filter_mask = ann_info['gt_labels_3d'] > -1 + for key in ann_info.keys(): + if key != 'instances': + img_filtered_annotations[key] = (ann_info[key][filter_mask]) + else: + img_filtered_annotations[key] = ann_info[key] + return img_filtered_annotations + + def get_ann_info(self, index: int) -> dict: + """Get annotation info according to the given index. 
+ + Use index to get the corresponding annotations, thus the + evalhook could use this api. + + Args: + index (int): Index of the annotation data to get. + + Returns: + dict: Annotation information. + """ + data_info = self.get_data_info(index) + # test model + if 'ann_info' not in data_info: + ann_info = self.parse_ann_info(data_info) + else: + ann_info = data_info['ann_info'] + + return ann_info + + def parse_ann_info(self, info: dict) -> Union[dict, None]: + """Process the `instances` in data info to `ann_info`. + + In `Custom3DDataset`, we simply concatenate all the field + in `instances` to `np.ndarray`, you can do the specific + process in subclass. You have to convert `gt_bboxes_3d` + to different coordinates according to the task. + + Args: + info (dict): Info dict. + + Returns: + dict or None: Processed `ann_info`. + """ + # add s or gt prefix for most keys after concat + # we only process 3d annotations here, the corresponding + # 2d annotation process is in the `LoadAnnotations3D` + # in `transforms` + name_mapping = { + 'bbox_label_3d': 'gt_labels_3d', + 'bbox_label': 'gt_bboxes_labels', + 'bbox': 'gt_bboxes', + 'bbox_3d': 'gt_bboxes_3d', + 'depth': 'depths', + 'center_2d': 'centers_2d', + 'attr_label': 'attr_labels', + 'velocity': 'velocities', + } + instances = info['instances'] + # empty gt + if len(instances) == 0: + return None + else: + keys = list(instances[0].keys()) + ann_info = dict() + for ann_name in keys: + temp_anns = [item[ann_name] for item in instances] + # map the original dataset label to training label + if 'label' in ann_name and ann_name != 'attr_label': + temp_anns = [ + self.label_mapping[item] for item in temp_anns + ] + if ann_name in name_mapping: + mapped_ann_name = name_mapping[ann_name] + else: + mapped_ann_name = ann_name + + if 'label' in ann_name: + temp_anns = np.array(temp_anns).astype(np.int64) + elif ann_name in name_mapping: + temp_anns = np.array(temp_anns).astype(np.float32) + else: + temp_anns = np.array(temp_anns) + + ann_info[mapped_ann_name] = temp_anns + ann_info['instances'] = info['instances'] + + for label in ann_info['gt_labels_3d']: + if label != -1: + cat_name = self.metainfo['classes'][label] + self.num_ins_per_cat[cat_name] += 1 + + return ann_info + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + Convert all relative path of needed modality data file to + the absolute path. And process the `instances` field to + `ann_info` in training stage. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. 
+ """ + + if self.modality['use_lidar']: + info['lidar_points']['lidar_path'] = \ + osp.join( + self.data_prefix.get('pts', ''), + info['lidar_points']['lidar_path']) + + info['num_pts_feats'] = info['lidar_points']['num_pts_feats'] + info['lidar_path'] = info['lidar_points']['lidar_path'] + if 'lidar_sweeps' in info: + for sweep in info['lidar_sweeps']: + file_suffix = sweep['lidar_points']['lidar_path'].split( + os.sep)[-1] + if 'samples' in sweep['lidar_points']['lidar_path']: + sweep['lidar_points']['lidar_path'] = osp.join( + self.data_prefix['pts'], file_suffix) + else: + sweep['lidar_points']['lidar_path'] = osp.join( + self.data_prefix['sweeps'], file_suffix) + + if self.modality['use_camera']: + for cam_id, img_info in info['images'].items(): + if 'img_path' in img_info: + if cam_id in self.data_prefix: + cam_prefix = self.data_prefix[cam_id] + else: + cam_prefix = self.data_prefix.get('img', '') + img_info['img_path'] = osp.join(cam_prefix, + img_info['img_path']) + if self.default_cam_key is not None: + info['img_path'] = info['images'][ + self.default_cam_key]['img_path'] + if 'lidar2cam' in info['images'][self.default_cam_key]: + info['lidar2cam'] = np.array( + info['images'][self.default_cam_key]['lidar2cam']) + if 'cam2img' in info['images'][self.default_cam_key]: + info['cam2img'] = np.array( + info['images'][self.default_cam_key]['cam2img']) + if 'lidar2img' in info['images'][self.default_cam_key]: + info['lidar2img'] = np.array( + info['images'][self.default_cam_key]['lidar2img']) + else: + info['lidar2img'] = info['cam2img'] @ info['lidar2cam'] + + if not self.test_mode: + # used in training + info['ann_info'] = self.parse_ann_info(info) + if self.test_mode and self.load_eval_anns: + info['eval_ann_info'] = self.parse_ann_info(info) + + return info + + def _show_ins_var(self, old_labels: np.ndarray, + new_labels: torch.Tensor) -> None: + """Show variation of the number of instances before and after through + the pipeline. + + Args: + old_labels (np.ndarray): The labels before through the pipeline. + new_labels (torch.Tensor): The labels after through the pipeline. + """ + ori_num_per_cat = dict() + for label in old_labels: + if label != -1: + cat_name = self.metainfo['classes'][label] + ori_num_per_cat[cat_name] = ori_num_per_cat.get(cat_name, + 0) + 1 + new_num_per_cat = dict() + for label in new_labels: + if label != -1: + cat_name = self.metainfo['classes'][label] + new_num_per_cat[cat_name] = new_num_per_cat.get(cat_name, + 0) + 1 + content_show = [['category', 'new number', 'ori number']] + for cat_name, num in ori_num_per_cat.items(): + new_num = new_num_per_cat.get(cat_name, 0) + content_show.append([cat_name, new_num, num]) + table = AsciiTable(content_show) + print_log( + 'The number of instances per category after and before ' + f'through pipeline:\n{table.table}', 'current') + + def prepare_data(self, index: int) -> Union[dict, None]: + """Data preparation for both training and testing stage. + + Called by `__getitem__` of dataset. + + Args: + index (int): Index for accessing the target data. + + Returns: + dict or None: Data dict of the corresponding index. + """ + ori_input_dict = self.get_data_info(index) + + # deepcopy here to avoid inplace modification in pipeline. + input_dict = copy.deepcopy(ori_input_dict) + + # box_type_3d (str): 3D box type. + input_dict['box_type_3d'] = self.box_type_3d + # box_mode_3d (str): 3D box mode. 
+ input_dict['box_mode_3d'] = self.box_mode_3d + + # pre-pipline return None to random another in `__getitem__` + if not self.test_mode and self.filter_empty_gt: + if len(input_dict['ann_info']['gt_labels_3d']) == 0: + return None + + example = self.pipeline(input_dict) + + if not self.test_mode and self.filter_empty_gt: + # after pipeline drop the example with empty annotations + # return None to random another in `__getitem__` + if example is None or len( + example['data_samples'].gt_instances_3d.labels_3d) == 0: + return None + + if self.show_ins_var: + if 'ann_info' in ori_input_dict: + self._show_ins_var( + ori_input_dict['ann_info']['gt_labels_3d'], + example['data_samples'].gt_instances_3d.labels_3d) + else: + print_log( + "'ann_info' is not in the input dict. It's probably that " + 'the data is not in training mode', + 'current', + level=30) + + return example + + def get_cat_ids(self, idx: int) -> Set[int]: + """Get category ids by index. Dataset wrapped by ClassBalancedDataset + must implement this method. + + The ``CBGSDataset`` or ``ClassBalancedDataset``requires a subclass + which implements this method. + + Args: + idx (int): The index of data. + + Returns: + set[int]: All categories in the sample of specified index. + """ + info = self.get_data_info(idx) + gt_labels = info['ann_info']['gt_labels_3d'].tolist() + return set(gt_labels) diff --git a/mmdet3d/datasets/kitti2d_dataset.py b/mmdet3d/datasets/kitti2d_dataset.py new file mode 100755 index 0000000..780ecbf --- /dev/null +++ b/mmdet3d/datasets/kitti2d_dataset.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmengine +import numpy as np + +from mmdet3d.datasets import Det3DDataset +from mmdet3d.registry import DATASETS + + +@DATASETS.register_module() +class Kitti2DDataset(Det3DDataset): + r"""KITTI 2D Dataset. + + This class serves as the API for experiments on the `KITTI Dataset + `_. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + pipeline (list[dict], optional): Pipeline used for data processing. + Defaults to None. + classes (tuple[str], optional): Classes used in the dataset. + Defaults to None. + modality (dict, optional): Modality to specify the sensor data used + as input. Defaults to None. + box_type_3d (str, optional): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR'. Available options includes + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + filter_empty_gt (bool, optional): Whether to filter empty GT. + Defaults to True. + test_mode (bool, optional): Whether the dataset is in test mode. + Defaults to False. 
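`get_cat_ids` exists so that class-balanced wrappers can compute per-sample repeat factors. The sketch below is a toy illustration of the LVIS-style repeat-factor rule such wrappers typically use; it is not the wrapper's actual code, and the threshold value is invented:

```python
import math

# category id sets per sample, as `get_cat_ids` would return them
sample_cats = [{0}, {0}, {0, 2}, {1}]
oversample_thr = 0.5  # hypothetical oversampling threshold

num_samples = len(sample_cats)
freq = {}
for cats in sample_cats:
    for c in cats:
        freq[c] = freq.get(c, 0) + 1
freq = {c: n / num_samples for c, n in freq.items()}

# LVIS-style repeat factor: r(I) = max_{c in I} max(1, sqrt(t / f(c)))
repeat = [
    max(max(1.0, math.sqrt(oversample_thr / freq[c])) for c in cats)
    for cats in sample_cats
]
print([round(r, 2) for r in repeat])  # [1.0, 1.0, 1.41, 1.41]: rarer classes repeat more
```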
+ """ + + classes = ('car', 'pedestrian', 'cyclist') + """ + Annotation format: + [ + { + 'image': { + 'image_idx': 0, + 'image_path': 'training/image_2/000000.png', + 'image_shape': array([ 370, 1224], dtype=int32) + }, + 'point_cloud': { + 'num_features': 4, + 'velodyne_path': 'training/velodyne/000000.bin' + }, + 'calib': { + 'P0': (4, 4), + 'P1': (4, 4), + 'P2': (4, 4), + 'P3': (4, 4), + 'R0_rect':4x4 np.array, + 'Tr_velo_to_cam': 4x4 np.array, + 'Tr_imu_to_velo': 4x4 np.array + }, + 'annos': { + 'name': (n), + 'truncated': (n), + 'occluded': (n), + 'alpha': (n), + 'bbox': (n, 4), + 'dimensions': (n, 3), + 'location': (n, 3), + 'rotation_y': (n), + 'score': (n), + 'index': array([0], dtype=int32), + 'group_ids': array([0], dtype=int32), + 'difficulty': array([0], dtype=int32), + 'num_points_in_gt': (n), + } + } + ] + """ + + def load_annotations(self, ann_file): + """Load annotations from ann_file. + + Args: + ann_file (str): Path of the annotation file. + + Returns: + list[dict]: List of annotations. + """ + self.data_infos = mmengine.load(ann_file) + self.cat2label = { + cat_name: i + for i, cat_name in enumerate(self.classes) + } + return self.data_infos + + def _filter_imgs(self, min_size=32): + """Filter images without ground truths.""" + valid_inds = [] + for i, img_info in enumerate(self.data_infos): + if len(img_info['annos']['name']) > 0: + valid_inds.append(i) + return valid_inds + + def get_ann_info(self, index): + """Get annotation info according to the given index. + + Args: + index (int): Index of the annotation data to get. + + Returns: + dict: Annotation information consists of the following keys: + + - bboxes (np.ndarray): Ground truth bboxes. + - labels (np.ndarray): Labels of ground truths. + """ + # Use index to get the annos, thus the evalhook could also use this api + info = self.data_infos[index] + annos = info['annos'] + gt_names = annos['name'] + gt_bboxes = annos['bbox'] + difficulty = annos['difficulty'] + + # remove classes that is not needed + selected = self.keep_arrays_by_name(gt_names, self.classes) + gt_bboxes = gt_bboxes[selected] + gt_names = gt_names[selected] + difficulty = difficulty[selected] + gt_labels = np.array([self.cat2label[n] for n in gt_names]) + + anns_results = dict( + bboxes=gt_bboxes.astype(np.float32), + labels=gt_labels, + ) + return anns_results + + def prepare_train_img(self, idx): + """Training image preparation. + + Args: + index (int): Index for accessing the target image data. + + Returns: + dict: Training image data dict after preprocessing + corresponding to the index. + """ + img_raw_info = self.data_infos[idx]['image'] + img_info = dict(filename=img_raw_info['image_path']) + ann_info = self.get_ann_info(idx) + if len(ann_info['bboxes']) == 0: + return None + results = dict(img_info=img_info, ann_info=ann_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + def prepare_test_img(self, idx): + """Prepare data for testing. + + Args: + index (int): Index for accessing the target image data. + + Returns: + dict: Testing image data dict after preprocessing + corresponding to the index. 
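The class filtering done in `get_ann_info` above (via `keep_arrays_by_name`, defined further below) boils down to a NumPy index selection. A toy sketch with invented box values:

```python
import numpy as np

classes = ('car', 'pedestrian', 'cyclist')
gt_names = np.array(['car', 'dontcare', 'pedestrian', 'van'])
gt_bboxes = np.arange(16, dtype=np.float32).reshape(4, 4)

# indices of boxes whose class is of interest (cf. `keep_arrays_by_name`)
keep = np.array([i for i, n in enumerate(gt_names) if n in classes], dtype=np.int64)
cat2label = {name: i for i, name in enumerate(classes)}

gt_bboxes = gt_bboxes[keep]
gt_labels = np.array([cat2label[n] for n in gt_names[keep]])
print(gt_labels)         # [0 1]
print(gt_bboxes.shape)   # (2, 4)
```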
+ """ + img_raw_info = self.data_infos[idx]['image'] + img_info = dict(filename=img_raw_info['image_path']) + results = dict(img_info=img_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + def drop_arrays_by_name(self, gt_names, used_classes): + """Drop irrelevant ground truths by name. + + Args: + gt_names (list[str]): Names of ground truths. + used_classes (list[str]): Classes of interest. + + Returns: + np.ndarray: Indices of ground truths that will be dropped. + """ + inds = [i for i, x in enumerate(gt_names) if x not in used_classes] + inds = np.array(inds, dtype=np.int64) + return inds + + def keep_arrays_by_name(self, gt_names, used_classes): + """Keep useful ground truths by name. + + Args: + gt_names (list[str]): Names of ground truths. + used_classes (list[str]): Classes of interest. + + Returns: + np.ndarray: Indices of ground truths that will be keeped. + """ + inds = [i for i, x in enumerate(gt_names) if x in used_classes] + inds = np.array(inds, dtype=np.int64) + return inds + + def reformat_bbox(self, outputs, out=None): + """Reformat bounding boxes to KITTI 2D styles. + + Args: + outputs (list[np.ndarray]): List of arrays storing the inferenced + bounding boxes and scores. + out (str, optional): The prefix of output file. + Default: None. + + Returns: + list[dict]: A list of dictionaries with the kitti 2D format. + """ + from mmdet3d.structures.ops.transforms import bbox2result_kitti2d + sample_idx = [info['image']['image_idx'] for info in self.data_infos] + result_files = bbox2result_kitti2d(outputs, self.classes, sample_idx, + out) + return result_files + + def evaluate(self, result_files, eval_types=None): + """Evaluation in KITTI protocol. + + Args: + result_files (str): Path of result files. + eval_types (str, optional): Types of evaluation. Default: None. + KITTI dataset only support 'bbox' evaluation type. + + Returns: + tuple (str, dict): Average precision results in str format + and average precision results in dict format. + """ + from mmdet3d.evaluation import kitti_eval + eval_types = ['bbox'] if not eval_types else eval_types + assert eval_types in ('bbox', ['bbox' + ]), 'KITTI data set only evaluate bbox' + gt_annos = [info['annos'] for info in self.data_infos] + ap_result_str, ap_dict = kitti_eval( + gt_annos, result_files, self.classes, eval_types=['bbox']) + return ap_result_str, ap_dict diff --git a/mmdet3d/datasets/kitti_dataset.py b/mmdet3d/datasets/kitti_dataset.py new file mode 100755 index 0000000..e8863d4 --- /dev/null +++ b/mmdet3d/datasets/kitti_dataset.py @@ -0,0 +1,171 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, List, Union + +import numpy as np + +from mmdet3d.registry import DATASETS +from mmdet3d.structures import CameraInstance3DBoxes +from .det3d_dataset import Det3DDataset + + +@DATASETS.register_module() +class KittiDataset(Det3DDataset): + r"""KITTI Dataset. + + This class serves as the API for experiments on the `KITTI Dataset + `_. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_lidar=True). + default_cam_key (str): The default camera name adopted. + Defaults to 'CAM2'. + load_type (str): Type of loading mode. Defaults to 'frame_based'. 
+ + - 'frame_based': Load all of the instances in the frame. + - 'mv_image_based': Load all of the instances in the frame and need + to convert to the FOV-based data type to support image-based + detector. + - 'fov_image_based': Only load the instances inside the default + cam, and need to convert to the FOV-based data type to support + image-based detector. + box_type_3d (str): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR' in this dataset. Available options includes: + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + pcd_limit_range (List[float]): The range of point cloud used to filter + invalid predicted boxes. + Defaults to [0, -40, -3, 70.4, 40, 0.0]. + """ + # TODO: use full classes of kitti + METAINFO = { + 'classes': ('Pedestrian', 'Cyclist', 'Car', 'Van', 'Truck', + 'Person_sitting', 'Tram', 'Misc') + } + + def __init__(self, + data_root: str, + ann_file: str, + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True), + default_cam_key: str = 'CAM2', + load_type: str = 'frame_based', + box_type_3d: str = 'LiDAR', + filter_empty_gt: bool = True, + test_mode: bool = False, + pcd_limit_range: List[float] = [0, -40, -3, 70.4, 40, 0.0], + **kwargs) -> None: + + self.pcd_limit_range = pcd_limit_range + assert load_type in ('frame_based', 'mv_image_based', + 'fov_image_based') + self.load_type = load_type + super().__init__( + data_root=data_root, + ann_file=ann_file, + pipeline=pipeline, + modality=modality, + default_cam_key=default_cam_key, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + test_mode=test_mode, + **kwargs) + assert self.modality is not None + assert box_type_3d.lower() in ('lidar', 'camera') + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + The only difference with it in `Det3DDataset` + is the specific process for `plane`. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. 
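A hedged usage sketch for the class above: building a `KittiDataset` through the registry. This assumes mmdet3d and its dependencies are installed, that KITTI info files have already been generated with the standard converter scripts, and that the `register_all_modules` helper from upstream mmdet3d is available in this tree; all paths are placeholders.

```python
from mmdet3d.registry import DATASETS
from mmdet3d.utils import register_all_modules  # assumed upstream helper

register_all_modules()  # populate the registry before building

cfg = dict(
    type='KittiDataset',
    data_root='data/kitti/',                       # placeholder path
    ann_file='kitti_infos_train.pkl',              # placeholder info file
    data_prefix=dict(pts='training/velodyne_reduced'),
    pipeline=[],                                   # no transforms, just load infos
    modality=dict(use_lidar=True, use_camera=False),
    load_type='frame_based',
    box_type_3d='LiDAR',
    test_mode=False)

dataset = DATASETS.build(cfg)
print(len(dataset), dataset.metainfo['classes'])
```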
+ """ + if self.modality['use_lidar']: + if 'plane' in info: + # convert ground plane to velodyne coordinates + plane = np.array(info['plane']) + lidar2cam = np.array( + info['images']['CAM2']['lidar2cam'], dtype=np.float32) + reverse = np.linalg.inv(lidar2cam) + + (plane_norm_cam, plane_off_cam) = (plane[:3], + -plane[:3] * plane[3]) + plane_norm_lidar = \ + (reverse[:3, :3] @ plane_norm_cam[:, None])[:, 0] + plane_off_lidar = ( + reverse[:3, :3] @ plane_off_cam[:, None][:, 0] + + reverse[:3, 3]) + plane_lidar = np.zeros_like(plane_norm_lidar, shape=(4, )) + plane_lidar[:3] = plane_norm_lidar + plane_lidar[3] = -plane_norm_lidar.T @ plane_off_lidar + else: + plane_lidar = None + + info['plane'] = plane_lidar + + if self.load_type == 'fov_image_based' and self.load_eval_anns: + info['instances'] = info['cam_instances'][self.default_cam_key] + + info = super().parse_data_info(info) + + return info + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Data information of single data sample. + + Returns: + dict: Annotation information consists of the following keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): + 3D ground truth bboxes. + - bbox_labels_3d (np.ndarray): Labels of ground truths. + - gt_bboxes (np.ndarray): 2D ground truth bboxes. + - gt_labels (np.ndarray): Labels of ground truths. + - difficulty (int): Difficulty defined by KITTI. + 0, 1, 2 represent xxxxx respectively. + """ + ann_info = super().parse_ann_info(info) + if ann_info is None: + ann_info = dict() + # empty instance + ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64) + + if self.load_type in ['fov_image_based', 'mv_image_based']: + ann_info['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32) + ann_info['gt_bboxes_labels'] = np.array(0, dtype=np.int64) + ann_info['centers_2d'] = np.zeros((0, 2), dtype=np.float32) + ann_info['depths'] = np.zeros((0), dtype=np.float32) + + ann_info = self._remove_dontcare(ann_info) + # in kitti, lidar2cam = R0_rect @ Tr_velo_to_cam + lidar2cam = np.array(info['images']['CAM2']['lidar2cam']) + # convert gt_bboxes_3d to velodyne coordinates with `lidar2cam` + gt_bboxes_3d = CameraInstance3DBoxes( + ann_info['gt_bboxes_3d']).convert_to(self.box_mode_3d, + np.linalg.inv(lidar2cam)) + ann_info['gt_bboxes_3d'] = gt_bboxes_3d + return ann_info diff --git a/mmdet3d/datasets/lyft_dataset.py b/mmdet3d/datasets/lyft_dataset.py new file mode 100755 index 0000000..05bb7a1 --- /dev/null +++ b/mmdet3d/datasets/lyft_dataset.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, List, Union + +import numpy as np + +from mmdet3d.registry import DATASETS +from mmdet3d.structures import LiDARInstance3DBoxes +from .det3d_dataset import Det3DDataset + + +@DATASETS.register_module() +class LyftDataset(Det3DDataset): + r"""Lyft Dataset. + + This class serves as the API for experiments on the Lyft Dataset. + + Please refer to + ``_ + for data downloading. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_camera=False, use_lidar=True). + box_type_3d (str): Type of 3D box of this dataset. 
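The ground-plane conversion in `parse_data_info` above is a plain rigid-transform computation: rotate the plane normal, move a point on the plane, and recompute the offset. The following standalone sketch reproduces that math with an invented `lidar2cam` matrix:

```python
import numpy as np

# plane in camera coordinates: n . x + d = 0, with |n| = 1 (toy values)
plane_cam = np.array([0.0, -1.0, 0.0, 1.65])

# a made-up rigid lidar -> camera transform (4x4 homogeneous matrix)
lidar2cam = np.array([
    [0., -1., 0., 0.],
    [0., 0., -1., -0.08],
    [1., 0., 0., -0.27],
    [0., 0., 0., 1.]])
cam2lidar = np.linalg.inv(lidar2cam)

n_cam = plane_cam[:3]
p_cam = -n_cam * plane_cam[3]                             # a point on the plane
n_lidar = cam2lidar[:3, :3] @ n_cam                       # rotate the normal
p_lidar = cam2lidar[:3, :3] @ p_cam + cam2lidar[:3, 3]    # move the point
plane_lidar = np.concatenate([n_lidar, [-n_lidar @ p_lidar]])
print(plane_lidar)  # ~[0. 0. 1. 1.73]: ground 1.73 m below the toy lidar origin
```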
+ Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR' in this dataset. Available options includes: + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + """ + + METAINFO = { + 'classes': + ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', + 'motorcycle', 'bicycle', 'pedestrian', 'animal') + } + + def __init__(self, + data_root: str, + ann_file: str, + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_camera=False, use_lidar=True), + box_type_3d: str = 'LiDAR', + filter_empty_gt: bool = True, + test_mode: bool = False, + **kwargs): + assert box_type_3d.lower() in ['lidar'] + super().__init__( + data_root=data_root, + ann_file=ann_file, + pipeline=pipeline, + modality=modality, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + test_mode=test_mode, + **kwargs) + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Data information of single data sample. + + Returns: + dict: Annotation information consists of the following keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): + 3D ground truth bboxes. + - gt_labels_3d (np.ndarray): Labels of 3D ground truths. + """ + ann_info = super().parse_ann_info(info) + if ann_info is None: + # empty instance + anns_results = dict() + anns_results['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32) + anns_results['gt_labels_3d'] = np.zeros(0, dtype=np.int64) + return anns_results + gt_bboxes_3d = ann_info['gt_bboxes_3d'] + gt_labels_3d = ann_info['gt_labels_3d'] + + # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be + # the same as KITTI (0.5, 0.5, 0) + gt_bboxes_3d = LiDARInstance3DBoxes( + gt_bboxes_3d, + box_dim=gt_bboxes_3d.shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + anns_results = dict( + gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels_3d) + return anns_results diff --git a/mmdet3d/datasets/nuscenes_dataset.py b/mmdet3d/datasets/nuscenes_dataset.py new file mode 100755 index 0000000..da4fa4e --- /dev/null +++ b/mmdet3d/datasets/nuscenes_dataset.py @@ -0,0 +1,238 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from os import path as osp +from typing import Callable, List, Union + +import numpy as np + +from mmdet3d.registry import DATASETS +from mmdet3d.structures import LiDARInstance3DBoxes +from mmdet3d.structures.bbox_3d.cam_box3d import CameraInstance3DBoxes +from .det3d_dataset import Det3DDataset + + +@DATASETS.register_module() +class NuScenesDataset(Det3DDataset): + r"""NuScenes Dataset. + + This class serves as the API for experiments on the NuScenes Dataset. + + Please refer to `NuScenes Dataset `_ + for data downloading. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + pipeline (list[dict]): Pipeline used for data processing. + Defaults to []. + box_type_3d (str): Type of 3D box of this dataset. 
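The `origin=(0.5, 0.5, 0.5)` argument above shifts the stored box center from the gravity center to the bottom center expected by the LiDAR box structure. A NumPy sketch of that shift, using one toy box and the same arithmetic:

```python
import numpy as np

# (x, y, z, dx, dy, dz, yaw) with z given at the box gravity center
boxes = np.array([[10.0, 2.0, 0.0, 4.0, 1.8, 2.0, 0.3]])

# moving the origin from (0.5, 0.5, 0.5) to (0.5, 0.5, 0) means the stored z
# becomes the bottom-center height: z_bottom = z_center - dz / 2
dst, src = np.array([0.5, 0.5, 0.0]), np.array([0.5, 0.5, 0.5])
boxes[:, :3] += boxes[:, 3:6] * (dst - src)
print(boxes[0, 2])  # -1.0
```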
+ Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR' in this dataset. Available options includes: + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + load_type (str): Type of loading mode. Defaults to 'frame_based'. + + - 'frame_based': Load all of the instances in the frame. + - 'mv_image_based': Load all of the instances in the frame and need + to convert to the FOV-based data type to support image-based + detector. + - 'fov_image_based': Only load the instances inside the default + cam, and need to convert to the FOV-based data type to support + image-based detector. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_camera=False, use_lidar=True). + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + with_velocity (bool): Whether to include velocity prediction + into the experiments. Defaults to True. + use_valid_flag (bool): Whether to use `use_valid_flag` key + in the info file as mask to filter gt_boxes and gt_names. + Defaults to False. + """ + METAINFO = { + 'classes': + ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'), + 'version': + 'v1.0-trainval' + } + + def __init__(self, + data_root: str, + ann_file: str, + pipeline: List[Union[dict, Callable]] = [], + box_type_3d: str = 'LiDAR', + load_type: str = 'frame_based', + modality: dict = dict( + use_camera=False, + use_lidar=True, + ), + filter_empty_gt: bool = True, + test_mode: bool = False, + with_velocity: bool = True, + use_valid_flag: bool = False, + load_interval: int = 1, + **kwargs) -> None: + self.use_valid_flag = use_valid_flag + self.with_velocity = with_velocity + + # TODO: Redesign multi-view data process in the future + assert load_type in ('frame_based', 'mv_image_based', + 'fov_image_based') + self.load_type = load_type + + assert box_type_3d.lower() in ('lidar', 'camera') + super().__init__( + data_root=data_root, + ann_file=ann_file, + modality=modality, + pipeline=pipeline, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + test_mode=test_mode, + load_interval=load_interval, + **kwargs) + + def _filter_with_mask(self, ann_info: dict) -> dict: + """Remove annotations that do not need to be cared. + + Args: + ann_info (dict): Dict of annotation infos. + + Returns: + dict: Annotations after filtering. + """ + filtered_annotations = {} + if self.use_valid_flag: + filter_mask = ann_info['bbox_3d_isvalid'] + else: + filter_mask = ann_info['num_lidar_pts'] > 0 + for key in ann_info.keys(): + if key != 'instances': + filtered_annotations[key] = (ann_info[key][filter_mask]) + else: + filtered_annotations[key] = ann_info[key] + return filtered_annotations + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Data information of single data sample. + + Returns: + dict: Annotation information consists of the following keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): + 3D ground truth bboxes. + - gt_labels_3d (np.ndarray): Labels of ground truths. 
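`_filter_with_mask` above is a straightforward boolean-mask selection applied to every annotation array; a toy sketch with invented values:

```python
import numpy as np

ann_info = dict(
    gt_bboxes_3d=np.zeros((3, 7), dtype=np.float32),
    gt_labels_3d=np.array([0, 3, 7]),
    num_lidar_pts=np.array([15, 0, 4]))

# keep only boxes that contain at least one lidar point
filter_mask = ann_info['num_lidar_pts'] > 0
filtered = {k: v[filter_mask] for k, v in ann_info.items()}
print(filtered['gt_labels_3d'])          # [0 7]
print(filtered['gt_bboxes_3d'].shape)    # (2, 7)
```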
+ """ + ann_info = super().parse_ann_info(info) + if ann_info is not None: + + ann_info = self._filter_with_mask(ann_info) + + if self.with_velocity: + gt_bboxes_3d = ann_info['gt_bboxes_3d'] + gt_velocities = ann_info['velocities'] + nan_mask = np.isnan(gt_velocities[:, 0]) + gt_velocities[nan_mask] = [0.0, 0.0] + gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocities], + axis=-1) + ann_info['gt_bboxes_3d'] = gt_bboxes_3d + else: + # empty instance + ann_info = dict() + if self.with_velocity: + ann_info['gt_bboxes_3d'] = np.zeros((0, 9), dtype=np.float32) + else: + ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64) + + if self.load_type in ['fov_image_based', 'mv_image_based']: + ann_info['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32) + ann_info['gt_bboxes_labels'] = np.array(0, dtype=np.int64) + ann_info['attr_labels'] = np.array(0, dtype=np.int64) + ann_info['centers_2d'] = np.zeros((0, 2), dtype=np.float32) + ann_info['depths'] = np.zeros((0), dtype=np.float32) + + # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be + # the same as KITTI (0.5, 0.5, 0) + # TODO: Unify the coordinates + if self.load_type in ['fov_image_based', 'mv_image_based']: + gt_bboxes_3d = CameraInstance3DBoxes( + ann_info['gt_bboxes_3d'], + box_dim=ann_info['gt_bboxes_3d'].shape[-1], + origin=(0.5, 0.5, 0.5)) + else: + gt_bboxes_3d = LiDARInstance3DBoxes( + ann_info['gt_bboxes_3d'], + box_dim=ann_info['gt_bboxes_3d'].shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + ann_info['gt_bboxes_3d'] = gt_bboxes_3d + + return ann_info + + def parse_data_info(self, info: dict) -> Union[List[dict], dict]: + """Process the raw data info. + + The only difference with it in `Det3DDataset` + is the specific process for `plane`. + + Args: + info (dict): Raw info dict. + + Returns: + List[dict] or dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. 
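The velocity handling in `parse_ann_info` above zeroes NaN velocities and appends them as two extra box dimensions, turning 7-dim boxes into 9-dim boxes. A minimal NumPy sketch with one invented box:

```python
import numpy as np

gt_bboxes_3d = np.array([[5.0, 1.0, -1.0, 4.2, 1.9, 1.6, 0.0]], dtype=np.float32)
velocities = np.array([[np.nan, np.nan]], dtype=np.float32)  # e.g. a static object

# NaN velocities are replaced by zeros before being appended as dims 8-9
nan_mask = np.isnan(velocities[:, 0])
velocities[nan_mask] = [0.0, 0.0]
gt_bboxes_3d = np.concatenate([gt_bboxes_3d, velocities], axis=-1)
print(gt_bboxes_3d.shape)  # (1, 9)
```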
+ """ + if self.load_type == 'mv_image_based': + data_list = [] + if self.modality['use_lidar']: + info['lidar_points']['lidar_path'] = \ + osp.join( + self.data_prefix.get('pts', ''), + info['lidar_points']['lidar_path']) + + if self.modality['use_camera']: + for cam_id, img_info in info['images'].items(): + if 'img_path' in img_info: + if cam_id in self.data_prefix: + cam_prefix = self.data_prefix[cam_id] + else: + cam_prefix = self.data_prefix.get('img', '') + img_info['img_path'] = osp.join( + cam_prefix, img_info['img_path']) + + for idx, (cam_id, img_info) in enumerate(info['images'].items()): + camera_info = dict() + camera_info['images'] = dict() + camera_info['images'][cam_id] = img_info + if 'cam_instances' in info and cam_id in info['cam_instances']: + camera_info['instances'] = info['cam_instances'][cam_id] + else: + camera_info['instances'] = [] + # TODO: check whether to change sample_idx for 6 cameras + # in one frame + camera_info['sample_idx'] = info['sample_idx'] * 6 + idx + camera_info['token'] = info['token'] + camera_info['ego2global'] = info['ego2global'] + + if not self.test_mode: + # used in traing + camera_info['ann_info'] = self.parse_ann_info(camera_info) + if self.test_mode and self.load_eval_anns: + camera_info['eval_ann_info'] = \ + self.parse_ann_info(camera_info) + data_list.append(camera_info) + return data_list + else: + data_info = super().parse_data_info(info) + return data_info diff --git a/mmdet3d/datasets/s3dis_dataset.py b/mmdet3d/datasets/s3dis_dataset.py new file mode 100755 index 0000000..0fa61da --- /dev/null +++ b/mmdet3d/datasets/s3dis_dataset.py @@ -0,0 +1,361 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from os import path as osp +from typing import Any, Callable, List, Optional, Tuple, Union + +import numpy as np + +from mmdet3d.registry import DATASETS +from mmdet3d.structures import DepthInstance3DBoxes +from .det3d_dataset import Det3DDataset +from .seg3d_dataset import Seg3DDataset + + +@DATASETS.register_module() +class S3DISDataset(Det3DDataset): + r"""S3DIS Dataset for Detection Task. + + This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we + often train on 5 of them and test on the remaining one. The one for + test is Area_5 as suggested in `GSDN `_. + To concatenate 5 areas during training + `mmengine.datasets.dataset_wrappers.ConcatDataset` should be used. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_prefix (dict): Prefix for data. Defaults to + dict(pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask'). + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_camera=False, use_lidar=True). + box_type_3d (str): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'Depth' in this dataset. Available options includes: + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. 
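The `mv_image_based` branch above turns one frame into one sample per camera. The sketch below mimics that splitting with a two-camera toy info dict (real nuScenes frames have six cameras, hence the factor 6 in `sample_idx`):

```python
# toy frame info with two cameras instead of six
info = dict(
    sample_idx=42,
    token='frame-token',
    ego2global=[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
    images={'CAM_FRONT': dict(img_path='a.jpg'), 'CAM_BACK': dict(img_path='b.jpg')},
    cam_instances={'CAM_FRONT': [dict(bbox_label=0)], 'CAM_BACK': []})

data_list = []
for idx, (cam_id, img_info) in enumerate(info['images'].items()):
    camera_info = dict(
        images={cam_id: img_info},
        instances=info['cam_instances'].get(cam_id, []),
        # one unique sample_idx per camera, as in the code above
        sample_idx=info['sample_idx'] * 6 + idx,
        token=info['token'],
        ego2global=info['ego2global'])
    data_list.append(camera_info)

print([d['sample_idx'] for d in data_list])  # [252, 253]
```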
Defaults to True. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + """ + METAINFO = { + 'classes': ('table', 'chair', 'sofa', 'bookcase', 'board'), + # the valid ids of segmentation annotations + 'seg_valid_class_ids': (7, 8, 9, 10, 11), + 'seg_all_class_ids': tuple(range(1, 14)) # possibly with 'stair' class + } + + def __init__(self, + data_root: str, + ann_file: str, + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask'), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_camera=False, use_lidar=True), + box_type_3d: str = 'Depth', + filter_empty_gt: bool = True, + test_mode: bool = False, + **kwargs) -> None: + + # construct seg_label_mapping for semantic mask + seg_max_cat_id = len(self.METAINFO['seg_all_class_ids']) + seg_valid_cat_ids = self.METAINFO['seg_valid_class_ids'] + neg_label = len(seg_valid_cat_ids) + seg_label_mapping = np.ones( + seg_max_cat_id + 1, dtype=np.int64) * neg_label + for cls_idx, cat_id in enumerate(seg_valid_cat_ids): + seg_label_mapping[cat_id] = cls_idx + self.seg_label_mapping = seg_label_mapping + + super().__init__( + data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + test_mode=test_mode, + **kwargs) + + self.metainfo['seg_label_mapping'] = self.seg_label_mapping + assert 'use_camera' in self.modality and \ + 'use_lidar' in self.modality + assert self.modality['use_camera'] or self.modality['use_lidar'] + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. + """ + info['pts_instance_mask_path'] = osp.join( + self.data_prefix.get('pts_instance_mask', ''), + info['pts_instance_mask_path']) + info['pts_semantic_mask_path'] = osp.join( + self.data_prefix.get('pts_semantic_mask', ''), + info['pts_semantic_mask_path']) + + info = super().parse_data_info(info) + # only be used in `PointSegClassMapping` in pipeline + # to map original semantic class to valid category ids. + info['seg_label_mapping'] = self.seg_label_mapping + return info + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Info dict. + + Returns: + dict: Processed `ann_info`. + """ + ann_info = super().parse_ann_info(info) + # empty gt + if ann_info is None: + ann_info = dict() + ann_info['gt_bboxes_3d'] = np.zeros((0, 6), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros((0, ), dtype=np.int64) + # to target box structure + + ann_info['gt_bboxes_3d'] = DepthInstance3DBoxes( + ann_info['gt_bboxes_3d'], + box_dim=ann_info['gt_bboxes_3d'].shape[-1], + with_yaw=False, + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + return ann_info + + +class _S3DISSegDataset(Seg3DDataset): + r"""S3DIS Dataset for Semantic Segmentation Task. + + This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we + often train on 5 of them and test on the remaining one. + However, there is not a fixed train-test split of S3DIS. People often test + on Area_5 as suggested by `SEGCloud `_. + But many papers also report the average results of 6-fold cross validation + over the 6 areas (e.g. `DGCNN `_). 
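The `seg_label_mapping` built in `__init__` above maps raw S3DIS segmentation ids to contiguous training ids, with every other id sent to a catch-all "ignore" bucket. A standalone sketch using the same valid ids:

```python
import numpy as np

seg_valid_class_ids = (7, 8, 9, 10, 11)      # table, chair, sofa, bookcase, board
seg_all_class_ids = tuple(range(1, 14))
neg_label = len(seg_valid_class_ids)          # 5 -> "ignored / other"

seg_label_mapping = np.ones(len(seg_all_class_ids) + 1, dtype=np.int64) * neg_label
for cls_idx, cat_id in enumerate(seg_valid_class_ids):
    seg_label_mapping[cat_id] = cls_idx

print(seg_label_mapping)
# [5 5 5 5 5 5 5 0 1 2 3 4 5 5]  -> raw id 7 becomes train id 0, etc.
```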
+ Therefore, we use an inner dataset for one area, and further use a dataset + wrapper to concat all the provided data in different areas. + + Args: + data_root (str, optional): Path of dataset root, Defaults to None. + ann_file (str): Path of annotation file. Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_prefix (dict): Prefix for training data. Defaults to + dict(pts='points', pts_instance_mask='', pts_semantic_mask=''). + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_lidar=True, use_camera=False). + ignore_index (int, optional): The label index to be ignored, e.g. + unannotated points. If None is given, set to len(self.classes) to + be consistent with PointSegClassMapping function in pipeline. + Defaults to None. + scene_idxs (np.ndarray or str, optional): Precomputed index to load + data. For scenes with many points, we may sample it several times. + Defaults to None. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + """ + METAINFO = { + 'classes': + ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter'), + 'palette': [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0], + [255, 0, 255], [100, 100, 255], [200, 200, 100], + [170, 120, 200], [255, 0, 0], [200, 100, 100], + [10, 200, 100], [200, 200, 200], [50, 50, 50]], + 'seg_valid_class_ids': + tuple(range(13)), + 'seg_all_class_ids': + tuple(range(14)) # possibly with 'stair' class + } + + def __init__(self, + data_root: Optional[str] = None, + ann_file: str = '', + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='points', pts_instance_mask='', pts_semantic_mask=''), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True, use_camera=False), + ignore_index: Optional[int] = None, + scene_idxs: Optional[Union[np.ndarray, str]] = None, + test_mode: bool = False, + **kwargs) -> None: + super().__init__( + data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + ignore_index=ignore_index, + scene_idxs=scene_idxs, + test_mode=test_mode, + **kwargs) + + def get_scene_idxs(self, scene_idxs: Union[np.ndarray, str, + None]) -> np.ndarray: + """Compute scene_idxs for data sampling. + + We sample more times for scenes with more points. + """ + # when testing, we load one whole scene every time + if not self.test_mode and scene_idxs is None: + raise NotImplementedError( + 'please provide re-sampled scene indexes for training') + + return super().get_scene_idxs(scene_idxs) + + +@DATASETS.register_module() +class S3DISSegDataset(_S3DISSegDataset): + r"""S3DIS Dataset for Semantic Segmentation Task. + + This class serves as the API for experiments on the S3DIS Dataset. + It wraps the provided datasets of different areas. + We don't use `mmdet.datasets.dataset_wrappers.ConcatDataset` because we + need to concat the `scene_idxs` of different areas. + + Please refer to the `google form `_ for + data downloading. + + Args: + data_root (str, optional): Path of dataset root. Defaults to None. + ann_files (List[str]): Path of several annotation files. + Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_prefix (dict): Prefix for training data. 
Defaults to + dict(pts='points', pts_instance_mask='', pts_semantic_mask=''). + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_lidar=True, use_camera=False). + ignore_index (int, optional): The label index to be ignored, e.g. + unannotated points. If None is given, set to len(self.classes) to + be consistent with PointSegClassMapping function in pipeline. + Defaults to None. + scene_idxs (List[np.ndarray] | List[str], optional): Precomputed index + to load data. For scenes with many points, we may sample it + several times. Defaults to None. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + """ + + def __init__(self, + data_root: Optional[str] = None, + ann_files: List[str] = '', + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='points', pts_instance_mask='', pts_semantic_mask=''), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True, use_camera=False), + ignore_index: Optional[int] = None, + scene_idxs: Optional[Union[List[np.ndarray], + List[str]]] = None, + test_mode: bool = False, + **kwargs) -> None: + + # make sure that ann_files and scene_idxs have same length + ann_files = self._check_ann_files(ann_files) + scene_idxs = self._check_scene_idxs(scene_idxs, len(ann_files)) + + # initialize some attributes as datasets[0] + super().__init__( + data_root=data_root, + ann_file=ann_files[0], + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + ignore_index=ignore_index, + scene_idxs=scene_idxs[0], + test_mode=test_mode, + **kwargs) + + datasets = [ + _S3DISSegDataset( + data_root=data_root, + ann_file=ann_files[i], + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + ignore_index=ignore_index, + scene_idxs=scene_idxs[i], + test_mode=test_mode, + **kwargs) for i in range(len(ann_files)) + ] + + # data_list and scene_idxs need to be concat + self.concat_data_list([dst.data_list for dst in datasets]) + + # set group flag for the sampler + if not self.test_mode: + self._set_group_flag() + + def concat_data_list(self, data_lists: List[List[dict]]) -> None: + """Concat data_list from several datasets to form self.data_list. + + Args: + data_lists (List[List[dict]]): List of dict containing + annotation information. 
+ """ + self.data_list = [ + data for data_list in data_lists for data in data_list + ] + + @staticmethod + def _duplicate_to_list(x: Any, num: int) -> list: + """Repeat x `num` times to form a list.""" + return [x for _ in range(num)] + + def _check_ann_files( + self, ann_file: Union[List[str], Tuple[str], str]) -> List[str]: + """Make ann_files as list/tuple.""" + # ann_file could be str + if not isinstance(ann_file, (list, tuple)): + ann_file = self._duplicate_to_list(ann_file, 1) + return ann_file + + def _check_scene_idxs(self, scene_idx: Union[str, List[Union[list, tuple, + np.ndarray]], + List[str], None], + num: int) -> List[np.ndarray]: + """Make scene_idxs as list/tuple.""" + if scene_idx is None: + return self._duplicate_to_list(scene_idx, num) + # scene_idx could be str, np.ndarray, list or tuple + if isinstance(scene_idx, str): # str + return self._duplicate_to_list(scene_idx, num) + if isinstance(scene_idx[0], str): # list of str + return scene_idx + if isinstance(scene_idx[0], (list, tuple, np.ndarray)): # list of idx + return scene_idx + # single idx + return self._duplicate_to_list(scene_idx, num) diff --git a/mmdet3d/datasets/scannet_dataset.py b/mmdet3d/datasets/scannet_dataset.py new file mode 100755 index 0000000..c59e36a --- /dev/null +++ b/mmdet3d/datasets/scannet_dataset.py @@ -0,0 +1,347 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from os import path as osp +from typing import Callable, List, Optional, Union + +import numpy as np + +from mmdet3d.registry import DATASETS +from mmdet3d.structures import DepthInstance3DBoxes +from .det3d_dataset import Det3DDataset +from .seg3d_dataset import Seg3DDataset + + +@DATASETS.register_module() +class ScanNetDataset(Det3DDataset): + r"""ScanNet Dataset for Detection Task. + + This class serves as the API for experiments on the ScanNet Dataset. + + Please refer to the `github repo `_ + for data downloading. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_prefix (dict): Prefix for data. Defaults to + dict(pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask'). + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_camera=False, use_lidar=True). + box_type_3d (str): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'Depth' in this dataset. Available options includes: + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. 
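The multi-area S3DIS wrapper above mostly normalizes its inputs and flattens the per-area data lists. A toy sketch of the `_duplicate_to_list`-style broadcasting and the `concat_data_list`-style flattening (file names are hypothetical):

```python
def duplicate_to_list(x, num):
    return [x for _ in range(num)]

# a single annotation file (and its scene_idxs) is broadcast to a list
ann_files = 's3dis_infos_Area_5.pkl'          # hypothetical file name
if not isinstance(ann_files, (list, tuple)):
    ann_files = duplicate_to_list(ann_files, 1)
scene_idxs = duplicate_to_list(None, len(ann_files))
print(ann_files, scene_idxs)                  # ['s3dis_infos_Area_5.pkl'] [None]

# per-area data lists are simply flattened into one list
area_lists = [[dict(area=1, scan=i) for i in range(2)],
              [dict(area=2, scan=i) for i in range(3)]]
data_list = [data for data_list in area_lists for data in data_list]
print(len(data_list))                         # 5
```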
+ """ + METAINFO = { + 'classes': + ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', + 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin'), + # the valid ids of segmentation annotations + 'seg_valid_class_ids': + (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), + 'seg_all_class_ids': + tuple(range(1, 41)) + } + + def __init__(self, + data_root: str, + ann_file: str, + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask'), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_camera=False, use_lidar=True), + box_type_3d: str = 'Depth', + filter_empty_gt: bool = True, + test_mode: bool = False, + **kwargs) -> None: + + # construct seg_label_mapping for semantic mask + seg_max_cat_id = len(self.METAINFO['seg_all_class_ids']) + seg_valid_cat_ids = self.METAINFO['seg_valid_class_ids'] + neg_label = len(seg_valid_cat_ids) + seg_label_mapping = np.ones( + seg_max_cat_id + 1, dtype=np.int64) * neg_label + for cls_idx, cat_id in enumerate(seg_valid_cat_ids): + seg_label_mapping[cat_id] = cls_idx + self.seg_label_mapping = seg_label_mapping + + super().__init__( + data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + test_mode=test_mode, + **kwargs) + + self.metainfo['seg_label_mapping'] = self.seg_label_mapping + assert 'use_camera' in self.modality and \ + 'use_lidar' in self.modality + assert self.modality['use_camera'] or self.modality['use_lidar'] + + @staticmethod + def _get_axis_align_matrix(info: dict) -> np.ndarray: + """Get axis_align_matrix from info. If not exist, return identity mat. + + Args: + info (dict): Info of a single sample data. + + Returns: + np.ndarray: 4x4 transformation matrix. + """ + if 'axis_align_matrix' in info: + return np.array(info['axis_align_matrix']) + else: + warnings.warn( + 'axis_align_matrix is not found in ScanNet data info, please ' + 'use new pre-process scripts to re-generate ScanNet data') + return np.eye(4).astype(np.float32) + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + The only difference with it in `Det3DDataset` + is the specific process for `axis_align_matrix'. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. + """ + info['axis_align_matrix'] = self._get_axis_align_matrix(info) + info['pts_instance_mask_path'] = osp.join( + self.data_prefix.get('pts_instance_mask', ''), + info['pts_instance_mask_path']) + info['pts_semantic_mask_path'] = osp.join( + self.data_prefix.get('pts_semantic_mask', ''), + info['pts_semantic_mask_path']) + + info = super().parse_data_info(info) + # only be used in `PointSegClassMapping` in pipeline + # to map original semantic class to valid category ids. + info['seg_label_mapping'] = self.seg_label_mapping + return info + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Info dict. + + Returns: + dict: Processed `ann_info`. 
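The `axis_align_matrix` retrieved above is a 4x4 homogeneous transform that is later applied to points and boxes in the pipeline. A toy sketch with an invented rotation-plus-translation matrix, showing how points would be aligned:

```python
import numpy as np

# a toy axis-align matrix: rotate 90 degrees around z and shift the origin
theta = np.pi / 2
axis_align_matrix = np.array([
    [np.cos(theta), -np.sin(theta), 0, 1.0],
    [np.sin(theta),  np.cos(theta), 0, 2.0],
    [0, 0, 1, 0.5],
    [0, 0, 0, 1.0]])

points = np.array([[1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])
homo = np.hstack([points, np.ones((points.shape[0], 1))])
aligned = (axis_align_matrix @ homo.T).T[:, :3]
print(aligned)  # approximately [[1. 3. 0.5], [0. 2. 0.5]]
```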
+ """ + ann_info = super().parse_ann_info(info) + # empty gt + if ann_info is None: + ann_info = dict() + ann_info['gt_bboxes_3d'] = np.zeros((0, 6), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros((0, ), dtype=np.int64) + # to target box structure + + ann_info['gt_bboxes_3d'] = DepthInstance3DBoxes( + ann_info['gt_bboxes_3d'], + box_dim=ann_info['gt_bboxes_3d'].shape[-1], + with_yaw=False, + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + return ann_info + + +@DATASETS.register_module() +class ScanNetSegDataset(Seg3DDataset): + r"""ScanNet Dataset for Semantic Segmentation Task. + + This class serves as the API for experiments on the ScanNet Dataset. + + Please refer to the `github repo `_ + for data downloading. + + Args: + data_root (str, optional): Path of dataset root. Defaults to None. + ann_file (str): Path of annotation file. Defaults to ''. + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_prefix (dict): Prefix for training data. Defaults to + dict(pts='points', + img='', + pts_instance_mask='', + pts_semantic_mask=''). + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_lidar=True, use_camera=False). + ignore_index (int, optional): The label index to be ignored, e.g. + unannotated points. If None is given, set to len(self.classes) to + be consistent with PointSegClassMapping function in pipeline. + Defaults to None. + scene_idxs (np.ndarray or str, optional): Precomputed index to load + data. For scenes with many points, we may sample it several times. + Defaults to None. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + """ + METAINFO = { + 'classes': + ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', + 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'otherfurniture'), + 'palette': [ + [174, 199, 232], + [152, 223, 138], + [31, 119, 180], + [255, 187, 120], + [188, 189, 34], + [140, 86, 75], + [255, 152, 150], + [214, 39, 40], + [197, 176, 213], + [148, 103, 189], + [196, 156, 148], + [23, 190, 207], + [247, 182, 210], + [219, 219, 141], + [255, 127, 14], + [158, 218, 229], + [44, 160, 44], + [112, 128, 144], + [227, 119, 194], + [82, 84, 163], + ], + 'seg_valid_class_ids': (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, + 24, 28, 33, 34, 36, 39), + 'seg_all_class_ids': + tuple(range(41)), + } + + def __init__(self, + data_root: Optional[str] = None, + ann_file: str = '', + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='points', + img='', + pts_instance_mask='', + pts_semantic_mask=''), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True, use_camera=False), + ignore_index: Optional[int] = None, + scene_idxs: Optional[Union[np.ndarray, str]] = None, + test_mode: bool = False, + **kwargs) -> None: + super().__init__( + data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + ignore_index=ignore_index, + scene_idxs=scene_idxs, + test_mode=test_mode, + **kwargs) + + def get_scene_idxs(self, scene_idxs: Union[np.ndarray, str, + None]) -> np.ndarray: + """Compute scene_idxs for data sampling. + + We sample more times for scenes with more points. 
+ """ + # when testing, we load one whole scene every time + if not self.test_mode and scene_idxs is None: + raise NotImplementedError( + 'please provide re-sampled scene indexes for training') + + return super().get_scene_idxs(scene_idxs) + + +@DATASETS.register_module() +class ScanNetInstanceSegDataset(Seg3DDataset): + + METAINFO = { + 'classes': + ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', + 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin'), + 'palette': [ + [174, 199, 232], + [152, 223, 138], + [31, 119, 180], + [255, 187, 120], + [188, 189, 34], + [140, 86, 75], + [255, 152, 150], + [214, 39, 40], + [197, 176, 213], + [148, 103, 189], + [196, 156, 148], + [23, 190, 207], + [247, 182, 210], + [219, 219, 141], + [255, 127, 14], + [158, 218, 229], + [44, 160, 44], + [112, 128, 144], + [227, 119, 194], + [82, 84, 163], + ], + 'seg_valid_class_ids': + (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), + 'seg_all_class_ids': + tuple(range(41)) + } + + def __init__(self, + data_root: Optional[str] = None, + ann_file: str = '', + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='points', + img='', + pts_instance_mask='', + pts_semantic_mask=''), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True, use_camera=False), + test_mode: bool = False, + ignore_index: Optional[int] = None, + scene_idxs: Optional[Union[np.ndarray, str]] = None, + backend_args: Optional[dict] = None, + **kwargs) -> None: + super().__init__( + data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + pipeline=pipeline, + data_prefix=data_prefix, + modality=modality, + test_mode=test_mode, + ignore_index=ignore_index, + scene_idxs=scene_idxs, + backend_args=backend_args, + **kwargs) diff --git a/mmdet3d/datasets/seg3d_dataset.py b/mmdet3d/datasets/seg3d_dataset.py new file mode 100755 index 0000000..5fd8a35 --- /dev/null +++ b/mmdet3d/datasets/seg3d_dataset.py @@ -0,0 +1,337 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from os import path as osp +from typing import Callable, List, Optional, Sequence, Union + +import numpy as np +from mmengine.dataset import BaseDataset +from mmengine.fileio import get_local_path + +from mmdet3d.registry import DATASETS + + +@DATASETS.register_module() +class Seg3DDataset(BaseDataset): + """Base Class for 3D semantic segmentation dataset. + + This is the base dataset of ScanNet, S3DIS and SemanticKITTI dataset. + + Args: + data_root (str, optional): Path of dataset root. Defaults to None. + ann_file (str): Path of annotation file. Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_prefix (dict): Prefix for training data. Defaults to + dict(pts='points', + img='', + pts_instance_mask='', + pts_semantic_mask=''). + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used + as input, it usually has following keys: + + - use_camera: bool + - use_lidar: bool + Defaults to dict(use_lidar=True, use_camera=False). + ignore_index (int, optional): The label index to be ignored, e.g. + unannotated points. If None is given, set to len(self.classes) to + be consistent with PointSegClassMapping function in pipeline. + Defaults to None. + scene_idxs (np.ndarray or str, optional): Precomputed index to load + data. 
For scenes with many points, we may sample it several times. + Defaults to None. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + serialize_data (bool): Whether to hold memory using serialized objects, + when enabled, data loader workers can use shared RAM from master + process instead of making a copy. + Defaults to False for 3D Segmentation datasets. + load_eval_anns (bool): Whether to load annotations in test_mode, + the annotation will be save in `eval_ann_infos`, which can be used + in Evaluator. Defaults to True. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + """ + METAINFO = { + 'classes': None, # names of all classes data used for the task + 'palette': None, # official color for visualization + 'seg_valid_class_ids': None, # class_ids used for training + 'seg_all_class_ids': None, # all possible class_ids in loaded seg mask + } + + def __init__(self, + data_root: Optional[str] = None, + ann_file: str = '', + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='points', + img='', + pts_instance_mask='', + pts_semantic_mask=''), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True, use_camera=False), + ignore_index: Optional[int] = None, + scene_idxs: Optional[Union[str, np.ndarray]] = None, + test_mode: bool = False, + serialize_data: bool = False, + load_eval_anns: bool = True, + backend_args: Optional[dict] = None, + **kwargs) -> None: + self.backend_args = backend_args + self.modality = modality + self.load_eval_anns = load_eval_anns + + # TODO: We maintain the ignore_index attributes, + # but we may consider to remove it in the future. + self.ignore_index = len(self.METAINFO['classes']) if \ + ignore_index is None else ignore_index + + # Get label mapping for custom classes + new_classes = metainfo.get('classes', None) + + self.label_mapping, self.label2cat, seg_valid_class_ids = \ + self.get_label_mapping(new_classes) + + metainfo['label_mapping'] = self.label_mapping + metainfo['label2cat'] = self.label2cat + metainfo['ignore_index'] = self.ignore_index + metainfo['seg_valid_class_ids'] = seg_valid_class_ids + + # generate palette if it is not defined based on + # label mapping, otherwise directly use palette + # defined in dataset config. + palette = metainfo.get('palette', None) + updated_palette = self._update_palette(new_classes, palette) + + metainfo['palette'] = updated_palette + + # construct seg_label_mapping for semantic mask + self.seg_label_mapping = self.get_seg_label_mapping(metainfo) + + super().__init__( + ann_file=ann_file, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + pipeline=pipeline, + test_mode=test_mode, + serialize_data=serialize_data, + **kwargs) + + self.metainfo['seg_label_mapping'] = self.seg_label_mapping + self.scene_idxs = self.get_scene_idxs(scene_idxs) + self.data_list = [self.data_list[i] for i in self.scene_idxs] + + # set group flag for the sampler + if not self.test_mode: + self._set_group_flag() + + def get_label_mapping(self, + new_classes: Optional[Sequence] = None) -> tuple: + """Get label mapping. + + The ``label_mapping`` is a dictionary, its keys are the old label ids + and its values are the new label ids, and is used for changing pixel + labels in load_annotations. If and only if old classes in cls.METAINFO + is not equal to new classes in self._metainfo and nether of them is not + None, `label_mapping` is not None. 
+ + Args: + new_classes (list or tuple, optional): The new classes name from + metainfo. Defaults to None. + + Returns: + tuple: The mapping from old classes in cls.METAINFO to + new classes in metainfo + """ + old_classes = self.METAINFO.get('classes', None) + if (new_classes is not None and old_classes is not None + and list(new_classes) != list(old_classes)): + if not set(new_classes).issubset(old_classes): + raise ValueError( + f'new classes {new_classes} is not a ' + f'subset of classes {old_classes} in METAINFO.') + + # obtain true id from valid_class_ids + valid_class_ids = [ + self.METAINFO['seg_valid_class_ids'][old_classes.index( + cls_name)] for cls_name in new_classes + ] + label_mapping = { + cls_id: self.ignore_index + for cls_id in self.METAINFO['seg_all_class_ids'] + } + label_mapping.update( + {cls_id: i + for i, cls_id in enumerate(valid_class_ids)}) + label2cat = {i: cat_name for i, cat_name in enumerate(new_classes)} + else: + label_mapping = { + cls_id: self.ignore_index + for cls_id in self.METAINFO['seg_all_class_ids'] + } + label_mapping.update({ + cls_id: i + for i, cls_id in enumerate( + self.METAINFO['seg_valid_class_ids']) + }) + # map label to category name + label2cat = { + i: cat_name + for i, cat_name in enumerate(self.METAINFO['classes']) + } + valid_class_ids = self.METAINFO['seg_valid_class_ids'] + + return label_mapping, label2cat, valid_class_ids + + def get_seg_label_mapping(self, metainfo=None): + """Get segmentation label mapping. + + The ``seg_label_mapping`` is an array, its indices are the old label + ids and its values are the new label ids, and is specifically used + for changing point labels in PointSegClassMapping. + + Args: + metainfo (dict, optional): Meta information to set + seg_label_mapping. Defaults to None. + + Returns: + tuple: The mapping from old classes to new classes. + """ + seg_max_cat_id = len(self.METAINFO['seg_all_class_ids']) + seg_valid_cat_ids = self.METAINFO['seg_valid_class_ids'] + neg_label = len(seg_valid_cat_ids) + seg_label_mapping = np.ones( + seg_max_cat_id + 1, dtype=np.int64) * neg_label + for cls_idx, cat_id in enumerate(seg_valid_cat_ids): + seg_label_mapping[cat_id] = cls_idx + return seg_label_mapping + + def _update_palette(self, new_classes: list, palette: Union[None, + list]) -> list: + """Update palette according to metainfo. + + If length of palette is equal to classes, just return the palette. + If palette is not defined, it will randomly generate a palette. + If classes is updated by customer, it will return the subset of + palette. + + Returns: + Sequence: Palette for current dataset. + """ + if palette is None: + # If palette is not defined, it generate a palette according + # to the original palette and classes. + old_classes = self.METAINFO.get('classes', None) + palette = [ + self.METAINFO['palette'][old_classes.index(cls_name)] + for cls_name in new_classes + ] + return palette + + # palette does match classes + if len(palette) == len(new_classes): + return palette + else: + raise ValueError('Once palette in set in metainfo, it should' + 'match classes in metainfo') + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + Convert all relative path of needed modality data file to + the absolute path. And process + the `instances` field to `ann_info` in training stage. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. 
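The class-subset remapping implemented by `get_label_mapping` above can be traced with a tiny invented METAINFO; everything outside the chosen subset collapses to `ignore_index`:

```python
# METAINFO-style constants for a toy dataset with four raw ids
seg_all_class_ids = (0, 1, 2, 3)
seg_valid_class_ids = (1, 2, 3)
classes = ('wall', 'floor', 'chair')
ignore_index = len(classes)          # 3

# train only on a subset of the classes
new_classes = ('floor', 'chair')
valid_class_ids = [seg_valid_class_ids[classes.index(c)] for c in new_classes]

label_mapping = {cls_id: ignore_index for cls_id in seg_all_class_ids}
label_mapping.update({cls_id: i for i, cls_id in enumerate(valid_class_ids)})
label2cat = {i: c for i, c in enumerate(new_classes)}

print(label_mapping)  # {0: 3, 1: 3, 2: 0, 3: 1}
print(label2cat)      # {0: 'floor', 1: 'chair'}
```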
+ """ + if self.modality['use_lidar']: + info['lidar_points']['lidar_path'] = \ + osp.join( + self.data_prefix.get('pts', ''), + info['lidar_points']['lidar_path']) + if 'num_pts_feats' in info['lidar_points']: + info['num_pts_feats'] = info['lidar_points']['num_pts_feats'] + info['lidar_path'] = info['lidar_points']['lidar_path'] + + if self.modality['use_camera']: + for cam_id, img_info in info['images'].items(): + if 'img_path' in img_info: + img_info['img_path'] = osp.join( + self.data_prefix.get('img', ''), img_info['img_path']) + + if 'pts_instance_mask_path' in info: + info['pts_instance_mask_path'] = \ + osp.join(self.data_prefix.get('pts_instance_mask', ''), + info['pts_instance_mask_path']) + + if 'pts_semantic_mask_path' in info: + info['pts_semantic_mask_path'] = \ + osp.join(self.data_prefix.get('pts_semantic_mask', ''), + info['pts_semantic_mask_path']) + + # only be used in `PointSegClassMapping` in pipeline + # to map original semantic class to valid category ids. + info['seg_label_mapping'] = self.seg_label_mapping + + # 'eval_ann_info' will be updated in loading transforms + if self.test_mode and self.load_eval_anns: + info['eval_ann_info'] = dict() + + return info + + def prepare_data(self, idx: int) -> dict: + """Get data processed by ``self.pipeline``. + + Args: + idx (int): The index of ``data_info``. + + Returns: + dict: Results passed through ``self.pipeline``. + """ + if not self.test_mode: + data_info = self.get_data_info(idx) + # Pass the dataset to the pipeline during training to support mixed + # data augmentation, such as polarmix and lasermix. + data_info['dataset'] = self + return self.pipeline(data_info) + else: + return super().prepare_data(idx) + + def get_scene_idxs(self, scene_idxs: Union[None, str, + np.ndarray]) -> np.ndarray: + """Compute scene_idxs for data sampling. + + We sample more times for scenes with more points. + """ + if self.test_mode: + # when testing, we load one whole scene every time + return np.arange(len(self)).astype(np.int32) + + # we may need to re-sample different scenes according to scene_idxs + # this is necessary for indoor scene segmentation such as ScanNet + if scene_idxs is None: + scene_idxs = np.arange(len(self)) + if isinstance(scene_idxs, str): + scene_idxs = osp.join(self.data_root, scene_idxs) + with get_local_path( + scene_idxs, backend_args=self.backend_args) as local_path: + scene_idxs = np.load(local_path) + else: + scene_idxs = np.array(scene_idxs) + + return scene_idxs.astype(np.int32) + + def _set_group_flag(self) -> None: + """Set flag according to image aspect ratio. + + Images with aspect ratio greater than 1 will be set as group 1, + otherwise group 0. In 3D datasets, they are all the same, thus are all + zeros. + """ + self.flag = np.zeros(len(self), dtype=np.uint8) diff --git a/mmdet3d/datasets/semantickitti_dataset.py b/mmdet3d/datasets/semantickitti_dataset.py new file mode 100755 index 0000000..a8a57ce --- /dev/null +++ b/mmdet3d/datasets/semantickitti_dataset.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, List, Optional, Union + +import numpy as np + +from mmdet3d.registry import DATASETS +from .seg3d_dataset import Seg3DDataset + + +@DATASETS.register_module() +class SemanticKittiDataset(Seg3DDataset): + r"""SemanticKitti Dataset. + + This class serves as the API for experiments on the SemanticKITTI Dataset + Please refer to `_ + for data downloading + + Args: + data_root (str, optional): Path of dataset root. Defaults to None. 
+ ann_file (str): Path of annotation file. Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_prefix (dict): Prefix for training data. Defaults to + dict(pts='', + img='', + pts_instance_mask='', + pts_semantic_mask=''). + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input, + it usually has following keys: + + - use_camera: bool + - use_lidar: bool + Defaults to dict(use_lidar=True, use_camera=False). + ignore_index (int, optional): The label index to be ignored, e.g. + unannotated points. If None is given, set to len(self.classes) to + be consistent with PointSegClassMapping function in pipeline. + Defaults to None. + scene_idxs (np.ndarray or str, optional): Precomputed index to load + data. For scenes with many points, we may sample it several times. + Defaults to None. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + """ + METAINFO = { + 'classes': ('car', 'bicycle', 'motorcycle', 'truck', 'bus', 'person', + 'bicyclist', 'motorcyclist', 'road', 'parking', 'sidewalk', + 'other-ground', 'building', 'fence', 'vegetation', + 'trunck', 'terrian', 'pole', 'traffic-sign'), + 'palette': [[100, 150, 245], [100, 230, 245], [30, 60, 150], + [80, 30, 180], [100, 80, 250], [155, 30, 30], + [255, 40, 200], [150, 30, 90], [255, 0, 255], + [255, 150, 255], [75, 0, 75], [175, 0, 75], [255, 200, 0], + [255, 120, 50], [0, 175, 0], [135, 60, 0], [150, 240, 80], + [255, 240, 150], [255, 0, 0]], + 'seg_valid_class_ids': + tuple(range(19)), + 'seg_all_class_ids': + tuple(range(19)), + } + + def __init__(self, + data_root: Optional[str] = None, + ann_file: str = '', + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='', + img='', + pts_instance_mask='', + pts_semantic_mask=''), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True, use_camera=False), + ignore_index: Optional[int] = None, + scene_idxs: Optional[Union[str, np.ndarray]] = None, + test_mode: bool = False, + **kwargs) -> None: + + super().__init__( + data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + ignore_index=ignore_index, + scene_idxs=scene_idxs, + test_mode=test_mode, + **kwargs) + + def get_seg_label_mapping(self, metainfo): + seg_label_mapping = np.zeros(metainfo['max_label'] + 1, dtype=np.int64) + for idx in metainfo['seg_label_mapping']: + seg_label_mapping[idx] = metainfo['seg_label_mapping'][idx] + return seg_label_mapping diff --git a/mmdet3d/datasets/sunrgbd_dataset.py b/mmdet3d/datasets/sunrgbd_dataset.py new file mode 100755 index 0000000..7ceb555 --- /dev/null +++ b/mmdet3d/datasets/sunrgbd_dataset.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import os.path as osp +from typing import Callable, List, Optional, Union + +import numpy as np + +from mmdet3d.registry import DATASETS +from mmdet3d.structures import DepthInstance3DBoxes +from .det3d_dataset import Det3DDataset + + +@DATASETS.register_module() +class SUNRGBDDataset(Det3DDataset): + r"""SUNRGBD Dataset. + + This class serves as the API for experiments on the SUNRGBD Dataset. + + See the `download page `_ + for data downloading. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + metainfo (dict, optional): Meta information for dataset, such as class + information. 
Defaults to None. + data_prefix (dict): Prefix for data. Defaults to + dict(pts='points',img='sunrgbd_trainval'). + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_camera=True, use_lidar=True). + default_cam_key (str): The default camera name adopted. + Defaults to 'CAM0'. + box_type_3d (str): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'Depth' in this dataset. Available options includes: + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + filter_empty_gt (bool): Whether to filter empty GT. + Defaults to True. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + """ + METAINFO = { + 'classes': ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', + 'dresser', 'night_stand', 'bookshelf', 'bathtub') + } + + def __init__(self, + data_root: str, + ann_file: str, + metainfo: Optional[dict] = None, + data_prefix: dict = dict( + pts='points', img='sunrgbd_trainval/image'), + pipeline: List[Union[dict, Callable]] = [], + default_cam_key: str = 'CAM0', + modality: dict = dict(use_camera=True, use_lidar=True), + box_type_3d: str = 'Depth', + filter_empty_gt: bool = True, + test_mode: bool = False, + **kwargs) -> None: + super().__init__( + data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + data_prefix=data_prefix, + pipeline=pipeline, + default_cam_key=default_cam_key, + modality=modality, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + test_mode=test_mode, + **kwargs) + assert 'use_camera' in self.modality and \ + 'use_lidar' in self.modality + assert self.modality['use_camera'] or self.modality['use_lidar'] + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + Convert all relative path of needed modality data file to + the absolute path. And process + the `instances` field to `ann_info` in training stage. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. + """ + + if self.modality['use_lidar']: + info['lidar_points']['lidar_path'] = \ + osp.join( + self.data_prefix.get('pts', ''), + info['lidar_points']['lidar_path']) + + if self.modality['use_camera']: + for cam_id, img_info in info['images'].items(): + if 'img_path' in img_info: + img_info['img_path'] = osp.join( + self.data_prefix.get('img', ''), img_info['img_path']) + if self.default_cam_key is not None: + info['img_path'] = info['images'][ + self.default_cam_key]['img_path'] + info['depth2img'] = np.array( + info['images'][self.default_cam_key]['depth2img'], + dtype=np.float32) + + if not self.test_mode: + # used in traing + info['ann_info'] = self.parse_ann_info(info) + if self.test_mode and self.load_eval_anns: + info['eval_ann_info'] = self.parse_ann_info(info) + + return info + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Info dict. 
+ + Returns: + dict: Processed `ann_info` + """ + ann_info = super().parse_ann_info(info) + # process data without any annotations + if ann_info is None: + ann_info = dict() + ann_info['gt_bboxes_3d'] = np.zeros((0, 6), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros((0, ), dtype=np.int64) + # to target box structure + ann_info['gt_bboxes_3d'] = DepthInstance3DBoxes( + ann_info['gt_bboxes_3d'], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + return ann_info diff --git a/mmdet3d/datasets/transforms/__init__.py b/mmdet3d/datasets/transforms/__init__.py new file mode 100755 index 0000000..4c0587f --- /dev/null +++ b/mmdet3d/datasets/transforms/__init__.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .dbsampler import DataBaseSampler +from .formating import Pack3DDetInputs +from .loading import (LoadAnnotations3D, LoadImageFromFileMono3D, + LoadMultiViewImageFromFiles, LoadPointsFromDict, + LoadPointsFromFile, LoadPointsFromMultiSweeps, + MonoDet3DInferencerLoader, + MultiModalityDet3DInferencerLoader, NormalizePointsColor, + PointSegClassMapping) +from .test_time_aug import MultiScaleFlipAug3D +# yapf: disable +from .transforms_3d import (AffineResize, BackgroundPointsFilter, + GlobalAlignment, GlobalRotScaleTrans, + IndoorPatchPointSample, IndoorPointSample, + LaserMix, MultiViewWrapper, ObjectNameFilter, + ObjectNoise, ObjectRangeFilter, ObjectSample, + PhotoMetricDistortion3D, PointSample, PointShuffle, + PointsRangeFilter, PolarMix, RandomDropPointsColor, + RandomFlip3D, RandomJitterPoints, RandomResize3D, + RandomShiftScale, Resize3D, VoxelBasedPointSampler) + +__all__ = [ + 'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScaleTrans', + 'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', + 'Pack3DDetInputs', 'LoadMultiViewImageFromFiles', 'LoadPointsFromFile', + 'DataBaseSampler', 'NormalizePointsColor', 'LoadAnnotations3D', + 'IndoorPointSample', 'PointSample', 'PointSegClassMapping', + 'MultiScaleFlipAug3D', 'LoadPointsFromMultiSweeps', + 'BackgroundPointsFilter', 'VoxelBasedPointSampler', 'GlobalAlignment', + 'IndoorPatchPointSample', 'LoadImageFromFileMono3D', 'ObjectNameFilter', + 'RandomDropPointsColor', 'RandomJitterPoints', 'AffineResize', + 'RandomShiftScale', 'LoadPointsFromDict', 'Resize3D', 'RandomResize3D', + 'MultiViewWrapper', 'PhotoMetricDistortion3D', 'MonoDet3DInferencerLoader', + 'LidarDet3DInferencerLoader', 'PolarMix', 'LaserMix', + 'MultiModalityDet3DInferencerLoader' +] diff --git a/mmdet3d/datasets/transforms/data_augment_utils.py b/mmdet3d/datasets/transforms/data_augment_utils.py new file mode 100755 index 0000000..5744267 --- /dev/null +++ b/mmdet3d/datasets/transforms/data_augment_utils.py @@ -0,0 +1,411 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numba +import numpy as np +from numba.core.errors import NumbaPerformanceWarning + +from mmdet3d.structures.ops import box_np_ops + +warnings.filterwarnings('ignore', category=NumbaPerformanceWarning) + + +@numba.njit +def _rotation_box2d_jit_(corners, angle, rot_mat_T): + """Rotate 2D boxes. + + Args: + corners (np.ndarray): Corners of boxes. + angle (float): Rotation angle. + rot_mat_T (np.ndarray): Transposed rotation matrix. 
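+
+    Example:
+        An illustrative in-place call (values are arbitrary); ``corners``
+        is rotated around the origin and ``rot_mat_T`` is filled with the
+        transposed rotation matrix:
+
+        >>> import numpy as np
+        >>> corners = np.array([[1., 0.], [0., 1.], [-1., 0.], [0., -1.]])
+        >>> rot_mat_T = np.zeros((2, 2), dtype=corners.dtype)
+        >>> _rotation_box2d_jit_(corners, np.pi / 4, rot_mat_T)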
+ """ + rot_sin = np.sin(angle) + rot_cos = np.cos(angle) + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 1] = rot_sin + rot_mat_T[1, 0] = -rot_sin + rot_mat_T[1, 1] = rot_cos + corners[:] = corners @ rot_mat_T + + +@numba.jit(nopython=True) +def box_collision_test(boxes, qboxes, clockwise=True): + """Box collision test. + + Args: + boxes (np.ndarray): Corners of current boxes. + qboxes (np.ndarray): Boxes to be avoid colliding. + clockwise (bool, optional): Whether the corners are in + clockwise order. Default: True. + """ + N = boxes.shape[0] + K = qboxes.shape[0] + ret = np.zeros((N, K), dtype=np.bool_) + slices = np.array([1, 2, 3, 0]) + lines_boxes = np.stack((boxes, boxes[:, slices, :]), + axis=2) # [N, 4, 2(line), 2(xy)] + lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2) + # vec = np.zeros((2,), dtype=boxes.dtype) + boxes_standup = box_np_ops.corner_to_standup_nd_jit(boxes) + qboxes_standup = box_np_ops.corner_to_standup_nd_jit(qboxes) + for i in range(N): + for j in range(K): + # calculate standup first + iw = ( + min(boxes_standup[i, 2], qboxes_standup[j, 2]) - + max(boxes_standup[i, 0], qboxes_standup[j, 0])) + if iw > 0: + ih = ( + min(boxes_standup[i, 3], qboxes_standup[j, 3]) - + max(boxes_standup[i, 1], qboxes_standup[j, 1])) + if ih > 0: + for k in range(4): + for box_l in range(4): + A = lines_boxes[i, k, 0] + B = lines_boxes[i, k, 1] + C = lines_qboxes[j, box_l, 0] + D = lines_qboxes[j, box_l, 1] + acd = (D[1] - A[1]) * (C[0] - + A[0]) > (C[1] - A[1]) * ( + D[0] - A[0]) + bcd = (D[1] - B[1]) * (C[0] - + B[0]) > (C[1] - B[1]) * ( + D[0] - B[0]) + if acd != bcd: + abc = (C[1] - A[1]) * (B[0] - A[0]) > ( + B[1] - A[1]) * ( + C[0] - A[0]) + abd = (D[1] - A[1]) * (B[0] - A[0]) > ( + B[1] - A[1]) * ( + D[0] - A[0]) + if abc != abd: + ret[i, j] = True # collision. + break + if ret[i, j] is True: + break + if ret[i, j] is False: + # now check complete overlap. + # box overlap qbox: + box_overlap_qbox = True + for box_l in range(4): # point l in qboxes + for k in range(4): # corner k in boxes + vec = boxes[i, k] - boxes[i, (k + 1) % 4] + if clockwise: + vec = -vec + cross = vec[1] * ( + boxes[i, k, 0] - qboxes[j, box_l, 0]) + cross -= vec[0] * ( + boxes[i, k, 1] - qboxes[j, box_l, 1]) + if cross >= 0: + box_overlap_qbox = False + break + if box_overlap_qbox is False: + break + + if box_overlap_qbox is False: + qbox_overlap_box = True + for box_l in range(4): # point box_l in boxes + for k in range(4): # corner k in qboxes + vec = qboxes[j, k] - qboxes[j, (k + 1) % 4] + if clockwise: + vec = -vec + cross = vec[1] * ( + qboxes[j, k, 0] - boxes[i, box_l, 0]) + cross -= vec[0] * ( + qboxes[j, k, 1] - boxes[i, box_l, 1]) + if cross >= 0: # + qbox_overlap_box = False + break + if qbox_overlap_box is False: + break + if qbox_overlap_box: + ret[i, j] = True # collision. + else: + ret[i, j] = True # collision. + return ret + + +@numba.njit +def noise_per_box(boxes, valid_mask, loc_noises, rot_noises): + """Add noise to every box (only on the horizontal plane). + + Args: + boxes (np.ndarray): Input boxes with shape (N, 5). + valid_mask (np.ndarray): Mask to indicate which boxes are valid + with shape (N). + loc_noises (np.ndarray): Location noises with shape (N, M, 3). + rot_noises (np.ndarray): Rotation noises with shape (N, M). + + Returns: + np.ndarray: Mask to indicate whether the noise is + added successfully (pass the collision test). 
+ """ + num_boxes = boxes.shape[0] + num_tests = loc_noises.shape[1] + box_corners = box_np_ops.box2d_to_corner_jit(boxes) + current_corners = np.zeros((4, 2), dtype=boxes.dtype) + rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) + success_mask = -np.ones((num_boxes, ), dtype=np.int64) + # print(valid_mask) + for i in range(num_boxes): + if valid_mask[i]: + for j in range(num_tests): + current_corners[:] = box_corners[i] + current_corners -= boxes[i, :2] + _rotation_box2d_jit_(current_corners, rot_noises[i, j], + rot_mat_T) + current_corners += boxes[i, :2] + loc_noises[i, j, :2] + coll_mat = box_collision_test( + current_corners.reshape(1, 4, 2), box_corners) + coll_mat[0, i] = False + # print(coll_mat) + if not coll_mat.any(): + success_mask[i] = j + box_corners[i] = current_corners + break + return success_mask + + +@numba.njit +def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises, + global_rot_noises): + """Add noise to every box (only on the horizontal plane). Version 2 used + when enable global rotations. + + Args: + boxes (np.ndarray): Input boxes with shape (N, 5). + valid_mask (np.ndarray): Mask to indicate which boxes are valid + with shape (N). + loc_noises (np.ndarray): Location noises with shape (N, M, 3). + rot_noises (np.ndarray): Rotation noises with shape (N, M). + + Returns: + np.ndarray: Mask to indicate whether the noise is + added successfully (pass the collision test). + """ + num_boxes = boxes.shape[0] + num_tests = loc_noises.shape[1] + box_corners = box_np_ops.box2d_to_corner_jit(boxes) + current_corners = np.zeros((4, 2), dtype=boxes.dtype) + current_box = np.zeros((1, 5), dtype=boxes.dtype) + rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) + dst_pos = np.zeros((2, ), dtype=boxes.dtype) + success_mask = -np.ones((num_boxes, ), dtype=np.int64) + corners_norm = np.zeros((4, 2), dtype=boxes.dtype) + corners_norm[1, 1] = 1.0 + corners_norm[2] = 1.0 + corners_norm[3, 0] = 1.0 + corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) + corners_norm = corners_norm.reshape(4, 2) + for i in range(num_boxes): + if valid_mask[i]: + for j in range(num_tests): + current_box[0, :] = boxes[i] + current_radius = np.sqrt(boxes[i, 0]**2 + boxes[i, 1]**2) + current_grot = np.arctan2(boxes[i, 0], boxes[i, 1]) + dst_grot = current_grot + global_rot_noises[i, j] + dst_pos[0] = current_radius * np.sin(dst_grot) + dst_pos[1] = current_radius * np.cos(dst_grot) + current_box[0, :2] = dst_pos + current_box[0, -1] += (dst_grot - current_grot) + + rot_sin = np.sin(current_box[0, -1]) + rot_cos = np.cos(current_box[0, -1]) + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 1] = rot_sin + rot_mat_T[1, 0] = -rot_sin + rot_mat_T[1, 1] = rot_cos + current_corners[:] = current_box[ + 0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2] + current_corners -= current_box[0, :2] + _rotation_box2d_jit_(current_corners, rot_noises[i, j], + rot_mat_T) + current_corners += current_box[0, :2] + loc_noises[i, j, :2] + coll_mat = box_collision_test( + current_corners.reshape(1, 4, 2), box_corners) + coll_mat[0, i] = False + if not coll_mat.any(): + success_mask[i] = j + box_corners[i] = current_corners + loc_noises[i, j, :2] += (dst_pos - boxes[i, :2]) + rot_noises[i, j] += (dst_grot - current_grot) + break + return success_mask + + +def _select_transform(transform, indices): + """Select transform. + + Args: + transform (np.ndarray): Transforms to select from. + indices (np.ndarray): Mask to indicate which transform to select. + + Returns: + np.ndarray: Selected transforms. 
+ """ + result = np.zeros((transform.shape[0], *transform.shape[2:]), + dtype=transform.dtype) + for i in range(transform.shape[0]): + if indices[i] != -1: + result[i] = transform[i, indices[i]] + return result + + +@numba.njit +def _rotation_matrix_3d_(rot_mat_T, angle, axis): + """Get the 3D rotation matrix. + + Args: + rot_mat_T (np.ndarray): Transposed rotation matrix. + angle (float): Rotation angle. + axis (int): Rotation axis. + """ + rot_sin = np.sin(angle) + rot_cos = np.cos(angle) + rot_mat_T[:] = np.eye(3) + if axis == 1: + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 2] = rot_sin + rot_mat_T[2, 0] = -rot_sin + rot_mat_T[2, 2] = rot_cos + elif axis == 2 or axis == -1: + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 1] = rot_sin + rot_mat_T[1, 0] = -rot_sin + rot_mat_T[1, 1] = rot_cos + elif axis == 0: + rot_mat_T[1, 1] = rot_cos + rot_mat_T[1, 2] = rot_sin + rot_mat_T[2, 1] = -rot_sin + rot_mat_T[2, 2] = rot_cos + + +@numba.njit +def points_transform_(points, centers, point_masks, loc_transform, + rot_transform, valid_mask): + """Apply transforms to points and box centers. + + Args: + points (np.ndarray): Input points. + centers (np.ndarray): Input box centers. + point_masks (np.ndarray): Mask to indicate which points need + to be transformed. + loc_transform (np.ndarray): Location transform to be applied. + rot_transform (np.ndarray): Rotation transform to be applied. + valid_mask (np.ndarray): Mask to indicate which boxes are valid. + """ + num_box = centers.shape[0] + num_points = points.shape[0] + rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype) + for i in range(num_box): + _rotation_matrix_3d_(rot_mat_T[i], rot_transform[i], 2) + for i in range(num_points): + for j in range(num_box): + if valid_mask[j]: + if point_masks[i, j] == 1: + points[i, :3] -= centers[j, :3] + points[i:i + 1, :3] = points[i:i + 1, :3] @ rot_mat_T[j] + points[i, :3] += centers[j, :3] + points[i, :3] += loc_transform[j] + break # only apply first box's transform + + +@numba.njit +def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask): + """Transform 3D boxes. + + Args: + boxes (np.ndarray): 3D boxes to be transformed. + loc_transform (np.ndarray): Location transform to be applied. + rot_transform (np.ndarray): Rotation transform to be applied. + valid_mask (np.ndarray): Mask to indicate which boxes are valid. + """ + num_box = boxes.shape[0] + for i in range(num_box): + if valid_mask[i]: + boxes[i, :3] += loc_transform[i] + boxes[i, 6] += rot_transform[i] + + +def noise_per_object_v3_(gt_boxes, + points=None, + valid_mask=None, + rotation_perturb=np.pi / 4, + center_noise_std=1.0, + global_random_rot_range=np.pi / 4, + num_try=100): + """Random rotate or remove each groundtruth independently. use kitti viewer + to test this function points_transform_ + + Args: + gt_boxes (np.ndarray): Ground truth boxes with shape (N, 7). + points (np.ndarray, optional): Input point cloud with + shape (M, 4). Default: None. + valid_mask (np.ndarray, optional): Mask to indicate which + boxes are valid. Default: None. + rotation_perturb (float, optional): Rotation perturbation. + Default: pi / 4. + center_noise_std (float, optional): Center noise standard deviation. + Default: 1.0. + global_random_rot_range (float, optional): Global random rotation + range. Default: pi/4. + num_try (int, optional): Number of try. Default: 100. 
+ """ + num_boxes = gt_boxes.shape[0] + if not isinstance(rotation_perturb, (list, tuple, np.ndarray)): + rotation_perturb = [-rotation_perturb, rotation_perturb] + if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)): + global_random_rot_range = [ + -global_random_rot_range, global_random_rot_range + ] + enable_grot = np.abs(global_random_rot_range[0] - + global_random_rot_range[1]) >= 1e-3 + + if not isinstance(center_noise_std, (list, tuple, np.ndarray)): + center_noise_std = [ + center_noise_std, center_noise_std, center_noise_std + ] + if valid_mask is None: + valid_mask = np.ones((num_boxes, ), dtype=np.bool_) + center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype) + + loc_noises = np.random.normal( + scale=center_noise_std, size=[num_boxes, num_try, 3]) + rot_noises = np.random.uniform( + rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try]) + gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1]) + grot_lowers = global_random_rot_range[0] - gt_grots + grot_uppers = global_random_rot_range[1] - gt_grots + global_rot_noises = np.random.uniform( + grot_lowers[..., np.newaxis], + grot_uppers[..., np.newaxis], + size=[num_boxes, num_try]) + + origin = (0.5, 0.5, 0) + gt_box_corners = box_np_ops.center_to_corner_box3d( + gt_boxes[:, :3], + gt_boxes[:, 3:6], + gt_boxes[:, 6], + origin=origin, + axis=2) + + # TODO: rewrite this noise box function? + if not enable_grot: + selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]], + valid_mask, loc_noises, rot_noises) + else: + selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]], + valid_mask, loc_noises, rot_noises, + global_rot_noises) + + loc_transforms = _select_transform(loc_noises, selected_noise) + rot_transforms = _select_transform(rot_noises, selected_noise) + surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners) + if points is not None: + # TODO: replace this points_in_convex function by my tools? + point_masks = box_np_ops.points_in_convex_polygon_3d_jit( + points[:, :3], surfaces) + points_transform_(points, gt_boxes[:, :3], point_masks, loc_transforms, + rot_transforms, valid_mask) + + box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask) diff --git a/mmdet3d/datasets/transforms/dbsampler.py b/mmdet3d/datasets/transforms/dbsampler.py new file mode 100755 index 0000000..56e8440 --- /dev/null +++ b/mmdet3d/datasets/transforms/dbsampler.py @@ -0,0 +1,345 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os +from typing import List, Optional + +import mmengine +import numpy as np +from mmengine.fileio import get_local_path + +from mmdet3d.datasets.transforms import data_augment_utils +from mmdet3d.registry import TRANSFORMS +from mmdet3d.structures.ops import box_np_ops + + +class BatchSampler: + """Class for sampling specific category of ground truths. + + Args: + sample_list (list[dict]): List of samples. + name (str, optional): The category of samples. Defaults to None. + epoch (int, optional): Sampling epoch. Defaults to None. + shuffle (bool): Whether to shuffle indices. Defaults to False. + drop_reminder (bool): Drop reminder. Defaults to False. 
+ """ + + def __init__(self, + sampled_list: List[dict], + name: Optional[str] = None, + epoch: Optional[int] = None, + shuffle: bool = True, + drop_reminder: bool = False) -> None: + self._sampled_list = sampled_list + self._indices = np.arange(len(sampled_list)) + if shuffle: + np.random.shuffle(self._indices) + self._idx = 0 + self._example_num = len(sampled_list) + self._name = name + self._shuffle = shuffle + self._epoch = epoch + self._epoch_counter = 0 + self._drop_reminder = drop_reminder + + def _sample(self, num: int) -> List[int]: + """Sample specific number of ground truths and return indices. + + Args: + num (int): Sampled number. + + Returns: + list[int]: Indices of sampled ground truths. + """ + if self._idx + num >= self._example_num: + ret = self._indices[self._idx:].copy() + self._reset() + else: + ret = self._indices[self._idx:self._idx + num] + self._idx += num + return ret + + def _reset(self) -> None: + """Reset the index of batchsampler to zero.""" + assert self._name is not None + # print("reset", self._name) + if self._shuffle: + np.random.shuffle(self._indices) + self._idx = 0 + + def sample(self, num: int) -> List[dict]: + """Sample specific number of ground truths. + + Args: + num (int): Sampled number. + + Returns: + list[dict]: Sampled ground truths. + """ + indices = self._sample(num) + return [self._sampled_list[i] for i in indices] + + +@TRANSFORMS.register_module() +class DataBaseSampler(object): + """Class for sampling data from the ground truth database. + + Args: + info_path (str): Path of groundtruth database info. + data_root (str): Path of groundtruth database. + rate (float): Rate of actual sampled over maximum sampled number. + prepare (dict): Name of preparation functions and the input value. + sample_groups (dict): Sampled classes and numbers. + classes (list[str], optional): List of classes. Defaults to None. + points_loader (dict): Config of points loader. Defaults to + dict(type='LoadPointsFromFile', load_dim=4, use_dim=[0, 1, 2, 3]). + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. 
+ """ + + def __init__(self, + info_path: str, + data_root: str, + rate: float, + prepare: dict, + sample_groups: dict, + classes: Optional[List[str]] = None, + points_loader: dict = dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=[0, 1, 2, 3], + backend_args=None), + backend_args: Optional[dict] = None) -> None: + super().__init__() + self.data_root = data_root + self.info_path = info_path + self.rate = rate + self.prepare = prepare + self.classes = classes + self.cat2label = {name: i for i, name in enumerate(classes)} + self.label2cat = {i: name for i, name in enumerate(classes)} + self.points_loader = TRANSFORMS.build(points_loader) + self.backend_args = backend_args + + # load data base infos + with get_local_path( + info_path, backend_args=self.backend_args) as local_path: + # loading data from a file-like object needs file format + db_infos = mmengine.load(open(local_path, 'rb'), file_format='pkl') + + # filter database infos + from mmengine.logging import MMLogger + logger: MMLogger = MMLogger.get_current_instance() + for k, v in db_infos.items(): + logger.info(f'load {len(v)} {k} database infos in DataBaseSampler') + for prep_func, val in prepare.items(): + db_infos = getattr(self, prep_func)(db_infos, val) + logger.info('After filter database:') + for k, v in db_infos.items(): + logger.info(f'load {len(v)} {k} database infos in DataBaseSampler') + + self.db_infos = db_infos + + # load sample groups + # TODO: more elegant way to load sample groups + self.sample_groups = [] + for name, num in sample_groups.items(): + self.sample_groups.append({name: int(num)}) + + self.group_db_infos = self.db_infos # just use db_infos + self.sample_classes = [] + self.sample_max_nums = [] + for group_info in self.sample_groups: + self.sample_classes += list(group_info.keys()) + self.sample_max_nums += list(group_info.values()) + + self.sampler_dict = {} + for k, v in self.group_db_infos.items(): + self.sampler_dict[k] = BatchSampler(v, k, shuffle=True) + # TODO: No group_sampling currently + + @staticmethod + def filter_by_difficulty(db_infos: dict, removed_difficulty: list) -> dict: + """Filter ground truths by difficulties. + + Args: + db_infos (dict): Info of groundtruth database. + removed_difficulty (list): Difficulties that are not qualified. + + Returns: + dict: Info of database after filtering. + """ + new_db_infos = {} + for key, dinfos in db_infos.items(): + new_db_infos[key] = [ + info for info in dinfos + if info['difficulty'] not in removed_difficulty + ] + return new_db_infos + + @staticmethod + def filter_by_min_points(db_infos: dict, min_gt_points_dict: dict) -> dict: + """Filter ground truths by number of points in the bbox. + + Args: + db_infos (dict): Info of groundtruth database. + min_gt_points_dict (dict): Different number of minimum points + needed for different categories of ground truths. + + Returns: + dict: Info of database after filtering. + """ + for name, min_num in min_gt_points_dict.items(): + min_num = int(min_num) + if min_num > 0: + filtered_infos = [] + for info in db_infos[name]: + if info['num_points_in_gt'] >= min_num: + filtered_infos.append(info) + db_infos[name] = filtered_infos + return db_infos + + def sample_all(self, + gt_bboxes: np.ndarray, + gt_labels: np.ndarray, + img: Optional[np.ndarray] = None, + ground_plane: Optional[np.ndarray] = None) -> dict: + """Sampling all categories of bboxes. + + Args: + gt_bboxes (np.ndarray): Ground truth bounding boxes. + gt_labels (np.ndarray): Ground truth labels of boxes. 
+ img (np.ndarray, optional): Image array. Defaults to None. + ground_plane (np.ndarray, optional): Ground plane information. + Defaults to None. + + Returns: + dict: Dict of sampled 'pseudo ground truths'. + + - gt_labels_3d (np.ndarray): ground truths labels + of sampled objects. + - gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): + sampled ground truth 3D bounding boxes + - points (np.ndarray): sampled points + - group_ids (np.ndarray): ids of sampled ground truths + """ + sampled_num_dict = {} + sample_num_per_class = [] + for class_name, max_sample_num in zip(self.sample_classes, + self.sample_max_nums): + class_label = self.cat2label[class_name] + # sampled_num = int(max_sample_num - + # np.sum([n == class_name for n in gt_names])) + sampled_num = int(max_sample_num - + np.sum([n == class_label for n in gt_labels])) + sampled_num = np.round(self.rate * sampled_num).astype(np.int64) + sampled_num_dict[class_name] = sampled_num + sample_num_per_class.append(sampled_num) + + sampled = [] + sampled_gt_bboxes = [] + avoid_coll_boxes = gt_bboxes + + for class_name, sampled_num in zip(self.sample_classes, + sample_num_per_class): + if sampled_num > 0: + sampled_cls = self.sample_class_v2(class_name, sampled_num, + avoid_coll_boxes) + + sampled += sampled_cls + if len(sampled_cls) > 0: + if len(sampled_cls) == 1: + sampled_gt_box = sampled_cls[0]['box3d_lidar'][ + np.newaxis, ...] + else: + sampled_gt_box = np.stack( + [s['box3d_lidar'] for s in sampled_cls], axis=0) + + sampled_gt_bboxes += [sampled_gt_box] + avoid_coll_boxes = np.concatenate( + [avoid_coll_boxes, sampled_gt_box], axis=0) + + ret = None + if len(sampled) > 0: + sampled_gt_bboxes = np.concatenate(sampled_gt_bboxes, axis=0) + # center = sampled_gt_bboxes[:, 0:3] + + # num_sampled = len(sampled) + s_points_list = [] + count = 0 + for info in sampled: + file_path = os.path.join( + self.data_root, + info['path']) if self.data_root else info['path'] + results = dict(lidar_points=dict(lidar_path=file_path)) + s_points = self.points_loader(results)['points'] + s_points.translate(info['box3d_lidar'][:3]) + + count += 1 + + s_points_list.append(s_points) + + gt_labels = np.array([self.cat2label[s['name']] for s in sampled], + dtype=np.long) + + if ground_plane is not None: + xyz = sampled_gt_bboxes[:, :3] + dz = (ground_plane[:3][None, :] * + xyz).sum(-1) + ground_plane[3] + sampled_gt_bboxes[:, 2] -= dz + for i, s_points in enumerate(s_points_list): + s_points.tensor[:, 2].sub_(dz[i]) + + ret = { + 'gt_labels_3d': + gt_labels, + 'gt_bboxes_3d': + sampled_gt_bboxes, + 'points': + s_points_list[0].cat(s_points_list), + 'group_ids': + np.arange(gt_bboxes.shape[0], + gt_bboxes.shape[0] + len(sampled)) + } + + return ret + + def sample_class_v2(self, name: str, num: int, + gt_bboxes: np.ndarray) -> List[dict]: + """Sampling specific categories of bounding boxes. + + Args: + name (str): Class of objects to be sampled. + num (int): Number of sampled bboxes. + gt_bboxes (np.ndarray): Ground truth boxes. + + Returns: + list[dict]: Valid samples after collision test. 
+ """ + sampled = self.sampler_dict[name].sample(num) + sampled = copy.deepcopy(sampled) + num_gt = gt_bboxes.shape[0] + num_sampled = len(sampled) + gt_bboxes_bv = box_np_ops.center_to_corner_box2d( + gt_bboxes[:, 0:2], gt_bboxes[:, 3:5], gt_bboxes[:, 6]) + + sp_boxes = np.stack([i['box3d_lidar'] for i in sampled], axis=0) + boxes = np.concatenate([gt_bboxes, sp_boxes], axis=0).copy() + + sp_boxes_new = boxes[gt_bboxes.shape[0]:] + sp_boxes_bv = box_np_ops.center_to_corner_box2d( + sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, 6]) + + total_bv = np.concatenate([gt_bboxes_bv, sp_boxes_bv], axis=0) + coll_mat = data_augment_utils.box_collision_test(total_bv, total_bv) + diag = np.arange(total_bv.shape[0]) + coll_mat[diag, diag] = False + + valid_samples = [] + for i in range(num_gt, num_gt + num_sampled): + if coll_mat[i].any(): + coll_mat[i] = False + coll_mat[:, i] = False + else: + valid_samples.append(sampled[i - num_gt]) + return valid_samples diff --git a/mmdet3d/datasets/transforms/formating.py b/mmdet3d/datasets/transforms/formating.py new file mode 100755 index 0000000..477346a --- /dev/null +++ b/mmdet3d/datasets/transforms/formating.py @@ -0,0 +1,243 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Sequence, Union + +import mmengine +import numpy as np +import torch +from mmcv import BaseTransform +from mmengine.structures import InstanceData +from numpy import dtype + +from mmdet3d.registry import TRANSFORMS +from mmdet3d.structures import BaseInstance3DBoxes, Det3DDataSample, PointData +from mmdet3d.structures.points import BasePoints + + +def to_tensor( + data: Union[torch.Tensor, np.ndarray, Sequence, int, + float]) -> torch.Tensor: + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. + + Args: + data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to + be converted. + + Returns: + torch.Tensor: the converted data. 
+ """ + + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + if data.dtype is dtype('float64'): + data = data.astype(np.float32) + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not mmengine.is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError(f'type {type(data)} cannot be converted to tensor.') + + +@TRANSFORMS.register_module() +class Pack3DDetInputs(BaseTransform): + INPUTS_KEYS = ['points', 'img'] + INSTANCEDATA_3D_KEYS = [ + 'gt_bboxes_3d', 'gt_labels_3d', 'attr_labels', 'depths', 'centers_2d' + ] + INSTANCEDATA_2D_KEYS = [ + 'gt_bboxes', + 'gt_bboxes_labels', + ] + + SEG_KEYS = [ + 'gt_seg_map', 'pts_instance_mask', 'pts_semantic_mask', + 'gt_semantic_seg' + ] + + def __init__( + self, + keys: tuple, + meta_keys: tuple = ('img_path', 'ori_shape', 'img_shape', 'lidar2img', + 'depth2img', 'cam2img', 'pad_shape', + 'scale_factor', 'flip', 'pcd_horizontal_flip', + 'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d', + 'img_norm_cfg', 'num_pts_feats', 'pcd_trans', + 'sample_idx', 'pcd_scale_factor', 'pcd_rotation', + 'pcd_rotation_angle', 'lidar_path', + 'transformation_3d_flow', 'trans_mat', + 'affine_aug', 'sweep_img_metas', 'ori_cam2img', + 'cam2global', 'crop_offset', 'img_crop_offset', + 'resize_img_shape', 'lidar2cam', 'ori_lidar2img', + 'num_ref_frames', 'num_views', 'ego2global') + ) -> None: + self.keys = keys + self.meta_keys = meta_keys + + def _remove_prefix(self, key: str) -> str: + if key.startswith('gt_'): + key = key[3:] + return key + + def transform(self, results: Union[dict, + List[dict]]) -> Union[dict, List[dict]]: + """Method to pack the input data. when the value in this dict is a + list, it usually is in Augmentations Testing. + + Args: + results (dict | list[dict]): Result dict from the data pipeline. + + Returns: + dict | List[dict]: + + - 'inputs' (dict): The forward data of models. It usually contains + following keys: + + - points + - img + + - 'data_samples' (:obj:`Det3DDataSample`): The annotation info of + the sample. + """ + # augtest + if isinstance(results, list): + if len(results) == 1: + # simple test + return self.pack_single_results(results[0]) + pack_results = [] + for single_result in results: + pack_results.append(self.pack_single_results(single_result)) + return pack_results + # norm training and simple testing + elif isinstance(results, dict): + return self.pack_single_results(results) + else: + raise NotImplementedError + + def pack_single_results(self, results: dict) -> dict: + """Method to pack the single input data. when the value in this dict is + a list, it usually is in Augmentations Testing. + + Args: + results (dict): Result dict from the data pipeline. + + Returns: + dict: A dict contains + + - 'inputs' (dict): The forward data of models. It usually contains + following keys: + + - points + - img + + - 'data_samples' (:obj:`Det3DDataSample`): The annotation info + of the sample. 
+ """ + # Format 3D data + if 'points' in results: + if isinstance(results['points'], BasePoints): + results['points'] = results['points'].tensor + + if 'img' in results: + if isinstance(results['img'], list): + # process multiple imgs in single frame + imgs = np.stack(results['img'], axis=0) + if imgs.flags.c_contiguous: + imgs = to_tensor(imgs).permute(0, 3, 1, 2).contiguous() + else: + imgs = to_tensor( + np.ascontiguousarray(imgs.transpose(0, 3, 1, 2))) + results['img'] = imgs + else: + img = results['img'] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + # To improve the computational speed by by 3-5 times, apply: + # `torch.permute()` rather than `np.transpose()`. + # Refer to https://github.com/open-mmlab/mmdetection/pull/9533 + # for more details + if img.flags.c_contiguous: + img = to_tensor(img).permute(2, 0, 1).contiguous() + else: + img = to_tensor( + np.ascontiguousarray(img.transpose(2, 0, 1))) + results['img'] = img + + for key in [ + 'proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels', + 'gt_bboxes_labels', 'attr_labels', 'pts_instance_mask', + 'pts_semantic_mask', 'centers_2d', 'depths', 'gt_labels_3d' + ]: + if key not in results: + continue + if isinstance(results[key], list): + results[key] = [to_tensor(res) for res in results[key]] + else: + results[key] = to_tensor(results[key]) + if 'gt_bboxes_3d' in results: + if not isinstance(results['gt_bboxes_3d'], BaseInstance3DBoxes): + results['gt_bboxes_3d'] = to_tensor(results['gt_bboxes_3d']) + + if 'gt_semantic_seg' in results: + results['gt_semantic_seg'] = to_tensor( + results['gt_semantic_seg'][None]) + if 'gt_seg_map' in results: + results['gt_seg_map'] = results['gt_seg_map'][None, ...] + + data_sample = Det3DDataSample() + gt_instances_3d = InstanceData() + gt_instances = InstanceData() + gt_pts_seg = PointData() + + img_metas = {} + for key in self.meta_keys: + if key in results: + img_metas[key] = results[key] + data_sample.set_metainfo(img_metas) + + inputs = {} + for key in self.keys: + if key in results: + if key in self.INPUTS_KEYS: + inputs[key] = results[key] + elif key in self.INSTANCEDATA_3D_KEYS: + gt_instances_3d[self._remove_prefix(key)] = results[key] + elif key in self.INSTANCEDATA_2D_KEYS: + if key == 'gt_bboxes_labels': + gt_instances['labels'] = results[key] + else: + gt_instances[self._remove_prefix(key)] = results[key] + elif key in self.SEG_KEYS: + gt_pts_seg[self._remove_prefix(key)] = results[key] + else: + raise NotImplementedError(f'Please modified ' + f'`Pack3DDetInputs` ' + f'to put {key} to ' + f'corresponding field') + + data_sample.gt_instances_3d = gt_instances_3d + data_sample.gt_instances = gt_instances + data_sample.gt_pts_seg = gt_pts_seg + if 'eval_ann_info' in results: + data_sample.eval_ann_info = results['eval_ann_info'] + else: + data_sample.eval_ann_info = None + + packed_results = dict() + packed_results['data_samples'] = data_sample + packed_results['inputs'] = inputs + + return packed_results + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(keys={self.keys})' + repr_str += f'(meta_keys={self.meta_keys})' + return repr_str diff --git a/mmdet3d/datasets/transforms/loading.py b/mmdet3d/datasets/transforms/loading.py new file mode 100755 index 0000000..a240a1c --- /dev/null +++ b/mmdet3d/datasets/transforms/loading.py @@ -0,0 +1,1319 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +from typing import List, Optional, Union + +import mmcv +import mmengine +import numpy as np +from mmcv.transforms import LoadImageFromFile +from mmcv.transforms.base import BaseTransform +from mmdet.datasets.transforms import LoadAnnotations +from mmengine.fileio import get + +from mmdet3d.registry import TRANSFORMS +from mmdet3d.structures.bbox_3d import get_box_type +from mmdet3d.structures.points import BasePoints, get_points_type + + +@TRANSFORMS.register_module() +class LoadMultiViewImageFromFiles(BaseTransform): + """Load multi channel images from a list of separate channel files. + + Expects results['img_filename'] to be a list of filenames. + + Args: + to_float32 (bool): Whether to convert the img to float32. + Defaults to False. + color_type (str): Color type of the file. Defaults to 'unchanged'. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + num_views (int): Number of view in a frame. Defaults to 5. + num_ref_frames (int): Number of frame in loading. Defaults to -1. + test_mode (bool): Whether is test mode in loading. Defaults to False. + set_default_scale (bool): Whether to set default scale. + Defaults to True. + """ + + def __init__(self, + to_float32: bool = False, + color_type: str = 'unchanged', + backend_args: Optional[dict] = None, + num_views: int = 5, + num_ref_frames: int = -1, + test_mode: bool = False, + set_default_scale: bool = True) -> None: + self.to_float32 = to_float32 + self.color_type = color_type + self.backend_args = backend_args + self.num_views = num_views + # num_ref_frames is used for multi-sweep loading + self.num_ref_frames = num_ref_frames + # when test_mode=False, we randomly select previous frames + # otherwise, select the earliest one + self.test_mode = test_mode + self.set_default_scale = set_default_scale + + def transform(self, results: dict) -> Optional[dict]: + """Call function to load multi-view image from files. + + Args: + results (dict): Result dict containing multi-view image filenames. + + Returns: + dict: The result dict containing the multi-view image data. + Added keys and values are described below. + + - filename (str): Multi-view image filenames. + - img (np.ndarray): Multi-view image arrays. + - img_shape (tuple[int]): Shape of multi-view image arrays. + - ori_shape (tuple[int]): Shape of original image arrays. + - pad_shape (tuple[int]): Shape of padded image arrays. + - scale_factor (float): Scale factor. + - img_norm_cfg (dict): Normalization configuration of images. 
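+
+        Example:
+            A nuScenes-style config sketch (six surround-view cameras is an
+            assumption of this example, not a requirement of the transform):
+
+            >>> loader = dict(type='LoadMultiViewImageFromFiles',
+            ...               to_float32=True,
+            ...               num_views=6)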
+ """ + # TODO: consider split the multi-sweep part out of this pipeline + # Derive the mask and transform for loading of multi-sweep data + if self.num_ref_frames > 0: + # init choice with the current frame + init_choice = np.array([0], dtype=np.int64) + num_frames = len(results['img_filename']) // self.num_views - 1 + if num_frames == 0: # no previous frame, then copy cur frames + choices = np.random.choice( + 1, self.num_ref_frames, replace=True) + elif num_frames >= self.num_ref_frames: + # NOTE: suppose the info is saved following the order + # from latest to earlier frames + if self.test_mode: + choices = np.arange(num_frames - self.num_ref_frames, + num_frames) + 1 + # NOTE: +1 is for selecting previous frames + else: + choices = np.random.choice( + num_frames, self.num_ref_frames, replace=False) + 1 + elif num_frames > 0 and num_frames < self.num_ref_frames: + if self.test_mode: + base_choices = np.arange(num_frames) + 1 + random_choices = np.random.choice( + num_frames, + self.num_ref_frames - num_frames, + replace=True) + 1 + choices = np.concatenate([base_choices, random_choices]) + else: + choices = np.random.choice( + num_frames, self.num_ref_frames, replace=True) + 1 + else: + raise NotImplementedError + choices = np.concatenate([init_choice, choices]) + select_filename = [] + for choice in choices: + select_filename += results['img_filename'][choice * + self.num_views: + (choice + 1) * + self.num_views] + results['img_filename'] = select_filename + for key in ['cam2img', 'lidar2cam']: + if key in results: + select_results = [] + for choice in choices: + select_results += results[key][choice * + self.num_views:(choice + + 1) * + self.num_views] + results[key] = select_results + for key in ['ego2global']: + if key in results: + select_results = [] + for choice in choices: + select_results += [results[key][choice]] + results[key] = select_results + # Transform lidar2cam to + # [cur_lidar]2[prev_img] and [cur_lidar]2[prev_cam] + for key in ['lidar2cam']: + if key in results: + # only change matrices of previous frames + for choice_idx in range(1, len(choices)): + pad_prev_ego2global = np.eye(4) + prev_ego2global = results['ego2global'][choice_idx] + pad_prev_ego2global[:prev_ego2global. + shape[0], :prev_ego2global. + shape[1]] = prev_ego2global + pad_cur_ego2global = np.eye(4) + cur_ego2global = results['ego2global'][0] + pad_cur_ego2global[:cur_ego2global. + shape[0], :cur_ego2global. 
+ shape[1]] = cur_ego2global + cur2prev = np.linalg.inv(pad_prev_ego2global).dot( + pad_cur_ego2global) + for result_idx in range(choice_idx * self.num_views, + (choice_idx + 1) * + self.num_views): + results[key][result_idx] = \ + results[key][result_idx].dot(cur2prev) + # Support multi-view images with different shapes + # TODO: record the origin shape and padded shape + filename, cam2img, lidar2cam = [], [], [] + for _, cam_item in results['images'].items(): + filename.append(cam_item['img_path']) + cam2img.append(cam_item['cam2img']) + lidar2cam.append(cam_item['lidar2cam']) + results['filename'] = filename + results['cam2img'] = cam2img + results['lidar2cam'] = lidar2cam + + results['ori_cam2img'] = copy.deepcopy(results['cam2img']) + + # img is of shape (h, w, c, num_views) + # h and w can be different for different views + img_bytes = [ + get(name, backend_args=self.backend_args) for name in filename + ] + imgs = [ + mmcv.imfrombytes(img_byte, flag=self.color_type) + for img_byte in img_bytes + ] + # handle the image with different shape + img_shapes = np.stack([img.shape for img in imgs], axis=0) + img_shape_max = np.max(img_shapes, axis=0) + img_shape_min = np.min(img_shapes, axis=0) + assert img_shape_min[-1] == img_shape_max[-1] + if not np.all(img_shape_max == img_shape_min): + pad_shape = img_shape_max[:2] + else: + pad_shape = None + if pad_shape is not None: + imgs = [ + mmcv.impad(img, shape=pad_shape, pad_val=0) for img in imgs + ] + img = np.stack(imgs, axis=-1) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + # unravel to list, see `DefaultFormatBundle` in formating.py + # which will transpose each image separately and then stack into array + results['img'] = [img[..., i] for i in range(img.shape[-1])] + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + # Set initial values for default meta_keys + results['pad_shape'] = img.shape[:2] + if self.set_default_scale: + results['scale_factor'] = 1.0 + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + results['num_views'] = self.num_views + results['num_ref_frames'] = self.num_ref_frames + return results + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(to_float32={self.to_float32}, ' + repr_str += f"color_type='{self.color_type}', " + repr_str += f'num_views={self.num_views}, ' + repr_str += f'num_ref_frames={self.num_ref_frames}, ' + repr_str += f'test_mode={self.test_mode})' + return repr_str + + +@TRANSFORMS.register_module() +class LoadImageFromFileMono3D(LoadImageFromFile): + """Load an image from file in monocular 3D object detection. Compared to 2D + detection, additional camera parameters need to be loaded. + + Args: + kwargs (dict): Arguments are the same as those in + :class:`LoadImageFromFile`. + """ + + def transform(self, results: dict) -> dict: + """Call functions to load image and get image meta information. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded image and meta information. + """ + # TODO: load different camera image from data info, + # for kitti dataset, we load 'CAM2' image. + # for nuscenes dataset, we load 'CAM_FRONT' image. 
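+        # If the info contains exactly one camera, fall back to that single
+        # view regardless of its name; with several unrecognized cameras we
+        # cannot guess which view the monocular model expects, so an error
+        # is raised below.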
+ + if 'CAM2' in results['images']: + filename = results['images']['CAM2']['img_path'] + results['cam2img'] = results['images']['CAM2']['cam2img'] + elif len(list(results['images'].keys())) == 1: + camera_type = list(results['images'].keys())[0] + filename = results['images'][camera_type]['img_path'] + results['cam2img'] = results['images'][camera_type]['cam2img'] + else: + raise NotImplementedError( + 'Currently we only support load image from kitti and' + 'nuscenes datasets') + + try: + img_bytes = get(filename, backend_args=self.backend_args) + img = mmcv.imfrombytes( + img_bytes, flag=self.color_type, backend=self.imdecode_backend) + except Exception as e: + if self.ignore_empty: + return None + else: + raise e + if self.to_float32: + img = img.astype(np.float32) + + results['img'] = img + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + + return results + + +@TRANSFORMS.register_module() +class LoadImageFromNDArray(LoadImageFromFile): + """Load an image from ``results['img']``. + Similar with :obj:`LoadImageFromFile`, but the image has been loaded as + :obj:`np.ndarray` in ``results['img']``. Can be used when loading image + from webcam. + Required Keys: + - img + Modified Keys: + - img + - img_path + - img_shape + - ori_shape + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + """ + + def transform(self, results: dict) -> dict: + """Transform function to add image meta information. + + Args: + results (dict): Result dict with Webcam read image in + ``results['img']``. + Returns: + dict: The dict contains loaded image and meta information. + """ + + img = results['img'] + if self.to_float32: + img = img.astype(np.float32) + + results['img_path'] = None + results['img'] = img + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + return results + + +@TRANSFORMS.register_module() +class LoadPointsFromMultiSweeps(BaseTransform): + """Load points from multiple sweeps. + + This is usually used for nuScenes dataset to utilize previous sweeps. + + Args: + sweeps_num (int): Number of sweeps. Defaults to 10. + load_dim (int): Dimension number of the loaded points. Defaults to 5. + use_dim (list[int]): Which dimension to use. Defaults to [0, 1, 2, 4]. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + pad_empty_sweeps (bool): Whether to repeat keyframe when + sweeps is empty. Defaults to False. + remove_close (bool): Whether to remove close points. Defaults to False. + test_mode (bool): If `test_mode=True`, it will not randomly sample + sweeps but select the nearest N frames. Defaults to False. + """ + + def __init__(self, + sweeps_num: int = 10, + load_dim: int = 5, + use_dim: List[int] = [0, 1, 2, 4], + backend_args: Optional[dict] = None, + pad_empty_sweeps: bool = False, + remove_close: bool = False, + test_mode: bool = False) -> None: + self.load_dim = load_dim + self.sweeps_num = sweeps_num + if isinstance(use_dim, int): + use_dim = list(range(use_dim)) + assert max(use_dim) < load_dim, \ + f'Expect all used dimensions < {load_dim}, got {use_dim}' + self.use_dim = use_dim + self.backend_args = backend_args + self.pad_empty_sweeps = pad_empty_sweeps + self.remove_close = remove_close + self.test_mode = test_mode + + def _load_points(self, pts_filename: str) -> np.ndarray: + """Private function to load point clouds data. 
+ + Args: + pts_filename (str): Filename of point clouds data. + + Returns: + np.ndarray: An array containing point clouds data. + """ + try: + pts_bytes = get(pts_filename, backend_args=self.backend_args) + points = np.frombuffer(pts_bytes, dtype=np.float32) + except ConnectionError: + mmengine.check_file_exist(pts_filename) + if pts_filename.endswith('.npy'): + points = np.load(pts_filename) + else: + points = np.fromfile(pts_filename, dtype=np.float32) + return points + + def _remove_close(self, + points: Union[np.ndarray, BasePoints], + radius: float = 1.0) -> Union[np.ndarray, BasePoints]: + """Remove point too close within a certain radius from origin. + + Args: + points (np.ndarray | :obj:`BasePoints`): Sweep points. + radius (float): Radius below which points are removed. + Defaults to 1.0. + + Returns: + np.ndarray | :obj:`BasePoints`: Points after removing. + """ + if isinstance(points, np.ndarray): + points_numpy = points + elif isinstance(points, BasePoints): + points_numpy = points.tensor.numpy() + else: + raise NotImplementedError + x_filt = np.abs(points_numpy[:, 0]) < radius + y_filt = np.abs(points_numpy[:, 1]) < radius + not_close = np.logical_not(np.logical_and(x_filt, y_filt)) + return points[not_close] + + def transform(self, results: dict) -> dict: + """Call function to load multi-sweep point clouds from files. + + Args: + results (dict): Result dict containing multi-sweep point cloud + filenames. + + Returns: + dict: The result dict containing the multi-sweep points data. + Updated key and value are described below. + + - points (np.ndarray | :obj:`BasePoints`): Multi-sweep point + cloud arrays. + """ + points = results['points'] + points.tensor[:, 4] = 0 + sweep_points_list = [points] + ts = results['timestamp'] + if 'lidar_sweeps' not in results: + if self.pad_empty_sweeps: + for i in range(self.sweeps_num): + if self.remove_close: + sweep_points_list.append(self._remove_close(points)) + else: + sweep_points_list.append(points) + else: + if len(results['lidar_sweeps']) <= self.sweeps_num: + choices = np.arange(len(results['lidar_sweeps'])) + elif self.test_mode: + choices = np.arange(self.sweeps_num) + else: + choices = np.random.choice( + len(results['lidar_sweeps']), + self.sweeps_num, + replace=False) + for idx in choices: + sweep = results['lidar_sweeps'][idx] + points_sweep = self._load_points( + sweep['lidar_points']['lidar_path']) + points_sweep = np.copy(points_sweep).reshape(-1, self.load_dim) + if self.remove_close: + points_sweep = self._remove_close(points_sweep) + # bc-breaking: Timestamp has divided 1e6 in pkl infos. + sweep_ts = sweep['timestamp'] + lidar2sensor = np.array(sweep['lidar_points']['lidar2sensor']) + points_sweep[:, : + 3] = points_sweep[:, :3] @ lidar2sensor[:3, :3] + points_sweep[:, :3] -= lidar2sensor[:3, 3] + points_sweep[:, 4] = ts - sweep_ts + points_sweep = points.new_point(points_sweep) + sweep_points_list.append(points_sweep) + + points = points.cat(sweep_points_list) + points = points[:, self.use_dim] + results['points'] = points + return results + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + return f'{self.__class__.__name__}(sweeps_num={self.sweeps_num})' + + +@TRANSFORMS.register_module() +class PointSegClassMapping(BaseTransform): + """Map original semantic class to valid category ids. 
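+
+    The ``seg_label_mapping`` array consumed here is produced by the dataset
+    (e.g. ``Seg3DDataset.get_seg_label_mapping``) and attached to each data
+    info in ``parse_data_info``.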
+ + Required Keys: + + - seg_label_mapping (np.ndarray) + - pts_semantic_mask (np.ndarray) + + Added Keys: + + - points (np.float32) + + Map valid classes as 0~len(valid_cat_ids)-1 and + others as len(valid_cat_ids). + """ + + def transform(self, results: dict) -> dict: + """Call function to map original semantic class to valid category ids. + + Args: + results (dict): Result dict containing point semantic masks. + + Returns: + dict: The result dict containing the mapped category ids. + Updated key and value are described below. + + - pts_semantic_mask (np.ndarray): Mapped semantic masks. + """ + assert 'pts_semantic_mask' in results + pts_semantic_mask = results['pts_semantic_mask'] + + assert 'seg_label_mapping' in results + label_mapping = results['seg_label_mapping'] + converted_pts_sem_mask = label_mapping[pts_semantic_mask] + + results['pts_semantic_mask'] = converted_pts_sem_mask + + # 'eval_ann_info' will be passed to evaluator + if 'eval_ann_info' in results: + assert 'pts_semantic_mask' in results['eval_ann_info'] + results['eval_ann_info']['pts_semantic_mask'] = \ + converted_pts_sem_mask + + return results + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + return repr_str + + +@TRANSFORMS.register_module() +class NormalizePointsColor(BaseTransform): + """Normalize color of points. + + Args: + color_mean (list[float]): Mean color of the point cloud. + """ + + def __init__(self, color_mean: List[float]) -> None: + self.color_mean = color_mean + + def transform(self, input_dict: dict) -> dict: + """Call function to normalize color of points. + + Args: + results (dict): Result dict containing point clouds data. + + Returns: + dict: The result dict containing the normalized points. + Updated key and value are described below. + + - points (:obj:`BasePoints`): Points after color normalization. + """ + points = input_dict['points'] + assert points.attribute_dims is not None and \ + 'color' in points.attribute_dims.keys(), \ + 'Expect points have color attribute' + if self.color_mean is not None: + points.color = points.color - \ + points.color.new_tensor(self.color_mean) + points.color = points.color / 255.0 + input_dict['points'] = points + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(color_mean={self.color_mean})' + return repr_str + + +@TRANSFORMS.register_module() +class LoadPointsFromFile(BaseTransform): + """Load Points From File. + + Required Keys: + + - lidar_points (dict) + + - lidar_path (str) + + Added Keys: + + - points (np.float32) + + Args: + coord_type (str): The type of coordinates of points cloud. + Available options includes: + + - 'LIDAR': Points in LiDAR coordinates. + - 'DEPTH': Points in depth coordinates, usually for indoor dataset. + - 'CAMERA': Points in camera coordinates. + load_dim (int): The dimension of the loaded points. Defaults to 6. + use_dim (list[int] | int): Which dimensions of the points to use. + Defaults to [0, 1, 2]. For KITTI dataset, set use_dim=4 + or use_dim=[0, 1, 2, 3] to use the intensity dimension. + shift_height (bool): Whether to use shifted height. Defaults to False. + use_color (bool): Whether to use color features. Defaults to False. + norm_intensity (bool): Whether to normlize the intensity. Defaults to + False. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. 
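+
+    Example (an illustrative pipeline entry; ``load_dim`` / ``use_dim`` are
+    dataset-dependent placeholders, e.g. 4 for KITTI-style ``.bin`` files)::
+
+        dict(
+            type='LoadPointsFromFile',
+            coord_type='LIDAR',
+            load_dim=4,
+            use_dim=4)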
+ """ + + def __init__(self, + coord_type: str, + load_dim: int = 6, + use_dim: Union[int, List[int]] = [0, 1, 2], + shift_height: bool = False, + use_color: bool = False, + norm_intensity: bool = False, + backend_args: Optional[dict] = None) -> None: + self.shift_height = shift_height + self.use_color = use_color + if isinstance(use_dim, int): + use_dim = list(range(use_dim)) + assert max(use_dim) < load_dim, \ + f'Expect all used dimensions < {load_dim}, got {use_dim}' + assert coord_type in ['CAMERA', 'LIDAR', 'DEPTH'] + + self.coord_type = coord_type + self.load_dim = load_dim + self.use_dim = use_dim + self.norm_intensity = norm_intensity + self.backend_args = backend_args + + def _load_points(self, pts_filename: str) -> np.ndarray: + """Private function to load point clouds data. + + Args: + pts_filename (str): Filename of point clouds data. + + Returns: + np.ndarray: An array containing point clouds data. + """ + try: + pts_bytes = get(pts_filename, backend_args=self.backend_args) + points = np.frombuffer(pts_bytes, dtype=np.float32) + except ConnectionError: + mmengine.check_file_exist(pts_filename) + if pts_filename.endswith('.npy'): + points = np.load(pts_filename) + else: + points = np.fromfile(pts_filename, dtype=np.float32) + + return points + + def transform(self, results: dict) -> dict: + """Method to load points data from file. + + Args: + results (dict): Result dict containing point clouds data. + + Returns: + dict: The result dict containing the point clouds data. + Added key and value are described below. + + - points (:obj:`BasePoints`): Point clouds data. + """ + pts_file_path = results['lidar_points']['lidar_path'] + points = self._load_points(pts_file_path) + points = points.reshape(-1, self.load_dim) + points = points[:, self.use_dim] + if self.norm_intensity: + assert len(self.use_dim) >= 4, \ + f'When using intensity norm, expect used dimensions >= 4, got {len(self.use_dim)}' # noqa: E501 + points[:, 3] = np.tanh(points[:, 3]) + attribute_dims = None + + if self.shift_height: + floor_height = np.percentile(points[:, 2], 0.99) + height = points[:, 2] - floor_height + points = np.concatenate( + [points[:, :3], + np.expand_dims(height, 1), points[:, 3:]], 1) + attribute_dims = dict(height=3) + + if self.use_color: + assert len(self.use_dim) >= 6 + if attribute_dims is None: + attribute_dims = dict() + attribute_dims.update( + dict(color=[ + points.shape[1] - 3, + points.shape[1] - 2, + points.shape[1] - 1, + ])) + + points_class = get_points_type(self.coord_type) + points = points_class( + points, points_dim=points.shape[-1], attribute_dims=attribute_dims) + results['points'] = points + + return results + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + '(' + repr_str += f'shift_height={self.shift_height}, ' + repr_str += f'use_color={self.use_color}, ' + repr_str += f'backend_args={self.backend_args}, ' + repr_str += f'load_dim={self.load_dim}, ' + repr_str += f'use_dim={self.use_dim})' + return repr_str + + +@TRANSFORMS.register_module() +class LoadPointsFromDict(LoadPointsFromFile): + """Load Points From Dict.""" + + def transform(self, results: dict) -> dict: + """Convert the type of points from ndarray to corresponding + `point_class`. + + Args: + results (dict): input result. The value of key `points` is a + numpy array. + + Returns: + dict: The processed results. 
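+
+        Example (a minimal sketch; the array shape and ``coord_type`` are
+        placeholders)::
+
+            loader = LoadPointsFromDict(coord_type='LIDAR', load_dim=4)
+            results = loader(dict(points=np.zeros((100, 4), np.float32)))
+            # results['points'] now holds the corresponding point class
+            # instance (e.g. LiDAR points) wrapping the input array.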
+ """ + assert 'points' in results + points = results['points'] + + if self.norm_intensity: + assert len(self.use_dim) >= 4, \ + f'When using intensity norm, expect used dimensions >= 4, got {len(self.use_dim)}' # noqa: E501 + points[:, 3] = np.tanh(points[:, 3]) + attribute_dims = None + + if self.shift_height: + floor_height = np.percentile(points[:, 2], 0.99) + height = points[:, 2] - floor_height + points = np.concatenate( + [points[:, :3], + np.expand_dims(height, 1), points[:, 3:]], 1) + attribute_dims = dict(height=3) + + if self.use_color: + assert len(self.use_dim) >= 6 + if attribute_dims is None: + attribute_dims = dict() + attribute_dims.update( + dict(color=[ + points.shape[1] - 3, + points.shape[1] - 2, + points.shape[1] - 1, + ])) + + points_class = get_points_type(self.coord_type) + points = points_class( + points, points_dim=points.shape[-1], attribute_dims=attribute_dims) + results['points'] = points + return results + + +@TRANSFORMS.register_module() +class LoadAnnotations3D(LoadAnnotations): + """Load Annotations3D. + + Load instance mask and semantic mask of points and + encapsulate the items into related fields. + + Required Keys: + + - ann_info (dict) + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes` | + :obj:`DepthInstance3DBoxes` | :obj:`CameraInstance3DBoxes`): + 3D ground truth bboxes. Only when `with_bbox_3d` is True + - gt_labels_3d (np.int64): Labels of ground truths. + Only when `with_label_3d` is True. + - gt_bboxes (np.float32): 2D ground truth bboxes. + Only when `with_bbox` is True. + - gt_labels (np.ndarray): Labels of ground truths. + Only when `with_label` is True. + - depths (np.ndarray): Only when + `with_bbox_depth` is True. + - centers_2d (np.ndarray): Only when + `with_bbox_depth` is True. + - attr_labels (np.ndarray): Attribute labels of instances. + Only when `with_attr_label` is True. + + - pts_instance_mask_path (str): Path of instance mask file. + Only when `with_mask_3d` is True. + - pts_semantic_mask_path (str): Path of semantic mask file. + Only when `with_seg_3d` is True. + - pts_panoptic_mask_path (str): Path of panoptic mask file. + Only when both `with_panoptic_3d` is True. + + Added Keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes` | + :obj:`DepthInstance3DBoxes` | :obj:`CameraInstance3DBoxes`): + 3D ground truth bboxes. Only when `with_bbox_3d` is True + - gt_labels_3d (np.int64): Labels of ground truths. + Only when `with_label_3d` is True. + - gt_bboxes (np.float32): 2D ground truth bboxes. + Only when `with_bbox` is True. + - gt_labels (np.int64): Labels of ground truths. + Only when `with_label` is True. + - depths (np.float32): Only when + `with_bbox_depth` is True. + - centers_2d (np.ndarray): Only when + `with_bbox_depth` is True. + - attr_labels (np.int64): Attribute labels of instances. + Only when `with_attr_label` is True. + - pts_instance_mask (np.int64): Instance mask of each point. + Only when `with_mask_3d` is True. + - pts_semantic_mask (np.int64): Semantic mask of each point. + Only when `with_seg_3d` is True. + + Args: + with_bbox_3d (bool): Whether to load 3D boxes. Defaults to True. + with_label_3d (bool): Whether to load 3D labels. Defaults to True. + with_attr_label (bool): Whether to load attribute label. + Defaults to False. + with_mask_3d (bool): Whether to load 3D instance masks for points. + Defaults to False. + with_seg_3d (bool): Whether to load 3D semantic masks for points. + Defaults to False. + with_bbox (bool): Whether to load 2D boxes. Defaults to False. 
+ with_label (bool): Whether to load 2D labels. Defaults to False. + with_mask (bool): Whether to load 2D instance masks. Defaults to False. + with_seg (bool): Whether to load 2D semantic masks. Defaults to False. + with_bbox_depth (bool): Whether to load 2.5D boxes. Defaults to False. + with_panoptic_3d (bool): Whether to load 3D panoptic masks for points. + Defaults to False. + poly2mask (bool): Whether to convert polygon annotations to bitmasks. + Defaults to True. + seg_3d_dtype (str): String of dtype of 3D semantic masks. + Defaults to 'np.int64'. + seg_offset (int): The offset to split semantic and instance labels from + panoptic labels. Defaults to None. + dataset_type (str): Type of dataset used for splitting semantic and + instance labels. Defaults to None. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + """ + + def __init__(self, + with_bbox_3d: bool = True, + with_label_3d: bool = True, + with_attr_label: bool = False, + with_mask_3d: bool = False, + with_seg_3d: bool = False, + with_bbox: bool = False, + with_label: bool = False, + with_mask: bool = False, + with_seg: bool = False, + with_bbox_depth: bool = False, + with_panoptic_3d: bool = False, + poly2mask: bool = True, + seg_3d_dtype: str = 'np.int64', + seg_offset: int = None, + dataset_type: str = None, + backend_args: Optional[dict] = None) -> None: + super().__init__( + with_bbox=with_bbox, + with_label=with_label, + with_mask=with_mask, + with_seg=with_seg, + poly2mask=poly2mask, + backend_args=backend_args) + self.with_bbox_3d = with_bbox_3d + self.with_bbox_depth = with_bbox_depth + self.with_label_3d = with_label_3d + self.with_attr_label = with_attr_label + self.with_mask_3d = with_mask_3d + self.with_seg_3d = with_seg_3d + self.with_panoptic_3d = with_panoptic_3d + self.seg_3d_dtype = eval(seg_3d_dtype) + self.seg_offset = seg_offset + self.dataset_type = dataset_type + + def _load_bboxes_3d(self, results: dict) -> dict: + """Private function to move the 3D bounding box annotation from + `ann_info` field to the root of `results`. + + Args: + results (dict): Result dict from :obj:`mmdet3d.CustomDataset`. + + Returns: + dict: The dict containing loaded 3D bounding box annotations. + """ + + results['gt_bboxes_3d'] = results['ann_info']['gt_bboxes_3d'] + return results + + def _load_bboxes_depth(self, results: dict) -> dict: + """Private function to load 2.5D bounding box annotations. + + Args: + results (dict): Result dict from :obj:`mmdet3d.CustomDataset`. + + Returns: + dict: The dict containing loaded 2.5D bounding box annotations. + """ + + results['depths'] = results['ann_info']['depths'] + results['centers_2d'] = results['ann_info']['centers_2d'] + return results + + def _load_labels_3d(self, results: dict) -> dict: + """Private function to load label annotations. + + Args: + results (dict): Result dict from :obj:`mmdet3d.CustomDataset`. + + Returns: + dict: The dict containing loaded label annotations. + """ + + results['gt_labels_3d'] = results['ann_info']['gt_labels_3d'] + return results + + def _load_attr_labels(self, results: dict) -> dict: + """Private function to load label annotations. + + Args: + results (dict): Result dict from :obj:`mmdet3d.CustomDataset`. + + Returns: + dict: The dict containing loaded label annotations. + """ + results['attr_labels'] = results['ann_info']['attr_labels'] + return results + + def _load_masks_3d(self, results: dict) -> dict: + """Private function to load 3D mask annotations. 
+
+        Args:
+            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing loaded 3D mask annotations.
+        """
+        pts_instance_mask_path = results['pts_instance_mask_path']
+
+        try:
+            mask_bytes = get(
+                pts_instance_mask_path, backend_args=self.backend_args)
+            pts_instance_mask = np.frombuffer(mask_bytes, dtype=np.int64)
+        except ConnectionError:
+            mmengine.check_file_exist(pts_instance_mask_path)
+            pts_instance_mask = np.fromfile(
+                pts_instance_mask_path, dtype=np.int64)
+
+        results['pts_instance_mask'] = pts_instance_mask
+        # 'eval_ann_info' will be passed to evaluator
+        if 'eval_ann_info' in results:
+            results['eval_ann_info']['pts_instance_mask'] = pts_instance_mask
+        return results
+
+    def _load_semantic_seg_3d(self, results: dict) -> dict:
+        """Private function to load 3D semantic segmentation annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing the semantic segmentation annotations.
+        """
+        pts_semantic_mask_path = results['pts_semantic_mask_path']
+
+        try:
+            mask_bytes = get(
+                pts_semantic_mask_path, backend_args=self.backend_args)
+            # add .copy() to fix read-only bug
+            pts_semantic_mask = np.frombuffer(
+                mask_bytes, dtype=self.seg_3d_dtype).copy()
+        except ConnectionError:
+            mmengine.check_file_exist(pts_semantic_mask_path)
+            pts_semantic_mask = np.fromfile(
+                pts_semantic_mask_path, dtype=np.int64)
+
+        if self.dataset_type == 'semantickitti':
+            pts_semantic_mask = pts_semantic_mask.astype(np.int64)
+            pts_semantic_mask = pts_semantic_mask % self.seg_offset
+        # nuScenes loads semantic and panoptic labels from different files.
+
+        results['pts_semantic_mask'] = pts_semantic_mask
+
+        # 'eval_ann_info' will be passed to evaluator
+        if 'eval_ann_info' in results:
+            results['eval_ann_info']['pts_semantic_mask'] = pts_semantic_mask
+        return results
+
+    def _load_panoptic_3d(self, results: dict) -> dict:
+        """Private function to load 3D panoptic segmentation annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing the panoptic segmentation annotations.
+        """
+        pts_panoptic_mask_path = results['pts_panoptic_mask_path']
+
+        try:
+            mask_bytes = get(
+                pts_panoptic_mask_path, backend_args=self.backend_args)
+            # add .copy() to fix read-only bug
+            pts_panoptic_mask = np.frombuffer(
+                mask_bytes, dtype=self.seg_3d_dtype).copy()
+        except ConnectionError:
+            mmengine.check_file_exist(pts_panoptic_mask_path)
+            pts_panoptic_mask = np.fromfile(
+                pts_panoptic_mask_path, dtype=np.int64)
+
+        if self.dataset_type == 'semantickitti':
+            pts_semantic_mask = pts_panoptic_mask.astype(np.int64)
+            pts_semantic_mask = pts_semantic_mask % self.seg_offset
+        elif self.dataset_type == 'nuscenes':
+            # derive the semantic mask from the panoptic mask
+            pts_semantic_mask = pts_panoptic_mask // self.seg_offset
+
+        results['pts_semantic_mask'] = pts_semantic_mask
+
+        # We can directly take panoptic labels as instance ids.
+        pts_instance_mask = pts_panoptic_mask.astype(np.int64)
+        results['pts_instance_mask'] = pts_instance_mask
+
+        # 'eval_ann_info' will be passed to evaluator
+        if 'eval_ann_info' in results:
+            results['eval_ann_info']['pts_semantic_mask'] = pts_semantic_mask
+            results['eval_ann_info']['pts_instance_mask'] = pts_instance_mask
+        return results
+
+    def _load_bboxes(self, results: dict) -> None:
+        """Private function to load bounding box annotations.
+
+        The only difference is that it removes the processing of
+        `ignore_flag`.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv.BaseDataset`.
+
+        Returns:
+            dict: The dict contains loaded bounding box annotations.
+        """
+
+        results['gt_bboxes'] = results['ann_info']['gt_bboxes']
+
+    def _load_labels(self, results: dict) -> None:
+        """Private function to load label annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv.BaseDataset`.
+
+        Returns:
+            dict: The dict contains loaded label annotations.
+        """
+        results['gt_bboxes_labels'] = results['ann_info']['gt_bboxes_labels']
+
+    def transform(self, results: dict) -> dict:
+        """Function to load multiple types of annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing loaded 3D bounding box, label, mask and
+                semantic segmentation annotations.
+        """
+        results = super().transform(results)
+        if self.with_bbox_3d:
+            results = self._load_bboxes_3d(results)
+        if self.with_bbox_depth:
+            results = self._load_bboxes_depth(results)
+        if self.with_label_3d:
+            results = self._load_labels_3d(results)
+        if self.with_attr_label:
+            results = self._load_attr_labels(results)
+        if self.with_panoptic_3d:
+            results = self._load_panoptic_3d(results)
+        if self.with_mask_3d:
+            results = self._load_masks_3d(results)
+        if self.with_seg_3d:
+            results = self._load_semantic_seg_3d(results)
+        return results
+
+    def __repr__(self) -> str:
+        """str: Return a string that describes the module."""
+        indent_str = '    '
+        repr_str = self.__class__.__name__ + '(\n'
+        repr_str += f'{indent_str}with_bbox_3d={self.with_bbox_3d}, '
+        repr_str += f'{indent_str}with_label_3d={self.with_label_3d}, '
+        repr_str += f'{indent_str}with_attr_label={self.with_attr_label}, '
+        repr_str += f'{indent_str}with_mask_3d={self.with_mask_3d}, '
+        repr_str += f'{indent_str}with_seg_3d={self.with_seg_3d}, '
+        repr_str += f'{indent_str}with_panoptic_3d={self.with_panoptic_3d}, '
+        repr_str += f'{indent_str}with_bbox={self.with_bbox}, '
+        repr_str += f'{indent_str}with_label={self.with_label}, '
+        repr_str += f'{indent_str}with_mask={self.with_mask}, '
+        repr_str += f'{indent_str}with_seg={self.with_seg}, '
+        repr_str += f'{indent_str}with_bbox_depth={self.with_bbox_depth}, '
+        repr_str += f'{indent_str}poly2mask={self.poly2mask}, '
+        repr_str += f'{indent_str}seg_offset={self.seg_offset})'
+
+        return repr_str
+
+
+@TRANSFORMS.register_module()
+class LidarDet3DInferencerLoader(BaseTransform):
+    """Load point cloud in the Inferencer's pipeline.
+
+    Added keys:
+      - points
+      - timestamp
+      - axis_align_matrix
+      - box_type_3d
+      - box_mode_3d
+    """
+
+    def __init__(self, coord_type='LIDAR', **kwargs) -> None:
+        super().__init__()
+        self.from_file = TRANSFORMS.build(
+            dict(type='LoadPointsFromFile', coord_type=coord_type, **kwargs))
+        self.from_ndarray = TRANSFORMS.build(
+            dict(type='LoadPointsFromDict', coord_type=coord_type, **kwargs))
+        self.box_type_3d, self.box_mode_3d = get_box_type(coord_type)
+
+    def transform(self, single_input: dict) -> dict:
+        """Transform function to load the point cloud and add meta
+        information.
+
+        Args:
+            single_input (dict): Single input.
+
+        Returns:
+            dict: The dict containing loaded points and meta information.
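+
+        Example (illustrative; the file path is a placeholder)::
+
+            loader = LidarDet3DInferencerLoader(load_dim=4, use_dim=4)
+            data = loader(dict(points='path/to/points.bin'))
+            # or, equivalently, with an in-memory array:
+            data = loader(dict(points=np.zeros((100, 4), np.float32)))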
+ """ + assert 'points' in single_input, "key 'points' must be in input dict" + if isinstance(single_input['points'], str): + inputs = dict( + lidar_points=dict(lidar_path=single_input['points']), + timestamp=1, + # for ScanNet demo we need axis_align_matrix + axis_align_matrix=np.eye(4), + box_type_3d=self.box_type_3d, + box_mode_3d=self.box_mode_3d) + elif isinstance(single_input['points'], np.ndarray): + inputs = dict( + points=single_input['points'], + timestamp=1, + # for ScanNet demo we need axis_align_matrix + axis_align_matrix=np.eye(4), + box_type_3d=self.box_type_3d, + box_mode_3d=self.box_mode_3d) + else: + raise ValueError('Unsupported input points type: ' + f"{type(single_input['points'])}") + + if 'points' in inputs: + return self.from_ndarray(inputs) + return self.from_file(inputs) + + +@TRANSFORMS.register_module() +class MonoDet3DInferencerLoader(BaseTransform): + """Load an image from ``results['images']['CAMX']['img']``. Similar with + :obj:`LoadImageFromFileMono3D`, but the image has been loaded as + :obj:`np.ndarray` in ``results['images']['CAMX']['img']``. + + Added keys: + - img + - cam2img + - box_type_3d + - box_mode_3d + + """ + + def __init__(self, **kwargs) -> None: + super().__init__() + self.from_file = TRANSFORMS.build( + dict(type='LoadImageFromFileMono3D', **kwargs)) + self.from_ndarray = TRANSFORMS.build( + dict(type='LoadImageFromNDArray', **kwargs)) + + def transform(self, single_input: dict) -> dict: + """Transform function to add image meta information. + + Args: + single_input (dict): Result dict with Webcam read image in + ``results['images']['CAMX']['img']``. + Returns: + dict: The dict contains loaded image and meta information. + """ + box_type_3d, box_mode_3d = get_box_type('camera') + assert 'calib' in single_input and 'img' in single_input, \ + "key 'calib' and 'img' must be in input dict" + if isinstance(single_input['calib'], str): + calib_path = single_input['calib'] + with open(calib_path, 'r') as f: + lines = f.readlines() + cam2img = np.array([ + float(info) for info in lines[0].split(' ')[0:16] + ]).reshape([4, 4]) + elif isinstance(single_input['calib'], np.ndarray): + cam2img = single_input['calib'] + else: + raise ValueError('Unsupported input calib type: ' + f"{type(single_input['calib'])}") + + if isinstance(single_input['img'], str): + inputs = dict( + images=dict( + CAM_FRONT=dict( + img_path=single_input['img'], cam2img=cam2img)), + box_mode_3d=box_mode_3d, + box_type_3d=box_type_3d) + elif isinstance(single_input['img'], np.ndarray): + inputs = dict( + img=single_input['img'], + cam2img=cam2img, + box_type_3d=box_type_3d, + box_mode_3d=box_mode_3d) + else: + raise ValueError('Unsupported input image type: ' + f"{type(single_input['img'])}") + + if 'img' in inputs: + return self.from_ndarray(inputs) + return self.from_file(inputs) + + +@TRANSFORMS.register_module() +class MultiModalityDet3DInferencerLoader(BaseTransform): + """Load point cloud and image in the Inferencer's pipeline. 
+
+    Added keys:
+      - points
+      - img
+      - cam2img
+      - lidar2cam
+      - lidar2img
+      - timestamp
+      - axis_align_matrix
+      - box_type_3d
+      - box_mode_3d
+    """
+
+    def __init__(self, load_point_args: dict, load_img_args: dict) -> None:
+        super().__init__()
+        self.points_from_file = TRANSFORMS.build(
+            dict(type='LoadPointsFromFile', **load_point_args))
+        self.points_from_ndarray = TRANSFORMS.build(
+            dict(type='LoadPointsFromDict', **load_point_args))
+        coord_type = load_point_args['coord_type']
+        self.box_type_3d, self.box_mode_3d = get_box_type(coord_type)
+
+        self.imgs_from_file = TRANSFORMS.build(
+            dict(type='LoadImageFromFile', **load_img_args))
+        self.imgs_from_ndarray = TRANSFORMS.build(
+            dict(type='LoadImageFromNDArray', **load_img_args))
+
+    def transform(self, single_input: dict) -> dict:
+        """Transform function to load multi-modality data and add meta
+        information.
+
+        Args:
+            single_input (dict): Single input.
+
+        Returns:
+            dict: The dict contains loaded image, point cloud and meta
+                information.
+        """
+        assert 'points' in single_input and 'img' in single_input and \
+            'calib' in single_input, \
+            "key 'points', 'img' and 'calib' must be in input dict, " \
+            f'but got {single_input}'
+        if isinstance(single_input['points'], str):
+            inputs = dict(
+                lidar_points=dict(lidar_path=single_input['points']),
+                timestamp=1,
+                # for ScanNet demo we need axis_align_matrix
+                axis_align_matrix=np.eye(4),
+                box_type_3d=self.box_type_3d,
+                box_mode_3d=self.box_mode_3d)
+        elif isinstance(single_input['points'], np.ndarray):
+            inputs = dict(
+                points=single_input['points'],
+                timestamp=1,
+                # for ScanNet demo we need axis_align_matrix
+                axis_align_matrix=np.eye(4),
+                box_type_3d=self.box_type_3d,
+                box_mode_3d=self.box_mode_3d)
+        else:
+            raise ValueError('Unsupported input points type: '
+                             f"{type(single_input['points'])}")
+
+        if 'points' in inputs:
+            points_inputs = self.points_from_ndarray(inputs)
+        else:
+            points_inputs = self.points_from_file(inputs)
+
+        multi_modality_inputs = points_inputs
+
+        box_type_3d, box_mode_3d = get_box_type('lidar')
+        if isinstance(single_input['calib'], str):
+            calib = mmengine.load(single_input['calib'])
+        elif isinstance(single_input['calib'], dict):
+            calib = single_input['calib']
+        else:
+            raise ValueError('Unsupported input calib type: '
+                             f"{type(single_input['calib'])}")
+
+        cam2img = np.asarray(calib['cam2img'], dtype=np.float32)
+        lidar2cam = np.asarray(calib['lidar2cam'], dtype=np.float32)
+        if 'lidar2img' in calib:
+            lidar2img = np.asarray(calib['lidar2img'], dtype=np.float32)
+        else:
+            lidar2img = cam2img @ lidar2cam
+
+        if isinstance(single_input['img'], str):
+            inputs = dict(
+                img_path=single_input['img'],
+                cam2img=cam2img,
+                lidar2img=lidar2img,
+                lidar2cam=lidar2cam,
+                box_mode_3d=box_mode_3d,
+                box_type_3d=box_type_3d)
+        elif isinstance(single_input['img'], np.ndarray):
+            inputs = dict(
+                img=single_input['img'],
+                cam2img=cam2img,
+                lidar2img=lidar2img,
+                lidar2cam=lidar2cam,
+                box_type_3d=box_type_3d,
+                box_mode_3d=box_mode_3d)
+        else:
+            raise ValueError('Unsupported input image type: '
+                             f"{type(single_input['img'])}")
+
+        if isinstance(single_input['img'], np.ndarray):
+            imgs_inputs = self.imgs_from_ndarray(inputs)
+        else:
+            imgs_inputs = self.imgs_from_file(inputs)
+
+        multi_modality_inputs.update(imgs_inputs)
+
+        return multi_modality_inputs
diff --git a/mmdet3d/datasets/transforms/test_time_aug.py b/mmdet3d/datasets/transforms/test_time_aug.py
new file mode 100755
index 0000000..1aea7a8
--- /dev/null
+++ b/mmdet3d/datasets/transforms/test_time_aug.py
@@ -0,0 +1,121 @@
+# 
Copyright (c) OpenMMLab. All rights reserved. +import warnings +from copy import deepcopy +from typing import Dict, List, Optional, Tuple, Union + +import mmengine +from mmcv import BaseTransform +from mmengine.dataset import Compose + +from mmdet3d.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class MultiScaleFlipAug3D(BaseTransform): + """Test-time augmentation with multiple scales and flipping. + + Args: + transforms (list[dict]): Transforms to apply in each augmentation. + img_scale (tuple | list[tuple]): Images scales for resizing. + pts_scale_ratio (float | list[float]): Points scale ratios for + resizing. + flip (bool): Whether apply flip augmentation. Defaults to False. + flip_direction (str | list[str]): Flip augmentation directions + for images, options are "horizontal" and "vertical". + If flip_direction is list, multiple flip augmentations will + be applied. It has no effect when ``flip == False``. + Defaults to 'horizontal'. + pcd_horizontal_flip (bool): Whether to apply horizontal flip + augmentation to point cloud. Defaults to False. + Note that it works only when 'flip' is turned on. + pcd_vertical_flip (bool): Whether to apply vertical flip + augmentation to point cloud. Defaults to False. + Note that it works only when 'flip' is turned on. + """ + + def __init__(self, + transforms: List[dict], + img_scale: Optional[Union[Tuple[int], List[Tuple[int]]]], + pts_scale_ratio: Union[float, List[float]], + flip: bool = False, + flip_direction: str = 'horizontal', + pcd_horizontal_flip: bool = False, + pcd_vertical_flip: bool = False) -> None: + self.transforms = Compose(transforms) + self.img_scale = img_scale if isinstance(img_scale, + list) else [img_scale] + self.pts_scale_ratio = pts_scale_ratio \ + if isinstance(pts_scale_ratio, list) else [float(pts_scale_ratio)] + + assert mmengine.is_list_of(self.img_scale, tuple) + assert mmengine.is_list_of(self.pts_scale_ratio, float) + + self.flip = flip + self.pcd_horizontal_flip = pcd_horizontal_flip + self.pcd_vertical_flip = pcd_vertical_flip + + self.flip_direction = flip_direction if isinstance( + flip_direction, list) else [flip_direction] + assert mmengine.is_list_of(self.flip_direction, str) + if not self.flip and self.flip_direction != ['horizontal']: + warnings.warn( + 'flip_direction has no effect when flip is set to False') + if (self.flip and not any([(t['type'] == 'RandomFlip3D' + or t['type'] == 'RandomFlip') + for t in transforms])): + warnings.warn( + 'flip has no effect when RandomFlip is not in transforms') + + def transform(self, results: Dict) -> List[Dict]: + """Call function to augment common fields in results. + + Args: + results (dict): Result dict contains the data to augment. + + Returns: + List[dict]: The list contains the data that is augmented with + different scales and flips. 
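+
+        Example (an illustrative test-time config; the scales and the inner
+        transforms are placeholders)::
+
+            dict(
+                type='MultiScaleFlipAug3D',
+                img_scale=(1333, 800),
+                pts_scale_ratio=1.0,
+                flip=False,
+                transforms=[...])  # e.g. flip / range-filter transforms
+
+        One result dict is produced for every combination of image scale,
+        point cloud scale ratio and flip setting.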
+ """ + aug_data_list = [] + + # modified from `flip_aug = [False, True] if self.flip else [False]` + # to reduce unnecessary scenes when using double flip augmentation + # during test time + flip_aug = [True] if self.flip else [False] + pcd_horizontal_flip_aug = [False, True] \ + if self.flip and self.pcd_horizontal_flip else [False] + pcd_vertical_flip_aug = [False, True] \ + if self.flip and self.pcd_vertical_flip else [False] + for scale in self.img_scale: + # TODO refactor according to augtest docs + self.transforms.transforms[0].scale = scale + for pts_scale_ratio in self.pts_scale_ratio: + for flip in flip_aug: + for pcd_horizontal_flip in pcd_horizontal_flip_aug: + for pcd_vertical_flip in pcd_vertical_flip_aug: + for direction in self.flip_direction: + # results.copy will cause bug + # since it is shallow copy + _results = deepcopy(results) + _results['scale'] = scale + _results['flip'] = flip + _results['pcd_scale_factor'] = \ + pts_scale_ratio + _results['flip_direction'] = direction + _results['pcd_horizontal_flip'] = \ + pcd_horizontal_flip + _results['pcd_vertical_flip'] = \ + pcd_vertical_flip + data = self.transforms(_results) + aug_data_list.append(data) + + return aug_data_list + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(transforms={self.transforms}, ' + repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' + repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, ' + repr_str += f'flip_direction={self.flip_direction})' + return repr_str diff --git a/mmdet3d/datasets/transforms/transforms_3d.py b/mmdet3d/datasets/transforms/transforms_3d.py new file mode 100755 index 0000000..6b10e33 --- /dev/null +++ b/mmdet3d/datasets/transforms/transforms_3d.py @@ -0,0 +1,2668 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +import warnings +from typing import List, Optional, Sequence, Tuple, Union + +import cv2 +import mmcv +import numpy as np +import torch +from mmcv.transforms import BaseTransform, Compose, RandomResize, Resize +from mmdet.datasets.transforms import (PhotoMetricDistortion, RandomCrop, + RandomFlip) +from mmengine import is_list_of, is_tuple_of + +from mmdet3d.models.task_modules import VoxelGenerator +from mmdet3d.registry import TRANSFORMS +from mmdet3d.structures import (CameraInstance3DBoxes, DepthInstance3DBoxes, + LiDARInstance3DBoxes) +from mmdet3d.structures.ops import box_np_ops +from mmdet3d.structures.points import BasePoints +from .data_augment_utils import noise_per_object_v3_ + + +@TRANSFORMS.register_module() +class RandomDropPointsColor(BaseTransform): + r"""Randomly set the color of points to all zeros. + + Once this transform is executed, all the points' color will be dropped. + Refer to `PAConv `_ for more details. + + Args: + drop_ratio (float): The probability of dropping point colors. + Defaults to 0.2. + """ + + def __init__(self, drop_ratio: float = 0.2) -> None: + assert isinstance(drop_ratio, (int, float)) and 0 <= drop_ratio <= 1, \ + f'invalid drop_ratio value {drop_ratio}' + self.drop_ratio = drop_ratio + + def transform(self, input_dict: dict) -> dict: + """Call function to drop point colors. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after color dropping, 'points' key is updated + in the result dict. 
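+
+        Example (an illustrative pipeline entry; it assumes the points
+        already carry a color attribute, e.g. loaded with
+        ``use_color=True``)::
+
+            dict(type='RandomDropPointsColor', drop_ratio=0.2)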
+ """ + points = input_dict['points'] + assert points.attribute_dims is not None and \ + 'color' in points.attribute_dims, \ + 'Expect points have color attribute' + + # this if-expression is a bit strange + # `RandomDropPointsColor` is used in training 3D segmentor PAConv + # we discovered in our experiments that, using + # `if np.random.rand() > 1.0 - self.drop_ratio` consistently leads to + # better results than using `if np.random.rand() < self.drop_ratio` + # so we keep this hack in our codebase + if np.random.rand() > 1.0 - self.drop_ratio: + points.color = points.color * 0.0 + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(drop_ratio={self.drop_ratio})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomFlip3D(RandomFlip): + """Flip the points & bbox. + + If the input dict contains the key "flip", then the flag will be used, + otherwise it will be randomly decided by a ratio specified in the init + method. + + Required Keys: + + - points (np.float32) + - gt_bboxes_3d (np.float32) + + Modified Keys: + + - points (np.float32) + - gt_bboxes_3d (np.float32) + + Added Keys: + + - points (np.float32) + - pcd_trans (np.float32) + - pcd_rotation (np.float32) + - pcd_rotation_angle (np.float32) + - pcd_scale_factor (np.float32) + + Args: + sync_2d (bool): Whether to apply flip according to the 2D + images. If True, it will apply the same flip as that to 2D images. + If False, it will decide whether to flip randomly and independently + to that of 2D images. Defaults to True. + flip_ratio_bev_horizontal (float): The flipping probability + in horizontal direction. Defaults to 0.0. + flip_ratio_bev_vertical (float): The flipping probability + in vertical direction. Defaults to 0.0. + flip_box3d (bool): Whether to flip bounding box. In most of the case, + the box should be fliped. In cam-based bev detection, this is set + to False, since the flip of 2D images does not influence the 3D + box. Defaults to True. + """ + + def __init__(self, + sync_2d: bool = True, + flip_ratio_bev_horizontal: float = 0.0, + flip_ratio_bev_vertical: float = 0.0, + flip_box3d: bool = True, + **kwargs) -> None: + # `flip_ratio_bev_horizontal` is equal to + # for flip prob of 2d image when + # `sync_2d` is True + super(RandomFlip3D, self).__init__( + prob=flip_ratio_bev_horizontal, direction='horizontal', **kwargs) + self.sync_2d = sync_2d + self.flip_ratio_bev_horizontal = flip_ratio_bev_horizontal + self.flip_ratio_bev_vertical = flip_ratio_bev_vertical + self.flip_box3d = flip_box3d + if flip_ratio_bev_horizontal is not None: + assert isinstance( + flip_ratio_bev_horizontal, + (int, float)) and 0 <= flip_ratio_bev_horizontal <= 1 + if flip_ratio_bev_vertical is not None: + assert isinstance( + flip_ratio_bev_vertical, + (int, float)) and 0 <= flip_ratio_bev_vertical <= 1 + + def random_flip_data_3d(self, + input_dict: dict, + direction: str = 'horizontal') -> None: + """Flip 3D data randomly. + + `random_flip_data_3d` should take these situations into consideration: + + - 1. LIDAR-based 3d detection + - 2. LIDAR-based 3d segmentation + - 3. vision-only detection + - 4. multi-modality 3d detection. + + Args: + input_dict (dict): Result dict from loading pipeline. + direction (str): Flip direction. Defaults to 'horizontal'. + + Returns: + dict: Flipped results, 'points', 'bbox3d_fields' keys are + updated in the result dict. 
+ """ + assert direction in ['horizontal', 'vertical'] + if self.flip_box3d: + if 'gt_bboxes_3d' in input_dict: + if 'points' in input_dict: + input_dict['points'] = input_dict['gt_bboxes_3d'].flip( + direction, points=input_dict['points']) + else: + # vision-only detection + input_dict['gt_bboxes_3d'].flip(direction) + else: + input_dict['points'].flip(direction) + + if 'centers_2d' in input_dict: + assert self.sync_2d is True and direction == 'horizontal', \ + 'Only support sync_2d=True and horizontal flip with images' + w = input_dict['img_shape'][1] + input_dict['centers_2d'][..., 0] = \ + w - input_dict['centers_2d'][..., 0] + # need to modify the horizontal position of camera center + # along u-axis in the image (flip like centers2d) + # ['cam2img'][0][2] = c_u + # see more details and examples at + # https://github.com/open-mmlab/mmdetection3d/pull/744 + input_dict['cam2img'][0][2] = w - input_dict['cam2img'][0][2] + + def _flip_on_direction(self, results: dict) -> None: + """Function to flip images, bounding boxes, semantic segmentation map + and keypoints. + + Add the override feature that if 'flip' is already in results, use it + to do the augmentation. + """ + if 'flip' not in results: + cur_dir = self._choose_direction() + else: + cur_dir = results['flip_direction'] + if cur_dir is None: + results['flip'] = False + results['flip_direction'] = None + else: + results['flip'] = True + results['flip_direction'] = cur_dir + self._flip(results) + + def transform(self, input_dict: dict) -> dict: + """Call function to flip points, values in the ``bbox3d_fields`` and + also flip 2D image and its annotations. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction', + 'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added + into result dict. + """ + # flip 2D image and its annotations + if 'img' in input_dict: + super(RandomFlip3D, self).transform(input_dict) + + if self.sync_2d and 'img' in input_dict: + input_dict['pcd_horizontal_flip'] = input_dict['flip'] + input_dict['pcd_vertical_flip'] = False + else: + if 'pcd_horizontal_flip' not in input_dict: + flip_horizontal = True if np.random.rand( + ) < self.flip_ratio_bev_horizontal else False + input_dict['pcd_horizontal_flip'] = flip_horizontal + if 'pcd_vertical_flip' not in input_dict: + flip_vertical = True if np.random.rand( + ) < self.flip_ratio_bev_vertical else False + input_dict['pcd_vertical_flip'] = flip_vertical + + if 'transformation_3d_flow' not in input_dict: + input_dict['transformation_3d_flow'] = [] + + if input_dict['pcd_horizontal_flip']: + self.random_flip_data_3d(input_dict, 'horizontal') + input_dict['transformation_3d_flow'].extend(['HF']) + if input_dict['pcd_vertical_flip']: + self.random_flip_data_3d(input_dict, 'vertical') + input_dict['transformation_3d_flow'].extend(['VF']) + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(sync_2d={self.sync_2d},' + repr_str += f' flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomJitterPoints(BaseTransform): + """Randomly jitter point coordinates. + + Different from the global translation in ``GlobalRotScaleTrans``, here we + apply different noises to each point in a scene. + + Args: + jitter_std (list[float]): The standard deviation of jittering noise. 
+ This applies random noise to all points in a 3D scene, which is + sampled from a gaussian distribution whose standard deviation is + set by ``jitter_std``. Defaults to [0.01, 0.01, 0.01] + clip_range (list[float]): Clip the randomly generated jitter + noise into this range. If None is given, don't perform clipping. + Defaults to [-0.05, 0.05] + + Note: + This transform should only be used in point cloud segmentation tasks + because we don't transform ground-truth bboxes accordingly. + For similar transform in detection task, please refer to `ObjectNoise`. + """ + + def __init__(self, + jitter_std: List[float] = [0.01, 0.01, 0.01], + clip_range: List[float] = [-0.05, 0.05]) -> None: + seq_types = (list, tuple, np.ndarray) + if not isinstance(jitter_std, seq_types): + assert isinstance(jitter_std, (int, float)), \ + f'unsupported jitter_std type {type(jitter_std)}' + jitter_std = [jitter_std, jitter_std, jitter_std] + self.jitter_std = jitter_std + + if clip_range is not None: + if not isinstance(clip_range, seq_types): + assert isinstance(clip_range, (int, float)), \ + f'unsupported clip_range type {type(clip_range)}' + clip_range = [-clip_range, clip_range] + self.clip_range = clip_range + + def transform(self, input_dict: dict) -> dict: + """Call function to jitter all the points in the scene. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after adding noise to each point, + 'points' key is updated in the result dict. + """ + points = input_dict['points'] + jitter_std = np.array(self.jitter_std, dtype=np.float32) + jitter_noise = \ + np.random.randn(points.shape[0], 3) * jitter_std[None, :] + if self.clip_range is not None: + jitter_noise = np.clip(jitter_noise, self.clip_range[0], + self.clip_range[1]) + + points.translate(jitter_noise) + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(jitter_std={self.jitter_std},' + repr_str += f' clip_range={self.clip_range})' + return repr_str + + +@TRANSFORMS.register_module() +class ObjectSample(BaseTransform): + """Sample GT objects to the data. + + Required Keys: + + - points + - ann_info + - gt_bboxes_3d + - gt_labels_3d + - img (optional) + - gt_bboxes (optional) + + Modified Keys: + + - points + - gt_bboxes_3d + - gt_labels_3d + - img (optional) + - gt_bboxes (optional) + + Added Keys: + + - plane (optional) + + Args: + db_sampler (dict): Config dict of the database sampler. + sample_2d (bool): Whether to also paste 2D image patch to the images. + This should be true when applying multi-modality cut-and-paste. + Defaults to False. + use_ground_plane (bool): Whether to use ground plane to adjust the + 3D labels. Defaults to False. + """ + + def __init__(self, + db_sampler: dict, + sample_2d: bool = False, + use_ground_plane: bool = False) -> None: + self.sampler_cfg = db_sampler + self.sample_2d = sample_2d + if 'type' not in db_sampler.keys(): + db_sampler['type'] = 'DataBaseSampler' + self.db_sampler = TRANSFORMS.build(db_sampler) + self.use_ground_plane = use_ground_plane + self.disabled = False + + @staticmethod + def remove_points_in_boxes(points: BasePoints, + boxes: np.ndarray) -> np.ndarray: + """Remove the points in the sampled bounding boxes. + + Args: + points (:obj:`BasePoints`): Input point cloud array. + boxes (np.ndarray): Sampled ground truth boxes. + + Returns: + np.ndarray: Points with those in the boxes removed. 
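+
+        Example (a minimal sketch; ``sampled_boxes`` stands for an (M, 7)
+        box array in the same coordinate system as ``points``)::
+
+            points = ObjectSample.remove_points_in_boxes(
+                points, sampled_boxes)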
+ """ + masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes) + points = points[np.logical_not(masks.any(-1))] + return points + + def transform(self, input_dict: dict) -> dict: + """Transform function to sample ground truth objects to the data. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after object sampling augmentation, + 'points', 'gt_bboxes_3d', 'gt_labels_3d' keys are updated + in the result dict. + """ + if self.disabled: + return input_dict + + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + gt_labels_3d = input_dict['gt_labels_3d'] + + if self.use_ground_plane: + ground_plane = input_dict.get('plane', None) + assert ground_plane is not None, '`use_ground_plane` is True ' \ + 'but find plane is None' + else: + ground_plane = None + # change to float for blending operation + points = input_dict['points'] + if self.sample_2d: + img = input_dict['img'] + gt_bboxes_2d = input_dict['gt_bboxes'] + # Assume for now 3D & 2D bboxes are the same + sampled_dict = self.db_sampler.sample_all( + gt_bboxes_3d.tensor.numpy(), + gt_labels_3d, + gt_bboxes_2d=gt_bboxes_2d, + img=img) + else: + sampled_dict = self.db_sampler.sample_all( + gt_bboxes_3d.tensor.numpy(), + gt_labels_3d, + img=None, + ground_plane=ground_plane) + + if sampled_dict is not None: + sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d'] + sampled_points = sampled_dict['points'] + sampled_gt_labels = sampled_dict['gt_labels_3d'] + + gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels], + axis=0) + gt_bboxes_3d = gt_bboxes_3d.new_box( + np.concatenate( + [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d])) + + points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d) + # check the points dimension + points = points.cat([sampled_points, points]) + + if self.sample_2d: + sampled_gt_bboxes_2d = sampled_dict['gt_bboxes_2d'] + gt_bboxes_2d = np.concatenate( + [gt_bboxes_2d, sampled_gt_bboxes_2d]).astype(np.float32) + + input_dict['gt_bboxes'] = gt_bboxes_2d + input_dict['img'] = sampled_dict['img'] + + input_dict['gt_bboxes_3d'] = gt_bboxes_3d + input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.int64) + input_dict['points'] = points + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(db_sampler={self.db_sampler},' + repr_str += f' sample_2d={self.sample_2d},' + repr_str += f' use_ground_plane={self.use_ground_plane})' + return repr_str + + +@TRANSFORMS.register_module() +class ObjectNoise(BaseTransform): + """Apply noise to each GT objects in the scene. + + Required Keys: + + - points + - gt_bboxes_3d + + Modified Keys: + + - points + - gt_bboxes_3d + + Args: + translation_std (list[float]): Standard deviation of the + distribution where translation noise are sampled from. + Defaults to [0.25, 0.25, 0.25]. + global_rot_range (list[float]): Global rotation to the scene. + Defaults to [0.0, 0.0]. + rot_range (list[float]): Object rotation range. + Defaults to [-0.15707963267, 0.15707963267]. + num_try (int): Number of times to try if the noise applied is invalid. + Defaults to 100. 
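+
+    Example (an illustrative pipeline entry; the noise ranges are typical
+    values and should be tuned per dataset)::
+
+        dict(
+            type='ObjectNoise',
+            num_try=100,
+            translation_std=[1.0, 1.0, 0.5],
+            global_rot_range=[0.0, 0.0],
+            rot_range=[-0.78539816, 0.78539816])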
+ """ + + def __init__(self, + translation_std: List[float] = [0.25, 0.25, 0.25], + global_rot_range: List[float] = [0.0, 0.0], + rot_range: List[float] = [-0.15707963267, 0.15707963267], + num_try: int = 100) -> None: + self.translation_std = translation_std + self.global_rot_range = global_rot_range + self.rot_range = rot_range + self.num_try = num_try + + def transform(self, input_dict: dict) -> dict: + """Transform function to apply noise to each ground truth in the scene. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after adding noise to each object, + 'points', 'gt_bboxes_3d' keys are updated in the result dict. + """ + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + points = input_dict['points'] + + # TODO: this is inplace operation + numpy_box = gt_bboxes_3d.tensor.numpy() + numpy_points = points.tensor.numpy() + + noise_per_object_v3_( + numpy_box, + numpy_points, + rotation_perturb=self.rot_range, + center_noise_std=self.translation_std, + global_random_rot_range=self.global_rot_range, + num_try=self.num_try) + + input_dict['gt_bboxes_3d'] = gt_bboxes_3d.new_box(numpy_box) + input_dict['points'] = points.new_point(numpy_points) + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(num_try={self.num_try},' + repr_str += f' translation_std={self.translation_std},' + repr_str += f' global_rot_range={self.global_rot_range},' + repr_str += f' rot_range={self.rot_range})' + return repr_str + + +@TRANSFORMS.register_module() +class GlobalAlignment(BaseTransform): + """Apply global alignment to 3D scene points by rotation and translation. + + Args: + rotation_axis (int): Rotation axis for points and bboxes rotation. + + Note: + We do not record the applied rotation and translation as in + GlobalRotScaleTrans. Because usually, we do not need to reverse + the alignment step. + For example, ScanNet 3D detection task uses aligned ground-truth + bounding boxes for evaluation. + """ + + def __init__(self, rotation_axis: int) -> None: + self.rotation_axis = rotation_axis + + def _trans_points(self, results: dict, trans_factor: np.ndarray) -> None: + """Private function to translate points. + + Args: + input_dict (dict): Result dict from loading pipeline. + trans_factor (np.ndarray): Translation vector to be applied. + + Returns: + dict: Results after translation, 'points' is updated in the dict. + """ + results['points'].translate(trans_factor) + + def _rot_points(self, results: dict, rot_mat: np.ndarray) -> None: + """Private function to rotate bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + rot_mat (np.ndarray): Rotation matrix to be applied. + + Returns: + dict: Results after rotation, 'points' is updated in the dict. + """ + # input should be rot_mat_T so I transpose it here + results['points'].rotate(rot_mat.T) + + def _check_rot_mat(self, rot_mat: np.ndarray) -> None: + """Check if rotation matrix is valid for self.rotation_axis. + + Args: + rot_mat (np.ndarray): Rotation matrix to be applied. + """ + is_valid = np.allclose(np.linalg.det(rot_mat), 1.0) + valid_array = np.zeros(3) + valid_array[self.rotation_axis] = 1.0 + is_valid &= (rot_mat[self.rotation_axis, :] == valid_array).all() + is_valid &= (rot_mat[:, self.rotation_axis] == valid_array).all() + assert is_valid, f'invalid rotation matrix {rot_mat}' + + def transform(self, results: dict) -> dict: + """Call function to shuffle points. 
+ + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after global alignment, 'points' and keys in + input_dict['bbox3d_fields'] are updated in the result dict. + """ + assert 'axis_align_matrix' in results, \ + 'axis_align_matrix is not provided in GlobalAlignment' + + axis_align_matrix = results['axis_align_matrix'] + assert axis_align_matrix.shape == (4, 4), \ + f'invalid shape {axis_align_matrix.shape} for axis_align_matrix' + rot_mat = axis_align_matrix[:3, :3] + trans_vec = axis_align_matrix[:3, -1] + + self._check_rot_mat(rot_mat) + self._rot_points(results, rot_mat) + self._trans_points(results, trans_vec) + + return results + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(rotation_axis={self.rotation_axis})' + return repr_str + + +@TRANSFORMS.register_module() +class GlobalRotScaleTrans(BaseTransform): + """Apply global rotation, scaling and translation to a 3D scene. + + Required Keys: + + - points (np.float32) + - gt_bboxes_3d (np.float32) + + Modified Keys: + + - points (np.float32) + - gt_bboxes_3d (np.float32) + + Added Keys: + + - points (np.float32) + - pcd_trans (np.float32) + - pcd_rotation (np.float32) + - pcd_rotation_angle (np.float32) + - pcd_scale_factor (np.float32) + + Args: + rot_range (list[float]): Range of rotation angle. + Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]). + scale_ratio_range (list[float]): Range of scale ratio. + Defaults to [0.95, 1.05]. + translation_std (list[float]): The standard deviation of + translation noise applied to a scene, which + is sampled from a gaussian distribution whose standard deviation + is set by ``translation_std``. Defaults to [0, 0, 0]. + shift_height (bool): Whether to shift height. + (the fourth dimension of indoor points) when scaling. + Defaults to False. + """ + + def __init__(self, + rot_range: List[float] = [-0.78539816, 0.78539816], + scale_ratio_range: List[float] = [0.95, 1.05], + translation_std: List[int] = [0, 0, 0], + shift_height: bool = False) -> None: + seq_types = (list, tuple, np.ndarray) + if not isinstance(rot_range, seq_types): + assert isinstance(rot_range, (int, float)), \ + f'unsupported rot_range type {type(rot_range)}' + rot_range = [-rot_range, rot_range] + self.rot_range = rot_range + + assert isinstance(scale_ratio_range, seq_types), \ + f'unsupported scale_ratio_range type {type(scale_ratio_range)}' + + self.scale_ratio_range = scale_ratio_range + + if not isinstance(translation_std, seq_types): + assert isinstance(translation_std, (int, float)), \ + f'unsupported translation_std type {type(translation_std)}' + translation_std = [ + translation_std, translation_std, translation_std + ] + assert all([std >= 0 for std in translation_std]), \ + 'translation_std should be positive' + self.translation_std = translation_std + self.shift_height = shift_height + + def _trans_bbox_points(self, input_dict: dict) -> None: + """Private function to translate bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after translation, 'points', 'pcd_trans' + and `gt_bboxes_3d` is updated in the result dict. 
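+
+        Example (illustrative): with ``translation_std=[0.5, 0.5, 0.5]`` a
+        single Gaussian offset, e.g. ``[0.3, -0.7, 0.1]``, is drawn per
+        scene and applied to both the points and ``gt_bboxes_3d``.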
+ """ + translation_std = np.array(self.translation_std, dtype=np.float32) + trans_factor = np.random.normal(scale=translation_std, size=3).T + + input_dict['points'].translate(trans_factor) + input_dict['pcd_trans'] = trans_factor + if 'gt_bboxes_3d' in input_dict: + input_dict['gt_bboxes_3d'].translate(trans_factor) + + def _rot_bbox_points(self, input_dict: dict) -> None: + """Private function to rotate bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after rotation, 'points', 'pcd_rotation' + and `gt_bboxes_3d` is updated in the result dict. + """ + rotation = self.rot_range + noise_rotation = np.random.uniform(rotation[0], rotation[1]) + + if 'gt_bboxes_3d' in input_dict and \ + len(input_dict['gt_bboxes_3d'].tensor) != 0: + # rotate points with bboxes + points, rot_mat_T = input_dict['gt_bboxes_3d'].rotate( + noise_rotation, input_dict['points']) + input_dict['points'] = points + else: + # if no bbox in input_dict, only rotate points + rot_mat_T = input_dict['points'].rotate(noise_rotation) + + input_dict['pcd_rotation'] = rot_mat_T + input_dict['pcd_rotation_angle'] = noise_rotation + + def _scale_bbox_points(self, input_dict: dict) -> None: + """Private function to scale bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'points' and + `gt_bboxes_3d` is updated in the result dict. + """ + scale = input_dict['pcd_scale_factor'] + points = input_dict['points'] + points.scale(scale) + if self.shift_height: + assert 'height' in points.attribute_dims.keys(), \ + 'setting shift_height=True but points have no height attribute' + points.tensor[:, points.attribute_dims['height']] *= scale + input_dict['points'] = points + + if 'gt_bboxes_3d' in input_dict and \ + len(input_dict['gt_bboxes_3d'].tensor) != 0: + input_dict['gt_bboxes_3d'].scale(scale) + + def _random_scale(self, input_dict: dict) -> None: + """Private function to randomly set the scale factor. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'pcd_scale_factor' + are updated in the result dict. + """ + scale_factor = np.random.uniform(self.scale_ratio_range[0], + self.scale_ratio_range[1]) + input_dict['pcd_scale_factor'] = scale_factor + + def transform(self, input_dict: dict) -> dict: + """Private function to rotate, scale and translate bounding boxes and + points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'points', 'pcd_rotation', + 'pcd_scale_factor', 'pcd_trans' and `gt_bboxes_3d` are updated + in the result dict. 
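+
+        Example (an illustrative pipeline entry; the ranges are common
+        choices and may be tuned per dataset)::
+
+            dict(
+                type='GlobalRotScaleTrans',
+                rot_range=[-0.78539816, 0.78539816],
+                scale_ratio_range=[0.95, 1.05],
+                translation_std=[0, 0, 0])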
+ """ + if 'transformation_3d_flow' not in input_dict: + input_dict['transformation_3d_flow'] = [] + + self._rot_bbox_points(input_dict) + + if 'pcd_scale_factor' not in input_dict: + self._random_scale(input_dict) + self._scale_bbox_points(input_dict) + + self._trans_bbox_points(input_dict) + + input_dict['transformation_3d_flow'].extend(['R', 'S', 'T']) + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(rot_range={self.rot_range},' + repr_str += f' scale_ratio_range={self.scale_ratio_range},' + repr_str += f' translation_std={self.translation_std},' + repr_str += f' shift_height={self.shift_height})' + return repr_str + + +@TRANSFORMS.register_module() +class PointShuffle(BaseTransform): + """Shuffle input points.""" + + def transform(self, input_dict: dict) -> dict: + """Call function to shuffle points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'points', 'pts_instance_mask' + and 'pts_semantic_mask' keys are updated in the result dict. + """ + idx = input_dict['points'].shuffle() + idx = idx.numpy() + + pts_instance_mask = input_dict.get('pts_instance_mask', None) + pts_semantic_mask = input_dict.get('pts_semantic_mask', None) + + if pts_instance_mask is not None: + input_dict['pts_instance_mask'] = pts_instance_mask[idx] + + if pts_semantic_mask is not None: + input_dict['pts_semantic_mask'] = pts_semantic_mask[idx] + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + return self.__class__.__name__ + + +@TRANSFORMS.register_module() +class ObjectRangeFilter(BaseTransform): + """Filter objects by the range. + + Required Keys: + + - gt_bboxes_3d + + Modified Keys: + + - gt_bboxes_3d + + Args: + point_cloud_range (list[float]): Point cloud range. + """ + + def __init__(self, point_cloud_range: List[float]) -> None: + self.pcd_range = np.array(point_cloud_range, dtype=np.float32) + + def transform(self, input_dict: dict) -> dict: + """Transform function to filter objects by the range. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' + keys are updated in the result dict. 
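+
+        Example (an illustrative pipeline entry; the range is a
+        dataset-specific placeholder, here a KITTI-style one)::
+
+            dict(
+                type='ObjectRangeFilter',
+                point_cloud_range=[0, -40, -3, 70.4, 40, 1])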
+ """ + # Check points instance type and initialise bev_range + if isinstance(input_dict['gt_bboxes_3d'], + (LiDARInstance3DBoxes, DepthInstance3DBoxes)): + bev_range = self.pcd_range[[0, 1, 3, 4]] + elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes): + bev_range = self.pcd_range[[0, 2, 3, 5]] + + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + gt_labels_3d = input_dict['gt_labels_3d'] + mask = gt_bboxes_3d.in_range_bev(bev_range) + gt_bboxes_3d = gt_bboxes_3d[mask] + # mask is a torch tensor but gt_labels_3d is still numpy array + # using mask to index gt_labels_3d will cause bug when + # len(gt_labels_3d) == 1, where mask=1 will be interpreted + # as gt_labels_3d[1] and cause out of index error + gt_labels_3d = gt_labels_3d[mask.numpy().astype(bool)] + + # limit rad to [-pi, pi] + gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi) + input_dict['gt_bboxes_3d'] = gt_bboxes_3d + input_dict['gt_labels_3d'] = gt_labels_3d + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(point_cloud_range={self.pcd_range.tolist()})' + return repr_str + + +@TRANSFORMS.register_module() +class PointsRangeFilter(BaseTransform): + """Filter points by the range. + + Required Keys: + + - points + - pts_instance_mask (optional) + + Modified Keys: + + - points + - pts_instance_mask (optional) + + Args: + point_cloud_range (list[float]): Point cloud range. + """ + + def __init__(self, point_cloud_range: List[float]) -> None: + self.pcd_range = np.array(point_cloud_range, dtype=np.float32) + + def transform(self, input_dict: dict) -> dict: + """Transform function to filter points by the range. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'points', 'pts_instance_mask' + and 'pts_semantic_mask' keys are updated in the result dict. + """ + points = input_dict['points'] + points_mask = points.in_range_3d(self.pcd_range) + clean_points = points[points_mask] + input_dict['points'] = clean_points + points_mask = points_mask.numpy() + + pts_instance_mask = input_dict.get('pts_instance_mask', None) + pts_semantic_mask = input_dict.get('pts_semantic_mask', None) + + if pts_instance_mask is not None: + input_dict['pts_instance_mask'] = pts_instance_mask[points_mask] + + if pts_semantic_mask is not None: + input_dict['pts_semantic_mask'] = pts_semantic_mask[points_mask] + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(point_cloud_range={self.pcd_range.tolist()})' + return repr_str + + +@TRANSFORMS.register_module() +class ObjectNameFilter(BaseTransform): + """Filter GT objects by their names. + + Required Keys: + + - gt_labels_3d + + Modified Keys: + + - gt_labels_3d + + Args: + classes (list[str]): List of class names to be kept for training. + """ + + def __init__(self, classes: List[str]) -> None: + self.classes = classes + self.labels = list(range(len(self.classes))) + + def transform(self, input_dict: dict) -> dict: + """Transform function to filter objects by their names. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' + keys are updated in the result dict. 
+ """ + gt_labels_3d = input_dict['gt_labels_3d'] + gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d], + dtype=bool) + input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask] + input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask] + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(classes={self.classes})' + return repr_str + + +@TRANSFORMS.register_module() +class PointSample(BaseTransform): + """Point sample. + + Sampling data to a certain number. + + Required Keys: + + - points + - pts_instance_mask (optional) + - pts_semantic_mask (optional) + + Modified Keys: + + - points + - pts_instance_mask (optional) + - pts_semantic_mask (optional) + + Args: + num_points (int): Number of points to be sampled. + sample_range (float, optional): The range where to sample points. + If not None, the points with depth larger than `sample_range` are + prior to be sampled. Defaults to None. + replace (bool): Whether the sampling is with or without replacement. + Defaults to False. + """ + + def __init__(self, + num_points: int, + sample_range: Optional[float] = None, + replace: bool = False) -> None: + self.num_points = num_points + self.sample_range = sample_range + self.replace = replace + + def _points_random_sampling( + self, + points: BasePoints, + num_samples: int, + sample_range: Optional[float] = None, + replace: bool = False, + return_choices: bool = False + ) -> Union[Tuple[BasePoints, np.ndarray], BasePoints]: + """Points random sampling. + + Sample points to a certain number. + + Args: + points (:obj:`BasePoints`): 3D Points. + num_samples (int): Number of samples to be sampled. + sample_range (float, optional): Indicating the range where the + points will be sampled. Defaults to None. + replace (bool): Sampling with or without replacement. + Defaults to False. + return_choices (bool): Whether return choice. Defaults to False. + + Returns: + tuple[:obj:`BasePoints`, np.ndarray] | :obj:`BasePoints`: + + - points (:obj:`BasePoints`): 3D Points. + - choices (np.ndarray, optional): The generated random samples. + """ + if not replace: + replace = (points.shape[0] < num_samples) + point_range = range(len(points)) + if sample_range is not None and not replace: + # Only sampling the near points when len(points) >= num_samples + dist = np.linalg.norm(points.coord.numpy(), axis=1) + far_inds = np.where(dist >= sample_range)[0] + near_inds = np.where(dist < sample_range)[0] + # in case there are too many far points + if len(far_inds) > num_samples: + far_inds = np.random.choice( + far_inds, num_samples, replace=False) + point_range = near_inds + num_samples -= len(far_inds) + choices = np.random.choice(point_range, num_samples, replace=replace) + if sample_range is not None and not replace: + choices = np.concatenate((far_inds, choices)) + # Shuffle points after sampling + np.random.shuffle(choices) + if return_choices: + return points[choices], choices + else: + return points[choices] + + def transform(self, input_dict: dict) -> dict: + """Transform function to sample points to in indoor scenes. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after sampling, 'points', 'pts_instance_mask' + and 'pts_semantic_mask' keys are updated in the result dict. 
+ """ + points = input_dict['points'] + points, choices = self._points_random_sampling( + points, + self.num_points, + self.sample_range, + self.replace, + return_choices=True) + input_dict['points'] = points + + pts_instance_mask = input_dict.get('pts_instance_mask', None) + pts_semantic_mask = input_dict.get('pts_semantic_mask', None) + + if pts_instance_mask is not None: + pts_instance_mask = pts_instance_mask[choices] + input_dict['pts_instance_mask'] = pts_instance_mask + + if pts_semantic_mask is not None: + pts_semantic_mask = pts_semantic_mask[choices] + input_dict['pts_semantic_mask'] = pts_semantic_mask + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(num_points={self.num_points},' + repr_str += f' sample_range={self.sample_range},' + repr_str += f' replace={self.replace})' + + return repr_str + + +@TRANSFORMS.register_module() +class IndoorPointSample(PointSample): + """Indoor point sample. + + Sampling data to a certain number. + NOTE: IndoorPointSample is deprecated in favor of PointSample + + Args: + num_points (int): Number of points to be sampled. + """ + + def __init__(self, *args, **kwargs): + warnings.warn( + 'IndoorPointSample is deprecated in favor of PointSample') + super(IndoorPointSample, self).__init__(*args, **kwargs) + + +@TRANSFORMS.register_module() +class IndoorPatchPointSample(BaseTransform): + r"""Indoor point sample within a patch. Modified from `PointNet++ `_. + + Sampling data to a certain number for semantic segmentation. + + Args: + num_points (int): Number of points to be sampled. + block_size (float): Size of a block to sample points from. + Defaults to 1.5. + sample_rate (float, optional): Stride used in sliding patch generation. + This parameter is unused in `IndoorPatchPointSample` and thus has + been deprecated. We plan to remove it in the future. + Defaults to None. + ignore_index (int, optional): Label index that won't be used for the + segmentation task. This is set in PointSegClassMapping as neg_cls. + If not None, will be used as a patch selection criterion. + Defaults to None. + use_normalized_coord (bool): Whether to use normalized xyz as + additional features. Defaults to False. + num_try (int): Number of times to try if the patch selected is invalid. + Defaults to 10. + enlarge_size (float): Enlarge the sampled patch to + [-block_size / 2 - enlarge_size, block_size / 2 + enlarge_size] as + an augmentation. If None, set it as 0. Defaults to 0.2. + min_unique_num (int, optional): Minimum number of unique points + the sampled patch should contain. If None, use PointNet++'s method + to judge uniqueness. Defaults to None. + eps (float): A value added to patch boundary to guarantee + points coverage. Defaults to 1e-2. + + Note: + This transform should only be used in the training process of point + cloud segmentation tasks. For the sliding patch generation and + inference process in testing, please refer to the `slide_inference` + function of `EncoderDecoder3D` class. 
+ """ + + def __init__(self, + num_points: int, + block_size: float = 1.5, + sample_rate: Optional[float] = None, + ignore_index: Optional[int] = None, + use_normalized_coord: bool = False, + num_try: int = 10, + enlarge_size: float = 0.2, + min_unique_num: Optional[int] = None, + eps: float = 1e-2) -> None: + self.num_points = num_points + self.block_size = block_size + self.ignore_index = ignore_index + self.use_normalized_coord = use_normalized_coord + self.num_try = num_try + self.enlarge_size = enlarge_size if enlarge_size is not None else 0.0 + self.min_unique_num = min_unique_num + self.eps = eps + + if sample_rate is not None: + warnings.warn( + "'sample_rate' has been deprecated and will be removed in " + 'the future. Please remove them from your code.') + + def _input_generation(self, coords: np.ndarray, patch_center: np.ndarray, + coord_max: np.ndarray, attributes: np.ndarray, + attribute_dims: dict, + point_type: type) -> BasePoints: + """Generating model input. + + Generate input by subtracting patch center and adding additional + features. Currently support colors and normalized xyz as features. + + Args: + coords (np.ndarray): Sampled 3D Points. + patch_center (np.ndarray): Center coordinate of the selected patch. + coord_max (np.ndarray): Max coordinate of all 3D Points. + attributes (np.ndarray): features of input points. + attribute_dims (dict): Dictionary to indicate the meaning of extra + dimension. + point_type (type): class of input points inherited from BasePoints. + + Returns: + :obj:`BasePoints`: The generated input data. + """ + # subtract patch center, the z dimension is not centered + centered_coords = coords.copy() + centered_coords[:, 0] -= patch_center[0] + centered_coords[:, 1] -= patch_center[1] + + if self.use_normalized_coord: + normalized_coord = coords / coord_max + attributes = np.concatenate([attributes, normalized_coord], axis=1) + if attribute_dims is None: + attribute_dims = dict() + attribute_dims.update( + dict(normalized_coord=[ + attributes.shape[1], attributes.shape[1] + + 1, attributes.shape[1] + 2 + ])) + + points = np.concatenate([centered_coords, attributes], axis=1) + points = point_type( + points, points_dim=points.shape[1], attribute_dims=attribute_dims) + + return points + + def _patch_points_sampling( + self, points: BasePoints, + sem_mask: np.ndarray) -> Tuple[BasePoints, np.ndarray]: + """Patch points sampling. + + First sample a valid patch. + Then sample points within that patch to a certain number. + + Args: + points (:obj:`BasePoints`): 3D Points. + sem_mask (np.ndarray): semantic segmentation mask for input points. + + Returns: + tuple[:obj:`BasePoints`, np.ndarray]: + + - points (:obj:`BasePoints`): 3D Points. + - choices (np.ndarray): The generated random samples. 
+ """ + coords = points.coord.numpy() + attributes = points.tensor[:, 3:].numpy() + attribute_dims = points.attribute_dims + point_type = type(points) + + coord_max = np.amax(coords, axis=0) + coord_min = np.amin(coords, axis=0) + + for _ in range(self.num_try): + # random sample a point as patch center + cur_center = coords[np.random.choice(coords.shape[0])] + + # boundary of a patch, which would be enlarged by + # `self.enlarge_size` as an augmentation + cur_max = cur_center + np.array( + [self.block_size / 2.0, self.block_size / 2.0, 0.0]) + cur_min = cur_center - np.array( + [self.block_size / 2.0, self.block_size / 2.0, 0.0]) + cur_max[2] = coord_max[2] + cur_min[2] = coord_min[2] + cur_choice = np.sum( + (coords >= (cur_min - self.enlarge_size)) * + (coords <= (cur_max + self.enlarge_size)), + axis=1) == 3 + + if not cur_choice.any(): # no points in this patch + continue + + cur_coords = coords[cur_choice, :] + cur_sem_mask = sem_mask[cur_choice] + point_idxs = np.where(cur_choice)[0] + mask = np.sum( + (cur_coords >= (cur_min - self.eps)) * (cur_coords <= + (cur_max + self.eps)), + axis=1) == 3 + + # two criteria for patch sampling, adopted from PointNet++ + # 1. selected patch should contain enough unique points + if self.min_unique_num is None: + # use PointNet++'s method as default + # [31, 31, 62] are just some big values used to transform + # coords from 3d array to 1d and then check their uniqueness + # this is used in all the ScanNet code following PointNet++ + vidx = np.ceil( + (cur_coords[mask, :] - cur_min) / (cur_max - cur_min) * + np.array([31.0, 31.0, 62.0])) + vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 + + vidx[:, 2]) + flag1 = len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02 + else: + # if `min_unique_num` is provided, directly compare with it + flag1 = mask.sum() >= self.min_unique_num + + # 2. selected patch should contain enough annotated points + if self.ignore_index is None: + flag2 = True + else: + flag2 = np.sum(cur_sem_mask != self.ignore_index) / \ + len(cur_sem_mask) >= 0.7 + + if flag1 and flag2: + break + + # sample idx to `self.num_points` + if point_idxs.size >= self.num_points: + # no duplicate in sub-sampling + choices = np.random.choice( + point_idxs, self.num_points, replace=False) + else: + # do not use random choice here to avoid some points not counted + dup = np.random.choice(point_idxs.size, + self.num_points - point_idxs.size) + idx_dup = np.concatenate( + [np.arange(point_idxs.size), + np.array(dup)], 0) + choices = point_idxs[idx_dup] + + # construct model input + points = self._input_generation(coords[choices], cur_center, coord_max, + attributes[choices], attribute_dims, + point_type) + + return points, choices + + def transform(self, input_dict: dict) -> dict: + """Call function to sample points to in indoor scenes. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after sampling, 'points', 'pts_instance_mask' + and 'pts_semantic_mask' keys are updated in the result dict. 
+ """ + points = input_dict['points'] + + assert 'pts_semantic_mask' in input_dict.keys(), \ + 'semantic mask should be provided in training and evaluation' + pts_semantic_mask = input_dict['pts_semantic_mask'] + + points, choices = self._patch_points_sampling(points, + pts_semantic_mask) + + input_dict['points'] = points + input_dict['pts_semantic_mask'] = pts_semantic_mask[choices] + + # 'eval_ann_info' will be passed to evaluator + if 'eval_ann_info' in input_dict: + input_dict['eval_ann_info']['pts_semantic_mask'] = \ + pts_semantic_mask[choices] + + pts_instance_mask = input_dict.get('pts_instance_mask', None) + + if pts_instance_mask is not None: + input_dict['pts_instance_mask'] = pts_instance_mask[choices] + # 'eval_ann_info' will be passed to evaluator + if 'eval_ann_info' in input_dict: + input_dict['eval_ann_info']['pts_instance_mask'] = \ + pts_instance_mask[choices] + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(num_points={self.num_points},' + repr_str += f' block_size={self.block_size},' + repr_str += f' ignore_index={self.ignore_index},' + repr_str += f' use_normalized_coord={self.use_normalized_coord},' + repr_str += f' num_try={self.num_try},' + repr_str += f' enlarge_size={self.enlarge_size},' + repr_str += f' min_unique_num={self.min_unique_num},' + repr_str += f' eps={self.eps})' + return repr_str + + +@TRANSFORMS.register_module() +class BackgroundPointsFilter(BaseTransform): + """Filter background points near the bounding box. + + Args: + bbox_enlarge_range (tuple[float] | float): Bbox enlarge range. + """ + + def __init__(self, bbox_enlarge_range: Union[Tuple[float], float]) -> None: + assert (is_tuple_of(bbox_enlarge_range, float) + and len(bbox_enlarge_range) == 3) \ + or isinstance(bbox_enlarge_range, float), \ + f'Invalid arguments bbox_enlarge_range {bbox_enlarge_range}' + + if isinstance(bbox_enlarge_range, float): + bbox_enlarge_range = [bbox_enlarge_range] * 3 + self.bbox_enlarge_range = np.array( + bbox_enlarge_range, dtype=np.float32)[np.newaxis, :] + + def transform(self, input_dict: dict) -> dict: + """Call function to filter points by the range. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'points', 'pts_instance_mask' + and 'pts_semantic_mask' keys are updated in the result dict. 
+ """ + points = input_dict['points'] + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + + # avoid groundtruth being modified + gt_bboxes_3d_np = gt_bboxes_3d.tensor.clone().numpy() + gt_bboxes_3d_np[:, :3] = gt_bboxes_3d.gravity_center.clone().numpy() + + enlarged_gt_bboxes_3d = gt_bboxes_3d_np.copy() + enlarged_gt_bboxes_3d[:, 3:6] += self.bbox_enlarge_range + points_numpy = points.tensor.clone().numpy() + foreground_masks = box_np_ops.points_in_rbbox( + points_numpy, gt_bboxes_3d_np, origin=(0.5, 0.5, 0.5)) + enlarge_foreground_masks = box_np_ops.points_in_rbbox( + points_numpy, enlarged_gt_bboxes_3d, origin=(0.5, 0.5, 0.5)) + foreground_masks = foreground_masks.max(1) + enlarge_foreground_masks = enlarge_foreground_masks.max(1) + valid_masks = ~np.logical_and(~foreground_masks, + enlarge_foreground_masks) + + input_dict['points'] = points[valid_masks] + pts_instance_mask = input_dict.get('pts_instance_mask', None) + if pts_instance_mask is not None: + input_dict['pts_instance_mask'] = pts_instance_mask[valid_masks] + + pts_semantic_mask = input_dict.get('pts_semantic_mask', None) + if pts_semantic_mask is not None: + input_dict['pts_semantic_mask'] = pts_semantic_mask[valid_masks] + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(bbox_enlarge_range={self.bbox_enlarge_range.tolist()})' + return repr_str + + +@TRANSFORMS.register_module() +class VoxelBasedPointSampler(BaseTransform): + """Voxel based point sampler. + + Apply voxel sampling to multiple sweep points. + + Args: + cur_sweep_cfg (dict): Config for sampling current points. + prev_sweep_cfg (dict, optional): Config for sampling previous points. + Defaults to None. + time_dim (int): Index that indicate the time dimension + for input points. Defaults to 3. + """ + + def __init__(self, + cur_sweep_cfg: dict, + prev_sweep_cfg: Optional[dict] = None, + time_dim: int = 3) -> None: + self.cur_voxel_generator = VoxelGenerator(**cur_sweep_cfg) + self.cur_voxel_num = self.cur_voxel_generator._max_voxels + self.time_dim = time_dim + if prev_sweep_cfg is not None: + assert prev_sweep_cfg['max_num_points'] == \ + cur_sweep_cfg['max_num_points'] + self.prev_voxel_generator = VoxelGenerator(**prev_sweep_cfg) + self.prev_voxel_num = self.prev_voxel_generator._max_voxels + else: + self.prev_voxel_generator = None + self.prev_voxel_num = 0 + + def _sample_points(self, points: np.ndarray, sampler: VoxelGenerator, + point_dim: int) -> np.ndarray: + """Sample points for each points subset. + + Args: + points (np.ndarray): Points subset to be sampled. + sampler (VoxelGenerator): Voxel based sampler for + each points subset. + point_dim (int): The dimension of each points. + + Returns: + np.ndarray: Sampled points. + """ + voxels, coors, num_points_per_voxel = sampler.generate(points) + if voxels.shape[0] < sampler._max_voxels: + padding_points = np.zeros([ + sampler._max_voxels - voxels.shape[0], sampler._max_num_points, + point_dim + ], + dtype=points.dtype) + padding_points[:] = voxels[0] + sample_points = np.concatenate([voxels, padding_points], axis=0) + else: + sample_points = voxels + + return sample_points + + def transform(self, results: dict) -> dict: + """Call function to sample points from multiple sweeps. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after sampling, 'points', 'pts_instance_mask' + and 'pts_semantic_mask' keys are updated in the result dict. 
+ """ + points = results['points'] + original_dim = points.shape[1] + + # TODO: process instance and semantic mask while _max_num_points + # is larger than 1 + # Extend points with seg and mask fields + map_fields2dim = [] + start_dim = original_dim + points_numpy = points.tensor.numpy() + extra_channel = [points_numpy] + for idx, key in enumerate(results['pts_mask_fields']): + map_fields2dim.append((key, idx + start_dim)) + extra_channel.append(results[key][..., None]) + + start_dim += len(results['pts_mask_fields']) + for idx, key in enumerate(results['pts_seg_fields']): + map_fields2dim.append((key, idx + start_dim)) + extra_channel.append(results[key][..., None]) + + points_numpy = np.concatenate(extra_channel, axis=-1) + + # Split points into two part, current sweep points and + # previous sweeps points. + # TODO: support different sampling methods for next sweeps points + # and previous sweeps points. + cur_points_flag = (points_numpy[:, self.time_dim] == 0) + cur_sweep_points = points_numpy[cur_points_flag] + prev_sweeps_points = points_numpy[~cur_points_flag] + if prev_sweeps_points.shape[0] == 0: + prev_sweeps_points = cur_sweep_points + + # Shuffle points before sampling + np.random.shuffle(cur_sweep_points) + np.random.shuffle(prev_sweeps_points) + + cur_sweep_points = self._sample_points(cur_sweep_points, + self.cur_voxel_generator, + points_numpy.shape[1]) + if self.prev_voxel_generator is not None: + prev_sweeps_points = self._sample_points(prev_sweeps_points, + self.prev_voxel_generator, + points_numpy.shape[1]) + + points_numpy = np.concatenate( + [cur_sweep_points, prev_sweeps_points], 0) + else: + points_numpy = cur_sweep_points + + if self.cur_voxel_generator._max_num_points == 1: + points_numpy = points_numpy.squeeze(1) + results['points'] = points.new_point(points_numpy[..., :original_dim]) + + # Restore the corresponding seg and mask fields + for key, dim_index in map_fields2dim: + results[key] = points_numpy[..., dim_index] + + return results + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + + def _auto_indent(repr_str, indent): + repr_str = repr_str.split('\n') + repr_str = [' ' * indent + t + '\n' for t in repr_str] + repr_str = ''.join(repr_str)[:-1] + return repr_str + + repr_str = self.__class__.__name__ + indent = 4 + repr_str += '(\n' + repr_str += ' ' * indent + f'num_cur_sweep={self.cur_voxel_num},\n' + repr_str += ' ' * indent + f'num_prev_sweep={self.prev_voxel_num},\n' + repr_str += ' ' * indent + f'time_dim={self.time_dim},\n' + repr_str += ' ' * indent + 'cur_voxel_generator=\n' + repr_str += f'{_auto_indent(repr(self.cur_voxel_generator), 8)},\n' + repr_str += ' ' * indent + 'prev_voxel_generator=\n' + repr_str += f'{_auto_indent(repr(self.prev_voxel_generator), 8)})' + return repr_str + + +@TRANSFORMS.register_module() +class AffineResize(BaseTransform): + """Get the affine transform matrices to the target size. + + Different from :class:`RandomAffine` in MMDetection, this class can + calculate the affine transform matrices while resizing the input image + to a fixed size. The affine transform matrices include: 1) matrix + transforming original image to the network input image size. 2) matrix + transforming original image to the network output feature map size. + + Args: + img_scale (tuple): Images scales for resizing. + down_ratio (int): The down ratio of feature map. + Actually the arg should be >= 1. + bbox_clip_border (bool): Whether clip the objects + outside the border of the image. Defaults to True. 
+ """ + + def __init__(self, + img_scale: Tuple, + down_ratio: int, + bbox_clip_border: bool = True) -> None: + + self.img_scale = img_scale + self.down_ratio = down_ratio + self.bbox_clip_border = bbox_clip_border + + def transform(self, results: dict) -> dict: + """Call function to do affine transform to input image and labels. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Results after affine resize, 'affine_aug', 'trans_mat' + keys are added in the result dict. + """ + # The results have gone through RandomShiftScale before AffineResize + if 'center' not in results: + img = results['img'] + height, width = img.shape[:2] + center = np.array([width / 2, height / 2], dtype=np.float32) + size = np.array([width, height], dtype=np.float32) + results['affine_aug'] = False + else: + # The results did not go through RandomShiftScale before + # AffineResize + img = results['img'] + center = results['center'] + size = results['size'] + + trans_affine = self._get_transform_matrix(center, size, self.img_scale) + + img = cv2.warpAffine(img, trans_affine[:2, :], self.img_scale) + + if isinstance(self.down_ratio, tuple): + trans_mat = [ + self._get_transform_matrix( + center, size, + (self.img_scale[0] // ratio, self.img_scale[1] // ratio)) + for ratio in self.down_ratio + ] # (3, 3) + else: + trans_mat = self._get_transform_matrix( + center, size, (self.img_scale[0] // self.down_ratio, + self.img_scale[1] // self.down_ratio)) + + results['img'] = img + results['img_shape'] = img.shape + results['pad_shape'] = img.shape + results['trans_mat'] = trans_mat + + if 'gt_bboxes' in results: + self._affine_bboxes(results, trans_affine) + + if 'centers_2d' in results: + centers2d = self._affine_transform(results['centers_2d'], + trans_affine) + valid_index = (centers2d[:, 0] > + 0) & (centers2d[:, 0] < + self.img_scale[0]) & (centers2d[:, 1] > 0) & ( + centers2d[:, 1] < self.img_scale[1]) + results['centers_2d'] = centers2d[valid_index] + + if 'gt_bboxes' in results: + results['gt_bboxes'] = results['gt_bboxes'][valid_index] + if 'gt_bboxes_labels' in results: + results['gt_bboxes_labels'] = results['gt_bboxes_labels'][ + valid_index] + if 'gt_masks' in results: + raise NotImplementedError( + 'AffineResize only supports bbox.') + + if 'gt_bboxes_3d' in results: + results['gt_bboxes_3d'].tensor = results[ + 'gt_bboxes_3d'].tensor[valid_index] + if 'gt_labels_3d' in results: + results['gt_labels_3d'] = results['gt_labels_3d'][ + valid_index] + + results['depths'] = results['depths'][valid_index] + + return results + + def _affine_bboxes(self, results: dict, matrix: np.ndarray) -> None: + """Affine transform bboxes to input image. + + Args: + results (dict): Result dict from loading pipeline. + matrix (np.ndarray): Matrix transforming original + image to the network input image size. + shape: (3, 3) + """ + + bboxes = results['gt_bboxes'] + bboxes[:, :2] = self._affine_transform(bboxes[:, :2], matrix) + bboxes[:, 2:] = self._affine_transform(bboxes[:, 2:], matrix) + if self.bbox_clip_border: + bboxes[:, [0, 2]] = bboxes[:, [0, 2]].clip(0, + self.img_scale[0] - 1) + bboxes[:, [1, 3]] = bboxes[:, [1, 3]].clip(0, + self.img_scale[1] - 1) + results['gt_bboxes'] = bboxes + + def _affine_transform(self, points: np.ndarray, + matrix: np.ndarray) -> np.ndarray: + """Affine transform bbox points to input image. + + Args: + points (np.ndarray): Points to be transformed. + shape: (N, 2) + matrix (np.ndarray): Affine transform matrix. 
+ shape: (3, 3) + + Returns: + np.ndarray: Transformed points. + """ + num_points = points.shape[0] + hom_points_2d = np.concatenate((points, np.ones((num_points, 1))), + axis=1) + hom_points_2d = hom_points_2d.T + affined_points = np.matmul(matrix, hom_points_2d).T + return affined_points[:, :2] + + def _get_transform_matrix(self, center: Tuple, scale: Tuple, + output_scale: Tuple[float]) -> np.ndarray: + """Get affine transform matrix. + + Args: + center (tuple): Center of current image. + scale (tuple): Scale of current image. + output_scale (tuple[float]): The transform target image scales. + + Returns: + np.ndarray: Affine transform matrix. + """ + # TODO: further add rot and shift here. + src_w = scale[0] + dst_w = output_scale[0] + dst_h = output_scale[1] + + src_dir = np.array([0, src_w * -0.5]) + dst_dir = np.array([0, dst_w * -0.5]) + + src = np.zeros((3, 2), dtype=np.float32) + dst = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + src[1, :] = center + src_dir + dst[0, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir + + src[2, :] = self._get_ref_point(src[0, :], src[1, :]) + dst[2, :] = self._get_ref_point(dst[0, :], dst[1, :]) + + get_matrix = cv2.getAffineTransform(src, dst) + + matrix = np.concatenate((get_matrix, [[0., 0., 1.]])) + + return matrix.astype(np.float32) + + def _get_ref_point(self, ref_point1: np.ndarray, + ref_point2: np.ndarray) -> np.ndarray: + """Get reference point to calculate affine transform matrix. + + While using opencv to calculate the affine matrix, we need at least + three corresponding points separately on original image and target + image. Here we use two points to get the the third reference point. + """ + d = ref_point1 - ref_point2 + ref_point3 = ref_point2 + np.array([-d[1], d[0]]) + return ref_point3 + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'down_ratio={self.down_ratio}) ' + return repr_str + + +@TRANSFORMS.register_module() +class RandomShiftScale(BaseTransform): + """Random shift scale. + + Different from the normal shift and scale function, it doesn't + directly shift or scale image. It can record the shift and scale + infos into loading TRANSFORMS. It's designed to be used with + AffineResize together. + + Args: + shift_scale (tuple[float]): Shift and scale range. + aug_prob (float): The shifting and scaling probability. + """ + + def __init__(self, shift_scale: Tuple[float], aug_prob: float) -> None: + + self.shift_scale = shift_scale + self.aug_prob = aug_prob + + def transform(self, results: dict) -> dict: + """Call function to record random shift and scale infos. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Results after random shift and scale, 'center', 'size' + and 'affine_aug' keys are added in the result dict. 
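+ Examples:
+ An illustrative sketch; the image size and the shift/scale range
+ follow common SMOKE-style settings and are assumptions:
+
+ >>> import numpy as np
+ >>> results = dict(img=np.zeros((370, 1224, 3), dtype=np.uint8))
+ >>> results = RandomShiftScale(
+ ...     shift_scale=(0.2, 0.4), aug_prob=0.3).transform(results)
+ >>> # 'center', 'size' and 'affine_aug' are now recorded for AffineResize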
+ """ + img = results['img'] + + height, width = img.shape[:2] + + center = np.array([width / 2, height / 2], dtype=np.float32) + size = np.array([width, height], dtype=np.float32) + + if random.random() < self.aug_prob: + shift, scale = self.shift_scale[0], self.shift_scale[1] + shift_ranges = np.arange(-shift, shift + 0.1, 0.1) + center[0] += size[0] * random.choice(shift_ranges) + center[1] += size[1] * random.choice(shift_ranges) + scale_ranges = np.arange(1 - scale, 1 + scale + 0.1, 0.1) + size *= random.choice(scale_ranges) + results['affine_aug'] = True + else: + results['affine_aug'] = False + + results['center'] = center + results['size'] = size + + return results + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(shift_scale={self.shift_scale}, ' + repr_str += f'aug_prob={self.aug_prob}) ' + return repr_str + + +@TRANSFORMS.register_module() +class Resize3D(Resize): + + def _resize_3d(self, results: dict) -> None: + """Resize centers_2d and modify camera intrinisc with + ``results['scale']``.""" + if 'centers_2d' in results: + results['centers_2d'] *= results['scale_factor'][:2] + results['cam2img'][0] *= np.array(results['scale_factor'][0]) + results['cam2img'][1] *= np.array(results['scale_factor'][1]) + + def transform(self, results: dict) -> dict: + """Transform function to resize images, bounding boxes, semantic + segmentation map and keypoints. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map', + 'gt_keypoints', 'scale', 'scale_factor', 'img_shape', + and 'keep_ratio' keys are updated in result dict. + """ + + super(Resize3D, self).transform(results) + self._resize_3d(results) + return results + + +@TRANSFORMS.register_module() +class RandomResize3D(RandomResize): + """The difference between RandomResize3D and RandomResize: + + 1. Compared to RandomResize, this class would further + check if scale is already set in results. + 2. During resizing, this class would modify the centers_2d + and cam2img with ``results['scale']``. + """ + + def _resize_3d(self, results: dict) -> None: + """Resize centers_2d and modify camera intrinisc with + ``results['scale']``.""" + if 'centers_2d' in results: + results['centers_2d'] *= results['scale_factor'][:2] + results['cam2img'][0] *= np.array(results['scale_factor'][0]) + results['cam2img'][1] *= np.array(results['scale_factor'][1]) + + def transform(self, results: dict) -> dict: + """Transform function to resize images, bounding boxes, masks, semantic + segmentation map. Compared to RandomResize, this function would further + check if scale is already set in results. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', + 'keep_ratio' keys are added into result dict. + """ + if 'scale' not in results: + results['scale'] = self._random_scale() + self.resize.scale = results['scale'] + results = self.resize(results) + self._resize_3d(results) + + return results + + +@TRANSFORMS.register_module() +class RandomCrop3D(RandomCrop): + """3D version of RandomCrop. RamdomCrop3D supports the modifications of + camera intrinsic matrix and using predefined randomness variable to do the + augmentation. + + The absolute ``crop_size`` is sampled based on ``crop_type`` and + ``image_size``, then the cropped results are generated. 
+ + Required Keys: + + - img + - gt_bboxes (np.float32) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_masks (BitmapMasks | PolygonMasks) (optional) + - gt_ignore_flags (bool) (optional) + - gt_seg_map (np.uint8) (optional) + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_masks (optional) + - gt_ignore_flags (optional) + - gt_seg_map (optional) + + Added Keys: + + - homography_matrix + + Args: + crop_size (tuple): The relative ratio or absolute pixels of + height and width. + crop_type (str): One of "relative_range", "relative", + "absolute", "absolute_range". "relative" randomly crops + (h * crop_size[0], w * crop_size[1]) part from an input of size + (h, w). "relative_range" uniformly samples relative crop size from + range [crop_size[0], 1] and [crop_size[1], 1] for height and width + respectively. "absolute" crops from an input with absolute size + (crop_size[0], crop_size[1]). "absolute_range" uniformly samples + crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w + in range [crop_size[0], min(w, crop_size[1])]. + Defaults to "absolute". + allow_negative_crop (bool): Whether to allow a crop that does + not contain any bbox area. Defaults to False. + recompute_bbox (bool): Whether to re-compute the boxes based + on cropped instance masks. Defaults to False. + bbox_clip_border (bool): Whether clip the objects outside + the border of the image. Defaults to True. + rel_offset_h (tuple): The cropping interval of image height. Defaults + to (0., 1.). + rel_offset_w (tuple): The cropping interval of image width. Defaults + to (0., 1.). + + Note: + - If the image is smaller than the absolute crop size, return the + original image. + - The keys for bboxes, labels and masks must be aligned. That is, + ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and + ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and + ``gt_masks_ignore``. + - If the crop does not contain any gt-bbox region and + ``allow_negative_crop`` is set to False, skip this image. + """ + + def __init__( + self, + crop_size: tuple, + crop_type: str = 'absolute', + allow_negative_crop: bool = False, + recompute_bbox: bool = False, + bbox_clip_border: bool = True, + rel_offset_h: tuple = (0., 1.), + rel_offset_w: tuple = (0., 1.) + ) -> None: + super().__init__( + crop_size=crop_size, + crop_type=crop_type, + allow_negative_crop=allow_negative_crop, + recompute_bbox=recompute_bbox, + bbox_clip_border=bbox_clip_border) + # rel_offset specifies the relative offset range of cropping origin + # [0., 1.] means starting from 0*margin to 1*margin + 1 + self.rel_offset_h = rel_offset_h + self.rel_offset_w = rel_offset_w + + def _crop_data(self, + results: dict, + crop_size: tuple, + allow_negative_crop: bool = False) -> dict: + """Function to randomly crop images, bounding boxes, masks, semantic + segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + crop_size (tuple): Expected absolute size after cropping, (h, w). + allow_negative_crop (bool): Whether to allow a crop that does not + contain any bbox area. Defaults to False. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. 
+ """ + assert crop_size[0] > 0 and crop_size[1] > 0 + for key in results.get('img_fields', ['img']): + img = results[key] + if 'img_crop_offset' not in results: + margin_h = max(img.shape[0] - crop_size[0], 0) + margin_w = max(img.shape[1] - crop_size[1], 0) + # TOCHECK: a little different from LIGA implementation + offset_h = np.random.randint( + self.rel_offset_h[0] * margin_h, + self.rel_offset_h[1] * margin_h + 1) + offset_w = np.random.randint( + self.rel_offset_w[0] * margin_w, + self.rel_offset_w[1] * margin_w + 1) + else: + offset_w, offset_h = results['img_crop_offset'] + + crop_h = min(crop_size[0], img.shape[0]) + crop_w = min(crop_size[1], img.shape[1]) + crop_y1, crop_y2 = offset_h, offset_h + crop_h + crop_x1, crop_x2 = offset_w, offset_w + crop_w + + # crop the image + img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] + img_shape = img.shape + results[key] = img + results['img_shape'] = img_shape + + # crop bboxes accordingly and clip to the image boundary + for key in results.get('bbox_fields', []): + # e.g. gt_bboxes and gt_bboxes_ignore + bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h], + dtype=np.float32) + bboxes = results[key] - bbox_offset + if self.bbox_clip_border: + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) + valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & ( + bboxes[:, 3] > bboxes[:, 1]) + # If the crop does not contain any gt-bbox area and + # allow_negative_crop is False, skip this image. + if (key == 'gt_bboxes' and not valid_inds.any() + and not allow_negative_crop): + return None + results[key] = bboxes[valid_inds, :] + # label fields. e.g. gt_labels and gt_labels_ignore + label_key = self.bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + + # mask fields, e.g. gt_masks and gt_masks_ignore + mask_key = self.bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][ + valid_inds.nonzero()[0]].crop( + np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) + if self.recompute_bbox: + results[key] = results[mask_key].get_bboxes() + + # crop semantic seg + for key in results.get('seg_fields', []): + results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2] + + # manipulate camera intrinsic matrix + # needs to apply offset to K instead of P2 (on KITTI) + if isinstance(results['cam2img'], list): + # TODO ignore this, but should handle it in the future + pass + else: + K = results['cam2img'][:3, :3].copy() + inv_K = np.linalg.inv(K) + T = np.matmul(inv_K, results['cam2img'][:3]) + K[0, 2] -= crop_x1 + K[1, 2] -= crop_y1 + offset_cam2img = np.matmul(K, T) + results['cam2img'][:offset_cam2img.shape[0], :offset_cam2img. + shape[1]] = offset_cam2img + + results['img_crop_offset'] = [offset_w, offset_h] + + return results + + def transform(self, results: dict) -> dict: + """Transform function to randomly crop images, bounding boxes, masks, + semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. 
+ """ + image_size = results['img'].shape[:2] + if 'crop_size' not in results: + crop_size = self._get_crop_size(image_size) + results['crop_size'] = crop_size + else: + crop_size = results['crop_size'] + results = self._crop_data(results, crop_size, self.allow_negative_crop) + return results + + def __repr__(self) -> dict: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(crop_size={self.crop_size}, ' + repr_str += f'crop_type={self.crop_type}, ' + repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border}), ' + repr_str += f'rel_offset_h={self.rel_offset_h}), ' + repr_str += f'rel_offset_w={self.rel_offset_w})' + return repr_str + + +@TRANSFORMS.register_module() +class PhotoMetricDistortion3D(PhotoMetricDistortion): + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + PhotoMetricDistortion3D further support using predefined randomness + variable to do the augmentation. + + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + 8. randomly swap channels + + Required Keys: + + - img (np.uint8) + + Modified Keys: + + - img (np.float32) + + Args: + brightness_delta (int): delta of brightness. + contrast_range (sequence): range of contrast. + saturation_range (sequence): range of saturation. + hue_delta (int): delta of hue. + """ + + def transform(self, results: dict) -> dict: + """Transform function to perform photometric distortion on images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images distorted. + """ + assert 'img' in results, '`img` is not found in results' + img = results['img'] + img = img.astype(np.float32) + if 'photometric_param' not in results: + photometric_param = self._random_flags() + results['photometric_param'] = photometric_param + else: + photometric_param = results['photometric_param'] + + (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, + swap_flag, delta_value, alpha_value, saturation_value, hue_value, + swap_value) = photometric_param + + # random brightness + if brightness_flag: + img += delta_value + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + if mode == 1: + if contrast_flag: + img *= alpha_value + + # convert color from BGR to HSV + img = mmcv.bgr2hsv(img) + + # random saturation + if saturation_flag: + img[..., 1] *= saturation_value + + # random hue + if hue_flag: + img[..., 0] += hue_value + img[..., 0][img[..., 0] > 360] -= 360 + img[..., 0][img[..., 0] < 0] += 360 + + # convert color from HSV to BGR + img = mmcv.hsv2bgr(img) + + # random contrast + if mode == 0: + if contrast_flag: + img *= alpha_value + + # randomly swap channels + if swap_flag: + img = img[..., swap_value] + + results['img'] = img + return results + + +@TRANSFORMS.register_module() +class MultiViewWrapper(BaseTransform): + """Wrap transformation from single-view into multi-view. + + The wrapper processes the images from multi-view one by one. For each + image, it constructs a pseudo dict according to the keys specified by the + 'process_fields' parameter. 
After the transformation is finished, desired + information can be collected by specifying the keys in the 'collected_keys' + parameter. Multi-view images share the same transformation parameters + but do not share the same magnitude when a random transformation is + conducted. + + Args: + transforms (list[dict]): A list of dict specifying the transformations + for the monocular situation. + override_aug_config (bool): flag of whether to use the same aug config + for multiview image. Defaults to True. + process_fields (list): Desired keys that the transformations should + be conducted on. Defaults to ['img', 'cam2img', 'lidar2cam']. + collected_keys (list): Collect information in transformation + like rotate angles, crop roi, and flip state. Defaults to + ['scale', 'scale_factor', 'crop', + 'crop_offset', 'ori_shape', + 'pad_shape', 'img_shape', + 'pad_fixed_size', 'pad_size_divisor', + 'flip', 'flip_direction', 'rotate']. + randomness_keys (list): The keys that related to the randomness + in transformation. Defaults to + ['scale', 'scale_factor', 'crop_size', 'flip', + 'flip_direction', 'photometric_param'] + """ + + def __init__( + self, + transforms: dict, + override_aug_config: bool = True, + process_fields: list = ['img', 'cam2img', 'lidar2cam'], + collected_keys: list = [ + 'scale', 'scale_factor', 'crop', 'img_crop_offset', 'ori_shape', + 'pad_shape', 'img_shape', 'pad_fixed_size', 'pad_size_divisor', + 'flip', 'flip_direction', 'rotate' + ], + randomness_keys: list = [ + 'scale', 'scale_factor', 'crop_size', 'img_crop_offset', 'flip', + 'flip_direction', 'photometric_param' + ] + ) -> None: + self.transforms = Compose(transforms) + self.override_aug_config = override_aug_config + self.collected_keys = collected_keys + self.process_fields = process_fields + self.randomness_keys = randomness_keys + + def transform(self, input_dict: dict) -> dict: + """Transform function to do the transform for multiview image. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: output dict after transformtaion + """ + # store the augmentation related keys for each image. + for key in self.collected_keys: + if key not in input_dict or \ + not isinstance(input_dict[key], list): + input_dict[key] = [] + prev_process_dict = {} + for img_id in range(len(input_dict['img'])): + process_dict = {} + + # override the process dict (e.g. scale in random scale, + # crop_size in random crop, flip, flip_direction in + # random flip) + if img_id != 0 and self.override_aug_config: + for key in self.randomness_keys: + if key in prev_process_dict: + process_dict[key] = prev_process_dict[key] + + for key in self.process_fields: + if key in input_dict: + process_dict[key] = input_dict[key][img_id] + process_dict = self.transforms(process_dict) + # store the randomness variable in transformation. + prev_process_dict = process_dict + + # store the related results to results_dict + for key in self.process_fields: + if key in process_dict: + input_dict[key][img_id] = process_dict[key] + # update the keys + for key in self.collected_keys: + if key in process_dict: + if len(input_dict[key]) == img_id + 1: + input_dict[key][img_id] = process_dict[key] + else: + input_dict[key].append(process_dict[key]) + + for key in self.collected_keys: + if len(input_dict[key]) == 0: + input_dict.pop(key) + return input_dict + + +@TRANSFORMS.register_module() +class PolarMix(BaseTransform): + """PolarMix data augmentation. + + The polarmix transform steps are as follows: + + 1. 
Another random point cloud is picked by dataset. + 2. Exchange sectors of two point clouds that are cut with certain + azimuth angles. + 3. Cut point instances from picked point cloud, rotate them by multiple + azimuth angles, and paste the cut and rotated instances. + + Required Keys: + + - points (:obj:`BasePoints`) + - pts_semantic_mask (np.int64) + - dataset (:obj:`BaseDataset`) + + Modified Keys: + + - points (:obj:`BasePoints`) + - pts_semantic_mask (np.int64) + + Args: + instance_classes (List[int]): Semantic masks which represent the + instance. + swap_ratio (float): Swap ratio of two point cloud. Defaults to 0.5. + rotate_paste_ratio (float): Rotate paste ratio. Defaults to 1.0. + pre_transform (Sequence[dict], optional): Sequence of transform object + or config dict to be composed. Defaults to None. + prob (float): The transformation probability. Defaults to 1.0. + """ + + def __init__(self, + instance_classes: List[int], + swap_ratio: float = 0.5, + rotate_paste_ratio: float = 1.0, + pre_transform: Optional[Sequence[dict]] = None, + prob: float = 1.0) -> None: + assert is_list_of(instance_classes, int), \ + 'instance_classes should be a list of int' + self.instance_classes = instance_classes + self.swap_ratio = swap_ratio + self.rotate_paste_ratio = rotate_paste_ratio + + self.prob = prob + if pre_transform is None: + self.pre_transform = None + else: + self.pre_transform = Compose(pre_transform) + + def polar_mix_transform(self, input_dict: dict, mix_results: dict) -> dict: + """PolarMix transform function. + + Args: + input_dict (dict): Result dict from loading pipeline. + mix_results (dict): Mixed dict picked from dataset. + + Returns: + dict: output dict after transformation. + """ + mix_points = mix_results['points'] + mix_pts_semantic_mask = mix_results['pts_semantic_mask'] + + points = input_dict['points'] + pts_semantic_mask = input_dict['pts_semantic_mask'] + + # 1. swap point cloud + if np.random.random() < self.swap_ratio: + start_angle = (np.random.random() - 1) * np.pi # -pi~0 + end_angle = start_angle + np.pi + # calculate horizontal angle for each point + yaw = -torch.atan2(points.coord[:, 1], points.coord[:, 0]) + mix_yaw = -torch.atan2(mix_points.coord[:, 1], mix_points.coord[:, + 0]) + + # select points in sector + idx = (yaw <= start_angle) | (yaw >= end_angle) + mix_idx = (mix_yaw > start_angle) & (mix_yaw < end_angle) + + # swap + points = points.cat([points[idx], mix_points[mix_idx]]) + pts_semantic_mask = np.concatenate( + (pts_semantic_mask[idx.numpy()], + mix_pts_semantic_mask[mix_idx.numpy()]), + axis=0) + + # 2. 
rotate-pasting + if np.random.random() < self.rotate_paste_ratio: + # extract instance points + instance_points, instance_pts_semantic_mask = [], [] + for instance_class in self.instance_classes: + mix_idx = mix_pts_semantic_mask == instance_class + instance_points.append(mix_points[mix_idx]) + instance_pts_semantic_mask.append( + mix_pts_semantic_mask[mix_idx]) + instance_points = mix_points.cat(instance_points) + instance_pts_semantic_mask = np.concatenate( + instance_pts_semantic_mask, axis=0) + + # rotate-copy + copy_points = [instance_points] + copy_pts_semantic_mask = [instance_pts_semantic_mask] + angle_list = [ + np.random.random() * np.pi * 2 / 3, + (np.random.random() + 1) * np.pi * 2 / 3 + ] + for angle in angle_list: + new_points = instance_points.clone() + new_points.rotate(angle) + copy_points.append(new_points) + copy_pts_semantic_mask.append(instance_pts_semantic_mask) + copy_points = instance_points.cat(copy_points) + copy_pts_semantic_mask = np.concatenate( + copy_pts_semantic_mask, axis=0) + + points = points.cat([points, copy_points]) + pts_semantic_mask = np.concatenate( + (pts_semantic_mask, copy_pts_semantic_mask), axis=0) + + input_dict['points'] = points + input_dict['pts_semantic_mask'] = pts_semantic_mask + return input_dict + + def transform(self, input_dict: dict) -> dict: + """PolarMix transform function. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: output dict after transformation. + """ + if np.random.rand() > self.prob: + return input_dict + + assert 'dataset' in input_dict, \ + '`dataset` is needed to pass through PolarMix, while not found.' + dataset = input_dict['dataset'] + + # get index of other point cloud + index = np.random.randint(0, len(dataset)) + + mix_results = dataset.get_data_info(index) + + if self.pre_transform is not None: + # pre_transform may also require dataset + mix_results.update({'dataset': dataset}) + # before polarmix need to go through + # the necessary pre_transform + mix_results = self.pre_transform(mix_results) + mix_results.pop('dataset') + + input_dict = self.polar_mix_transform(input_dict, mix_results) + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(instance_classes={self.instance_classes}, ' + repr_str += f'swap_ratio={self.swap_ratio}, ' + repr_str += f'rotate_paste_ratio={self.rotate_paste_ratio}, ' + repr_str += f'pre_transform={self.pre_transform}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class LaserMix(BaseTransform): + """LaserMix data augmentation. + + The lasermix transform steps are as follows: + + 1. Another random point cloud is picked by dataset. + 2. Divide the point cloud into several regions according to pitch + angles and combine the areas crossly. + + Required Keys: + + - points (:obj:`BasePoints`) + - pts_semantic_mask (np.int64) + - dataset (:obj:`BaseDataset`) + + Modified Keys: + + - points (:obj:`BasePoints`) + - pts_semantic_mask (np.int64) + + Args: + num_areas (List[int]): A list of area numbers will be divided into. + pitch_angles (Sequence[float]): Pitch angles used to divide areas. + pre_transform (Sequence[dict], optional): Sequence of transform object + or config dict to be composed. Defaults to None. + prob (float): The transformation probability. Defaults to 1.0. 
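+ Examples:
+ LaserMix needs access to the whole dataset (the 'dataset' key), so
+ it is normally configured inside the training pipeline rather than
+ called directly; the sketch below mirrors SemanticKITTI-style
+ settings and the exact values are assumptions:
+
+ >>> pre_transform = [
+ ...     dict(type='LoadPointsFromFile', coord_type='LIDAR',
+ ...          load_dim=4, use_dim=4),
+ ...     dict(type='LoadAnnotations3D', with_bbox_3d=False,
+ ...          with_label_3d=False, with_seg_3d=True,
+ ...          seg_3d_dtype='np.int32'),
+ ...     dict(type='PointSegClassMapping')
+ ... ]
+ >>> lasermix = dict(
+ ...     type='LaserMix',
+ ...     num_areas=[3, 4, 5, 6],
+ ...     pitch_angles=[-25, 3],
+ ...     pre_transform=pre_transform,
+ ...     prob=1.0)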
+ """ + + def __init__(self, + num_areas: List[int], + pitch_angles: Sequence[float], + pre_transform: Optional[Sequence[dict]] = None, + prob: float = 1.0) -> None: + assert is_list_of(num_areas, int), \ + 'num_areas should be a list of int.' + self.num_areas = num_areas + + assert len(pitch_angles) == 2, \ + 'The length of pitch_angles should be 2, ' \ + f'but got {len(pitch_angles)}.' + assert pitch_angles[1] > pitch_angles[0], \ + 'pitch_angles[1] should be larger than pitch_angles[0].' + self.pitch_angles = pitch_angles + + self.prob = prob + if pre_transform is None: + self.pre_transform = None + else: + self.pre_transform = Compose(pre_transform) + + def laser_mix_transform(self, input_dict: dict, mix_results: dict) -> dict: + """LaserMix transform function. + + Args: + input_dict (dict): Result dict from loading pipeline. + mix_results (dict): Mixed dict picked from dataset. + + Returns: + dict: output dict after transformation. + """ + mix_points = mix_results['points'] + mix_pts_semantic_mask = mix_results['pts_semantic_mask'] + + points = input_dict['points'] + pts_semantic_mask = input_dict['pts_semantic_mask'] + + rho = torch.sqrt(points.coord[:, 0]**2 + points.coord[:, 1]**2) + pitch = torch.atan2(points.coord[:, 2], rho) + pitch = torch.clamp(pitch, self.pitch_angles[0] + 1e-5, + self.pitch_angles[1] - 1e-5) + + mix_rho = torch.sqrt(mix_points.coord[:, 0]**2 + + mix_points.coord[:, 1]**2) + mix_pitch = torch.atan2(mix_points.coord[:, 2], mix_rho) + mix_pitch = torch.clamp(mix_pitch, self.pitch_angles[0] + 1e-5, + self.pitch_angles[1] - 1e-5) + + num_areas = np.random.choice(self.num_areas, size=1)[0] + angle_list = np.linspace(self.pitch_angles[1], self.pitch_angles[0], + num_areas + 1) + out_points = [] + out_pts_semantic_mask = [] + for i in range(num_areas): + # convert angle to radian + start_angle = angle_list[i + 1] / 180 * np.pi + end_angle = angle_list[i] / 180 * np.pi + if i % 2 == 0: # pick from original point cloud + idx = (pitch > start_angle) & (pitch <= end_angle) + out_points.append(points[idx]) + out_pts_semantic_mask.append(pts_semantic_mask[idx.numpy()]) + else: # pickle from mixed point cloud + idx = (mix_pitch > start_angle) & (mix_pitch <= end_angle) + out_points.append(mix_points[idx]) + out_pts_semantic_mask.append( + mix_pts_semantic_mask[idx.numpy()]) + out_points = points.cat(out_points) + out_pts_semantic_mask = np.concatenate(out_pts_semantic_mask, axis=0) + input_dict['points'] = out_points + input_dict['pts_semantic_mask'] = out_pts_semantic_mask + return input_dict + + def transform(self, input_dict: dict) -> dict: + """LaserMix transform function. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: output dict after transformation. + """ + if np.random.rand() > self.prob: + return input_dict + + assert 'dataset' in input_dict, \ + '`dataset` is needed to pass through LaserMix, while not found.' 
+ dataset = input_dict['dataset'] + + # get index of other point cloud + index = np.random.randint(0, len(dataset)) + + mix_results = dataset.get_data_info(index) + + if self.pre_transform is not None: + # pre_transform may also require dataset + mix_results.update({'dataset': dataset}) + # before lasermix need to go through + # the necessary pre_transform + mix_results = self.pre_transform(mix_results) + mix_results.pop('dataset') + + input_dict = self.laser_mix_transform(input_dict, mix_results) + + return input_dict + + def __repr__(self) -> str: + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(num_areas={self.num_areas}, ' + repr_str += f'pitch_angles={self.pitch_angles}, ' + repr_str += f'pre_transform={self.pre_transform}, ' + repr_str += f'prob={self.prob})' + return repr_str diff --git a/mmdet3d/datasets/utils.py b/mmdet3d/datasets/utils.py new file mode 100755 index 0000000..b4ea9b3 --- /dev/null +++ b/mmdet3d/datasets/utils.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmcv.transforms import LoadImageFromFile +from pyquaternion import Quaternion + +# yapf: disable +from mmdet3d.datasets.transforms import (LoadAnnotations3D, + LoadImageFromFileMono3D, + LoadMultiViewImageFromFiles, + LoadPointsFromFile, + LoadPointsFromMultiSweeps, + MultiScaleFlipAug3D, Pack3DDetInputs, + PointSegClassMapping) +# yapf: enable +from mmdet3d.registry import TRANSFORMS + + +def is_loading_function(transform): + """Judge whether a transform function is a loading function. + + Note: `MultiScaleFlipAug3D` is a wrapper for multiple pipeline functions, + so we need to search if its inner transforms contain any loading function. + + Args: + transform (dict | :obj:`Pipeline`): A transform config or a function. + + Returns: + bool: Whether it is a loading function. None means can't judge. + When transform is `MultiScaleFlipAug3D`, we return None. + """ + # TODO: use more elegant way to distinguish loading modules + loading_functions = (LoadImageFromFile, LoadPointsFromFile, + LoadAnnotations3D, LoadMultiViewImageFromFiles, + LoadPointsFromMultiSweeps, Pack3DDetInputs, + LoadImageFromFileMono3D, PointSegClassMapping) + if isinstance(transform, dict): + obj_cls = TRANSFORMS.get(transform['type']) + if obj_cls is None: + return False + if obj_cls in loading_functions: + return True + if obj_cls in (MultiScaleFlipAug3D, ): + return None + elif callable(transform): + if isinstance(transform, loading_functions): + return True + if isinstance(transform, (MultiScaleFlipAug3D)): + return None + return False + + +def get_loading_pipeline(pipeline): + """Only keep loading image, points and annotations related configuration. + + Args: + pipeline (list[dict] | list[:obj:`Pipeline`]): + Data pipeline configs or list of pipeline functions. + + Returns: + list[dict] | list[:obj:`Pipeline`]): The new pipeline list with only + keep loading image, points and annotations related configuration. + + Examples: + >>> transforms = [ + ... dict(type='LoadPointsFromFile', + ... coord_type='LIDAR', load_dim=4, use_dim=4), + ... dict(type='LoadImageFromFile'), + ... dict(type='LoadAnnotations3D', + ... with_bbox=True, with_label_3d=True), + ... dict(type='Resize', + ... img_scale=[(640, 192), (2560, 768)], keep_ratio=True), + ... dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + ... dict(type='PointsRangeFilter', + ... point_cloud_range=point_cloud_range), + ... dict(type='ObjectRangeFilter', + ... 
point_cloud_range=point_cloud_range), + ... dict(type='PointShuffle'), + ... dict(type='Normalize', **img_norm_cfg), + ... dict(type='Pad', size_divisor=32), + ... dict(type='DefaultFormatBundle3D', class_names=class_names), + ... dict(type='Collect3D', + ... keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']) + ... ] + >>> expected_pipelines = [ + ... dict(type='LoadPointsFromFile', + ... coord_type='LIDAR', load_dim=4, use_dim=4), + ... dict(type='LoadImageFromFile'), + ... dict(type='LoadAnnotations3D', + ... with_bbox=True, with_label_3d=True), + ... dict(type='DefaultFormatBundle3D', class_names=class_names), + ... dict(type='Collect3D', + ... keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']) + ... ] + >>> assert expected_pipelines == \ + ... get_loading_pipeline(transforms) + """ + loading_pipeline = [] + for transform in pipeline: + is_loading = is_loading_function(transform) + if is_loading is None: # MultiScaleFlipAug3D + # extract its inner pipeline + if isinstance(transform, dict): + inner_pipeline = transform.get('transforms', []) + else: + inner_pipeline = transform.transforms.transforms + loading_pipeline.extend(get_loading_pipeline(inner_pipeline)) + elif is_loading: + loading_pipeline.append(transform) + assert len(loading_pipeline) > 0, \ + 'The data pipeline in your config file must include ' \ + 'loading step.' + return loading_pipeline + + +def convert_quaternion_to_matrix(quaternion: list, + translation: list = None) -> list: + """Compute a transform matrix by given quaternion and translation + vector.""" + result = np.eye(4) + result[:3, :3] = Quaternion(quaternion).rotation_matrix + if translation is not None: + result[:3, 3] = np.array(translation) + return result.astype(np.float32).tolist() diff --git a/mmdet3d/datasets/waymo_dataset.py b/mmdet3d/datasets/waymo_dataset.py new file mode 100755 index 0000000..2887e9c --- /dev/null +++ b/mmdet3d/datasets/waymo_dataset.py @@ -0,0 +1,239 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Callable, List, Union + +import numpy as np + +from mmdet3d.registry import DATASETS +from mmdet3d.structures import CameraInstance3DBoxes +from .det3d_dataset import Det3DDataset +from .kitti_dataset import KittiDataset + + +@DATASETS.register_module() +class WaymoDataset(KittiDataset): + """Waymo Dataset. + + This class serves as the API for experiments on the Waymo Dataset. + + Please refer to ``_for data downloading. + It is recommended to symlink the dataset root to $MMDETECTION3D/data and + organize them as the doc shows. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + data_prefix (dict): data prefix for point cloud and + camera data dict. Defaults to dict( + pts='velodyne', + CAM_FRONT='image_0', + CAM_FRONT_LEFT='image_1', + CAM_FRONT_RIGHT='image_2', + CAM_SIDE_LEFT='image_3', + CAM_SIDE_RIGHT='image_4') + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + modality (dict): Modality to specify the sensor data used + as input. Defaults to dict(use_lidar=True). + default_cam_key (str): Default camera key for lidar2img + association. Defaults to 'CAM_FRONT'. + box_type_3d (str): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR' in this dataset. Available options includes: + + - 'LiDAR': Box in LiDAR coordinates. 
+ - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + load_type (str): Type of loading mode. Defaults to 'frame_based'. + + - 'frame_based': Load all of the instances in the frame. + - 'mv_image_based': Load all of the instances in the frame and need + to convert to the FOV-based data type to support image-based + detector. + - 'fov_image_based': Only load the instances inside the default + cam, and need to convert to the FOV-based data type to support + image-based detector. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + pcd_limit_range (List[float]): The range of point cloud + used to filter invalid predicted boxes. + Defaults to [-85, -85, -5, 85, 85, 5]. + cam_sync_instances (bool): If use the camera sync label + supported from waymo version 1.3.1. Defaults to False. + load_interval (int): load frame interval. Defaults to 1. + max_sweeps (int): max sweep for each frame. Defaults to 0. + """ + METAINFO = {'classes': ('Car', 'Pedestrian', 'Cyclist')} + + def __init__(self, + data_root: str, + ann_file: str, + data_prefix: dict = dict( + pts='velodyne', + CAM_FRONT='image_0', + CAM_FRONT_LEFT='image_1', + CAM_FRONT_RIGHT='image_2', + CAM_SIDE_LEFT='image_3', + CAM_SIDE_RIGHT='image_4'), + pipeline: List[Union[dict, Callable]] = [], + modality: dict = dict(use_lidar=True), + default_cam_key: str = 'CAM_FRONT', + box_type_3d: str = 'LiDAR', + load_type: str = 'frame_based', + filter_empty_gt: bool = True, + test_mode: bool = False, + pcd_limit_range: List[float] = [0, -40, -3, 70.4, 40, 0.0], + cam_sync_instances: bool = False, + load_interval: int = 1, + max_sweeps: int = 0, + **kwargs) -> None: + self.load_interval = load_interval + # set loading mode for different task settings + self.cam_sync_instances = cam_sync_instances + # construct self.cat_ids for vision-only anns parsing + self.cat_ids = range(len(self.METAINFO['classes'])) + self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} + self.max_sweeps = max_sweeps + # we do not provide backend_args to custom_3d init + # because we want disk loading for info + # while ceph loading for Prediction2Waymo + super().__init__( + data_root=data_root, + ann_file=ann_file, + pipeline=pipeline, + modality=modality, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + pcd_limit_range=pcd_limit_range, + default_cam_key=default_cam_key, + data_prefix=data_prefix, + test_mode=test_mode, + load_type=load_type, + **kwargs) + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Data information of single data sample. + + Returns: + dict: Annotation information consists of the following keys: + + - bboxes_3d (:obj:`LiDARInstance3DBoxes`): + 3D ground truth bboxes. + - bbox_labels_3d (np.ndarray): Labels of ground truths. + - gt_bboxes (np.ndarray): 2D ground truth bboxes. + - gt_labels (np.ndarray): Labels of ground truths. + - difficulty (int): Difficulty defined by KITTI. + 0, 1, 2 represent xxxxx respectively. 
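A minimal instantiation sketch for the dataset class above. The paths are hypothetical and the pipeline is left empty; only the argument names follow WaymoDataset.__init__ as defined in this file.

# Hypothetical paths; argument names follow WaymoDataset.__init__ above.
train_dataset = dict(
    type='WaymoDataset',
    data_root='data/waymo/kitti_format/',      # placeholder
    ann_file='waymo_infos_train.pkl',          # placeholder
    data_prefix=dict(pts='training/velodyne', CAM_FRONT='training/image_0'),
    pipeline=[],                               # fill with loading/augmentation steps
    modality=dict(use_lidar=True, use_camera=False),
    load_type='frame_based',
    cam_sync_instances=False,
    load_interval=1,
    test_mode=False)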
+ """ + ann_info = Det3DDataset.parse_ann_info(self, info) + if ann_info is None: + # empty instance + ann_info = {} + ann_info['gt_bboxes_3d'] = np.zeros((0, 7), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros(0, dtype=np.int64) + + ann_info = self._remove_dontcare(ann_info) + # in kitti, lidar2cam = R0_rect @ Tr_velo_to_cam + # convert gt_bboxes_3d to velodyne coordinates with `lidar2cam` + if 'gt_bboxes' in ann_info: + gt_bboxes = ann_info['gt_bboxes'] + gt_bboxes_labels = ann_info['gt_bboxes_labels'] + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_bboxes_labels = np.zeros(0, dtype=np.int64) + if 'centers_2d' in ann_info: + centers_2d = ann_info['centers_2d'] + depths = ann_info['depths'] + else: + centers_2d = np.zeros((0, 2), dtype=np.float32) + depths = np.zeros((0), dtype=np.float32) + + # in waymo, lidar2cam = R0_rect @ Tr_velo_to_cam + # convert gt_bboxes_3d to velodyne coordinates with `lidar2cam` + lidar2cam = np.array(info['images'][self.default_cam_key]['lidar2cam']) + gt_bboxes_3d = CameraInstance3DBoxes( + ann_info['gt_bboxes_3d']).convert_to(self.box_mode_3d, + np.linalg.inv(lidar2cam)) + ann_info['gt_bboxes_3d'] = gt_bboxes_3d + + anns_results = dict( + gt_bboxes_3d=gt_bboxes_3d, + gt_labels_3d=ann_info['gt_labels_3d'], + gt_bboxes=gt_bboxes, + gt_bboxes_labels=gt_bboxes_labels, + centers_2d=centers_2d, + depths=depths) + + return anns_results + + def load_data_list(self) -> List[dict]: + """Add the load interval.""" + data_list = super().load_data_list() + data_list = data_list[::self.load_interval] + return data_list + + def parse_data_info(self, info: dict) -> Union[dict, List[dict]]: + """if task is lidar or multiview det, use super() method elif task is + mono3d, split the info from frame-wise to img-wise.""" + + if self.cam_sync_instances: + info['instances'] = info['cam_sync_instances'] + + if self.load_type == 'frame_based': + return super().parse_data_info(info) + elif self.load_type == 'fov_image_based': + # only loading the fov image and the fov instance + new_image_info = {} + new_image_info[self.default_cam_key] = \ + info['images'][self.default_cam_key] + info['images'] = new_image_info + info['instances'] = info['cam_instances'][self.default_cam_key] + return super().parse_data_info(info) + else: + # in the mono3d, the instances is from cam sync. + data_list = [] + if self.modality['use_lidar']: + info['lidar_points']['lidar_path'] = \ + osp.join( + self.data_prefix.get('pts', ''), + info['lidar_points']['lidar_path']) + + if self.modality['use_camera']: + for cam_key, img_info in info['images'].items(): + if 'img_path' in img_info: + cam_prefix = self.data_prefix.get(cam_key, '') + img_info['img_path'] = osp.join( + cam_prefix, img_info['img_path']) + + for (cam_key, img_info) in info['images'].items(): + camera_info = dict() + camera_info['images'] = dict() + camera_info['images'][cam_key] = img_info + if 'cam_instances' in info \ + and cam_key in info['cam_instances']: + camera_info['instances'] = info['cam_instances'][cam_key] + else: + camera_info['instances'] = [] + camera_info['ego2global'] = info['ego2global'] + if 'image_sweeps' in info: + camera_info['image_sweeps'] = info['image_sweeps'] + + # TODO check if need to modify the sample id + # TODO check when will use it except for evaluation. 
+ camera_info['sample_idx'] = info['sample_idx'] + + if not self.test_mode: + # used in training + camera_info['ann_info'] = self.parse_ann_info(camera_info) + if self.test_mode and self.load_eval_anns: + info['eval_ann_info'] = self.parse_ann_info(info) + data_list.append(camera_info) + return data_list diff --git a/mmdet3d/engine/__init__.py b/mmdet3d/engine/__init__.py new file mode 100755 index 0000000..3490982 --- /dev/null +++ b/mmdet3d/engine/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .hooks import BenchmarkHook, Det3DVisualizationHook + +__all__ = ['Det3DVisualizationHook', 'BenchmarkHook'] diff --git a/mmdet3d/engine/hooks/__init__.py b/mmdet3d/engine/hooks/__init__.py new file mode 100755 index 0000000..578f173 --- /dev/null +++ b/mmdet3d/engine/hooks/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .benchmark_hook import BenchmarkHook +from .disable_object_sample_hook import DisableObjectSampleHook +from .visualization_hook import Det3DVisualizationHook + +__all__ = [ + 'Det3DVisualizationHook', 'BenchmarkHook', 'DisableObjectSampleHook' +] diff --git a/mmdet3d/engine/hooks/benchmark_hook.py b/mmdet3d/engine/hooks/benchmark_hook.py new file mode 100755 index 0000000..65e6133 --- /dev/null +++ b/mmdet3d/engine/hooks/benchmark_hook.py @@ -0,0 +1,38 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmengine.hooks import Hook + +from mmdet3d.registry import HOOKS + + +@HOOKS.register_module() +class BenchmarkHook(Hook): + """A hook that logs the training speed of each epch.""" + + priority = 'NORMAL' + + def after_train_epoch(self, runner) -> None: + """We use the average throughput in iterations of the entire training + run and skip the first 50 iterations of each epoch to skip GPU warmup + time. + + Args: + runner (Runner): The runner of the training process. + """ + message_hub = runner.message_hub + max_iter_num = len(runner.train_dataloader) + speed = message_hub.get_scalar('train/time').mean(max_iter_num - 50) + message_hub.update_scalar('train/speed', speed) + runner.logger.info( + f'Training speed of epoch {runner.epoch + 1} is {speed} s/iter') + + def after_train(self, runner) -> None: + """Log average training speed of entire training process. + + Args: + runner (Runner): The runner of the training process. + """ + message_hub = runner.message_hub + avg_speed = message_hub.get_scalar('train/speed').mean() + runner.logger.info('Average training speed of entire training process' + f'is {avg_speed} s/iter') diff --git a/mmdet3d/engine/hooks/disable_object_sample_hook.py b/mmdet3d/engine/hooks/disable_object_sample_hook.py new file mode 100755 index 0000000..d1f3c2a --- /dev/null +++ b/mmdet3d/engine/hooks/disable_object_sample_hook.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.hooks import Hook +from mmengine.model import is_model_wrapper +from mmengine.runner import Runner + +from mmdet3d.datasets.transforms import ObjectSample +from mmdet3d.registry import HOOKS + + +@HOOKS.register_module() +class DisableObjectSampleHook(Hook): + """The hook of disabling augmentations during training. + + Args: + disable_after_epoch (int): The number of epochs after which + the ``ObjectSample`` will be closed in the training. + Defaults to 15. + """ + + def __init__(self, disable_after_epoch: int = 15): + self.disable_after_epoch = disable_after_epoch + self._restart_dataloader = False + + def before_train_epoch(self, runner: Runner): + """Close augmentation. 
+ + Args: + runner (Runner): The runner. + """ + epoch = runner.epoch + train_loader = runner.train_dataloader + model = runner.model + # TODO: refactor after mmengine using model wrapper + if is_model_wrapper(model): + model = model.module + if epoch == self.disable_after_epoch: + runner.logger.info('Disable ObjectSample') + for transform in runner.train_dataloader.dataset.pipeline.transforms: # noqa: E501 + if isinstance(transform, ObjectSample): + assert hasattr(transform, 'disabled') + transform.disabled = True + # The dataset pipeline cannot be updated when persistent_workers + # is True, so we need to force the dataloader's multi-process + # restart. This is a very hacky approach. + if hasattr(train_loader, 'persistent_workers' + ) and train_loader.persistent_workers is True: + train_loader._DataLoader__initialized = False + train_loader._iterator = None + self._restart_dataloader = True + else: + # Once the restart is complete, we need to restore + # the initialization flag. + if self._restart_dataloader: + train_loader._DataLoader__initialized = True diff --git a/mmdet3d/engine/hooks/visualization_hook.py b/mmdet3d/engine/hooks/visualization_hook.py new file mode 100755 index 0000000..167381b --- /dev/null +++ b/mmdet3d/engine/hooks/visualization_hook.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings +from typing import Optional, Sequence + +import mmcv +import numpy as np +from mmengine.fileio import get +from mmengine.hooks import Hook +from mmengine.logging import print_log +from mmengine.runner import Runner +from mmengine.utils import mkdir_or_exist +from mmengine.visualization import Visualizer + +from mmdet3d.registry import HOOKS +from mmdet3d.structures import Det3DDataSample + + +@HOOKS.register_module() +class Det3DVisualizationHook(Hook): + """Detection Visualization Hook. Used to visualize validation and testing + process prediction results. + + In the testing phase: + + 1. If ``show`` is True, it means that only the prediction results are + visualized without storing data, so ``vis_backends`` needs to + be excluded. + 2. If ``test_out_dir`` is specified, it means that the prediction results + need to be saved to ``test_out_dir``. In order to avoid vis_backends + also storing data, so ``vis_backends`` needs to be excluded. + 3. ``vis_backends`` takes effect if the user does not specify ``show`` + and `test_out_dir``. You can set ``vis_backends`` to WandbVisBackend or + TensorboardVisBackend to store the prediction result in Wandb or + Tensorboard. + + Args: + draw (bool): whether to draw prediction results. If it is False, + it means that no drawing will be done. Defaults to False. + interval (int): The interval of visualization. Defaults to 50. + score_thr (float): The threshold to visualize the bboxes + and masks. Defaults to 0.3. + show (bool): Whether to display the drawn image. Default to False. + vis_task (str): Visualization task. Defaults to 'mono_det'. + wait_time (float): The interval of show (s). Defaults to 0. + draw_gt (bool): Whether to draw ground truth. Defaults to True. + draw_pred (bool): Whether to draw prediction. Defaults to True. + show_pcd_rgb (bool): Whether to show RGB point cloud. Defaults to + False. + test_out_dir (str, optional): directory where painted images + will be saved in testing process. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. 
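As a usage sketch, the hook is typically switched on from the runtime config; the snippet below assumes the common MMEngine default_hooks layout and is not itself part of this patch.

default_hooks = dict(
    visualization=dict(
        type='Det3DVisualizationHook',
        draw=True,               # drawing is disabled by default
        vis_task='lidar_det',    # one of the tasks handled in after_val_iter
        interval=50,
        score_thr=0.3,
        show=False,
        test_out_dir='vis_results'))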
+ """ + + def __init__(self, + draw: bool = False, + interval: int = 50, + score_thr: float = 0.3, + show: bool = False, + vis_task: str = 'mono_det', + wait_time: float = 0., + test_out_dir: Optional[str] = None, + draw_gt: bool = False, + draw_pred: bool = True, + show_pcd_rgb: bool = False, + backend_args: Optional[dict] = None): + self._visualizer: Visualizer = Visualizer.get_current_instance() + self.interval = interval + self.score_thr = score_thr + self.show = show + if self.show: + # No need to think about vis backends. + self._visualizer._vis_backends = {} + warnings.warn('The show is True, it means that only ' + 'the prediction results are visualized ' + 'without storing data, so vis_backends ' + 'needs to be excluded.') + self.vis_task = vis_task + + if wait_time == -1: + print_log( + 'Manual control mode, press [Right] to next sample.', + logger='current') + else: + print_log( + 'Autoplay mode, press [SPACE] to pause.', logger='current') + self.wait_time = wait_time + self.backend_args = backend_args + self.draw = draw + self.test_out_dir = test_out_dir + self._test_index = 0 + self.draw_gt = draw_gt + self.draw_pred = draw_pred + self.show_pcd_rgb = show_pcd_rgb + + def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[Det3DDataSample]) -> None: + """Run after every ``self.interval`` validation iterations. + + Args: + runner (:obj:`Runner`): The runner of the validation process. + batch_idx (int): The index of the current batch in the val loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`DetDataSample`]]): A batch of data samples + that contain annotations and predictions. + """ + if self.draw is False: + return + + # There is no guarantee that the same batch of images + # is visualized for each evaluation. + total_curr_iter = runner.iter + batch_idx + + data_input = dict() + + # Visualize only the first data + if self.vis_task in [ + 'mono_det', 'multi-view_det', 'multi-modality_det' + ]: + assert 'img_path' in outputs[0], 'img_path is not in outputs[0]' + img_path = outputs[0].img_path + if isinstance(img_path, list): + img = [] + for single_img_path in img_path: + img_bytes = get( + single_img_path, backend_args=self.backend_args) + single_img = mmcv.imfrombytes( + img_bytes, channel_order='rgb') + img.append(single_img) + else: + img_bytes = get(img_path, backend_args=self.backend_args) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + data_input['img'] = img + + if self.vis_task in ['lidar_det', 'multi-modality_det', 'lidar_seg']: + assert 'lidar_path' in outputs[ + 0], 'lidar_path is not in outputs[0]' + lidar_path = outputs[0].lidar_path + num_pts_feats = outputs[0].num_pts_feats + pts_bytes = get(lidar_path, backend_args=self.backend_args) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, num_pts_feats) + data_input['points'] = points + + if total_curr_iter % self.interval == 0: + self._visualizer.add_datasample( + 'val sample', + data_input, + data_sample=outputs[0], + draw_gt=self.draw_gt, + draw_pred=self.draw_pred, + show=self.show, + vis_task=self.vis_task, + wait_time=self.wait_time, + pred_score_thr=self.score_thr, + step=total_curr_iter, + show_pcd_rgb=self.show_pcd_rgb) + + def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[Det3DDataSample]) -> None: + """Run after every testing iterations. + + Args: + runner (:obj:`Runner`): The runner of the testing process. 
+ batch_idx (int): The index of the current batch in the val loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`DetDataSample`]): A batch of data samples + that contain annotations and predictions. + """ + if self.draw is False: + return + + if self.test_out_dir is not None: + self.test_out_dir = osp.join(runner.work_dir, runner.timestamp, + self.test_out_dir) + mkdir_or_exist(self.test_out_dir) + + for data_sample in outputs: + self._test_index += 1 + + data_input = dict() + assert 'img_path' in data_sample or 'lidar_path' in data_sample, \ + "'data_sample' must contain 'img_path' or 'lidar_path'" + + out_file = o3d_save_path = None + + if self.vis_task in [ + 'mono_det', 'multi-view_det', 'multi-modality_det' + ]: + assert 'img_path' in data_sample, \ + 'img_path is not in data_sample' + img_path = data_sample.img_path + if isinstance(img_path, list): + img = [] + for single_img_path in img_path: + img_bytes = get( + single_img_path, backend_args=self.backend_args) + single_img = mmcv.imfrombytes( + img_bytes, channel_order='rgb') + img.append(single_img) + else: + img_bytes = get(img_path, backend_args=self.backend_args) + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + data_input['img'] = img + if self.test_out_dir is not None: + if isinstance(img_path, list): + img_path = img_path[0] + out_file = osp.basename(img_path) + out_file = osp.join(self.test_out_dir, out_file) + + if self.vis_task in [ + 'lidar_det', 'multi-modality_det', 'lidar_seg' + ]: + assert 'lidar_path' in data_sample, \ + 'lidar_path is not in data_sample' + lidar_path = data_sample.lidar_path + num_pts_feats = data_sample.num_pts_feats + pts_bytes = get(lidar_path, backend_args=self.backend_args) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, num_pts_feats) + data_input['points'] = points + if self.test_out_dir is not None: + o3d_save_path = osp.basename(lidar_path).split( + '.')[0] + '.png' + o3d_save_path = osp.join(self.test_out_dir, o3d_save_path) + + self._visualizer.add_datasample( + 'test sample', + data_input, + data_sample=data_sample, + draw_gt=self.draw_gt, + draw_pred=self.draw_pred, + show=self.show, + vis_task=self.vis_task, + wait_time=self.wait_time, + pred_score_thr=self.score_thr, + out_file=out_file, + o3d_save_path=o3d_save_path, + step=self._test_index, + show_pcd_rgb=self.show_pcd_rgb) \ No newline at end of file diff --git a/mmdet3d/evaluation/__init__.py b/mmdet3d/evaluation/__init__.py new file mode 100755 index 0000000..8c23cc7 --- /dev/null +++ b/mmdet3d/evaluation/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
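Similarly, the two auxiliary hooks added under mmdet3d/engine/hooks above can be attached through custom_hooks (an MMEngine convention; the epoch value below is illustrative, not mandated by this patch).

custom_hooks = [
    dict(type='BenchmarkHook'),                                   # logs s/iter per epoch
    dict(type='DisableObjectSampleHook', disable_after_epoch=15),
]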
+from mmdet3d.evaluation.functional.kitti_utils import (do_eval, eval_class, + kitti_eval, + kitti_eval_coco_style) +from .functional import (aggregate_predictions, average_precision, + eval_det_cls, eval_map_recall, fast_hist, get_acc, + get_acc_cls, get_classwise_aps, get_single_class_aps, + indoor_eval, instance_seg_eval, load_lyft_gts, + load_lyft_predictions, lyft_eval, panoptic_seg_eval, + per_class_iou, rename_gt, seg_eval) +from .metrics import (IndoorMetric, InstanceSegMetric, KittiMetric, LyftMetric, + NuScenesMetric, PanopticSegMetric, SegMetric, + WaymoMetric) + +__all__ = [ + 'kitti_eval_coco_style', 'kitti_eval', 'indoor_eval', 'lyft_eval', + 'seg_eval', 'instance_seg_eval', 'average_precision', 'eval_det_cls', + 'eval_map_recall', 'indoor_eval', 'aggregate_predictions', 'rename_gt', + 'instance_seg_eval', 'load_lyft_gts', 'load_lyft_predictions', 'lyft_eval', + 'get_classwise_aps', 'get_single_class_aps', 'fast_hist', 'per_class_iou', + 'get_acc', 'get_acc_cls', 'seg_eval', 'KittiMetric', 'NuScenesMetric', + 'IndoorMetric', 'LyftMetric', 'SegMetric', 'InstanceSegMetric', + 'WaymoMetric', 'eval_class', 'do_eval', 'PanopticSegMetric', + 'panoptic_seg_eval' +] diff --git a/mmdet3d/evaluation/functional/__init__.py b/mmdet3d/evaluation/functional/__init__.py new file mode 100755 index 0000000..1e570e0 --- /dev/null +++ b/mmdet3d/evaluation/functional/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .indoor_eval import (average_precision, eval_det_cls, eval_map_recall, + indoor_eval) +from .instance_seg_eval import (aggregate_predictions, instance_seg_eval, + rename_gt) +from .kitti_utils import do_eval, kitti_eval, kitti_eval_coco_style +from .lyft_eval import (get_classwise_aps, get_single_class_aps, load_lyft_gts, + load_lyft_predictions, lyft_eval) +from .panoptic_seg_eval import panoptic_seg_eval +from .scannet_utils import evaluate_matches, scannet_eval +from .seg_eval import fast_hist, get_acc, get_acc_cls, per_class_iou, seg_eval + +__all__ = [ + 'average_precision', 'eval_det_cls', 'eval_map_recall', 'indoor_eval', + 'aggregate_predictions', 'rename_gt', 'instance_seg_eval', 'load_lyft_gts', + 'load_lyft_predictions', 'lyft_eval', 'get_classwise_aps', + 'get_single_class_aps', 'fast_hist', 'per_class_iou', 'get_acc', + 'get_acc_cls', 'seg_eval', 'kitti_eval', 'kitti_eval_coco_style', + 'scannet_eval', 'evaluate_matches', 'do_eval', 'panoptic_seg_eval' +] diff --git a/mmdet3d/evaluation/functional/indoor_eval.py b/mmdet3d/evaluation/functional/indoor_eval.py new file mode 100755 index 0000000..7742a66 --- /dev/null +++ b/mmdet3d/evaluation/functional/indoor_eval.py @@ -0,0 +1,302 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmengine.logging import print_log +from terminaltables import AsciiTable + + +def average_precision(recalls, precisions, mode='area'): + """Calculate average precision (for single or multiple scales). + + Args: + recalls (np.ndarray): Recalls with shape of (num_scales, num_dets) + or (num_dets, ). + precisions (np.ndarray): Precisions with shape of + (num_scales, num_dets) or (num_dets, ). + mode (str): 'area' or '11points', 'area' means calculating the area + under precision-recall curve, '11points' means calculating + the average precision of recalls at [0, 0.1, ..., 1] + + Returns: + float or np.ndarray: Calculated average precision. 
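A hand-worked instance of the 'area' mode described above; the commented call assumes this patch is installed.

import numpy as np

recalls = np.array([[1 / 3, 2 / 3, 1.0]])
precisions = np.array([[1.0, 1.0, 0.75]])
# Interpolated PR curve: precision 1.0 up to recall 2/3, then 0.75 up to 1.0,
# so AP = 1.0 * (2/3) + 0.75 * (1/3) ~= 0.9167.
expected = 1.0 * (2 / 3) + 0.75 * (1 / 3)
# from mmdet3d.evaluation import average_precision
# assert np.isclose(average_precision(recalls, precisions, mode='area')[0], expected)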
+ """ + if recalls.ndim == 1: + recalls = recalls[np.newaxis, :] + precisions = precisions[np.newaxis, :] + + assert recalls.shape == precisions.shape + assert recalls.ndim == 2 + + num_scales = recalls.shape[0] + ap = np.zeros(num_scales, dtype=np.float32) + if mode == 'area': + zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) + ones = np.ones((num_scales, 1), dtype=recalls.dtype) + mrec = np.hstack((zeros, recalls, ones)) + mpre = np.hstack((zeros, precisions, zeros)) + for i in range(mpre.shape[1] - 1, 0, -1): + mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) + for i in range(num_scales): + ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] + ap[i] = np.sum( + (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) + elif mode == '11points': + for i in range(num_scales): + for thr in np.arange(0, 1 + 1e-3, 0.1): + precs = precisions[i, recalls[i, :] >= thr] + prec = precs.max() if precs.size > 0 else 0 + ap[i] += prec + ap /= 11 + else: + raise ValueError( + 'Unrecognized mode, only "area" and "11points" are supported') + return ap + + +def eval_det_cls(pred, gt, iou_thr=None): + """Generic functions to compute precision/recall for object detection for a + single class. + + Args: + pred (dict): Predictions mapping from image id to bounding boxes + and scores. + gt (dict): Ground truths mapping from image id to bounding boxes. + iou_thr (list[float]): A list of iou thresholds. + + Return: + tuple (np.ndarray, np.ndarray, float): Recalls, precisions and + average precision. + """ + + # {img_id: {'bbox': box structure, 'det': matched list}} + class_recs = {} + npos = 0 + for img_id in gt.keys(): + cur_gt_num = len(gt[img_id]) + if cur_gt_num != 0: + gt_cur = torch.zeros([cur_gt_num, 7], dtype=torch.float32) + for i in range(cur_gt_num): + gt_cur[i] = gt[img_id][i].tensor + bbox = gt[img_id][0].new_box(gt_cur) + else: + bbox = gt[img_id] + det = [[False] * len(bbox) for i in iou_thr] + npos += len(bbox) + class_recs[img_id] = {'bbox': bbox, 'det': det} + + # construct dets + image_ids = [] + confidence = [] + ious = [] + for img_id in pred.keys(): + cur_num = len(pred[img_id]) + if cur_num == 0: + continue + pred_cur = torch.zeros((cur_num, 7), dtype=torch.float32) + box_idx = 0 + for box, score in pred[img_id]: + image_ids.append(img_id) + confidence.append(score) + pred_cur[box_idx] = box.tensor + box_idx += 1 + pred_cur = box.new_box(pred_cur) + gt_cur = class_recs[img_id]['bbox'] + if len(gt_cur) > 0: + # calculate iou in each image + iou_cur = pred_cur.overlaps(pred_cur, gt_cur) + for i in range(cur_num): + ious.append(iou_cur[i]) + else: + for i in range(cur_num): + ious.append(np.zeros(1)) + + confidence = np.array(confidence) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + image_ids = [image_ids[x] for x in sorted_ind] + ious = [ious[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp_thr = [np.zeros(nd) for i in iou_thr] + fp_thr = [np.zeros(nd) for i in iou_thr] + for d in range(nd): + R = class_recs[image_ids[d]] + iou_max = -np.inf + BBGT = R['bbox'] + cur_iou = ious[d] + + if len(BBGT) > 0: + # compute overlaps + for j in range(len(BBGT)): + # iou = get_iou_main(get_iou_func, (bb, BBGT[j,...])) + iou = cur_iou[j] + if iou > iou_max: + iou_max = iou + jmax = j + + for iou_idx, thresh in enumerate(iou_thr): + if iou_max > thresh: + if not R['det'][iou_idx][jmax]: + tp_thr[iou_idx][d] = 1. + R['det'][iou_idx][jmax] = 1 + else: + fp_thr[iou_idx][d] = 1. + else: + fp_thr[iou_idx][d] = 1. 
+ + ret = [] + for iou_idx, thresh in enumerate(iou_thr): + # compute precision recall + fp = np.cumsum(fp_thr[iou_idx]) + tp = np.cumsum(tp_thr[iou_idx]) + recall = tp / float(npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = average_precision(recall, precision) + ret.append((recall, precision, ap)) + + return ret + + +def eval_map_recall(pred, gt, ovthresh=None): + """Evaluate mAP and recall. + + Generic functions to compute precision/recall for object detection + for multiple classes. + + Args: + pred (dict): Information of detection results, + which maps class_id and predictions. + gt (dict): Information of ground truths, which maps class_id and + ground truths. + ovthresh (list[float], optional): iou threshold. Default: None. + + Return: + tuple[dict]: dict results of recall, AP, and precision for all classes. + """ + + ret_values = {} + for classname in gt.keys(): + if classname in pred: + ret_values[classname] = eval_det_cls(pred[classname], + gt[classname], ovthresh) + recall = [{} for i in ovthresh] + precision = [{} for i in ovthresh] + ap = [{} for i in ovthresh] + + for label in gt.keys(): + for iou_idx, thresh in enumerate(ovthresh): + if label in pred: + recall[iou_idx][label], precision[iou_idx][label], ap[iou_idx][ + label] = ret_values[label][iou_idx] + else: + recall[iou_idx][label] = np.zeros(1) + precision[iou_idx][label] = np.zeros(1) + ap[iou_idx][label] = np.zeros(1) + + return recall, precision, ap + + +def indoor_eval(gt_annos, + dt_annos, + metric, + label2cat, + logger=None, + box_mode_3d=None): + """Indoor Evaluation. + + Evaluate the result of the detection. + + Args: + gt_annos (list[dict]): Ground truth annotations. + dt_annos (list[dict]): Detection annotations. the dict + includes the following keys + + - labels_3d (torch.Tensor): Labels of boxes. + - bboxes_3d (:obj:`BaseInstance3DBoxes`): + 3D bounding boxes in Depth coordinate. + - scores_3d (torch.Tensor): Scores of boxes. + metric (list[float]): IoU thresholds for computing average precisions. + label2cat (tuple): Map from label to category. + logger (logging.Logger | str, optional): The way to print the mAP + summary. See `mmdet.utils.print_log()` for details. Default: None. + + Return: + dict[str, float]: Dict of results. 
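A toy end-to-end call, assuming this patch is installed: one scene, one class, and a prediction identical to the ground truth, so per-class AP and recall should come out as 1.0 at both thresholds.

import numpy as np
import torch
from mmdet3d.evaluation import indoor_eval            # exported by this patch
from mmdet3d.structures import Box3DMode, DepthInstance3DBoxes

box = torch.tensor([[0., 0., 0., 1., 1., 1., 0.]])    # x, y, z, dx, dy, dz, yaw
gt_annos = [dict(gt_bboxes_3d=DepthInstance3DBoxes(box),
                 gt_labels_3d=np.array([0]))]
dt_annos = [dict(bboxes_3d=DepthInstance3DBoxes(box.clone()),
                 labels_3d=torch.tensor([0]),
                 scores_3d=torch.tensor([0.9]))]
ret = indoor_eval(gt_annos, dt_annos, metric=[0.25, 0.5],
                  label2cat={0: 'chair'}, box_mode_3d=Box3DMode.DEPTH)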
+ """ + assert len(dt_annos) == len(gt_annos) + pred = {} # map {class_id: pred} + gt = {} # map {class_id: gt} + for img_id in range(len(dt_annos)): + # parse detected annotations + det_anno = dt_annos[img_id] + for i in range(len(det_anno['labels_3d'])): + label = det_anno['labels_3d'].numpy()[i] + bbox = det_anno['bboxes_3d'].convert_to(box_mode_3d)[i] + score = det_anno['scores_3d'].numpy()[i] + if label not in pred: + pred[int(label)] = {} + if img_id not in pred[label]: + pred[int(label)][img_id] = [] + if label not in gt: + gt[int(label)] = {} + if img_id not in gt[label]: + gt[int(label)][img_id] = [] + pred[int(label)][img_id].append((bbox, score)) + + # parse gt annotations + gt_anno = gt_annos[img_id] + + gt_boxes = gt_anno['gt_bboxes_3d'] + labels_3d = gt_anno['gt_labels_3d'] + + for i in range(len(labels_3d)): + label = labels_3d[i] + bbox = gt_boxes[i] + if label not in gt: + gt[label] = {} + if img_id not in gt[label]: + gt[label][img_id] = [] + gt[label][img_id].append(bbox) + + rec, prec, ap = eval_map_recall(pred, gt, metric) + ret_dict = dict() + header = ['classes'] + table_columns = [[label2cat[label] + for label in ap[0].keys()] + ['Overall']] + + for i, iou_thresh in enumerate(metric): + header.append(f'AP_{iou_thresh:.2f}') + header.append(f'AR_{iou_thresh:.2f}') + rec_list = [] + for label in ap[i].keys(): + ret_dict[f'{label2cat[label]}_AP_{iou_thresh:.2f}'] = float( + ap[i][label][0]) + ret_dict[f'mAP_{iou_thresh:.2f}'] = float( + np.mean(list(ap[i].values()))) + + table_columns.append(list(map(float, list(ap[i].values())))) + table_columns[-1] += [ret_dict[f'mAP_{iou_thresh:.2f}']] + table_columns[-1] = [f'{x:.4f}' for x in table_columns[-1]] + + for label in rec[i].keys(): + ret_dict[f'{label2cat[label]}_rec_{iou_thresh:.2f}'] = float( + rec[i][label][-1]) + rec_list.append(rec[i][label][-1]) + ret_dict[f'mAR_{iou_thresh:.2f}'] = float(np.mean(rec_list)) + + table_columns.append(list(map(float, rec_list))) + table_columns[-1] += [ret_dict[f'mAR_{iou_thresh:.2f}']] + table_columns[-1] = [f'{x:.4f}' for x in table_columns[-1]] + + table_data = [header] + table_rows = list(zip(*table_columns)) + table_data += table_rows + table = AsciiTable(table_data) + table.inner_footing_row_border = True + print_log('\n' + table.table, logger=logger) + + return ret_dict diff --git a/mmdet3d/evaluation/functional/instance_seg_eval.py b/mmdet3d/evaluation/functional/instance_seg_eval.py new file mode 100755 index 0000000..6d07171 --- /dev/null +++ b/mmdet3d/evaluation/functional/instance_seg_eval.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmengine.logging import print_log +from terminaltables import AsciiTable + +from .scannet_utils.evaluate_semantic_instance import scannet_eval + + +def aggregate_predictions(masks, labels, scores, valid_class_ids): + """Maps predictions to ScanNet evaluator format. + + Args: + masks (list[torch.Tensor]): Per scene predicted instance masks. + labels (list[torch.Tensor]): Per scene predicted instance labels. + scores (list[torch.Tensor]): Per scene predicted instance scores. + valid_class_ids (tuple[int]): Ids of valid categories. + + Returns: + list[dict]: Per scene aggregated predictions. 
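To illustrate the output format, a single toy scene with five points and two predicted instances (assuming this patch is installed; the class ids are arbitrary).

import torch
from mmdet3d.evaluation import aggregate_predictions  # exported by this patch

masks = [torch.tensor([0, 0, 1, 1, 1])]        # predicted instance id per point
labels = [torch.tensor([0, 1])]                # label index per instance
scores = [torch.tensor([0.9, 0.8])]
infos = aggregate_predictions(masks, labels, scores, valid_class_ids=(3, 4))
# infos[0] == {
#     '0_0': {'mask': array([1, 1, 0, 0, 0]), 'label_id': 3, 'conf': 0.9},
#     '0_1': {'mask': array([0, 0, 1, 1, 1]), 'label_id': 4, 'conf': 0.8},
# }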
+ """ + infos = [] + for id, (mask, label, score) in enumerate(zip(masks, labels, scores)): + mask = mask.clone().numpy() + label = label.clone().numpy() + score = score.clone().numpy() + info = dict() + n_instances = mask.max() + 1 + for i in range(n_instances): + # match pred_instance['filename'] from assign_instances_for_scan + file_name = f'{id}_{i}' + info[file_name] = dict() + info[file_name]['mask'] = (mask == i).astype(np.int64) + info[file_name]['label_id'] = valid_class_ids[label[i]] + info[file_name]['conf'] = score[i] + infos.append(info) + return infos + + +def rename_gt(gt_semantic_masks, gt_instance_masks, valid_class_ids): + """Maps gt instance and semantic masks to instance masks for ScanNet + evaluator. + + Args: + gt_semantic_masks (list[torch.Tensor]): Per scene gt semantic masks. + gt_instance_masks (list[torch.Tensor]): Per scene gt instance masks. + valid_class_ids (tuple[int]): Ids of valid categories. + + Returns: + list[np.array]: Per scene instance masks. + """ + renamed_instance_masks = [] + for semantic_mask, instance_mask in zip(gt_semantic_masks, + gt_instance_masks): + semantic_mask = semantic_mask.clone().numpy() + instance_mask = instance_mask.clone().numpy() + unique = np.unique(instance_mask) + assert len(unique) < 1000 + for i in unique: + semantic_instance = semantic_mask[instance_mask == i] + semantic_unique = np.unique(semantic_instance) + assert len(semantic_unique) == 1 + if semantic_unique[0] < len(valid_class_ids): + instance_mask[ + instance_mask == + i] = 1000 * valid_class_ids[semantic_unique[0]] + i + renamed_instance_masks.append(instance_mask) + return renamed_instance_masks + + +def instance_seg_eval(gt_semantic_masks, + gt_instance_masks, + pred_instance_masks, + pred_instance_labels, + pred_instance_scores, + valid_class_ids, + class_labels, + options=None, + logger=None): + """Instance Segmentation Evaluation. + + Evaluate the result of the instance segmentation. + + Args: + gt_semantic_masks (list[torch.Tensor]): Ground truth semantic masks. + gt_instance_masks (list[torch.Tensor]): Ground truth instance masks. + pred_instance_masks (list[torch.Tensor]): Predicted instance masks. + pred_instance_labels (list[torch.Tensor]): Predicted instance labels. + pred_instance_scores (list[torch.Tensor]): Predicted instance labels. + valid_class_ids (tuple[int]): Ids of valid categories. + class_labels (tuple[str]): Names of valid categories. + options (dict, optional): Additional options. Keys may contain: + `overlaps`, `min_region_sizes`, `distance_threshes`, + `distance_confs`. Default: None. + logger (logging.Logger | str, optional): The way to print the mAP + summary. See `mmdet.utils.print_log()` for details. Default: None. + + Returns: + dict[str, float]: Dict of results. 
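The ground-truth side relies on the packing scheme used by rename_gt above, where each instance id is rewritten as 1000 * valid_class_id + instance_id; a quick NumPy illustration with made-up values:

import numpy as np

valid_class_ids = (3, 4)
semantic = np.array([0, 0, 1, 1])   # per-point label indices into valid_class_ids
instance = np.array([0, 0, 1, 1])   # per-point gt instance ids
packed = 1000 * np.array(valid_class_ids)[semantic] + instance
# packed -> [3000, 3000, 4001, 4001], matching what rename_gt produces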
+ """ + assert len(valid_class_ids) == len(class_labels) + id_to_label = { + valid_class_ids[i]: class_labels[i] + for i in range(len(valid_class_ids)) + } + preds = aggregate_predictions( + masks=pred_instance_masks, + labels=pred_instance_labels, + scores=pred_instance_scores, + valid_class_ids=valid_class_ids) + gts = rename_gt(gt_semantic_masks, gt_instance_masks, valid_class_ids) + metrics = scannet_eval( + preds=preds, + gts=gts, + options=options, + valid_class_ids=valid_class_ids, + class_labels=class_labels, + id_to_label=id_to_label) + header = ['classes', 'AP_0.25', 'AP_0.50', 'AP'] + rows = [] + for label, data in metrics['classes'].items(): + aps = [data['ap25%'], data['ap50%'], data['ap']] + rows.append([label] + [f'{ap:.4f}' for ap in aps]) + aps = metrics['all_ap_25%'], metrics['all_ap_50%'], metrics['all_ap'] + footer = ['Overall'] + [f'{ap:.4f}' for ap in aps] + table = AsciiTable([header] + rows + [footer]) + table.inner_footing_row_border = True + print_log('\n' + table.table, logger=logger) + return metrics diff --git a/mmdet3d/evaluation/functional/kitti_utils/__init__.py b/mmdet3d/evaluation/functional/kitti_utils/__init__.py new file mode 100755 index 0000000..bf46c1b --- /dev/null +++ b/mmdet3d/evaluation/functional/kitti_utils/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .eval import do_eval, eval_class, kitti_eval, kitti_eval_coco_style + +__all__ = ['kitti_eval', 'kitti_eval_coco_style', 'do_eval', 'eval_class'] diff --git a/mmdet3d/evaluation/functional/kitti_utils/eval.py b/mmdet3d/evaluation/functional/kitti_utils/eval.py new file mode 100755 index 0000000..5e55952 --- /dev/null +++ b/mmdet3d/evaluation/functional/kitti_utils/eval.py @@ -0,0 +1,950 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
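Before the implementation, a shape sketch of the per-frame annotation dicts this evaluator consumes. The field set is inferred from clean_data, calculate_iou_partly and _prepare_data below; the numbers are made up, and 'score' is only present in detection annos.

import numpy as np

frame_anno = dict(
    name=np.array(['Car']),
    truncated=np.array([0.0]),
    occluded=np.array([0]),
    alpha=np.array([-1.57]),
    bbox=np.array([[100., 120., 200., 240.]]),   # 2D box (x1, y1, x2, y2)
    dimensions=np.array([[3.9, 1.6, 1.56]]),     # 3D box size
    location=np.array([[1.0, 1.5, 20.0]]),       # camera coordinates
    rotation_y=np.array([0.0]),
    score=np.array([0.9]))
# gt_annos / dt_annos passed to kitti_eval and eval_class are lists of such
# dicts, one per frame.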
+import gc +import io as sysio + +import numba +import numpy as np + + +@numba.jit +def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41): + scores.sort() + scores = scores[::-1] + current_recall = 0 + thresholds = [] + for i, score in enumerate(scores): + l_recall = (i + 1) / num_gt + if i < (len(scores) - 1): + r_recall = (i + 2) / num_gt + else: + r_recall = l_recall + if (((r_recall - current_recall) < (current_recall - l_recall)) + and (i < (len(scores) - 1))): + continue + # recall = l_recall + thresholds.append(score) + current_recall += 1 / (num_sample_pts - 1.0) + return thresholds + + +def clean_data(gt_anno, dt_anno, current_class, difficulty): + CLASS_NAMES = ['car', 'pedestrian', 'cyclist'] + MIN_HEIGHT = [40, 25, 25] + MAX_OCCLUSION = [0, 1, 2] + MAX_TRUNCATION = [0.15, 0.3, 0.5] + dc_bboxes, ignored_gt, ignored_dt = [], [], [] + current_cls_name = CLASS_NAMES[current_class].lower() + num_gt = len(gt_anno['name']) + num_dt = len(dt_anno['name']) + num_valid_gt = 0 + for i in range(num_gt): + bbox = gt_anno['bbox'][i] + gt_name = gt_anno['name'][i].lower() + height = bbox[3] - bbox[1] + valid_class = -1 + if (gt_name == current_cls_name): + valid_class = 1 + elif (current_cls_name == 'Pedestrian'.lower() + and 'Person_sitting'.lower() == gt_name): + valid_class = 0 + elif (current_cls_name == 'Car'.lower() and 'Van'.lower() == gt_name): + valid_class = 0 + else: + valid_class = -1 + ignore = False + if ((gt_anno['occluded'][i] > MAX_OCCLUSION[difficulty]) + or (gt_anno['truncated'][i] > MAX_TRUNCATION[difficulty]) + or (height <= MIN_HEIGHT[difficulty])): + ignore = True + if valid_class == 1 and not ignore: + ignored_gt.append(0) + num_valid_gt += 1 + elif (valid_class == 0 or (ignore and (valid_class == 1))): + ignored_gt.append(1) + else: + ignored_gt.append(-1) + # for i in range(num_gt): + if gt_anno['name'][i] == 'DontCare': + dc_bboxes.append(gt_anno['bbox'][i]) + for i in range(num_dt): + if (dt_anno['name'][i].lower() == current_cls_name): + valid_class = 1 + else: + valid_class = -1 + height = abs(dt_anno['bbox'][i, 3] - dt_anno['bbox'][i, 1]) + if height < MIN_HEIGHT[difficulty]: + ignored_dt.append(1) + elif valid_class == 1: + ignored_dt.append(0) + else: + ignored_dt.append(-1) + + return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes + + +@numba.jit(nopython=True) +def image_box_overlap(boxes, query_boxes, criterion=-1): + N = boxes.shape[0] + K = query_boxes.shape[0] + overlaps = np.zeros((N, K), dtype=boxes.dtype) + for k in range(K): + qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) * + (query_boxes[k, 3] - query_boxes[k, 1])) + for n in range(N): + iw = ( + min(boxes[n, 2], query_boxes[k, 2]) - + max(boxes[n, 0], query_boxes[k, 0])) + if iw > 0: + ih = ( + min(boxes[n, 3], query_boxes[k, 3]) - + max(boxes[n, 1], query_boxes[k, 1])) + if ih > 0: + if criterion == -1: + ua = ((boxes[n, 2] - boxes[n, 0]) * + (boxes[n, 3] - boxes[n, 1]) + qbox_area - + iw * ih) + elif criterion == 0: + ua = ((boxes[n, 2] - boxes[n, 0]) * + (boxes[n, 3] - boxes[n, 1])) + elif criterion == 1: + ua = qbox_area + else: + ua = 1.0 + overlaps[n, k] = iw * ih / ua + return overlaps + + +def bev_box_overlap(boxes, qboxes, criterion=-1): + from .rotate_iou import rotate_iou_gpu_eval + riou = rotate_iou_gpu_eval(boxes, qboxes, criterion) + return riou + + +@numba.jit(nopython=True, parallel=True) +def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1): + # ONLY support overlap in CAMERA, not lidar. 
+ # TODO: change to use prange for parallel mode, should check the difference + N, K = boxes.shape[0], qboxes.shape[0] + for i in numba.prange(N): + for j in numba.prange(K): + if rinc[i, j] > 0: + # iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] + + # qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1])) + iw = ( + min(boxes[i, 1], qboxes[j, 1]) - + max(boxes[i, 1] - boxes[i, 4], + qboxes[j, 1] - qboxes[j, 4])) + + if iw > 0: + area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5] + area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5] + inc = iw * rinc[i, j] + if criterion == -1: + ua = (area1 + area2 - inc) + elif criterion == 0: + ua = area1 + elif criterion == 1: + ua = area2 + else: + ua = inc + rinc[i, j] = inc / ua + else: + rinc[i, j] = 0.0 + + +def d3_box_overlap(boxes, qboxes, criterion=-1): + from .rotate_iou import rotate_iou_gpu_eval + rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]], + qboxes[:, [0, 2, 3, 5, 6]], 2) + d3_box_overlap_kernel(boxes, qboxes, rinc, criterion) + return rinc + + +@numba.jit(nopython=True) +def compute_statistics_jit(overlaps, + gt_datas, + dt_datas, + ignored_gt, + ignored_det, + dc_bboxes, + metric, + min_overlap, + thresh=0, + compute_fp=False, + compute_aos=False): + + det_size = dt_datas.shape[0] + gt_size = gt_datas.shape[0] + dt_scores = dt_datas[:, -1] + dt_alphas = dt_datas[:, 4] + gt_alphas = gt_datas[:, 4] + dt_bboxes = dt_datas[:, :4] + # gt_bboxes = gt_datas[:, :4] + + assigned_detection = [False] * det_size + ignored_threshold = [False] * det_size + if compute_fp: + for i in range(det_size): + if (dt_scores[i] < thresh): + ignored_threshold[i] = True + NO_DETECTION = -10000000 + tp, fp, fn, similarity = 0, 0, 0, 0 + # thresholds = [0.0] + # delta = [0.0] + thresholds = np.zeros((gt_size, )) + thresh_idx = 0 + delta = np.zeros((gt_size, )) + delta_idx = 0 + for i in range(gt_size): + if ignored_gt[i] == -1: + continue + det_idx = -1 + valid_detection = NO_DETECTION + max_overlap = 0 + assigned_ignored_det = False + + for j in range(det_size): + if (ignored_det[j] == -1): + continue + if (assigned_detection[j]): + continue + if (ignored_threshold[j]): + continue + overlap = overlaps[j, i] + dt_score = dt_scores[j] + if (not compute_fp and (overlap > min_overlap) + and dt_score > valid_detection): + det_idx = j + valid_detection = dt_score + elif (compute_fp and (overlap > min_overlap) + and (overlap > max_overlap or assigned_ignored_det) + and ignored_det[j] == 0): + max_overlap = overlap + det_idx = j + valid_detection = 1 + assigned_ignored_det = False + elif (compute_fp and (overlap > min_overlap) + and (valid_detection == NO_DETECTION) + and ignored_det[j] == 1): + det_idx = j + valid_detection = 1 + assigned_ignored_det = True + + if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0: + fn += 1 + elif ((valid_detection != NO_DETECTION) + and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)): + assigned_detection[det_idx] = True + elif valid_detection != NO_DETECTION: + tp += 1 + # thresholds.append(dt_scores[det_idx]) + thresholds[thresh_idx] = dt_scores[det_idx] + thresh_idx += 1 + if compute_aos: + # delta.append(gt_alphas[i] - dt_alphas[det_idx]) + delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx] + delta_idx += 1 + + assigned_detection[det_idx] = True + if compute_fp: + for i in range(det_size): + if (not (assigned_detection[i] or ignored_det[i] == -1 + or ignored_det[i] == 1 or ignored_threshold[i])): + fp += 1 + nstuff = 0 + if metric == 0: + overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0) + for i in 
range(dc_bboxes.shape[0]): + for j in range(det_size): + if (assigned_detection[j]): + continue + if (ignored_det[j] == -1 or ignored_det[j] == 1): + continue + if (ignored_threshold[j]): + continue + if overlaps_dt_dc[j, i] > min_overlap: + assigned_detection[j] = True + nstuff += 1 + fp -= nstuff + if compute_aos: + tmp = np.zeros((fp + delta_idx, )) + # tmp = [0] * fp + for i in range(delta_idx): + tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0 + # tmp.append((1.0 + np.cos(delta[i])) / 2.0) + # assert len(tmp) == fp + tp + # assert len(delta) == tp + if tp > 0 or fp > 0: + similarity = np.sum(tmp) + else: + similarity = -1 + return tp, fp, fn, similarity, thresholds[:thresh_idx] + + +def get_split_parts(num, num_part): + if num % num_part == 0: + same_part = num // num_part + return [same_part] * num_part + else: + same_part = num // (num_part - 1) + remain_num = num % (num_part - 1) + return [same_part] * (num_part - 1) + [remain_num] + + +@numba.jit(nopython=True) +def fused_compute_statistics(overlaps, + pr, + gt_nums, + dt_nums, + dc_nums, + gt_datas, + dt_datas, + dontcares, + ignored_gts, + ignored_dets, + metric, + min_overlap, + thresholds, + compute_aos=False): + gt_num = 0 + dt_num = 0 + dc_num = 0 + for i in range(gt_nums.shape[0]): + for t, thresh in enumerate(thresholds): + overlap = overlaps[dt_num:dt_num + dt_nums[i], + gt_num:gt_num + gt_nums[i]] + + gt_data = gt_datas[gt_num:gt_num + gt_nums[i]] + dt_data = dt_datas[dt_num:dt_num + dt_nums[i]] + ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]] + ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]] + dontcare = dontcares[dc_num:dc_num + dc_nums[i]] + tp, fp, fn, similarity, _ = compute_statistics_jit( + overlap, + gt_data, + dt_data, + ignored_gt, + ignored_det, + dontcare, + metric, + min_overlap=min_overlap, + thresh=thresh, + compute_fp=True, + compute_aos=compute_aos) + pr[t, 0] += tp + pr[t, 1] += fp + pr[t, 2] += fn + if similarity != -1: + pr[t, 3] += similarity + gt_num += gt_nums[i] + dt_num += dt_nums[i] + dc_num += dc_nums[i] + + +def calculate_iou_partly(dt_annos, gt_annos, metric, num_parts=50): + """Fast iou algorithm. this function can be used independently to do result + analysis. Must be used in CAMERA coordinate system. + + Args: + dt_annos (dict): Must from get_label_annos() in kitti_common.py. + gt_annos (dict): Must from get_label_annos() in kitti_common.py. + metric (int): Eval type. 0: bbox, 1: bev, 2: 3d. + num_parts (int): A parameter for fast calculate algorithm. 
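For reference, the chunking used by calculate_iou_partly comes from get_split_parts above; assuming this patch is installed, the split behaves as follows.

from mmdet3d.evaluation.functional.kitti_utils.eval import get_split_parts

assert get_split_parts(8, 4) == [2, 2, 2, 2]    # even split
assert get_split_parts(10, 4) == [3, 3, 3, 1]   # remainder goes to the last part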
+ """ + assert len(dt_annos) == len(gt_annos) + total_gt_num = np.stack([len(a['name']) for a in gt_annos], 0) + total_dt_num = np.stack([len(a['name']) for a in dt_annos], 0) + num_examples = len(dt_annos) + split_parts = get_split_parts(num_examples, num_parts) + parted_overlaps = [] + example_idx = 0 + + for num_part in split_parts: + dt_annos_part = dt_annos[example_idx:example_idx + num_part] + gt_annos_part = gt_annos[example_idx:example_idx + num_part] + if metric == 0: + dt_boxes = np.concatenate([a['bbox'] for a in dt_annos_part], 0) + gt_boxes = np.concatenate([a['bbox'] for a in gt_annos_part], 0) + overlap_part = image_box_overlap(dt_boxes, gt_boxes) + elif metric == 1: + loc = np.concatenate( + [a['location'][:, [0, 2]] for a in dt_annos_part], 0) + dims = np.concatenate( + [a['dimensions'][:, [0, 2]] for a in dt_annos_part], 0) + rots = np.concatenate([a['rotation_y'] for a in dt_annos_part], 0) + dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + loc = np.concatenate( + [a['location'][:, [0, 2]] for a in gt_annos_part], 0) + dims = np.concatenate( + [a['dimensions'][:, [0, 2]] for a in gt_annos_part], 0) + rots = np.concatenate([a['rotation_y'] for a in gt_annos_part], 0) + gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + overlap_part = bev_box_overlap(dt_boxes, + gt_boxes).astype(np.float64) + elif metric == 2: + loc = np.concatenate([a['location'] for a in dt_annos_part], 0) + dims = np.concatenate([a['dimensions'] for a in dt_annos_part], 0) + rots = np.concatenate([a['rotation_y'] for a in dt_annos_part], 0) + dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + loc = np.concatenate([a['location'] for a in gt_annos_part], 0) + dims = np.concatenate([a['dimensions'] for a in gt_annos_part], 0) + rots = np.concatenate([a['rotation_y'] for a in gt_annos_part], 0) + gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + overlap_part = d3_box_overlap(dt_boxes, + gt_boxes).astype(np.float64) + else: + raise ValueError('unknown metric') + parted_overlaps.append(overlap_part) + example_idx += num_part + overlaps = [] + example_idx = 0 + for j, num_part in enumerate(split_parts): + gt_num_idx, dt_num_idx = 0, 0 + for i in range(num_part): + gt_box_num = total_gt_num[example_idx + i] + dt_box_num = total_dt_num[example_idx + i] + overlaps.append( + parted_overlaps[j][dt_num_idx:dt_num_idx + dt_box_num, + gt_num_idx:gt_num_idx + gt_box_num]) + gt_num_idx += gt_box_num + dt_num_idx += dt_box_num + example_idx += num_part + + return overlaps, parted_overlaps, total_dt_num, total_gt_num + + +def _prepare_data(gt_annos, dt_annos, current_class, difficulty): + gt_datas_list = [] + dt_datas_list = [] + total_dc_num = [] + ignored_gts, ignored_dets, dontcares = [], [], [] + total_num_valid_gt = 0 + for i in range(len(gt_annos)): + rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty) + num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets + ignored_gts.append(np.array(ignored_gt, dtype=np.int64)) + ignored_dets.append(np.array(ignored_det, dtype=np.int64)) + if len(dc_bboxes) == 0: + dc_bboxes = np.zeros((0, 4)).astype(np.float64) + else: + dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64) + total_dc_num.append(dc_bboxes.shape[0]) + dontcares.append(dc_bboxes) + total_num_valid_gt += num_valid_gt + gt_datas = np.concatenate( + [gt_annos[i]['bbox'], gt_annos[i]['alpha'][..., np.newaxis]], 1) + dt_datas = np.concatenate([ + dt_annos[i]['bbox'], dt_annos[i]['alpha'][..., 
np.newaxis], + dt_annos[i]['score'][..., np.newaxis] + ], 1) + gt_datas_list.append(gt_datas) + dt_datas_list.append(dt_datas) + total_dc_num = np.stack(total_dc_num, axis=0) + return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, + total_dc_num, total_num_valid_gt) + + +def eval_class(gt_annos, + dt_annos, + current_classes, + difficultys, + metric, + min_overlaps, + compute_aos=False, + num_parts=200): + """Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP. + + Args: + gt_annos (dict): Must from get_label_annos() in kitti_common.py. + dt_annos (dict): Must from get_label_annos() in kitti_common.py. + current_classes (list[int]): 0: car, 1: pedestrian, 2: cyclist. + difficultys (list[int]): Eval difficulty, 0: easy, 1: normal, 2: hard + metric (int): Eval type. 0: bbox, 1: bev, 2: 3d + min_overlaps (float): Min overlap. format: + [num_overlap, metric, class]. + num_parts (int): A parameter for fast calculate algorithm + + Returns: + dict[str, np.ndarray]: recall, precision and aos + """ + assert len(gt_annos) == len(dt_annos) + num_examples = len(gt_annos) + if num_examples < num_parts: + num_parts = num_examples + split_parts = get_split_parts(num_examples, num_parts) + + rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts) + overlaps, parted_overlaps, total_dt_num, total_gt_num = rets + + N_SAMPLE_PTS = 41 + num_minoverlap = len(min_overlaps) + num_class = len(current_classes) + num_difficulty = len(difficultys) + precision = np.zeros( + [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) + recall = np.zeros( + [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) + aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) + for m, current_class in enumerate(current_classes): + for idx_l, difficulty in enumerate(difficultys): + rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty) + (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, + dontcares, total_dc_num, total_num_valid_gt) = rets + for k, min_overlap in enumerate(min_overlaps[:, metric, m]): + thresholdss = [] + for i in range(len(gt_annos)): + rets = compute_statistics_jit( + overlaps[i], + gt_datas_list[i], + dt_datas_list[i], + ignored_gts[i], + ignored_dets[i], + dontcares[i], + metric, + min_overlap=min_overlap, + thresh=0.0, + compute_fp=False) + tp, fp, fn, similarity, thresholds = rets + thresholdss += thresholds.tolist() + thresholdss = np.array(thresholdss) + thresholds = get_thresholds(thresholdss, total_num_valid_gt) + thresholds = np.array(thresholds) + pr = np.zeros([len(thresholds), 4]) + idx = 0 + for j, num_part in enumerate(split_parts): + gt_datas_part = np.concatenate( + gt_datas_list[idx:idx + num_part], 0) + dt_datas_part = np.concatenate( + dt_datas_list[idx:idx + num_part], 0) + dc_datas_part = np.concatenate( + dontcares[idx:idx + num_part], 0) + ignored_dets_part = np.concatenate( + ignored_dets[idx:idx + num_part], 0) + ignored_gts_part = np.concatenate( + ignored_gts[idx:idx + num_part], 0) + fused_compute_statistics( + parted_overlaps[j], + pr, + total_gt_num[idx:idx + num_part], + total_dt_num[idx:idx + num_part], + total_dc_num[idx:idx + num_part], + gt_datas_part, + dt_datas_part, + dc_datas_part, + ignored_gts_part, + ignored_dets_part, + metric, + min_overlap=min_overlap, + thresholds=thresholds, + compute_aos=compute_aos) + idx += num_part + for i in range(len(thresholds)): + recall[m, idx_l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2]) + precision[m, idx_l, k, i] = pr[i, 0] / ( + pr[i, 0] + pr[i, 
1]) + if compute_aos: + aos[m, idx_l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1]) + for i in range(len(thresholds)): + precision[m, idx_l, k, i] = np.max( + precision[m, idx_l, k, i:], axis=-1) + recall[m, idx_l, k, i] = np.max( + recall[m, idx_l, k, i:], axis=-1) + if compute_aos: + aos[m, idx_l, k, i] = np.max( + aos[m, idx_l, k, i:], axis=-1) + ret_dict = { + 'recall': recall, + 'precision': precision, + 'orientation': aos, + } + + # clean temp variables + del overlaps + del parted_overlaps + + gc.collect() + return ret_dict + + +def get_mAP11(prec): + sums = 0 + for i in range(0, prec.shape[-1], 4): + sums = sums + prec[..., i] + return sums / 11 * 100 + + +def get_mAP40(prec): + sums = 0 + for i in range(1, prec.shape[-1]): + sums = sums + prec[..., i] + return sums / 40 * 100 + + +def print_str(value, *arg, sstream=None): + if sstream is None: + sstream = sysio.StringIO() + sstream.truncate(0) + sstream.seek(0) + print(value, *arg, file=sstream) + return sstream.getvalue() + + +def do_eval(gt_annos, + dt_annos, + current_classes, + min_overlaps, + eval_types=['bbox', 'bev', '3d']): + # min_overlaps: [num_minoverlap, metric, num_class] + difficultys = [0, 1, 2] + mAP11_bbox = None + mAP11_aos = None + mAP40_bbox = None + mAP40_aos = None + if 'bbox' in eval_types: + ret = eval_class( + gt_annos, + dt_annos, + current_classes, + difficultys, + 0, + min_overlaps, + compute_aos=('aos' in eval_types)) + # ret: [num_class, num_diff, num_minoverlap, num_sample_points] + mAP11_bbox = get_mAP11(ret['precision']) + mAP40_bbox = get_mAP40(ret['precision']) + if 'aos' in eval_types: + mAP11_aos = get_mAP11(ret['orientation']) + mAP40_aos = get_mAP40(ret['orientation']) + + mAP11_bev = None + mAP40_bev = None + if 'bev' in eval_types: + ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1, + min_overlaps) + mAP11_bev = get_mAP11(ret['precision']) + mAP40_bev = get_mAP40(ret['precision']) + + mAP11_3d = None + mAP40_3d = None + if '3d' in eval_types: + ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2, + min_overlaps) + mAP11_3d = get_mAP11(ret['precision']) + mAP40_3d = get_mAP40(ret['precision']) + return (mAP11_bbox, mAP11_bev, mAP11_3d, mAP11_aos, mAP40_bbox, mAP40_bev, + mAP40_3d, mAP40_aos) + + +def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges, + compute_aos): + # overlap_ranges: [range, metric, num_class] + min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]]) + for i in range(overlap_ranges.shape[1]): + for j in range(overlap_ranges.shape[2]): + min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j]) + mAP_bbox, mAP_bev, mAP_3d, mAP_aos, _, _, \ + _, _ = do_eval(gt_annos, dt_annos, + current_classes, min_overlaps, + compute_aos) + # ret: [num_class, num_diff, num_minoverlap] + mAP_bbox = mAP_bbox.mean(-1) + mAP_bev = mAP_bev.mean(-1) + mAP_3d = mAP_3d.mean(-1) + if mAP_aos is not None: + mAP_aos = mAP_aos.mean(-1) + return mAP_bbox, mAP_bev, mAP_3d, mAP_aos + + +def kitti_eval(gt_annos, + dt_annos, + current_classes, + eval_types=['bbox', 'bev', '3d']): + """KITTI evaluation. + + Args: + gt_annos (list[dict]): Contain gt information of each sample. + dt_annos (list[dict]): Contain detected information of each sample. + current_classes (list[str]): Classes to evaluation. + eval_types (list[str], optional): Types to eval. + Defaults to ['bbox', 'bev', '3d']. + + Returns: + tuple: String and dict of evaluation results. 
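+
+    Example:
+        >>> # A minimal usage sketch; ``gt_annos`` and ``dt_annos`` are
+        >>> # assumed to come from get_label_annos() in kitti_common.py.
+        >>> result_str, result_dict = kitti_eval(
+        ...     gt_annos, dt_annos, current_classes=['Car'],
+        ...     eval_types=['bbox', 'bev', '3d'])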
+ """ + assert len(eval_types) > 0, 'must contain at least one evaluation type' + if 'aos' in eval_types: + assert 'bbox' in eval_types, 'must evaluate bbox when evaluating aos' + overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7, + 0.5], [0.7, 0.5, 0.5, 0.7, 0.5], + [0.7, 0.5, 0.5, 0.7, 0.5]]) + overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7, 0.5], + [0.5, 0.25, 0.25, 0.5, 0.25], + [0.5, 0.25, 0.25, 0.5, 0.25]]) + min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0) # [2, 3, 5] + class_to_name = { + 0: 'Car', + 1: 'Pedestrian', + 2: 'Cyclist', + 3: 'Van', + 4: 'Person_sitting', + } + name_to_class = {v: n for n, v in class_to_name.items()} + if not isinstance(current_classes, (list, tuple)): + current_classes = [current_classes] + current_classes_int = [] + for curcls in current_classes: + if isinstance(curcls, str): + current_classes_int.append(name_to_class[curcls]) + else: + current_classes_int.append(curcls) + current_classes = current_classes_int + min_overlaps = min_overlaps[:, :, current_classes] + result = '' + # check whether alpha is valid + compute_aos = False + pred_alpha = False + valid_alpha_gt = False + for anno in dt_annos: + mask = (anno['alpha'] != -10) + if anno['alpha'][mask].shape[0] != 0: + pred_alpha = True + break + for anno in gt_annos: + if anno['alpha'][0] != -10: + valid_alpha_gt = True + break + compute_aos = (pred_alpha and valid_alpha_gt) + if compute_aos: + eval_types.append('aos') + + mAP11_bbox, mAP11_bev, mAP11_3d, mAP11_aos, mAP40_bbox, mAP40_bev, \ + mAP40_3d, mAP40_aos = do_eval(gt_annos, dt_annos, + current_classes, min_overlaps, + eval_types) + + ret_dict = {} + difficulty = ['easy', 'moderate', 'hard'] + + # calculate AP11 + result += '\n----------- AP11 Results ------------\n\n' + for j, curcls in enumerate(current_classes): + # mAP threshold array: [num_minoverlap, metric, class] + # mAP result: [num_class, num_diff, num_minoverlap] + curcls_name = class_to_name[curcls] + for i in range(min_overlaps.shape[0]): + # prepare results for print + result += ('{} AP11@{:.2f}, {:.2f}, {:.2f}:\n'.format( + curcls_name, *min_overlaps[i, :, j])) + if mAP11_bbox is not None: + result += 'bbox AP11:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP11_bbox[j, :, i]) + if mAP11_bev is not None: + result += 'bev AP11:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP11_bev[j, :, i]) + if mAP11_3d is not None: + result += '3d AP11:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP11_3d[j, :, i]) + if compute_aos: + result += 'aos AP11:{:.2f}, {:.2f}, {:.2f}\n'.format( + *mAP11_aos[j, :, i]) + + # prepare results for logger + for idx in range(3): + if i == 0: + postfix = f'{difficulty[idx]}_strict' + else: + postfix = f'{difficulty[idx]}_loose' + prefix = f'KITTI/{curcls_name}' + if mAP11_3d is not None: + ret_dict[f'{prefix}_3D_AP11_{postfix}'] =\ + mAP11_3d[j, idx, i] + if mAP11_bev is not None: + ret_dict[f'{prefix}_BEV_AP11_{postfix}'] =\ + mAP11_bev[j, idx, i] + if mAP11_bbox is not None: + ret_dict[f'{prefix}_2D_AP11_{postfix}'] =\ + mAP11_bbox[j, idx, i] + + # calculate mAP11 over all classes if there are multiple classes + if len(current_classes) > 1: + # prepare results for print + result += ('\nOverall AP11@{}, {}, {}:\n'.format(*difficulty)) + if mAP11_bbox is not None: + mAP11_bbox = mAP11_bbox.mean(axis=0) + result += 'bbox AP11:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP11_bbox[:, 0]) + if mAP11_bev is not None: + mAP11_bev = mAP11_bev.mean(axis=0) + result += 'bev AP11:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP11_bev[:, 0]) + if mAP11_3d is not None: + mAP11_3d = mAP11_3d.mean(axis=0) 
+ result += '3d AP11:{:.4f}, {:.4f}, {:.4f}\n'.format(*mAP11_3d[:, + 0]) + if compute_aos: + mAP11_aos = mAP11_aos.mean(axis=0) + result += 'aos AP11:{:.2f}, {:.2f}, {:.2f}\n'.format( + *mAP11_aos[:, 0]) + + # prepare results for logger + for idx in range(3): + postfix = f'{difficulty[idx]}' + if mAP11_3d is not None: + ret_dict[f'KITTI/Overall_3D_AP11_{postfix}'] = mAP11_3d[idx, 0] + if mAP11_bev is not None: + ret_dict[f'KITTI/Overall_BEV_AP11_{postfix}'] =\ + mAP11_bev[idx, 0] + if mAP11_bbox is not None: + ret_dict[f'KITTI/Overall_2D_AP11_{postfix}'] =\ + mAP11_bbox[idx, 0] + + # Calculate AP40 + result += '\n----------- AP40 Results ------------\n\n' + for j, curcls in enumerate(current_classes): + # mAP threshold array: [num_minoverlap, metric, class] + # mAP result: [num_class, num_diff, num_minoverlap] + curcls_name = class_to_name[curcls] + for i in range(min_overlaps.shape[0]): + # prepare results for print + result += ('{} AP40@{:.2f}, {:.2f}, {:.2f}:\n'.format( + curcls_name, *min_overlaps[i, :, j])) + if mAP40_bbox is not None: + result += 'bbox AP40:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP40_bbox[j, :, i]) + if mAP40_bev is not None: + result += 'bev AP40:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP40_bev[j, :, i]) + if mAP40_3d is not None: + result += '3d AP40:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP40_3d[j, :, i]) + if compute_aos: + result += 'aos AP40:{:.2f}, {:.2f}, {:.2f}\n'.format( + *mAP40_aos[j, :, i]) + + # prepare results for logger + for idx in range(3): + if i == 0: + postfix = f'{difficulty[idx]}_strict' + else: + postfix = f'{difficulty[idx]}_loose' + prefix = f'KITTI/{curcls_name}' + if mAP40_3d is not None: + ret_dict[f'{prefix}_3D_AP40_{postfix}'] =\ + mAP40_3d[j, idx, i] + if mAP40_bev is not None: + ret_dict[f'{prefix}_BEV_AP40_{postfix}'] =\ + mAP40_bev[j, idx, i] + if mAP40_bbox is not None: + ret_dict[f'{prefix}_2D_AP40_{postfix}'] =\ + mAP40_bbox[j, idx, i] + + # calculate mAP40 over all classes if there are multiple classes + if len(current_classes) > 1: + # prepare results for print + result += ('\nOverall AP40@{}, {}, {}:\n'.format(*difficulty)) + if mAP40_bbox is not None: + mAP40_bbox = mAP40_bbox.mean(axis=0) + result += 'bbox AP40:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP40_bbox[:, 0]) + if mAP40_bev is not None: + mAP40_bev = mAP40_bev.mean(axis=0) + result += 'bev AP40:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP40_bev[:, 0]) + if mAP40_3d is not None: + mAP40_3d = mAP40_3d.mean(axis=0) + result += '3d AP40:{:.4f}, {:.4f}, {:.4f}\n'.format(*mAP40_3d[:, + 0]) + if compute_aos: + mAP40_aos = mAP40_aos.mean(axis=0) + result += 'aos AP40:{:.2f}, {:.2f}, {:.2f}\n'.format( + *mAP40_aos[:, 0]) + + # prepare results for logger + for idx in range(3): + postfix = f'{difficulty[idx]}' + if mAP40_3d is not None: + ret_dict[f'KITTI/Overall_3D_AP40_{postfix}'] = mAP40_3d[idx, 0] + if mAP40_bev is not None: + ret_dict[f'KITTI/Overall_BEV_AP40_{postfix}'] =\ + mAP40_bev[idx, 0] + if mAP40_bbox is not None: + ret_dict[f'KITTI/Overall_2D_AP40_{postfix}'] =\ + mAP40_bbox[idx, 0] + + return result, ret_dict + + +def kitti_eval_coco_style(gt_annos, dt_annos, current_classes): + """coco style evaluation of kitti. + + Args: + gt_annos (list[dict]): Contain gt information of each sample. + dt_annos (list[dict]): Contain detected information of each sample. + current_classes (list[str]): Classes to evaluation. + + Returns: + string: Evaluation results. 
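+
+    Example:
+        >>> # A minimal usage sketch; annotations are assumed to come from
+        >>> # get_label_annos() in kitti_common.py.
+        >>> result_str = kitti_eval_coco_style(
+        ...     gt_annos, dt_annos, current_classes=['Car', 'Pedestrian'])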
+ """ + class_to_name = { + 0: 'Car', + 1: 'Pedestrian', + 2: 'Cyclist', + 3: 'Van', + 4: 'Person_sitting', + } + class_to_range = { + 0: [0.5, 0.95, 10], + 1: [0.25, 0.7, 10], + 2: [0.25, 0.7, 10], + 3: [0.5, 0.95, 10], + 4: [0.25, 0.7, 10], + } + name_to_class = {v: n for n, v in class_to_name.items()} + if not isinstance(current_classes, (list, tuple)): + current_classes = [current_classes] + current_classes_int = [] + for curcls in current_classes: + if isinstance(curcls, str): + current_classes_int.append(name_to_class[curcls]) + else: + current_classes_int.append(curcls) + current_classes = current_classes_int + overlap_ranges = np.zeros([3, 3, len(current_classes)]) + for i, curcls in enumerate(current_classes): + overlap_ranges[:, :, i] = np.array(class_to_range[curcls])[:, + np.newaxis] + result = '' + # check whether alpha is valid + compute_aos = False + for anno in dt_annos: + if anno['alpha'].shape[0] != 0: + if anno['alpha'][0] != -10: + compute_aos = True + break + mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval( + gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos) + for j, curcls in enumerate(current_classes): + # mAP threshold array: [num_minoverlap, metric, class] + # mAP result: [num_class, num_diff, num_minoverlap] + o_range = np.array(class_to_range[curcls])[[0, 2, 1]] + o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1) + result += print_str((f'{class_to_name[curcls]} ' + 'coco AP@{:.2f}:{:.2f}:{:.2f}:'.format(*o_range))) + result += print_str((f'bbox AP:{mAPbbox[j, 0]:.2f}, ' + f'{mAPbbox[j, 1]:.2f}, ' + f'{mAPbbox[j, 2]:.2f}')) + result += print_str((f'bev AP:{mAPbev[j, 0]:.2f}, ' + f'{mAPbev[j, 1]:.2f}, ' + f'{mAPbev[j, 2]:.2f}')) + result += print_str((f'3d AP:{mAP3d[j, 0]:.2f}, ' + f'{mAP3d[j, 1]:.2f}, ' + f'{mAP3d[j, 2]:.2f}')) + if compute_aos: + result += print_str((f'aos AP:{mAPaos[j, 0]:.2f}, ' + f'{mAPaos[j, 1]:.2f}, ' + f'{mAPaos[j, 2]:.2f}')) + return result diff --git a/mmdet3d/evaluation/functional/kitti_utils/rotate_iou.py b/mmdet3d/evaluation/functional/kitti_utils/rotate_iou.py new file mode 100755 index 0000000..9ed75bf --- /dev/null +++ b/mmdet3d/evaluation/functional/kitti_utils/rotate_iou.py @@ -0,0 +1,379 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+##################### +# Based on https://github.com/hongzhenwang/RRPN-revise +# Licensed under The MIT License +# Author: yanyan, scrin@foxmail.com +##################### +import math + +import numba +import numpy as np +from numba import cuda + + +@numba.jit(nopython=True) +def div_up(m, n): + return m // n + (m % n > 0) + + +@cuda.jit(device=True, inline=True) +def trangle_area(a, b, c): + return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * + (b[0] - c[0])) / 2.0 + + +@cuda.jit(device=True, inline=True) +def area(int_pts, num_of_inter): + area_val = 0.0 + for i in range(num_of_inter - 2): + area_val += abs( + trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4], + int_pts[2 * i + 4:2 * i + 6])) + return area_val + + +@cuda.jit(device=True, inline=True) +def sort_vertex_in_convex_polygon(int_pts, num_of_inter): + if num_of_inter > 0: + center = cuda.local.array((2, ), dtype=numba.float32) + center[:] = 0.0 + for i in range(num_of_inter): + center[0] += int_pts[2 * i] + center[1] += int_pts[2 * i + 1] + center[0] /= num_of_inter + center[1] /= num_of_inter + v = cuda.local.array((2, ), dtype=numba.float32) + vs = cuda.local.array((16, ), dtype=numba.float32) + for i in range(num_of_inter): + v[0] = int_pts[2 * i] - center[0] + v[1] = int_pts[2 * i + 1] - center[1] + d = math.sqrt(v[0] * v[0] + v[1] * v[1]) + v[0] = v[0] / d + v[1] = v[1] / d + if v[1] < 0: + v[0] = -2 - v[0] + vs[i] = v[0] + j = 0 + temp = 0 + for i in range(1, num_of_inter): + if vs[i - 1] > vs[i]: + temp = vs[i] + tx = int_pts[2 * i] + ty = int_pts[2 * i + 1] + j = i + while j > 0 and vs[j - 1] > temp: + vs[j] = vs[j - 1] + int_pts[j * 2] = int_pts[j * 2 - 2] + int_pts[j * 2 + 1] = int_pts[j * 2 - 1] + j -= 1 + + vs[j] = temp + int_pts[j * 2] = tx + int_pts[j * 2 + 1] = ty + + +@cuda.jit(device=True, inline=True) +def line_segment_intersection(pts1, pts2, i, j, temp_pts): + A = cuda.local.array((2, ), dtype=numba.float32) + B = cuda.local.array((2, ), dtype=numba.float32) + C = cuda.local.array((2, ), dtype=numba.float32) + D = cuda.local.array((2, ), dtype=numba.float32) + + A[0] = pts1[2 * i] + A[1] = pts1[2 * i + 1] + + B[0] = pts1[2 * ((i + 1) % 4)] + B[1] = pts1[2 * ((i + 1) % 4) + 1] + + C[0] = pts2[2 * j] + C[1] = pts2[2 * j + 1] + + D[0] = pts2[2 * ((j + 1) % 4)] + D[1] = pts2[2 * ((j + 1) % 4) + 1] + BA0 = B[0] - A[0] + BA1 = B[1] - A[1] + DA0 = D[0] - A[0] + CA0 = C[0] - A[0] + DA1 = D[1] - A[1] + CA1 = C[1] - A[1] + acd = DA1 * CA0 > CA1 * DA0 + bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) + if acd != bcd: + abc = CA1 * BA0 > BA1 * CA0 + abd = DA1 * BA0 > BA1 * DA0 + if abc != abd: + DC0 = D[0] - C[0] + DC1 = D[1] - C[1] + ABBA = A[0] * B[1] - B[0] * A[1] + CDDC = C[0] * D[1] - D[0] * C[1] + DH = BA1 * DC0 - BA0 * DC1 + Dx = ABBA * DC0 - BA0 * CDDC + Dy = ABBA * DC1 - BA1 * CDDC + temp_pts[0] = Dx / DH + temp_pts[1] = Dy / DH + return True + return False + + +@cuda.jit(device=True, inline=True) +def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts): + a = cuda.local.array((2, ), dtype=numba.float32) + b = cuda.local.array((2, ), dtype=numba.float32) + c = cuda.local.array((2, ), dtype=numba.float32) + d = cuda.local.array((2, ), dtype=numba.float32) + + a[0] = pts1[2 * i] + a[1] = pts1[2 * i + 1] + + b[0] = pts1[2 * ((i + 1) % 4)] + b[1] = pts1[2 * ((i + 1) % 4) + 1] + + c[0] = pts2[2 * j] + c[1] = pts2[2 * j + 1] + + d[0] = pts2[2 * ((j + 1) % 4)] + d[1] = pts2[2 * ((j + 1) % 4) + 1] + + area_abc = trangle_area(a, b, c) + area_abd = trangle_area(a, b, d) + + if area_abc * 
area_abd >= 0: + return False + + area_cda = trangle_area(c, d, a) + area_cdb = area_cda + area_abc - area_abd + + if area_cda * area_cdb >= 0: + return False + t = area_cda / (area_abd - area_abc) + + dx = t * (b[0] - a[0]) + dy = t * (b[1] - a[1]) + temp_pts[0] = a[0] + dx + temp_pts[1] = a[1] + dy + return True + + +@cuda.jit(device=True, inline=True) +def point_in_quadrilateral(pt_x, pt_y, corners): + ab0 = corners[2] - corners[0] + ab1 = corners[3] - corners[1] + + ad0 = corners[6] - corners[0] + ad1 = corners[7] - corners[1] + + ap0 = pt_x - corners[0] + ap1 = pt_y - corners[1] + + abab = ab0 * ab0 + ab1 * ab1 + abap = ab0 * ap0 + ab1 * ap1 + adad = ad0 * ad0 + ad1 * ad1 + adap = ad0 * ap0 + ad1 * ap1 + + return abab >= abap and abap >= 0 and adad >= adap and adap >= 0 + + +@cuda.jit(device=True, inline=True) +def quadrilateral_intersection(pts1, pts2, int_pts): + num_of_inter = 0 + for i in range(4): + if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): + int_pts[num_of_inter * 2] = pts1[2 * i] + int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] + num_of_inter += 1 + if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): + int_pts[num_of_inter * 2] = pts2[2 * i] + int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] + num_of_inter += 1 + temp_pts = cuda.local.array((2, ), dtype=numba.float32) + for i in range(4): + for j in range(4): + has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts) + if has_pts: + int_pts[num_of_inter * 2] = temp_pts[0] + int_pts[num_of_inter * 2 + 1] = temp_pts[1] + num_of_inter += 1 + + return num_of_inter + + +@cuda.jit(device=True, inline=True) +def rbbox_to_corners(corners, rbbox): + # generate clockwise corners and rotate it clockwise + angle = rbbox[4] + a_cos = math.cos(angle) + a_sin = math.sin(angle) + center_x = rbbox[0] + center_y = rbbox[1] + x_d = rbbox[2] + y_d = rbbox[3] + corners_x = cuda.local.array((4, ), dtype=numba.float32) + corners_y = cuda.local.array((4, ), dtype=numba.float32) + corners_x[0] = -x_d / 2 + corners_x[1] = -x_d / 2 + corners_x[2] = x_d / 2 + corners_x[3] = x_d / 2 + corners_y[0] = -y_d / 2 + corners_y[1] = y_d / 2 + corners_y[2] = y_d / 2 + corners_y[3] = -y_d / 2 + for i in range(4): + corners[2 * i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x + corners[2 * i + + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y + + +@cuda.jit(device=True, inline=True) +def inter(rbbox1, rbbox2): + """Compute intersection of two rotated boxes. + + Args: + rbox1 (np.ndarray, shape=[5]): Rotated 2d box. + rbox2 (np.ndarray, shape=[5]): Rotated 2d box. + + Returns: + float: Intersection of two rotated boxes. + """ + corners1 = cuda.local.array((8, ), dtype=numba.float32) + corners2 = cuda.local.array((8, ), dtype=numba.float32) + intersection_corners = cuda.local.array((16, ), dtype=numba.float32) + + rbbox_to_corners(corners1, rbbox1) + rbbox_to_corners(corners2, rbbox2) + + num_intersection = quadrilateral_intersection(corners1, corners2, + intersection_corners) + sort_vertex_in_convex_polygon(intersection_corners, num_intersection) + # print(intersection_corners.reshape([-1, 2])[:num_intersection]) + + return area(intersection_corners, num_intersection) + + +@cuda.jit(device=True, inline=True) +def devRotateIoUEval(rbox1, rbox2, criterion=-1): + """Compute rotated iou on device. + + Args: + rbox1 (np.ndarray, shape=[5]): Rotated 2d box. + rbox2 (np.ndarray, shape=[5]): Rotated 2d box. + criterion (int, optional): Indicate different type of iou. 
+ -1 indicate `area_inter / (area1 + area2 - area_inter)`, + 0 indicate `area_inter / area1`, + 1 indicate `area_inter / area2`. + + Returns: + float: iou between two input boxes. + """ + area1 = rbox1[2] * rbox1[3] + area2 = rbox2[2] * rbox2[3] + area_inter = inter(rbox1, rbox2) + if criterion == -1: + return area_inter / (area1 + area2 - area_inter) + elif criterion == 0: + return area_inter / area1 + elif criterion == 1: + return area_inter / area2 + else: + return area_inter + + +@cuda.jit( + '(int64, int64, float32[:], float32[:], float32[:], int32)', + fastmath=False) +def rotate_iou_kernel_eval(N, + K, + dev_boxes, + dev_query_boxes, + dev_iou, + criterion=-1): + """Kernel of computing rotated IoU. This function is for bev boxes in + camera coordinate system ONLY (the rotation is clockwise). + + Args: + N (int): The number of boxes. + K (int): The number of query boxes. + dev_boxes (np.ndarray): Boxes on device. + dev_query_boxes (np.ndarray): Query boxes on device. + dev_iou (np.ndarray): Computed iou to return. + criterion (int, optional): Indicate different type of iou. + -1 indicate `area_inter / (area1 + area2 - area_inter)`, + 0 indicate `area_inter / area1`, + 1 indicate `area_inter / area2`. + """ + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.x + col_start = cuda.blockIdx.y + tx = cuda.threadIdx.x + row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + + dev_query_box_idx = threadsPerBlock * col_start + tx + dev_box_idx = threadsPerBlock * row_start + tx + if (tx < col_size): + block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] + block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] + block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] + block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] + block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] + if (tx < row_size): + block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] + block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] + block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] + block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] + block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] + cuda.syncthreads() + if tx < row_size: + for i in range(col_size): + offset = ( + row_start * threadsPerBlock * K + col_start * threadsPerBlock + + tx * K + i) + dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5], + block_boxes[tx * 5:tx * 5 + 5], + criterion) + + +def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): + """Rotated box iou running in gpu. 500x faster than cpu version (take 5ms + in one example with numba.cuda code). convert from [this project]( + https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). + + This function is for bev boxes in camera coordinate system ONLY + (the rotation is clockwise). + + Args: + boxes (torch.Tensor): rbboxes. format: centers, dims, + angles(clockwise when positive) with the shape of [N, 5]. + query_boxes (torch.FloatTensor, shape=(K, 5)): + rbboxes to compute iou with boxes. + device_id (int, optional): Defaults to 0. Device to use. + criterion (int, optional): Indicate different type of iou. 
+ -1 indicate `area_inter / (area1 + area2 - area_inter)`, + 0 indicate `area_inter / area1`, + 1 indicate `area_inter / area2`. + + Returns: + np.ndarray: IoU results. + """ + boxes = boxes.astype(np.float32) + query_boxes = query_boxes.astype(np.float32) + N = boxes.shape[0] + K = query_boxes.shape[0] + iou = np.zeros((N, K), dtype=np.float32) + if N == 0 or K == 0: + return iou + threadsPerBlock = 8 * 8 + cuda.select_device(device_id) + blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) + + stream = cuda.stream() + with stream.auto_synchronize(): + boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) + query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) + iou_dev = cuda.to_device(iou.reshape([-1]), stream) + rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, + stream](N, K, boxes_dev, query_boxes_dev, + iou_dev, criterion) + iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) + return iou.astype(boxes.dtype) diff --git a/mmdet3d/evaluation/functional/lyft_eval.py b/mmdet3d/evaluation/functional/lyft_eval.py new file mode 100755 index 0000000..72a1156 --- /dev/null +++ b/mmdet3d/evaluation/functional/lyft_eval.py @@ -0,0 +1,285 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from os import path as osp + +import mmengine +import numpy as np +from lyft_dataset_sdk.eval.detection.mAP_evaluation import (Box3D, get_ap, + get_class_names, + get_ious, + group_by_key, + wrap_in_box) +from mmengine.logging import print_log +from terminaltables import AsciiTable + + +def load_lyft_gts(lyft, data_root, eval_split, logger=None): + """Loads ground truth boxes from database. + + Args: + lyft (:obj:`LyftDataset`): Lyft class in the sdk. + data_root (str): Root of data for reading splits. + eval_split (str): Name of the split for evaluation. + logger (logging.Logger | str, optional): Logger used for printing + related information during evaluation. Default: None. + + Returns: + list[dict]: List of annotation dictionaries. + """ + split_scenes = mmengine.list_from_file( + osp.join(data_root, f'{eval_split}.txt')) + + # Read out all sample_tokens in DB. + sample_tokens_all = [s['token'] for s in lyft.sample] + assert len(sample_tokens_all) > 0, 'Error: Database has no samples!' + + if eval_split == 'test': + # Check that you aren't trying to cheat :) + assert len(lyft.sample_annotation) > 0, \ + 'Error: You are trying to evaluate on the test set \ + but you do not have the annotations!' + + sample_tokens = [] + for sample_token in sample_tokens_all: + scene_token = lyft.get('sample', sample_token)['scene_token'] + scene_record = lyft.get('scene', scene_token) + if scene_record['name'] in split_scenes: + sample_tokens.append(sample_token) + + all_annotations = [] + + print_log('Loading ground truth annotations...', logger=logger) + # Load annotations and filter predictions and annotations. + for sample_token in mmengine.track_iter_progress(sample_tokens): + sample = lyft.get('sample', sample_token) + sample_annotation_tokens = sample['anns'] + for sample_annotation_token in sample_annotation_tokens: + # Get label name in detection task and filter unused labels. 
+ sample_annotation = \ + lyft.get('sample_annotation', sample_annotation_token) + detection_name = sample_annotation['category_name'] + if detection_name is None: + continue + annotation = { + 'sample_token': sample_token, + 'translation': sample_annotation['translation'], + 'size': sample_annotation['size'], + 'rotation': sample_annotation['rotation'], + 'name': detection_name, + } + all_annotations.append(annotation) + + return all_annotations + + +def load_lyft_predictions(res_path): + """Load Lyft predictions from json file. + + Args: + res_path (str): Path of result json file recording detections. + + Returns: + list[dict]: List of prediction dictionaries. + """ + predictions = mmengine.load(res_path) + predictions = predictions['results'] + all_preds = [] + for sample_token in predictions.keys(): + all_preds.extend(predictions[sample_token]) + return all_preds + + +def lyft_eval(lyft, data_root, res_path, eval_set, output_dir, logger=None): + """Evaluation API for Lyft dataset. + + Args: + lyft (:obj:`LyftDataset`): Lyft class in the sdk. + data_root (str): Root of data for reading splits. + res_path (str): Path of result json file recording detections. + eval_set (str): Name of the split for evaluation. + output_dir (str): Output directory for output json files. + logger (logging.Logger | str, optional): Logger used for printing + related information during evaluation. Default: None. + + Returns: + dict[str, float]: The evaluation results. + """ + # evaluate by lyft metrics + gts = load_lyft_gts(lyft, data_root, eval_set, logger) + predictions = load_lyft_predictions(res_path) + + class_names = get_class_names(gts) + print('Calculating mAP@0.5:0.95...') + + iou_thresholds = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95] + metrics = {} + average_precisions = \ + get_classwise_aps(gts, predictions, class_names, iou_thresholds) + APs_data = [['IOU', 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]] + + mAPs = np.mean(average_precisions, axis=0) + mAPs_cate = np.mean(average_precisions, axis=1) + final_mAP = np.mean(mAPs) + + metrics['average_precisions'] = average_precisions.tolist() + metrics['mAPs'] = mAPs.tolist() + metrics['Final mAP'] = float(final_mAP) + metrics['class_names'] = class_names + metrics['mAPs_cate'] = mAPs_cate.tolist() + + APs_data = [['class', 'mAP@0.5:0.95']] + for i in range(len(class_names)): + row = [class_names[i], round(mAPs_cate[i], 3)] + APs_data.append(row) + APs_data.append(['Overall', round(final_mAP, 3)]) + APs_table = AsciiTable(APs_data, title='mAPs@0.5:0.95') + APs_table.inner_footing_row_border = True + print_log(APs_table.table, logger=logger) + + res_path = osp.join(output_dir, 'lyft_metrics.json') + mmengine.dump(metrics, res_path) + return metrics + + +def get_classwise_aps(gt, predictions, class_names, iou_thresholds): + """Returns an array with an average precision per class. + + Note: Ground truth and predictions should have the following format. + + .. 
code-block:: + + gt = [{ + 'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207 + fbb039a550991a5149214f98cec136ac', + 'translation': [974.2811881299899, 1714.6815014457964, + -23.689857123368846], + 'size': [1.796, 4.488, 1.664], + 'rotation': [0.14882026466054782, 0, 0, 0.9888642620837121], + 'name': 'car' + }] + + predictions = [{ + 'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207 + fbb039a550991a5149214f98cec136ac', + 'translation': [971.8343488872263, 1713.6816097857359, + -25.82534357061308], + 'size': [2.519726579986132, 7.810161372666739, 3.483438286096803], + 'rotation': [0.10913582721095375, 0.04099572636992043, + 0.01927712319721745, 1.029328402625659], + 'name': 'car', + 'score': 0.3077029437237213 + }] + + Args: + gt (list[dict]): list of dictionaries in the format described below. + predictions (list[dict]): list of dictionaries in the format + described below. + class_names (list[str]): list of the class names. + iou_thresholds (list[float]): IOU thresholds used to calculate + TP / FN + + Returns: + np.ndarray: an array with an average precision per class. + """ + assert all([0 <= iou_th <= 1 for iou_th in iou_thresholds]) + + gt_by_class_name = group_by_key(gt, 'name') + pred_by_class_name = group_by_key(predictions, 'name') + + average_precisions = np.zeros((len(class_names), len(iou_thresholds))) + + for class_id, class_name in enumerate(class_names): + if class_name in pred_by_class_name: + recalls, precisions, average_precision = get_single_class_aps( + gt_by_class_name[class_name], pred_by_class_name[class_name], + iou_thresholds) + average_precisions[class_id, :] = average_precision + + return average_precisions + + +def get_single_class_aps(gt, predictions, iou_thresholds): + """Compute recall and precision for all iou thresholds. Adapted from + LyftDatasetDevkit. + + Args: + gt (list[dict]): list of dictionaries in the format described above. + predictions (list[dict]): list of dictionaries in the format + described below. + iou_thresholds (list[float]): IOU thresholds used to calculate + TP / FN + + Returns: + tuple[np.ndarray]: Returns (recalls, precisions, average precisions) + for each class. 
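+
+    Note:
+        Predictions are sorted by score in descending order and matched
+        greedily; each ground truth box can be matched at most once per
+        IoU threshold, and further detections of it count as false
+        positives.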
+ """ + num_gts = len(gt) + image_gts = group_by_key(gt, 'sample_token') + image_gts = wrap_in_box(image_gts) + + sample_gt_checked = { + sample_token: np.zeros((len(boxes), len(iou_thresholds))) + for sample_token, boxes in image_gts.items() + } + + predictions = sorted(predictions, key=lambda x: x['score'], reverse=True) + + # go down dets and mark TPs and FPs + num_predictions = len(predictions) + tps = np.zeros((num_predictions, len(iou_thresholds))) + fps = np.zeros((num_predictions, len(iou_thresholds))) + + for prediction_index, prediction in enumerate(predictions): + predicted_box = Box3D(**prediction) + + sample_token = prediction['sample_token'] + + max_overlap = -np.inf + jmax = -1 + + if sample_token in image_gts: + gt_boxes = image_gts[sample_token] + # gt_boxes per sample + gt_checked = sample_gt_checked[sample_token] + # gt flags per sample + else: + gt_boxes = [] + gt_checked = None + + if len(gt_boxes) > 0: + overlaps = get_ious(gt_boxes, predicted_box) + + max_overlap = np.max(overlaps) + + jmax = np.argmax(overlaps) + + for i, iou_threshold in enumerate(iou_thresholds): + if max_overlap > iou_threshold: + if gt_checked[jmax, i] == 0: + tps[prediction_index, i] = 1.0 + gt_checked[jmax, i] = 1 + else: + fps[prediction_index, i] = 1.0 + else: + fps[prediction_index, i] = 1.0 + + # compute precision recall + fps = np.cumsum(fps, axis=0) + tps = np.cumsum(tps, axis=0) + + recalls = tps / float(num_gts) + # avoid divide by zero in case the first detection + # matches a difficult ground truth + precisions = tps / np.maximum(tps + fps, np.finfo(np.float64).eps) + + aps = [] + for i in range(len(iou_thresholds)): + recall = recalls[:, i] + precision = precisions[:, i] + assert np.all(0 <= recall) & np.all(recall <= 1) + assert np.all(0 <= precision) & np.all(precision <= 1) + ap = get_ap(recall, precision) + aps.append(ap) + + aps = np.array(aps) + + return recalls, precisions, aps diff --git a/mmdet3d/evaluation/functional/panoptic_seg_eval.py b/mmdet3d/evaluation/functional/panoptic_seg_eval.py new file mode 100755 index 0000000..6029b73 --- /dev/null +++ b/mmdet3d/evaluation/functional/panoptic_seg_eval.py @@ -0,0 +1,387 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Tuple + +import numpy as np +from mmengine.logging import MMLogger, print_log + +PQReturnsType = Tuple[np.double, np.double, np.ndarray, np.ndarray, np.ndarray] + + +class EvalPanoptic: + r"""Evaluate panoptic results for Semantickitti and NuScenes. + Please refer to the `semantic kitti api + `_ for more details + + Args: + classes (list): Classes used in the dataset. + thing_classes (list): Thing classes used in the dataset. + stuff_classes (list): Stuff classes used in the dataset. + min_num_points (int): Minimum number of points of an object to be + counted as ground truth in evaluation. + id_offset (int): Offset for instance ids to concat with + semantic labels. + label2cat (dict[str]): Mapping from label to category. + ignore_index (list[int]): Indices of ignored classes in evaluation. + logger (logging.Logger | str, optional): Logger used for printing. + Defaults to None. 
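+
+    Example:
+        >>> # A minimal sketch with hypothetical classes and parameters;
+        >>> # ``gt_labels`` / ``seg_preds`` are placeholder lists of dicts
+        >>> # holding 'pts_semantic_mask' and 'pts_instance_mask' arrays.
+        >>> evaluator = EvalPanoptic(
+        ...     classes=['car', 'road'], thing_classes=['car'],
+        ...     stuff_classes=['road'], min_num_points=50, id_offset=2**16,
+        ...     label2cat={'0': 'car', '1': 'road'}, ignore_index=[])
+        >>> metrics = evaluator.evaluate(gt_labels, seg_preds)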
+ """ + + def __init__(self, + classes: List[str], + thing_classes: List[str], + stuff_classes: List[str], + min_num_points: int, + id_offset: int, + label2cat: Dict[str, str], + ignore_index: List[str], + logger: MMLogger = None): + self.classes = classes + self.thing_classes = thing_classes + self.stuff_classes = stuff_classes + self.ignore_index = np.array(ignore_index, dtype=int) + self.num_classes = len(classes) + self.label2cat = label2cat + self.logger = logger + self.include = np.array( + [n for n in range(self.num_classes) if n not in self.ignore_index], + dtype=int) + self.id_offset = id_offset + self.eps = 1e-15 + self.min_num_points = min_num_points + self.reset() + + def reset(self): + """Reset class variables.""" + # general things + # iou stuff + self.confusion_matrix = np.zeros((self.num_classes, self.num_classes), + dtype=int) + # panoptic stuff + self.pan_tp = np.zeros(self.num_classes, dtype=int) + self.pan_iou = np.zeros(self.num_classes, dtype=np.double) + self.pan_fp = np.zeros(self.num_classes, dtype=int) + self.pan_fn = np.zeros(self.num_classes, dtype=int) + + self.evaluated_fnames = [] + + def evaluate(self, gt_labels: List[Dict[str, np.ndarray]], + seg_preds: List[Dict[str, np.ndarray]]) -> Dict[str, float]: + """Evaluate the predictions. + + Args: + gt_labels (list[dict[np.ndarray]]): Ground Truth. + seg_preds (list[dict[np.ndarray]]): Predictions. + + Returns: + dict[float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + assert len(seg_preds) == len(gt_labels) + for f in range(len(seg_preds)): + gt_semantic_seg = gt_labels[f]['pts_semantic_mask'].astype(int) + gt_instance_seg = gt_labels[f]['pts_instance_mask'].astype(int) + pred_semantic_seg = seg_preds[f]['pts_semantic_mask'].astype(int) + pred_instance_seg = seg_preds[f]['pts_instance_mask'].astype(int) + + self.add_semantic_sample(pred_semantic_seg, gt_semantic_seg) + self.add_panoptic_sample(pred_semantic_seg, gt_semantic_seg, + pred_instance_seg, gt_instance_seg) + + result_dicts = self.print_results() + + return result_dicts + + def print_results(self) -> Dict[str, float]: + """Print results. + + Returns: + dict[float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. 
+ """ + pq, sq, rq, all_pq, all_sq, all_rq = self.get_pq() + miou, iou = self.get_iou() + + # now make a nice dictionary + output_dict = {} + + # make python variables + pq = pq.item() + sq = sq.item() + rq = rq.item() + all_pq = all_pq.flatten().tolist() + all_sq = all_sq.flatten().tolist() + all_rq = all_rq.flatten().tolist() + miou = miou.item() + iou = iou.flatten().tolist() + + output_dict['all'] = {} + output_dict['all']['pq'] = pq + output_dict['all']['sq'] = sq + output_dict['all']['rq'] = rq + output_dict['all']['miou'] = miou + for idx, (_pq, _sq, _rq, + _iou) in enumerate(zip(all_pq, all_sq, all_rq, iou)): + class_str = self.classes[idx] + output_dict[class_str] = {} + output_dict[class_str]['pq'] = _pq + output_dict[class_str]['sq'] = _sq + output_dict[class_str]['rq'] = _rq + output_dict[class_str]['miou'] = _iou + + pq_dagger = np.mean( + [float(output_dict[c]['pq']) for c in self.thing_classes] + + [float(output_dict[c]['miou']) for c in self.stuff_classes]) + + pq_things = np.mean( + [float(output_dict[c]['pq']) for c in self.thing_classes]) + rq_things = np.mean( + [float(output_dict[c]['rq']) for c in self.thing_classes]) + sq_things = np.mean( + [float(output_dict[c]['sq']) for c in self.thing_classes]) + + pq_stuff = np.mean( + [float(output_dict[c]['pq']) for c in self.stuff_classes]) + rq_stuff = np.mean( + [float(output_dict[c]['rq']) for c in self.stuff_classes]) + sq_stuff = np.mean( + [float(output_dict[c]['sq']) for c in self.stuff_classes]) + + result_dicts = {} + result_dicts['pq'] = float(pq) + result_dicts['pq_dagger'] = float(pq_dagger) + result_dicts['sq_mean'] = float(sq) + result_dicts['rq_mean'] = float(rq) + result_dicts['miou'] = float(miou) + result_dicts['pq_stuff'] = float(pq_stuff) + result_dicts['rq_stuff'] = float(rq_stuff) + result_dicts['sq_stuff'] = float(sq_stuff) + result_dicts['pq_things'] = float(pq_things) + result_dicts['rq_things'] = float(rq_things) + result_dicts['sq_things'] = float(sq_things) + + if self.logger is not None: + print_log('| | IoU | PQ | RQ | SQ |', + self.logger) + for k, v in output_dict.items(): + print_log( + '|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format( + k.ljust(8)[-8:], v['miou'], v['pq'], v['rq'], v['sq']), + self.logger) + print_log('True Positive: ', self.logger) + print_log('\t|\t'.join([str(x) for x in self.pan_tp]), self.logger) + print_log('False Positive: ') + print_log('\t|\t'.join([str(x) for x in self.pan_fp]), self.logger) + print_log('False Negative: ') + print_log('\t|\t'.join([str(x) for x in self.pan_fn]), self.logger) + + else: + print('| | IoU | PQ | RQ | SQ |') + for k, v in output_dict.items(): + print('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format( + k.ljust(8)[-8:], v['miou'], v['pq'], v['rq'], v['sq'])) + print('True Positive: ') + print('\t|\t'.join([str(x) for x in self.pan_tp])) + print('False Positive: ') + print('\t|\t'.join([str(x) for x in self.pan_fp])) + print('False Negative: ') + print('\t|\t'.join([str(x) for x in self.pan_fn])) + + return result_dicts + + def get_pq(self) -> PQReturnsType: + """Get results of PQ metric. + + Returns: + tuple(np.ndarray): PQ, SQ, RQ of each class and all class. 
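+
+        Note:
+            Per class, ``SQ = sum(IoU of TPs) / TP``,
+            ``RQ = TP / (TP + 0.5 * FP + 0.5 * FN)`` and ``PQ = SQ * RQ``;
+            the scalar values are means over the non-ignored classes.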
+ """ + # get PQ and first calculate for all classes + sq_all = self.pan_iou.astype(np.double) / np.maximum( + self.pan_tp.astype(np.double), self.eps) + rq_all = self.pan_tp.astype(np.double) / np.maximum( + self.pan_tp.astype(np.double) + 0.5 * self.pan_fp.astype(np.double) + + 0.5 * self.pan_fn.astype(np.double), self.eps) + pq_all = sq_all * rq_all + + # then do the REAL mean (no ignored classes) + sq = sq_all[self.include].mean() + rq = rq_all[self.include].mean() + pq = pq_all[self.include].mean() + + return (pq, sq, rq, pq_all, sq_all, rq_all) + + def get_iou(self) -> Tuple[np.double, np.ndarray]: + """Get results of IOU metric. + + Returns: + tuple(np.ndarray): iou of all class and each class. + """ + tp, fp, fn = self.get_iou_stats() + intersection = tp + union = tp + fp + fn + union = np.maximum(union, self.eps) + iou = intersection.astype(np.double) / union.astype(np.double) + iou_mean = (intersection[self.include].astype(np.double) / + union[self.include].astype(np.double)).mean() + + return iou_mean, iou + + def get_iou_stats(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Get IOU statistics of TP, FP and FN. + + Returns: + tuple(np.ndarray): TP, FP, FN of all class. + """ + # copy to avoid modifying the real deal + conf = self.confusion_matrix.copy().astype(np.double) + # remove fp from confusion on the ignore classes predictions + # points that were predicted of another class, but were ignore + # (corresponds to zeroing the cols of those classes, + # since the predictions go on the rows) + conf[:, self.ignore_index] = 0 + + # get the clean stats + tp = conf.diagonal() + fp = conf.sum(axis=1) - tp + fn = conf.sum(axis=0) - tp + return tp, fp, fn + + def add_semantic_sample(self, semantic_preds: np.ndarray, + gt_semantics: np.ndarray): + """Add one batch of semantic predictions and ground truths. + + Args: + semantic_preds (np.ndarray): Semantic predictions. + gt_semantics (np.ndarray): Semantic ground truths. + """ + idxs = np.stack([semantic_preds, gt_semantics], axis=0) + # make confusion matrix (cols = gt, rows = pred) + np.add.at(self.confusion_matrix, tuple(idxs), 1) + + def add_panoptic_sample(self, semantic_preds: np.ndarray, + gt_semantics: np.ndarray, + instance_preds: np.ndarray, + gt_instances: np.ndarray): + """Add one sample of panoptic predictions and ground truths for + evaluation. + + Args: + semantic_preds (np.ndarray): Semantic predictions. + gt_semantics (np.ndarray): Semantic ground truths. + instance_preds (np.ndarray): Instance predictions. + gt_instances (np.ndarray): Instance ground truths. 
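+
+        Note:
+            Within each non-ignored semantic class, predicted and ground
+            truth instances are matched by overlap; pairs with IoU > 0.5
+            are counted as true positives.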
+ """ + # avoid zero (ignored label) + instance_preds = instance_preds + 1 + gt_instances = gt_instances + 1 + + # only interested in points that are + # outside the void area (not in excluded classes) + for cl in self.ignore_index: + # make a mask for this class + gt_not_in_excl_mask = gt_semantics != cl + # remove all other points + semantic_preds = semantic_preds[gt_not_in_excl_mask] + gt_semantics = gt_semantics[gt_not_in_excl_mask] + instance_preds = instance_preds[gt_not_in_excl_mask] + gt_instances = gt_instances[gt_not_in_excl_mask] + + # first step is to count intersections > 0.5 IoU + # for each class (except the ignored ones) + for cl in self.include: + # get a class mask + pred_inst_in_cl_mask = semantic_preds == cl + gt_inst_in_cl_mask = gt_semantics == cl + + # get instance points in class (makes outside stuff 0) + pred_inst_in_cl = instance_preds * pred_inst_in_cl_mask.astype(int) + gt_inst_in_cl = gt_instances * gt_inst_in_cl_mask.astype(int) + + # generate the areas for each unique instance prediction + unique_pred, counts_pred = np.unique( + pred_inst_in_cl[pred_inst_in_cl > 0], return_counts=True) + id2idx_pred = {id: idx for idx, id in enumerate(unique_pred)} + matched_pred = np.array([False] * unique_pred.shape[0]) + + # generate the areas for each unique instance gt_np + unique_gt, counts_gt = np.unique( + gt_inst_in_cl[gt_inst_in_cl > 0], return_counts=True) + id2idx_gt = {id: idx for idx, id in enumerate(unique_gt)} + matched_gt = np.array([False] * unique_gt.shape[0]) + + # generate intersection using offset + valid_combos = np.logical_and(pred_inst_in_cl > 0, + gt_inst_in_cl > 0) + id_offset_combo = pred_inst_in_cl[ + valid_combos] + self.id_offset * gt_inst_in_cl[valid_combos] + unique_combo, counts_combo = np.unique( + id_offset_combo, return_counts=True) + + # generate an intersection map + # count the intersections with over 0.5 IoU as TP + gt_labels = unique_combo // self.id_offset + pred_labels = unique_combo % self.id_offset + gt_areas = np.array([counts_gt[id2idx_gt[id]] for id in gt_labels]) + pred_areas = np.array( + [counts_pred[id2idx_pred[id]] for id in pred_labels]) + intersections = counts_combo + unions = gt_areas + pred_areas - intersections + ious = intersections.astype(float) / unions.astype(float) + + tp_indexes = ious > 0.5 + self.pan_tp[cl] += np.sum(tp_indexes) + self.pan_iou[cl] += np.sum(ious[tp_indexes]) + + matched_gt[[id2idx_gt[id] for id in gt_labels[tp_indexes]]] = True + matched_pred[[id2idx_pred[id] + for id in pred_labels[tp_indexes]]] = True + + # count the FN + if len(counts_gt) > 0: + self.pan_fn[cl] += np.sum( + np.logical_and(counts_gt >= self.min_num_points, + ~matched_gt)) + + # count the FP + if len(matched_pred) > 0: + self.pan_fp[cl] += np.sum( + np.logical_and(counts_pred >= self.min_num_points, + ~matched_pred)) + + +def panoptic_seg_eval(gt_labels: List[np.ndarray], + seg_preds: List[np.ndarray], + classes: List[str], + thing_classes: List[str], + stuff_classes: List[str], + min_num_points: int, + id_offset: int, + label2cat: Dict[str, str], + ignore_index: List[int], + logger: MMLogger = None) -> Dict[str, float]: + """Panoptic Segmentation Evaluation. + + Evaluate the result of the panoptic segmentation. + + Args: + gt_labels (list[dict[np.ndarray]]): Ground Truth. + seg_preds (list[dict[np.ndarray]]): Predictions. + classes (list[str]): Classes used in the dataset. + thing_classes (list[str]): Thing classes used in the dataset. + stuff_classes (list[str]): Stuff classes used in the dataset. 
+ min_num_points (int): Minimum point number of object to be + counted as ground truth in evaluation. + id_offset (int): Offset for instance ids to concat with + semantic labels. + label2cat (dict[str]): Mapping from label to category. + ignore_index (list[int]): Indices of ignored classes in evaluation. + logger (logging.Logger | str, optional): Logger used for printing. + Defaults to None. + + Returns: + dict[float]: Dict of results. + """ + panoptic_seg_eval = EvalPanoptic(classes, thing_classes, stuff_classes, + min_num_points, id_offset, label2cat, + ignore_index, logger) + ret_dict = panoptic_seg_eval.evaluate(gt_labels, seg_preds) + return ret_dict diff --git a/mmdet3d/evaluation/functional/scannet_utils/__init__.py b/mmdet3d/evaluation/functional/scannet_utils/__init__.py new file mode 100755 index 0000000..c98ea83 --- /dev/null +++ b/mmdet3d/evaluation/functional/scannet_utils/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .evaluate_semantic_instance import evaluate_matches, scannet_eval + +__all__ = ['scannet_eval', 'evaluate_matches'] diff --git a/mmdet3d/evaluation/functional/scannet_utils/evaluate_semantic_instance.py b/mmdet3d/evaluation/functional/scannet_utils/evaluate_semantic_instance.py new file mode 100755 index 0000000..2b15747 --- /dev/null +++ b/mmdet3d/evaluation/functional/scannet_utils/evaluate_semantic_instance.py @@ -0,0 +1,347 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# adapted from https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts/3d_evaluation/evaluate_semantic_instance.py # noqa +from copy import deepcopy + +import numpy as np + +from . import util_3d + + +def evaluate_matches(matches, class_labels, options): + """Evaluate instance segmentation from matched gt and predicted instances + for all scenes. + + Args: + matches (dict): Contains gt2pred and pred2gt infos for every scene. + class_labels (tuple[str]): Class names. + options (dict): ScanNet evaluator options. See get_options. + + Returns: + np.array: Average precision scores for all thresholds and categories. 
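+
+    Note:
+        The returned array has shape
+        ``(num_distance_thresholds, num_classes, num_overlap_thresholds)``.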
+ """ + overlaps = options['overlaps'] + min_region_sizes = [options['min_region_sizes'][0]] + dist_threshes = [options['distance_threshes'][0]] + dist_confs = [options['distance_confs'][0]] + + # results: class x overlap + ap = np.zeros((len(dist_threshes), len(class_labels), len(overlaps)), + np.float) + for di, (min_region_size, distance_thresh, distance_conf) in enumerate( + zip(min_region_sizes, dist_threshes, dist_confs)): + for oi, overlap_th in enumerate(overlaps): + pred_visited = {} + for m in matches: + for label_name in class_labels: + for p in matches[m]['pred'][label_name]: + if 'filename' in p: + pred_visited[p['filename']] = False + for li, label_name in enumerate(class_labels): + y_true = np.empty(0) + y_score = np.empty(0) + hard_false_negatives = 0 + has_gt = False + has_pred = False + for m in matches: + pred_instances = matches[m]['pred'][label_name] + gt_instances = matches[m]['gt'][label_name] + # filter groups in ground truth + gt_instances = [ + gt for gt in gt_instances + if gt['instance_id'] >= 1000 and gt['vert_count'] >= + min_region_size and gt['med_dist'] <= distance_thresh + and gt['dist_conf'] >= distance_conf + ] + if gt_instances: + has_gt = True + if pred_instances: + has_pred = True + + cur_true = np.ones(len(gt_instances)) + cur_score = np.ones(len(gt_instances)) * (-float('inf')) + cur_match = np.zeros(len(gt_instances), dtype=bool) + # collect matches + for (gti, gt) in enumerate(gt_instances): + found_match = False + for pred in gt['matched_pred']: + # greedy assignments + if pred_visited[pred['filename']]: + continue + overlap = float(pred['intersection']) / ( + gt['vert_count'] + pred['vert_count'] - + pred['intersection']) + if overlap > overlap_th: + confidence = pred['confidence'] + # if already have a prediction for this gt, + # the prediction with the lower score is automatically a false positive # noqa + if cur_match[gti]: + max_score = max(cur_score[gti], confidence) + min_score = min(cur_score[gti], confidence) + cur_score[gti] = max_score + # append false positive + cur_true = np.append(cur_true, 0) + cur_score = np.append(cur_score, min_score) + cur_match = np.append(cur_match, True) + # otherwise set score + else: + found_match = True + cur_match[gti] = True + cur_score[gti] = confidence + pred_visited[pred['filename']] = True + if not found_match: + hard_false_negatives += 1 + # remove non-matched ground truth instances + cur_true = cur_true[cur_match] + cur_score = cur_score[cur_match] + + # collect non-matched predictions as false positive + for pred in pred_instances: + found_gt = False + for gt in pred['matched_gt']: + overlap = float(gt['intersection']) / ( + gt['vert_count'] + pred['vert_count'] - + gt['intersection']) + if overlap > overlap_th: + found_gt = True + break + if not found_gt: + num_ignore = pred['void_intersection'] + for gt in pred['matched_gt']: + # group? 
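+                                # instance ids below 1000 mark group
+                                # annotations; their overlap is added to
+                                # the ignored area rather than counted as
+                                # an error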
+ if gt['instance_id'] < 1000: + num_ignore += gt['intersection'] + # small ground truth instances + if gt['vert_count'] < min_region_size or gt[ + 'med_dist'] > distance_thresh or gt[ + 'dist_conf'] < distance_conf: + num_ignore += gt['intersection'] + proportion_ignore = float( + num_ignore) / pred['vert_count'] + # if not ignored append false positive + if proportion_ignore <= overlap_th: + cur_true = np.append(cur_true, 0) + confidence = pred['confidence'] + cur_score = np.append(cur_score, confidence) + + # append to overall results + y_true = np.append(y_true, cur_true) + y_score = np.append(y_score, cur_score) + + # compute average precision + if has_gt and has_pred: + # compute precision recall curve first + + # sorting and cumsum + score_arg_sort = np.argsort(y_score) + y_score_sorted = y_score[score_arg_sort] + y_true_sorted = y_true[score_arg_sort] + y_true_sorted_cumsum = np.cumsum(y_true_sorted) + + # unique thresholds + (thresholds, unique_indices) = np.unique( + y_score_sorted, return_index=True) + num_prec_recall = len(unique_indices) + 1 + + # prepare precision recall + num_examples = len(y_score_sorted) + # follow https://github.com/ScanNet/ScanNet/pull/26 ? # noqa + num_true_examples = y_true_sorted_cumsum[-1] if len( + y_true_sorted_cumsum) > 0 else 0 + precision = np.zeros(num_prec_recall) + recall = np.zeros(num_prec_recall) + + # deal with the first point + y_true_sorted_cumsum = np.append(y_true_sorted_cumsum, 0) + # deal with remaining + for idx_res, idx_scores in enumerate(unique_indices): + cumsum = y_true_sorted_cumsum[idx_scores - 1] + tp = num_true_examples - cumsum + fp = num_examples - idx_scores - tp + fn = cumsum + hard_false_negatives + p = float(tp) / (tp + fp) + r = float(tp) / (tp + fn) + precision[idx_res] = p + recall[idx_res] = r + + # first point in curve is artificial + precision[-1] = 1. + recall[-1] = 0. + + # compute average of precision-recall curve + recall_for_conv = np.copy(recall) + recall_for_conv = np.append(recall_for_conv[0], + recall_for_conv) + recall_for_conv = np.append(recall_for_conv, 0.) + + stepWidths = np.convolve(recall_for_conv, [-0.5, 0, 0.5], + 'valid') + # integrate is now simply a dot product + ap_current = np.dot(precision, stepWidths) + + elif has_gt: + ap_current = 0.0 + else: + ap_current = float('nan') + ap[di, li, oi] = ap_current + return ap + + +def compute_averages(aps, options, class_labels): + """Averages AP scores for all categories. + + Args: + aps (np.array): AP scores for all thresholds and categories. + options (dict): ScanNet evaluator options. See get_options. + class_labels (tuple[str]): Class names. + + Returns: + dict: Overall and per-category AP scores. 
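+
+    Note:
+        ``all_ap`` is averaged over all IoU overlaps except 0.25, while
+        ``all_ap_50%`` and ``all_ap_25%`` use only the 0.5 and 0.25
+        overlaps, respectively.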
+ """ + d_inf = 0 + o50 = np.where(np.isclose(options['overlaps'], 0.5)) + o25 = np.where(np.isclose(options['overlaps'], 0.25)) + o_all_but25 = np.where( + np.logical_not(np.isclose(options['overlaps'], 0.25))) + avg_dict = {} + avg_dict['all_ap'] = np.nanmean(aps[d_inf, :, o_all_but25]) + avg_dict['all_ap_50%'] = np.nanmean(aps[d_inf, :, o50]) + avg_dict['all_ap_25%'] = np.nanmean(aps[d_inf, :, o25]) + avg_dict['classes'] = {} + for (li, label_name) in enumerate(class_labels): + avg_dict['classes'][label_name] = {} + avg_dict['classes'][label_name]['ap'] = np.average(aps[d_inf, li, + o_all_but25]) + avg_dict['classes'][label_name]['ap50%'] = np.average(aps[d_inf, li, + o50]) + avg_dict['classes'][label_name]['ap25%'] = np.average(aps[d_inf, li, + o25]) + return avg_dict + + +def assign_instances_for_scan(pred_info, gt_ids, options, valid_class_ids, + class_labels, id_to_label): + """Assign gt and predicted instances for a single scene. + + Args: + pred_info (dict): Predicted masks, labels and scores. + gt_ids (np.array): Ground truth instance masks. + options (dict): ScanNet evaluator options. See get_options. + valid_class_ids (tuple[int]): Ids of valid categories. + class_labels (tuple[str]): Class names. + id_to_label (dict[int, str]): Mapping of valid class id to class label. + + Returns: + dict: Per class assigned gt to predicted instances. + dict: Per class assigned predicted to gt instances. + """ + # get gt instances + gt_instances = util_3d.get_instances(gt_ids, valid_class_ids, class_labels, + id_to_label) + # associate + gt2pred = deepcopy(gt_instances) + for label in gt2pred: + for gt in gt2pred[label]: + gt['matched_pred'] = [] + pred2gt = {} + for label in class_labels: + pred2gt[label] = [] + num_pred_instances = 0 + # mask of void labels in the ground truth + bool_void = np.logical_not(np.in1d(gt_ids // 1000, valid_class_ids)) + # go through all prediction masks + for pred_mask_file in pred_info: + label_id = int(pred_info[pred_mask_file]['label_id']) + conf = pred_info[pred_mask_file]['conf'] + if not label_id in id_to_label: # noqa E713 + continue + label_name = id_to_label[label_id] + # read the mask + pred_mask = pred_info[pred_mask_file]['mask'] + if len(pred_mask) != len(gt_ids): + raise ValueError('len(pred_mask) != len(gt_ids)') + # convert to binary + pred_mask = np.not_equal(pred_mask, 0) + num = np.count_nonzero(pred_mask) + if num < options['min_region_sizes'][0]: + continue # skip if empty + + pred_instance = {} + pred_instance['filename'] = pred_mask_file + pred_instance['pred_id'] = num_pred_instances + pred_instance['label_id'] = label_id + pred_instance['vert_count'] = num + pred_instance['confidence'] = conf + pred_instance['void_intersection'] = np.count_nonzero( + np.logical_and(bool_void, pred_mask)) + + # matched gt instances + matched_gt = [] + # go through all gt instances with matching label + for (gt_num, gt_inst) in enumerate(gt2pred[label_name]): + intersection = np.count_nonzero( + np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask)) + if intersection > 0: + gt_copy = gt_inst.copy() + pred_copy = pred_instance.copy() + gt_copy['intersection'] = intersection + pred_copy['intersection'] = intersection + matched_gt.append(gt_copy) + gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy) + pred_instance['matched_gt'] = matched_gt + num_pred_instances += 1 + pred2gt[label_name].append(pred_instance) + + return gt2pred, pred2gt + + +def scannet_eval(preds, gts, options, valid_class_ids, class_labels, + id_to_label): + """Evaluate 
instance segmentation in ScanNet protocol. + + Args: + preds (list[dict]): Per scene predictions of mask, label and + confidence. + gts (list[np.array]): Per scene ground truth instance masks. + options (dict): ScanNet evaluator options. See get_options. + valid_class_ids (tuple[int]): Ids of valid categories. + class_labels (tuple[str]): Class names. + id_to_label (dict[int, str]): Mapping of valid class id to class label. + + Returns: + dict: Overall and per-category AP scores. + """ + options = get_options(options) + matches = {} + for i, (pred, gt) in enumerate(zip(preds, gts)): + matches_key = i + # assign gt to predictions + gt2pred, pred2gt = assign_instances_for_scan(pred, gt, options, + valid_class_ids, + class_labels, id_to_label) + matches[matches_key] = {} + matches[matches_key]['gt'] = gt2pred + matches[matches_key]['pred'] = pred2gt + + ap_scores = evaluate_matches(matches, class_labels, options) + avgs = compute_averages(ap_scores, options, class_labels) + return avgs + + +def get_options(options=None): + """Set ScanNet evaluator options. + + Args: + options (dict, optional): Not default options. Default: None. + + Returns: + dict: Updated options with all 4 keys. + """ + assert options is None or isinstance(options, dict) + _options = dict( + overlaps=np.append(np.arange(0.5, 0.95, 0.05), 0.25), + min_region_sizes=np.array([100]), + distance_threshes=np.array([float('inf')]), + distance_confs=np.array([-float('inf')])) + if options is not None: + _options.update(options) + return _options diff --git a/mmdet3d/evaluation/functional/scannet_utils/util_3d.py b/mmdet3d/evaluation/functional/scannet_utils/util_3d.py new file mode 100755 index 0000000..527d341 --- /dev/null +++ b/mmdet3d/evaluation/functional/scannet_utils/util_3d.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# adapted from https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts/util_3d.py # noqa +import json + +import numpy as np + + +class Instance: + """Single instance for ScanNet evaluator. + + Args: + mesh_vert_instances (np.array): Instance ids for each point. + instance_id: Id of single instance. + """ + instance_id = 0 + label_id = 0 + vert_count = 0 + med_dist = -1 + dist_conf = 0.0 + + def __init__(self, mesh_vert_instances, instance_id): + if instance_id == -1: + return + self.instance_id = int(instance_id) + self.label_id = int(self.get_label_id(instance_id)) + self.vert_count = int( + self.get_instance_verts(mesh_vert_instances, instance_id)) + + @staticmethod + def get_label_id(instance_id): + return int(instance_id // 1000) + + @staticmethod + def get_instance_verts(mesh_vert_instances, instance_id): + return (mesh_vert_instances == instance_id).sum() + + def to_json(self): + return json.dumps( + self, default=lambda o: o.__dict__, sort_keys=True, indent=4) + + def to_dict(self): + dict = {} + dict['instance_id'] = self.instance_id + dict['label_id'] = self.label_id + dict['vert_count'] = self.vert_count + dict['med_dist'] = self.med_dist + dict['dist_conf'] = self.dist_conf + return dict + + def from_json(self, data): + self.instance_id = int(data['instance_id']) + self.label_id = int(data['label_id']) + self.vert_count = int(data['vert_count']) + if 'med_dist' in data: + self.med_dist = float(data['med_dist']) + self.dist_conf = float(data['dist_conf']) + + def __str__(self): + return '(' + str(self.instance_id) + ')' + + +def get_instances(ids, class_ids, class_labels, id2label): + """Transform gt instance mask to Instance objects. 
+ + Args: + ids (np.array): Instance ids for each point. + class_ids: (tuple[int]): Ids of valid categories. + class_labels (tuple[str]): Class names. + id2label: (dict[int, str]): Mapping of valid class id to class label. + + Returns: + dict [str, list]: Instance objects grouped by class label. + """ + instances = {} + for label in class_labels: + instances[label] = [] + instance_ids = np.unique(ids) + for id in instance_ids: + if id == 0: + continue + inst = Instance(ids, id) + if inst.label_id in class_ids: + instances[id2label[inst.label_id]].append(inst.to_dict()) + return instances diff --git a/mmdet3d/evaluation/functional/seg_eval.py b/mmdet3d/evaluation/functional/seg_eval.py new file mode 100755 index 0000000..b78df12 --- /dev/null +++ b/mmdet3d/evaluation/functional/seg_eval.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmengine.logging import print_log +from terminaltables import AsciiTable + + +def fast_hist(preds, labels, num_classes): + """Compute the confusion matrix for every batch. + + Args: + preds (np.ndarray): Prediction labels of points with shape of + (num_points, ). + labels (np.ndarray): Ground truth labels of points with shape of + (num_points, ). + num_classes (int): number of classes + + Returns: + np.ndarray: Calculated confusion matrix. + """ + + k = (labels >= 0) & (labels < num_classes) + bin_count = np.bincount( + num_classes * labels[k].astype(int) + preds[k], + minlength=num_classes**2) + return bin_count[:num_classes**2].reshape(num_classes, num_classes) + + +def per_class_iou(hist): + """Compute the per class iou. + + Args: + hist(np.ndarray): Overall confusion martix + (num_classes, num_classes ). + + Returns: + np.ndarray: Calculated per class iou + """ + + return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) + + +def get_acc(hist): + """Compute the overall accuracy. + + Args: + hist(np.ndarray): Overall confusion martix + (num_classes, num_classes ). + + Returns: + float: Calculated overall acc + """ + + return np.diag(hist).sum() / hist.sum() + + +def get_acc_cls(hist): + """Compute the class average accuracy. + + Args: + hist(np.ndarray): Overall confusion martix + (num_classes, num_classes ). + + Returns: + float: Calculated class average acc + """ + + return np.nanmean(np.diag(hist) / hist.sum(axis=1)) + + +def seg_eval(gt_labels, seg_preds, label2cat, ignore_index, logger=None): + """Semantic Segmentation Evaluation. + + Evaluate the result of the Semantic Segmentation. + + Args: + gt_labels (list[torch.Tensor]): Ground truth labels. + seg_preds (list[torch.Tensor]): Predictions. + label2cat (dict): Map from label to category name. + ignore_index (int): Index that will be ignored in evaluation. + logger (logging.Logger | str, optional): The way to print the mAP + summary. See `mmdet.utils.print_log()` for details. Default: None. + + Returns: + dict[str, float]: Dict of results. 
+ """ + assert len(seg_preds) == len(gt_labels) + num_classes = len(label2cat) + + hist_list = [] + for i in range(len(gt_labels)): + gt_seg = gt_labels[i].astype(np.int64) + pred_seg = seg_preds[i].astype(np.int64) + + # filter out ignored points + pred_seg[gt_seg == ignore_index] = -1 + gt_seg[gt_seg == ignore_index] = -1 + + # calculate one instance result + hist_list.append(fast_hist(pred_seg, gt_seg, num_classes)) + + iou = per_class_iou(sum(hist_list)) + # if ignore_index is in iou, replace it with nan + if ignore_index < len(iou): + iou[ignore_index] = np.nan + miou = np.nanmean(iou) + acc = get_acc(sum(hist_list)) + acc_cls = get_acc_cls(sum(hist_list)) + + header = ['classes'] + for i in range(len(label2cat)): + header.append(label2cat[i]) + header.extend(['miou', 'acc', 'acc_cls']) + + ret_dict = dict() + table_columns = [['results']] + for i in range(len(label2cat)): + ret_dict[label2cat[i]] = float(iou[i]) + table_columns.append([f'{iou[i]:.4f}']) + ret_dict['miou'] = float(miou) + ret_dict['acc'] = float(acc) + ret_dict['acc_cls'] = float(acc_cls) + + table_columns.append([f'{miou:.4f}']) + table_columns.append([f'{acc:.4f}']) + table_columns.append([f'{acc_cls:.4f}']) + + table_data = [header] + table_rows = list(zip(*table_columns)) + table_data += table_rows + table = AsciiTable(table_data) + table.inner_footing_row_border = True + print_log('\n' + table.table, logger=logger) + + return ret_dict diff --git a/mmdet3d/evaluation/functional/waymo_utils/__init__.py b/mmdet3d/evaluation/functional/waymo_utils/__init__.py new file mode 100755 index 0000000..722fdc4 --- /dev/null +++ b/mmdet3d/evaluation/functional/waymo_utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from .prediction_to_waymo import Prediction2Waymo + +__all__ = ['Prediction2Waymo'] diff --git a/mmdet3d/evaluation/functional/waymo_utils/prediction_to_waymo.py b/mmdet3d/evaluation/functional/waymo_utils/prediction_to_waymo.py new file mode 100755 index 0000000..b9da804 --- /dev/null +++ b/mmdet3d/evaluation/functional/waymo_utils/prediction_to_waymo.py @@ -0,0 +1,419 @@ +# Copyright (c) OpenMMLab. All rights reserved. +r"""Adapted from `Waymo to KITTI converter + `_. +""" + +try: + from waymo_open_dataset import dataset_pb2 as open_dataset + from waymo_open_dataset import label_pb2 + from waymo_open_dataset.protos import metrics_pb2 + from waymo_open_dataset.protos.metrics_pb2 import Objects +except ImportError: + Objects = None + raise ImportError( + 'Please run "pip install waymo-open-dataset-tf-2-1-0==1.2.0" ' + 'to install the official devkit first.') + +from glob import glob +from os.path import join +from typing import List, Optional + +import mmengine +import numpy as np +import tensorflow as tf + + +class Prediction2Waymo(object): + """Predictions to Waymo converter. The format of prediction results could + be original format or kitti-format. + + This class serves as the converter to change predictions from KITTI to + Waymo format. + + Args: + results (list[dict]): Prediction results. + waymo_tfrecords_dir (str): Directory to load waymo raw data. + waymo_results_save_dir (str): Directory to save converted predictions + in waymo format (.bin files). + waymo_results_final_path (str): Path to save combined + predictions in waymo format (.bin file), like 'a/b/c.bin'. + prefix (str): Prefix of filename. In general, 0 for training, 1 for + validation and 2 for testing. + classes (dict): A list of class name. + workers (str): Number of parallel processes. Defaults to 2. 
+ backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + from_kitti_format (bool, optional): Whether the reuslts are kitti + format. Defaults to False. + idx2metainfo (Optional[dict], optional): The mapping from sample_idx to + metainfo. The metainfo must contain the keys: 'idx2contextname' and + 'idx2timestamp'. Defaults to None. + """ + + def __init__(self, + results: List[dict], + waymo_tfrecords_dir: str, + waymo_results_save_dir: str, + waymo_results_final_path: str, + prefix: str, + classes: dict, + workers: int = 2, + backend_args: Optional[dict] = None, + from_kitti_format: bool = False, + idx2metainfo: Optional[dict] = None): + + self.results = results + self.waymo_tfrecords_dir = waymo_tfrecords_dir + self.waymo_results_save_dir = waymo_results_save_dir + self.waymo_results_final_path = waymo_results_final_path + self.prefix = prefix + self.classes = classes + self.workers = int(workers) + self.backend_args = backend_args + self.from_kitti_format = from_kitti_format + if idx2metainfo is not None: + self.idx2metainfo = idx2metainfo + # If ``fast_eval``, the metainfo does not need to be read from + # original data online. It's preprocessed offline. + self.fast_eval = True + else: + self.fast_eval = False + + self.name2idx = {} + + self.k2w_cls_map = { + 'Car': label_pb2.Label.TYPE_VEHICLE, + 'Pedestrian': label_pb2.Label.TYPE_PEDESTRIAN, + 'Sign': label_pb2.Label.TYPE_SIGN, + 'Cyclist': label_pb2.Label.TYPE_CYCLIST, + } + + if self.from_kitti_format: + self.T_ref_to_front_cam = np.array([[0.0, 0.0, 1.0, 0.0], + [-1.0, 0.0, 0.0, 0.0], + [0.0, -1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0]]) + # ``sample_idx`` of the sample in kitti-format is an array + for idx, result in enumerate(results): + if len(result['sample_idx']) > 0: + self.name2idx[str(result['sample_idx'][0])] = idx + else: + # ``sample_idx`` of the sample in the original prediction + # is an int value. + for idx, result in enumerate(results): + self.name2idx[str(result['sample_idx'])] = idx + + if not self.fast_eval: + # need to read original '.tfrecord' file + self.get_file_names() + # turn on eager execution for older tensorflow versions + if int(tf.__version__.split('.')[0]) < 2: + tf.enable_eager_execution() + + self.create_folder() + + def get_file_names(self): + """Get file names of waymo raw data.""" + if 'path_mapping' in self.backend_args: + for path in self.backend_args['path_mapping'].keys(): + if path in self.waymo_tfrecords_dir: + self.waymo_tfrecords_dir = \ + self.waymo_tfrecords_dir.replace( + path, self.backend_args['path_mapping'][path]) + from petrel_client.client import Client + client = Client() + contents = client.list(self.waymo_tfrecords_dir) + self.waymo_tfrecord_pathnames = list() + for content in sorted(list(contents)): + if content.endswith('tfrecord'): + self.waymo_tfrecord_pathnames.append( + join(self.waymo_tfrecords_dir, content)) + else: + self.waymo_tfrecord_pathnames = sorted( + glob(join(self.waymo_tfrecords_dir, '*.tfrecord'))) + print(len(self.waymo_tfrecord_pathnames), 'tfrecords found.') + + def create_folder(self): + """Create folder for data conversion.""" + mmengine.mkdir_or_exist(self.waymo_results_save_dir) + + def parse_objects(self, kitti_result, T_k2w, context_name, + frame_timestamp_micros): + """Parse one prediction with several instances in kitti format and + convert them to `Object` proto. + + Args: + kitti_result (dict): Predictions in kitti format. + + - name (np.ndarray): Class labels of predictions. 
+ - dimensions (np.ndarray): Height, width, length of boxes. + - location (np.ndarray): Bottom center of boxes (x, y, z). + - rotation_y (np.ndarray): Orientation of boxes. + - score (np.ndarray): Scores of predictions. + T_k2w (np.ndarray): Transformation matrix from kitti to waymo. + context_name (str): Context name of the frame. + frame_timestamp_micros (int): Frame timestamp. + + Returns: + :obj:`Object`: Predictions in waymo dataset Object proto. + """ + + def parse_one_object(instance_idx): + """Parse one instance in kitti format and convert them to `Object` + proto. + + Args: + instance_idx (int): Index of the instance to be converted. + + Returns: + :obj:`Object`: Predicted instance in waymo dataset + Object proto. + """ + cls = kitti_result['name'][instance_idx] + length = round(kitti_result['dimensions'][instance_idx, 0], 4) + height = round(kitti_result['dimensions'][instance_idx, 1], 4) + width = round(kitti_result['dimensions'][instance_idx, 2], 4) + x = round(kitti_result['location'][instance_idx, 0], 4) + y = round(kitti_result['location'][instance_idx, 1], 4) + z = round(kitti_result['location'][instance_idx, 2], 4) + rotation_y = round(kitti_result['rotation_y'][instance_idx], 4) + score = round(kitti_result['score'][instance_idx], 4) + + # y: downwards; move box origin from bottom center (kitti) to + # true center (waymo) + y -= height / 2 + # frame transformation: kitti -> waymo + x, y, z = self.transform(T_k2w, x, y, z) + + # different conventions + heading = -(rotation_y + np.pi / 2) + while heading < -np.pi: + heading += 2 * np.pi + while heading > np.pi: + heading -= 2 * np.pi + + box = label_pb2.Label.Box() + box.center_x = x + box.center_y = y + box.center_z = z + box.length = length + box.width = width + box.height = height + box.heading = heading + + o = metrics_pb2.Object() + o.object.box.CopyFrom(box) + o.object.type = self.k2w_cls_map[cls] + o.score = score + + o.context_name = context_name + o.frame_timestamp_micros = frame_timestamp_micros + + return o + + objects = metrics_pb2.Objects() + + for instance_idx in range(len(kitti_result['name'])): + o = parse_one_object(instance_idx) + objects.objects.append(o) + + return objects + + def convert_one(self, file_idx): + """Convert action for single file. + + Args: + file_idx (int): Index of the file to be converted. + """ + file_pathname = self.waymo_tfrecord_pathnames[file_idx] + if 's3://' in file_pathname and tf.__version__ >= '2.6.0': + try: + import tensorflow_io as tfio # noqa: F401 + except ImportError: + raise ImportError( + "Please run 'pip install tensorflow-io' to install tensorflow_io first." 
# noqa: E501 + ) + file_data = tf.data.TFRecordDataset(file_pathname, compression_type='') + + for frame_num, frame_data in enumerate(file_data): + frame = open_dataset.Frame() + frame.ParseFromString(bytearray(frame_data.numpy())) + + filename = f'{self.prefix}{file_idx:03d}{frame_num:03d}' + + context_name = frame.context.name + frame_timestamp_micros = frame.timestamp_micros + + if filename in self.name2idx: + if self.from_kitti_format: + for camera in frame.context.camera_calibrations: + # FRONT = 1, see dataset.proto for details + if camera.name == 1: + T_front_cam_to_vehicle = np.array( + camera.extrinsic.transform).reshape(4, 4) + + T_k2w = T_front_cam_to_vehicle @ self.T_ref_to_front_cam + + kitti_result = \ + self.results[self.name2idx[filename]] + objects = self.parse_objects(kitti_result, T_k2w, + context_name, + frame_timestamp_micros) + else: + index = self.name2idx[filename] + objects = self.parse_objects_from_origin( + self.results[index], context_name, + frame_timestamp_micros) + + else: + print(filename, 'not found.') + objects = metrics_pb2.Objects() + + with open( + join(self.waymo_results_save_dir, f'{filename}.bin'), + 'wb') as f: + f.write(objects.SerializeToString()) + + def convert_one_fast(self, res_index: int): + """Convert action for single file. It read the metainfo from the + preprocessed file offline and will be faster. + + Args: + res_index (int): The indices of the results. + """ + sample_idx = self.results[res_index]['sample_idx'] + if len(self.results[res_index]['pred_instances_3d']) > 0: + objects = self.parse_objects_from_origin( + self.results[res_index], + self.idx2metainfo[str(sample_idx)]['contextname'], + self.idx2metainfo[str(sample_idx)]['timestamp']) + else: + print(sample_idx, 'not found.') + objects = metrics_pb2.Objects() + + with open( + join(self.waymo_results_save_dir, f'{sample_idx}.bin'), + 'wb') as f: + f.write(objects.SerializeToString()) + + def parse_objects_from_origin(self, result: dict, contextname: str, + timestamp: str) -> Objects: + """Parse obejcts from the original prediction results. + + Args: + result (dict): The original prediction results. + contextname (str): The ``contextname`` of sample in waymo. + timestamp (str): The ``timestamp`` of sample in waymo. + + Returns: + metrics_pb2.Objects: The parsed object. 
+ """ + lidar_boxes = result['pred_instances_3d']['bboxes_3d'].tensor + scores = result['pred_instances_3d']['scores_3d'] + labels = result['pred_instances_3d']['labels_3d'] + + def parse_one_object(index): + class_name = self.classes[labels[index].item()] + + box = label_pb2.Label.Box() + height = lidar_boxes[index][5].item() + heading = lidar_boxes[index][6].item() + + while heading < -np.pi: + heading += 2 * np.pi + while heading > np.pi: + heading -= 2 * np.pi + + box.center_x = lidar_boxes[index][0].item() + box.center_y = lidar_boxes[index][1].item() + box.center_z = lidar_boxes[index][2].item() + height / 2 + box.length = lidar_boxes[index][3].item() + box.width = lidar_boxes[index][4].item() + box.height = height + box.heading = heading + + o = metrics_pb2.Object() + o.object.box.CopyFrom(box) + o.object.type = self.k2w_cls_map[class_name] + o.score = scores[index].item() + o.context_name = contextname + o.frame_timestamp_micros = timestamp + + return o + + objects = metrics_pb2.Objects() + for i in range(len(lidar_boxes)): + objects.objects.append(parse_one_object(i)) + + return objects + + def convert(self): + """Convert action.""" + print('Start converting ...') + convert_func = self.convert_one_fast if self.fast_eval else \ + self.convert_one + + # from torch.multiprocessing import set_sharing_strategy + # # Force using "file_system" sharing strategy for stability + # set_sharing_strategy("file_system") + + # mmengine.track_parallel_progress(convert_func, range(len(self)), + # self.workers) + + # TODO: Support multiprocessing. Now, multiprocessing evaluation will + # cause shared memory error in torch-1.10 and torch-1.11. Details can + # be seen in https://github.com/pytorch/pytorch/issues/67864. + prog_bar = mmengine.ProgressBar(len(self)) + for i in range(len(self)): + convert_func(i) + prog_bar.update() + + print('\nFinished ...') + + # combine all files into one .bin + pathnames = sorted(glob(join(self.waymo_results_save_dir, '*.bin'))) + combined = self.combine(pathnames) + + with open(self.waymo_results_final_path, 'wb') as f: + f.write(combined.SerializeToString()) + + def __len__(self): + """Length of the filename list.""" + return len(self.results) if self.fast_eval else len( + self.waymo_tfrecord_pathnames) + + def transform(self, T, x, y, z): + """Transform the coordinates with matrix T. + + Args: + T (np.ndarray): Transformation matrix. + x(float): Coordinate in x axis. + y(float): Coordinate in y axis. + z(float): Coordinate in z axis. + + Returns: + list: Coordinates after transformation. + """ + pt_bef = np.array([x, y, z, 1.0]).reshape(4, 1) + pt_aft = np.matmul(T, pt_bef) + return pt_aft[:3].flatten().tolist() + + def combine(self, pathnames): + """Combine predictions in waymo format for each sample together. + + Args: + pathnames (str): Paths to save predictions. + + Returns: + :obj:`Objects`: Combined predictions in Objects proto. + """ + combined = metrics_pb2.Objects() + + for pathname in pathnames: + objects = metrics_pb2.Objects() + with open(pathname, 'rb') as f: + objects.ParseFromString(f.read()) + for o in objects.objects: + combined.objects.append(o) + + return combined diff --git a/mmdet3d/evaluation/metrics/__init__.py b/mmdet3d/evaluation/metrics/__init__.py new file mode 100755 index 0000000..b4dae15 --- /dev/null +++ b/mmdet3d/evaluation/metrics/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .indoor_metric import IndoorMetric # noqa: F401,F403 +from .instance_seg_metric import InstanceSegMetric # noqa: F401,F403 +from .kitti_metric import KittiMetric # noqa: F401,F403 +from .lyft_metric import LyftMetric # noqa: F401,F403 +from .nuscenes_metric import NuScenesMetric # noqa: F401,F403 +from .panoptic_seg_metric import PanopticSegMetric # noqa: F401,F403 +from .seg_metric import SegMetric # noqa: F401,F403 +from .waymo_metric import WaymoMetric # noqa: F401,F403 + +__all__ = [ + 'KittiMetric', 'NuScenesMetric', 'IndoorMetric', 'LyftMetric', 'SegMetric', + 'InstanceSegMetric', 'WaymoMetric', 'PanopticSegMetric' +] diff --git a/mmdet3d/evaluation/metrics/indoor_metric.py b/mmdet3d/evaluation/metrics/indoor_metric.py new file mode 100755 index 0000000..987e641 --- /dev/null +++ b/mmdet3d/evaluation/metrics/indoor_metric.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict +from typing import Dict, List, Optional, Sequence, Union + +import numpy as np +from mmdet.evaluation import eval_map +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger + +from mmdet3d.evaluation import indoor_eval +from mmdet3d.registry import METRICS +from mmdet3d.structures import get_box_type + + +@METRICS.register_module() +class IndoorMetric(BaseMetric): + """Indoor scene evaluation metric. + + Args: + iou_thr (float or List[float]): List of iou threshold when calculate + the metric. Defaults to [0.25, 0.5]. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix will + be used instead. Defaults to None. + """ + + def __init__(self, + iou_thr: List[float] = [0.25, 0.5], + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super(IndoorMetric, self).__init__( + prefix=prefix, collect_device=collect_device) + self.iou_thr = [iou_thr] if isinstance(iou_thr, float) else iou_thr + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. + + The processed results should be stored in ``self.results``, which will + be used to compute the metrics when all batches have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + pred_3d = data_sample['pred_instances_3d'] + eval_ann_info = data_sample['eval_ann_info'] + cpu_pred_3d = dict() + for k, v in pred_3d.items(): + if hasattr(v, 'to'): + cpu_pred_3d[k] = v.to('cpu') + else: + cpu_pred_3d[k] = v + self.results.append((eval_ann_info, cpu_pred_3d)) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. 
+ """ + logger: MMLogger = MMLogger.get_current_instance() + ann_infos = [] + pred_results = [] + + for eval_ann, sinlge_pred_results in results: + ann_infos.append(eval_ann) + pred_results.append(sinlge_pred_results) + + # some checkpoints may not record the key "box_type_3d" + box_type_3d, box_mode_3d = get_box_type( + self.dataset_meta.get('box_type_3d', 'depth')) + + ret_dict = indoor_eval( + ann_infos, + pred_results, + self.iou_thr, + self.dataset_meta['classes'], + logger=logger, + box_mode_3d=box_mode_3d) + + return ret_dict + + +@METRICS.register_module() +class Indoor2DMetric(BaseMetric): + """indoor 2d predictions evaluation metric. + + Args: + iou_thr (float or List[float]): List of iou threshold when calculate + the metric. Defaults to [0.5]. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix will + be used instead. Defaults to None. + """ + + def __init__(self, + iou_thr: Union[float, List[float]] = [0.5], + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super(Indoor2DMetric, self).__init__( + prefix=prefix, collect_device=collect_device) + self.iou_thr = [iou_thr] if isinstance(iou_thr, float) else iou_thr + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. + + The processed results should be stored in ``self.results``, which will + be used to compute the metrics when all batches have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + pred = data_sample['pred_instances'] + eval_ann_info = data_sample['eval_ann_info'] + ann = dict( + labels=eval_ann_info['gt_bboxes_labels'], + bboxes=eval_ann_info['gt_bboxes']) + + pred_bboxes = pred['bboxes'].cpu().numpy() + pred_scores = pred['scores'].cpu().numpy() + pred_labels = pred['labels'].cpu().numpy() + + dets = [] + for label in range(len(self.dataset_meta['classes'])): + index = np.where(pred_labels == label)[0] + pred_bbox_scores = np.hstack( + [pred_bboxes[index], pred_scores[index].reshape((-1, 1))]) + dets.append(pred_bbox_scores) + + self.results.append((ann, dets)) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + annotations, preds = zip(*results) + eval_results = OrderedDict() + for iou_thr_2d_single in self.iou_thr: + mean_ap, _ = eval_map( + preds, + annotations, + scale_ranges=None, + iou_thr=iou_thr_2d_single, + dataset=self.dataset_meta['classes'], + logger=logger) + eval_results['mAP_' + str(iou_thr_2d_single)] = mean_ap + return eval_results diff --git a/mmdet3d/evaluation/metrics/instance_seg_metric.py b/mmdet3d/evaluation/metrics/instance_seg_metric.py new file mode 100755 index 0000000..95b1e51 --- /dev/null +++ b/mmdet3d/evaluation/metrics/instance_seg_metric.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Dict, Optional, Sequence + +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger + +from mmdet3d.evaluation import instance_seg_eval +from mmdet3d.registry import METRICS + + +@METRICS.register_module() +class InstanceSegMetric(BaseMetric): + """3D instance segmentation evaluation metric. + + Args: + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix will + be used instead. Defaults to None. + """ + + def __init__(self, + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super(InstanceSegMetric, self).__init__( + prefix=prefix, collect_device=collect_device) + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. + + The processed results should be stored in ``self.results``, which will + be used to compute the metrics when all batches have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + pred_3d = data_sample['pred_pts_seg'] + eval_ann_info = data_sample['eval_ann_info'] + cpu_pred_3d = dict() + for k, v in pred_3d.items(): + if hasattr(v, 'to'): + cpu_pred_3d[k] = v.to('cpu') + else: + cpu_pred_3d[k] = v + self.results.append((eval_ann_info, cpu_pred_3d)) + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + self.classes = self.dataset_meta['classes'] + self.valid_class_ids = self.dataset_meta['seg_valid_class_ids'] + + gt_semantic_masks = [] + gt_instance_masks = [] + pred_instance_masks = [] + pred_instance_labels = [] + pred_instance_scores = [] + + for eval_ann, sinlge_pred_results in results: + gt_semantic_masks.append(eval_ann['pts_semantic_mask']) + gt_instance_masks.append(eval_ann['pts_instance_mask']) + pred_instance_masks.append( + sinlge_pred_results['pts_instance_mask']) + pred_instance_labels.append(sinlge_pred_results['instance_labels']) + pred_instance_scores.append(sinlge_pred_results['instance_scores']) + + ret_dict = instance_seg_eval( + gt_semantic_masks, + gt_instance_masks, + pred_instance_masks, + pred_instance_labels, + pred_instance_scores, + valid_class_ids=self.valid_class_ids, + class_labels=self.classes, + logger=logger) + + return ret_dict diff --git a/mmdet3d/evaluation/metrics/kitti_metric.py b/mmdet3d/evaluation/metrics/kitti_metric.py new file mode 100755 index 0000000..f2c18f0 --- /dev/null +++ b/mmdet3d/evaluation/metrics/kitti_metric.py @@ -0,0 +1,650 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
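To exercise the ScanNet-protocol pipeline above end to end, a hypothetical single-scene check could look like the sketch below (synthetic arrays; the import path is an assumption about where scannet_eval lives in this patch):

import numpy as np

# assumed location of the scannet_eval defined earlier in this patch
from mmdet3d.evaluation.functional.scannet_utils.evaluate_semantic_instance import \
    scannet_eval

# one scene, 200 points, all belonging to a single 'chair' instance
# (ScanNet convention: instance_id = label_id * 1000 + index, hence 1001)
gt_ids = np.full(200, 1001, dtype=np.int64)
pred = {
    'chair_000': dict(label_id=1, conf=0.9, mask=np.ones(200, dtype=np.int64))
}

avgs = scannet_eval(
    preds=[pred], gts=[gt_ids], options=None,
    valid_class_ids=(1, ), class_labels=('chair', ),
    id_to_label={1: 'chair'})
print(avgs['all_ap_50%'])  # a perfect prediction yields 1.0 here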
+import tempfile +from os import path as osp +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmengine +import numpy as np +import torch +from mmengine import load +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger, print_log + +from mmdet3d.evaluation import kitti_eval +from mmdet3d.registry import METRICS +from mmdet3d.structures import (Box3DMode, CameraInstance3DBoxes, + LiDARInstance3DBoxes, points_cam2img) + + +@METRICS.register_module() +class KittiMetric(BaseMetric): + """Kitti evaluation metric. + + Args: + ann_file (str): Annotation file path. + metric (str or List[str]): Metrics to be evaluated. Defaults to 'bbox'. + pcd_limit_range (List[float]): The range of point cloud used to filter + invalid predicted boxes. Defaults to [0, -40, -3, 70.4, 40, 0.0]. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix will + be used instead. Defaults to None. + pklfile_prefix (str, optional): The prefix of pkl files, including the + file path and the prefix of filename, e.g., "a/b/prefix". If not + specified, a temp file will be created. Defaults to None. + default_cam_key (str): The default camera for lidar to camera + conversion. By default, KITTI: 'CAM2', Waymo: 'CAM_FRONT'. + Defaults to 'CAM2'. + format_only (bool): Format the output results without perform + evaluation. It is useful when you want to format the result to a + specific format and submit it to the test server. + Defaults to False. + submission_prefix (str, optional): The prefix of submission data. If + not specified, the submission data will not be generated. + Defaults to None. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + """ + + def __init__(self, + ann_file: str, + metric: Union[str, List[str]] = 'bbox', + pcd_limit_range: List[float] = [0, -40, -3, 70.4, 40, 0.0], + prefix: Optional[str] = None, + pklfile_prefix: Optional[str] = None, + default_cam_key: str = 'CAM2', + format_only: bool = False, + submission_prefix: Optional[str] = None, + collect_device: str = 'cpu', + backend_args: Optional[dict] = None) -> None: + self.default_prefix = 'Kitti metric' + super(KittiMetric, self).__init__( + collect_device=collect_device, prefix=prefix) + self.pcd_limit_range = pcd_limit_range + self.ann_file = ann_file + self.pklfile_prefix = pklfile_prefix + self.format_only = format_only + if self.format_only: + assert submission_prefix is not None, 'submission_prefix must be ' + 'not None when format_only is True, otherwise the result files ' + 'will be saved to a temp directory which will be cleaned up at ' + 'the end.' + + self.submission_prefix = submission_prefix + self.default_cam_key = default_cam_key + self.backend_args = backend_args + + allowed_metrics = ['bbox', 'img_bbox', 'mAP', 'LET_mAP'] + self.metrics = metric if isinstance(metric, list) else [metric] + for metric in self.metrics: + if metric not in allowed_metrics: + raise KeyError("metric should be one of 'bbox', 'img_bbox', " + f'but got {metric}.') + + def convert_annos_to_kitti_annos(self, data_infos: dict) -> List[dict]: + """Convert loading annotations to Kitti annotations. 
+ + Args: + data_infos (dict): Data infos including metainfo and annotations + loaded from ann_file. + + Returns: + List[dict]: List of Kitti annotations. + """ + data_annos = data_infos['data_list'] + if not self.format_only: + cat2label = data_infos['metainfo']['categories'] + label2cat = dict((v, k) for (k, v) in cat2label.items()) + assert 'instances' in data_annos[0] + for i, annos in enumerate(data_annos): + if len(annos['instances']) == 0: + kitti_annos = { + 'name': np.array([]), + 'truncated': np.array([]), + 'occluded': np.array([]), + 'alpha': np.array([]), + 'bbox': np.zeros([0, 4]), + 'dimensions': np.zeros([0, 3]), + 'location': np.zeros([0, 3]), + 'rotation_y': np.array([]), + 'score': np.array([]), + } + else: + kitti_annos = { + 'name': [], + 'truncated': [], + 'occluded': [], + 'alpha': [], + 'bbox': [], + 'location': [], + 'dimensions': [], + 'rotation_y': [], + 'score': [] + } + for instance in annos['instances']: + label = instance['bbox_label'] + kitti_annos['name'].append(label2cat[label]) + kitti_annos['truncated'].append(instance['truncated']) + kitti_annos['occluded'].append(instance['occluded']) + kitti_annos['alpha'].append(instance['alpha']) + kitti_annos['bbox'].append(instance['bbox']) + kitti_annos['location'].append(instance['bbox_3d'][:3]) + kitti_annos['dimensions'].append( + instance['bbox_3d'][3:6]) + kitti_annos['rotation_y'].append( + instance['bbox_3d'][6]) + kitti_annos['score'].append(instance['score']) + for name in kitti_annos: + kitti_annos[name] = np.array(kitti_annos[name]) + data_annos[i]['kitti_annos'] = kitti_annos + return data_annos + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. + + The processed results should be stored in ``self.results``, which will + be used to compute the metrics when all batches have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + + for data_sample in data_samples: + result = dict() + pred_3d = data_sample['pred_instances_3d'] + pred_2d = data_sample['pred_instances'] + for attr_name in pred_3d: + pred_3d[attr_name] = pred_3d[attr_name].to('cpu') + result['pred_instances_3d'] = pred_3d + for attr_name in pred_2d: + pred_2d[attr_name] = pred_2d[attr_name].to('cpu') + result['pred_instances'] = pred_2d + sample_idx = data_sample['sample_idx'] + result['sample_idx'] = sample_idx + self.results.append(result) + + def compute_metrics(self, results: List[dict]) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (List[dict]): The processed results of the whole dataset. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. 
+ """ + logger: MMLogger = MMLogger.get_current_instance() + self.classes = self.dataset_meta['classes'] + + # load annotations + pkl_infos = load(self.ann_file, backend_args=self.backend_args) + self.data_infos = self.convert_annos_to_kitti_annos(pkl_infos) + result_dict, tmp_dir = self.format_results( + results, + pklfile_prefix=self.pklfile_prefix, + submission_prefix=self.submission_prefix, + classes=self.classes) + + metric_dict = {} + + if self.format_only: + logger.info( + f'results are saved in {osp.dirname(self.submission_prefix)}') + return metric_dict + + gt_annos = [ + self.data_infos[result['sample_idx']]['kitti_annos'] + for result in results + ] + + for metric in self.metrics: + ap_dict = self.kitti_evaluate( + result_dict, + gt_annos, + metric=metric, + logger=logger, + classes=self.classes) + for result in ap_dict: + metric_dict[result] = ap_dict[result] + + if tmp_dir is not None: + tmp_dir.cleanup() + return metric_dict + + def kitti_evaluate(self, + results_dict: dict, + gt_annos: List[dict], + metric: Optional[str] = None, + classes: Optional[List[str]] = None, + logger: Optional[MMLogger] = None) -> Dict[str, float]: + """Evaluation in KITTI protocol. + + Args: + results_dict (dict): Formatted results of the dataset. + gt_annos (List[dict]): Contain gt information of each sample. + metric (str, optional): Metrics to be evaluated. Defaults to None. + classes (List[str], optional): A list of class name. + Defaults to None. + logger (MMLogger, optional): Logger used for printing related + information during evaluation. Defaults to None. + + Returns: + Dict[str, float]: Results of each evaluation metric. + """ + ap_dict = dict() + for name in results_dict: + if name == 'pred_instances' or metric == 'img_bbox': + eval_types = ['bbox'] + else: + eval_types = ['bbox', 'bev', '3d'] + ap_result_str, ap_dict_ = kitti_eval( + gt_annos, results_dict[name], classes, eval_types=eval_types) + for ap_type, ap in ap_dict_.items(): + ap_dict[f'{name}/{ap_type}'] = float(f'{ap:.4f}') + + print_log(f'Results of {name}:\n' + ap_result_str, logger=logger) + + return ap_dict + + def format_results( + self, + results: List[dict], + pklfile_prefix: Optional[str] = None, + submission_prefix: Optional[str] = None, + classes: Optional[List[str]] = None + ) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]: + """Format the results to pkl file. + + Args: + results (List[dict]): Testing results of the dataset. + pklfile_prefix (str, optional): The prefix of pkl files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Defaults to None. + submission_prefix (str, optional): The prefix of submitted files. + It includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Defaults to None. + classes (List[str], optional): A list of class name. + Defaults to None. + + Returns: + tuple: (result_dict, tmp_dir), result_dict is a dict containing the + formatted result, tmp_dir is the temporal directory created for + saving json files when jsonfile_prefix is not specified. 
+ """ + if pklfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + pklfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + result_dict = dict() + sample_idx_list = [result['sample_idx'] for result in results] + for name in results[0]: + if submission_prefix is not None: + submission_prefix_ = osp.join(submission_prefix, name) + else: + submission_prefix_ = None + if pklfile_prefix is not None: + pklfile_prefix_ = osp.join(pklfile_prefix, name) + '.pkl' + else: + pklfile_prefix_ = None + if 'pred_instances' in name and '3d' in name and name[ + 0] != '_' and results[0][name]: + net_outputs = [result[name] for result in results] + result_list_ = self.bbox2result_kitti(net_outputs, + sample_idx_list, classes, + pklfile_prefix_, + submission_prefix_) + result_dict[name] = result_list_ + elif name == 'pred_instances' and name[0] != '_' and results[0][ + name]: + net_outputs = [result[name] for result in results] + result_list_ = self.bbox2result_kitti2d( + net_outputs, sample_idx_list, classes, pklfile_prefix_, + submission_prefix_) + result_dict[name] = result_list_ + return result_dict, tmp_dir + + def bbox2result_kitti( + self, + net_outputs: List[dict], + sample_idx_list: List[int], + class_names: List[str], + pklfile_prefix: Optional[str] = None, + submission_prefix: Optional[str] = None) -> List[dict]: + """Convert 3D detection results to kitti format for evaluation and test + submission. + + Args: + net_outputs (List[dict]): List of dict storing the inferenced + bounding boxes and scores. + sample_idx_list (List[int]): List of input sample idx. + class_names (List[str]): A list of class names. + pklfile_prefix (str, optional): The prefix of pkl file. + Defaults to None. + submission_prefix (str, optional): The prefix of submission file. + Defaults to None. + + Returns: + List[dict]: A list of dictionaries with the kitti format. + """ + assert len(net_outputs) == len(self.data_infos), \ + 'invalid list length of network outputs' + if submission_prefix is not None: + mmengine.mkdir_or_exist(submission_prefix) + + det_annos = [] + print('\nConverting 3D prediction to KITTI format') + for idx, pred_dicts in enumerate( + mmengine.track_iter_progress(net_outputs)): + sample_idx = sample_idx_list[idx] + info = self.data_infos[sample_idx] + # Here default used 'CAM2' to compute metric. If you want to + # use another camera, please modify it. 
+ image_shape = (info['images'][self.default_cam_key]['height'], + info['images'][self.default_cam_key]['width']) + box_dict = self.convert_valid_bboxes(pred_dicts, info) + anno = { + 'name': [], + 'truncated': [], + 'occluded': [], + 'alpha': [], + 'bbox': [], + 'dimensions': [], + 'location': [], + 'rotation_y': [], + 'score': [] + } + if len(box_dict['bbox']) > 0: + box_2d_preds = box_dict['bbox'] + box_preds = box_dict['box3d_camera'] + scores = box_dict['scores'] + box_preds_lidar = box_dict['box3d_lidar'] + label_preds = box_dict['label_preds'] + pred_box_type_3d = box_dict['pred_box_type_3d'] + + for box, box_lidar, bbox, score, label in zip( + box_preds, box_preds_lidar, box_2d_preds, scores, + label_preds): + bbox[2:] = np.minimum(bbox[2:], image_shape[::-1]) + bbox[:2] = np.maximum(bbox[:2], [0, 0]) + anno['name'].append(class_names[int(label)]) + anno['truncated'].append(0.0) + anno['occluded'].append(0) + if pred_box_type_3d == CameraInstance3DBoxes: + anno['alpha'].append(-np.arctan2(box[0], box[2]) + + box[6]) + elif pred_box_type_3d == LiDARInstance3DBoxes: + anno['alpha'].append( + -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6]) + anno['bbox'].append(bbox) + anno['dimensions'].append(box[3:6]) + anno['location'].append(box[:3]) + anno['rotation_y'].append(box[6]) + anno['score'].append(score) + + anno = {k: np.stack(v) for k, v in anno.items()} + else: + anno = { + 'name': np.array([]), + 'truncated': np.array([]), + 'occluded': np.array([]), + 'alpha': np.array([]), + 'bbox': np.zeros([0, 4]), + 'dimensions': np.zeros([0, 3]), + 'location': np.zeros([0, 3]), + 'rotation_y': np.array([]), + 'score': np.array([]), + } + + if submission_prefix is not None: + curr_file = f'{submission_prefix}/{sample_idx:06d}.txt' + with open(curr_file, 'w') as f: + bbox = anno['bbox'] + loc = anno['location'] + dims = anno['dimensions'] # lhw -> hwl + + for idx in range(len(bbox)): + print( + '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} ' + '{:.4f} {:.4f} {:.4f} ' + '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format( + anno['name'][idx], anno['alpha'][idx], + bbox[idx][0], bbox[idx][1], bbox[idx][2], + bbox[idx][3], dims[idx][1], dims[idx][2], + dims[idx][0], loc[idx][0], loc[idx][1], + loc[idx][2], anno['rotation_y'][idx], + anno['score'][idx]), + file=f) + + anno['sample_idx'] = np.array( + [sample_idx] * len(anno['score']), dtype=np.int64) + + det_annos.append(anno) + + if pklfile_prefix is not None: + if not pklfile_prefix.endswith(('.pkl', '.pickle')): + out = f'{pklfile_prefix}.pkl' + else: + out = pklfile_prefix + mmengine.dump(det_annos, out) + print(f'Result is saved to {out}.') + + return det_annos + + def bbox2result_kitti2d( + self, + net_outputs: List[dict], + sample_idx_list: List[int], + class_names: List[str], + pklfile_prefix: Optional[str] = None, + submission_prefix: Optional[str] = None) -> List[dict]: + """Convert 2D detection results to kitti format for evaluation and test + submission. + + Args: + net_outputs (List[dict]): List of dict storing the inferenced + bounding boxes and scores. + sample_idx_list (List[int]): List of input sample idx. + class_names (List[str]): A list of class names. + pklfile_prefix (str, optional): The prefix of pkl file. + Defaults to None. + submission_prefix (str, optional): The prefix of submission file. + Defaults to None. + + Returns: + List[dict]: A list of dictionaries with the kitti format. 
+ """ + assert len(net_outputs) == len(self.data_infos), \ + 'invalid list length of network outputs' + det_annos = [] + print('\nConverting 2D prediction to KITTI format') + for i, bboxes_per_sample in enumerate( + mmengine.track_iter_progress(net_outputs)): + anno = dict( + name=[], + truncated=[], + occluded=[], + alpha=[], + bbox=[], + dimensions=[], + location=[], + rotation_y=[], + score=[]) + sample_idx = sample_idx_list[i] + + num_example = 0 + bbox = bboxes_per_sample['bboxes'] + for i in range(bbox.shape[0]): + anno['name'].append(class_names[int( + bboxes_per_sample['labels'][i])]) + anno['truncated'].append(0.0) + anno['occluded'].append(0) + anno['alpha'].append(0.0) + anno['bbox'].append(bbox[i, :4]) + # set dimensions (height, width, length) to zero + anno['dimensions'].append( + np.zeros(shape=[3], dtype=np.float32)) + # set the 3D translation to (-1000, -1000, -1000) + anno['location'].append( + np.ones(shape=[3], dtype=np.float32) * (-1000.0)) + anno['rotation_y'].append(0.0) + anno['score'].append(bboxes_per_sample['scores'][i]) + num_example += 1 + + if num_example == 0: + anno = dict( + name=np.array([]), + truncated=np.array([]), + occluded=np.array([]), + alpha=np.array([]), + bbox=np.zeros([0, 4]), + dimensions=np.zeros([0, 3]), + location=np.zeros([0, 3]), + rotation_y=np.array([]), + score=np.array([]), + ) + else: + anno = {k: np.stack(v) for k, v in anno.items()} + + anno['sample_idx'] = np.array( + [sample_idx] * num_example, dtype=np.int64) + det_annos.append(anno) + + if pklfile_prefix is not None: + if not pklfile_prefix.endswith(('.pkl', '.pickle')): + out = f'{pklfile_prefix}.pkl' + else: + out = pklfile_prefix + mmengine.dump(det_annos, out) + print(f'Result is saved to {out}.') + + if submission_prefix is not None: + # save file in submission format + mmengine.mkdir_or_exist(submission_prefix) + print(f'Saving KITTI submission to {submission_prefix}') + for i, anno in enumerate(det_annos): + sample_idx = sample_idx_list[i] + cur_det_file = f'{submission_prefix}/{sample_idx:06d}.txt' + with open(cur_det_file, 'w') as f: + bbox = anno['bbox'] + loc = anno['location'] + dims = anno['dimensions'][::-1] # lhw -> hwl + for idx in range(len(bbox)): + print( + '{} -1 -1 {:4f} {:4f} {:4f} {:4f} {:4f} {:4f} ' + '{:4f} {:4f} {:4f} {:4f} {:4f} {:4f} {:4f}'.format( + anno['name'][idx], + anno['alpha'][idx], + *bbox[idx], # 4 float + *dims[idx], # 3 float + *loc[idx], # 3 float + anno['rotation_y'][idx], + anno['score'][idx]), + file=f, + ) + print(f'Result is saved to {submission_prefix}') + + return det_annos + + def convert_valid_bboxes(self, box_dict: dict, info: dict) -> dict: + """Convert the predicted boxes into valid ones. + + Args: + box_dict (dict): Box dictionaries to be converted. + + - bboxes_3d (:obj:`BaseInstance3DBoxes`): 3D bounding boxes. + - scores_3d (Tensor): Scores of boxes. + - labels_3d (Tensor): Class labels of boxes. + info (dict): Data info. + + Returns: + dict: Valid predicted boxes. + + - bbox (np.ndarray): 2D bounding boxes. + - box3d_camera (np.ndarray): 3D bounding boxes in + camera coordinate. + - box3d_lidar (np.ndarray): 3D bounding boxes in + LiDAR coordinate. + - scores (np.ndarray): Scores of boxes. + - label_preds (np.ndarray): Class label predictions. + - sample_idx (int): Sample index. 
+ """ + # TODO: refactor this function + box_preds = box_dict['bboxes_3d'] + scores = box_dict['scores_3d'] + labels = box_dict['labels_3d'] + sample_idx = info['sample_idx'] + box_preds.limit_yaw(offset=0.5, period=np.pi * 2) + + if len(box_preds) == 0: + return dict( + bbox=np.zeros([0, 4]), + box3d_camera=np.zeros([0, 7]), + box3d_lidar=np.zeros([0, 7]), + scores=np.zeros([0]), + label_preds=np.zeros([0, 4]), + sample_idx=sample_idx) + # Here default used 'CAM2' to compute metric. If you want to + # use another camera, please modify it. + lidar2cam = np.array( + info['images'][self.default_cam_key]['lidar2cam']).astype( + np.float32) + P2 = np.array(info['images'][self.default_cam_key]['cam2img']).astype( + np.float32) + img_shape = (info['images'][self.default_cam_key]['height'], + info['images'][self.default_cam_key]['width']) + P2 = box_preds.tensor.new_tensor(P2) + + if isinstance(box_preds, LiDARInstance3DBoxes): + box_preds_camera = box_preds.convert_to(Box3DMode.CAM, lidar2cam) + box_preds_lidar = box_preds + elif isinstance(box_preds, CameraInstance3DBoxes): + box_preds_camera = box_preds + box_preds_lidar = box_preds.convert_to(Box3DMode.LIDAR, + np.linalg.inv(lidar2cam)) + + box_corners = box_preds_camera.corners + box_corners_in_image = points_cam2img(box_corners, P2) + # box_corners_in_image: [N, 8, 2] + minxy = torch.min(box_corners_in_image, dim=1)[0] + maxxy = torch.max(box_corners_in_image, dim=1)[0] + box_2d_preds = torch.cat([minxy, maxxy], dim=1) + # Post-processing + # check box_preds_camera + image_shape = box_preds.tensor.new_tensor(img_shape) + valid_cam_inds = ((box_2d_preds[:, 0] < image_shape[1]) & + (box_2d_preds[:, 1] < image_shape[0]) & + (box_2d_preds[:, 2] > 0) & (box_2d_preds[:, 3] > 0)) + # check box_preds_lidar + if isinstance(box_preds, LiDARInstance3DBoxes): + limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range) + valid_pcd_inds = ((box_preds_lidar.center > limit_range[:3]) & + (box_preds_lidar.center < limit_range[3:])) + valid_inds = valid_cam_inds & valid_pcd_inds.all(-1) + else: + valid_inds = valid_cam_inds + + if valid_inds.sum() > 0: + return dict( + bbox=box_2d_preds[valid_inds, :].numpy(), + pred_box_type_3d=type(box_preds), + box3d_camera=box_preds_camera[valid_inds].tensor.numpy(), + box3d_lidar=box_preds_lidar[valid_inds].tensor.numpy(), + scores=scores[valid_inds].numpy(), + label_preds=labels[valid_inds].numpy(), + sample_idx=sample_idx) + else: + return dict( + bbox=np.zeros([0, 4]), + pred_box_type_3d=type(box_preds), + box3d_camera=np.zeros([0, 7]), + box3d_lidar=np.zeros([0, 7]), + scores=np.zeros([0]), + label_preds=np.zeros([0]), + sample_idx=sample_idx) diff --git a/mmdet3d/evaluation/metrics/lyft_metric.py b/mmdet3d/evaluation/metrics/lyft_metric.py new file mode 100755 index 0000000..176e5aa --- /dev/null +++ b/mmdet3d/evaluation/metrics/lyft_metric.py @@ -0,0 +1,412 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +import tempfile +from os import path as osp +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmengine +import numpy as np +import pandas as pd +from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft +from lyft_dataset_sdk.utils.data_classes import Box as LyftBox +from mmengine import load +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger +from pyquaternion import Quaternion + +from mmdet3d.evaluation import lyft_eval +from mmdet3d.registry import METRICS + + +@METRICS.register_module() +class LyftMetric(BaseMetric): + """Lyft evaluation metric. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + metric (str or List[str]): Metrics to be evaluated. Defaults to 'bbox'. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_camera=False, use_lidar=True). + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix will + be used instead. Defaults to None. + jsonfile_prefix (str, optional): The prefix of json files including the + file path and the prefix of filename, e.g., "a/b/prefix". If not + specified, a temp file will be created. Defaults to None. + format_only (bool): Format the output results without perform + evaluation. It is useful when you want to format the result to a + specific format and submit it to the test server. + Defaults to False. + csv_savepath (str, optional): The path for saving csv files. It + includes the file path and the csv filename, e.g., + "a/b/filename.csv". If not specified, the result will not be + converted to csv file. Defaults to None. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + """ + + def __init__(self, + data_root: str, + ann_file: str, + metric: Union[str, List[str]] = 'bbox', + modality=dict( + use_camera=False, + use_lidar=True, + ), + prefix: Optional[str] = None, + jsonfile_prefix: str = None, + format_only: bool = False, + csv_savepath: str = None, + collect_device: str = 'cpu', + backend_args: Optional[dict] = None) -> None: + self.default_prefix = 'Lyft metric' + super(LyftMetric, self).__init__( + collect_device=collect_device, prefix=prefix) + self.ann_file = ann_file + self.data_root = data_root + self.modality = modality + self.jsonfile_prefix = jsonfile_prefix + self.format_only = format_only + if self.format_only: + assert csv_savepath is not None, 'csv_savepath must be not None ' + 'when format_only is True, otherwise the result files will be ' + 'saved to a temp directory which will be cleaned up at the end.' + + self.backend_args = backend_args + self.csv_savepath = csv_savepath + self.metrics = metric if isinstance(metric, list) else [metric] + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and data_samples. + + The processed results should be stored in ``self.results``, which will + be used to compute the metrics when all batches have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. 
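For context, `LyftMetric` is designed to be plugged into a config as the validation or test evaluator. A hypothetical snippet; the data root, annotation file and csv path below are placeholders, not values from this patch:

```python
# Hypothetical evaluator configs; adjust the placeholder paths to your setup.
data_root = 'data/lyft/'
val_evaluator = dict(
    type='LyftMetric',
    data_root=data_root,
    ann_file='lyft_infos_val.pkl',
    metric='bbox')
test_evaluator = val_evaluator

# For a test-server submission, only format the results and dump a csv file.
submission_evaluator = dict(
    type='LyftMetric',
    data_root=data_root,
    ann_file='lyft_infos_test.pkl',
    metric='bbox',
    format_only=True,
    csv_savepath='work_dirs/lyft_submission/results.csv')
```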
+ """ + for data_sample in data_samples: + result = dict() + pred_3d = data_sample['pred_instances_3d'] + pred_2d = data_sample['pred_instances'] + for attr_name in pred_3d: + pred_3d[attr_name] = pred_3d[attr_name].to('cpu') + result['pred_instances_3d'] = pred_3d + for attr_name in pred_2d: + pred_2d[attr_name] = pred_2d[attr_name].to('cpu') + result['pred_instances'] = pred_2d + sample_idx = data_sample['sample_idx'] + result['sample_idx'] = sample_idx + self.results.append(result) + + def compute_metrics(self, results: List[dict]) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (List[dict]): The processed results of the whole dataset. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + classes = self.dataset_meta['classes'] + self.version = self.dataset_meta['version'] + + # load annotations + self.data_infos = load( + osp.join(self.data_root, self.ann_file), + backend_args=self.backend_args)['data_list'] + result_dict, tmp_dir = self.format_results(results, classes, + self.jsonfile_prefix, + self.csv_savepath) + + metric_dict = {} + + if self.format_only: + logger.info( + f'results are saved in {osp.dirname(self.csv_savepath)}') + return metric_dict + + for metric in self.metrics: + ap_dict = self.lyft_evaluate( + result_dict, metric=metric, logger=logger) + for result in ap_dict: + metric_dict[result] = ap_dict[result] + + if tmp_dir is not None: + tmp_dir.cleanup() + return metric_dict + + def format_results( + self, + results: List[dict], + classes: Optional[List[str]] = None, + jsonfile_prefix: Optional[str] = None, + csv_savepath: Optional[str] = None + ) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]: + """Format the results to json (standard format for COCO evaluation). + + Args: + results (List[dict]): Testing results of the dataset. + classes (List[str], optional): A list of class name. + Defaults to None. + jsonfile_prefix (str, optional): The prefix of json files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Defaults to None. + csv_savepath (str, optional): The path for saving csv files. It + includes the file path and the csv filename, e.g., + "a/b/filename.csv". If not specified, the result will not be + converted to csv file. Defaults to None. + + Returns: + tuple: Returns (result_dict, tmp_dir), where ``result_dict`` is a + dict containing the json filepaths, ``tmp_dir`` is the temporal + directory created for saving json files when ``jsonfile_prefix`` is + not specified. 
+ """ + assert isinstance(results, list), 'results must be a list' + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + result_dict = dict() + sample_idx_list = [result['sample_idx'] for result in results] + + for name in results[0]: + if 'pred' in name and '3d' in name and name[0] != '_': + print(f'\nFormating bboxes of {name}') + # format result of model output in Det3dDataSample, + # include 'pred_instances_3d','pts_pred_instances_3d', + # 'img_pred_instances_3d' + results_ = [out[name] for out in results] + tmp_file_ = osp.join(jsonfile_prefix, name) + result_dict[name] = self._format_bbox(results_, + sample_idx_list, classes, + tmp_file_) + if csv_savepath is not None: + if 'pred_instances_3d' in result_dict: + self.json2csv(result_dict['pred_instances_3d'], csv_savepath) + elif 'pts_pred_instances_3d' in result_dict: + self.json2csv(result_dict['pts_pred_instances_3d'], + csv_savepath) + return result_dict, tmp_dir + + def json2csv(self, json_path: str, csv_savepath: str) -> None: + """Convert the json file to csv format for submission. + + Args: + json_path (str): Path of the result json file. + csv_savepath (str): Path to save the csv file. + """ + results = mmengine.load(json_path)['results'] + sample_list_path = osp.join(self.data_root, 'sample_submission.csv') + data = pd.read_csv(sample_list_path) + Id_list = list(data['Id']) + pred_list = list(data['PredictionString']) + cnt = 0 + print('Converting the json to csv...') + for token in results.keys(): + cnt += 1 + predictions = results[token] + prediction_str = '' + for i in range(len(predictions)): + prediction_str += \ + str(predictions[i]['score']) + ' ' + \ + str(predictions[i]['translation'][0]) + ' ' + \ + str(predictions[i]['translation'][1]) + ' ' + \ + str(predictions[i]['translation'][2]) + ' ' + \ + str(predictions[i]['size'][0]) + ' ' + \ + str(predictions[i]['size'][1]) + ' ' + \ + str(predictions[i]['size'][2]) + ' ' + \ + str(Quaternion(list(predictions[i]['rotation'])) + .yaw_pitch_roll[0]) + ' ' + \ + predictions[i]['name'] + ' ' + prediction_str = prediction_str[:-1] + idx = Id_list.index(token) + pred_list[idx] = prediction_str + df = pd.DataFrame({'Id': Id_list, 'PredictionString': pred_list}) + mmengine.mkdir_or_exist(os.path.dirname(csv_savepath)) + df.to_csv(csv_savepath, index=False) + + def _format_bbox(self, + results: List[dict], + sample_idx_list: List[int], + classes: Optional[List[str]] = None, + jsonfile_prefix: Optional[str] = None) -> str: + """Convert the results to the standard format. + + Args: + results (List[dict]): Testing results of the dataset. + sample_idx_list (List[int]): List of result sample idx. + classes (List[str], optional): A list of class name. + Defaults to None. + jsonfile_prefix (str, optional): The prefix of the output jsonfile. + You can specify the output directory/filename by modifying the + jsonfile_prefix. Defaults to None. + + Returns: + str: Path of the output json file. 
+ """ + lyft_annos = {} + + print('Start to convert detection format...') + for i, det in enumerate(mmengine.track_iter_progress(results)): + annos = [] + boxes = output_to_lyft_box(det) + sample_idx = sample_idx_list[i] + sample_token = self.data_infos[sample_idx]['token'] + boxes = lidar_lyft_box_to_global(self.data_infos[sample_idx], + boxes) + for i, box in enumerate(boxes): + name = classes[box.label] + lyft_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + name=name, + score=box.score) + annos.append(lyft_anno) + lyft_annos[sample_token] = annos + lyft_submissions = { + 'meta': self.modality, + 'results': lyft_annos, + } + + mmengine.mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_lyft.json') + print('Results writes to', res_path) + mmengine.dump(lyft_submissions, res_path) + return res_path + + def lyft_evaluate(self, + result_dict: dict, + metric: str = 'bbox', + logger: Optional[MMLogger] = None) -> Dict[str, float]: + """Evaluation in Lyft protocol. + + Args: + result_dict (dict): Formatted results of the dataset. + metric (str): Metrics to be evaluated. Defaults to 'bbox'. + logger (MMLogger, optional): Logger used for printing related + information during evaluation. Defaults to None. + + Returns: + Dict[str, float]: Evaluation results. + """ + metric_dict = dict() + for name in result_dict: + print(f'Evaluating bboxes of {name}') + ret_dict = self._evaluate_single( + result_dict[name], logger=logger, result_name=name) + metric_dict.update(ret_dict) + return metric_dict + + def _evaluate_single(self, + result_path: str, + logger: MMLogger = None, + result_name: str = 'pts_bbox') -> dict: + """Evaluation for a single model in Lyft protocol. + + Args: + result_path (str): Path of the result file. + logger (MMLogger, optional): Logger used for printing related + information during evaluation. Defaults to None. + result_name (str): Result name in the metric prefix. + Defaults to 'pts_bbox'. + + Returns: + Dict[str, float]: Dictionary of evaluation details. + """ + output_dir = osp.join(*osp.split(result_path)[:-1]) + lyft = Lyft( + data_path=osp.join(self.data_root, self.version), + json_path=osp.join(self.data_root, self.version, self.version), + verbose=True) + eval_set_map = { + 'v1.01-train': 'val', + } + metrics = lyft_eval(lyft, self.data_root, result_path, + eval_set_map[self.version], output_dir, logger) + + # record metrics + detail = dict() + metric_prefix = f'{result_name}_Lyft' + + for i, name in enumerate(metrics['class_names']): + AP = float(metrics['mAPs_cate'][i]) + detail[f'{metric_prefix}/{name}_AP'] = AP + + detail[f'{metric_prefix}/mAP'] = metrics['Final mAP'] + return detail + + +def output_to_lyft_box(detection: dict) -> List[LyftBox]: + """Convert the output to the box class in the Lyft. + + Args: + detection (dict): Detection results. + + Returns: + List[:obj:`LyftBox`]: List of standard LyftBoxes. 
+ """ + bbox3d = detection['bboxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + + box_gravity_center = bbox3d.gravity_center.numpy() + box_dims = bbox3d.dims.numpy() + box_yaw = bbox3d.yaw.numpy() + + # our LiDAR coordinate system -> Lyft box coordinate system + lyft_box_dims = box_dims[:, [1, 0, 2]] + + box_list = [] + for i in range(len(bbox3d)): + quat = Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + box = LyftBox( + box_gravity_center[i], + lyft_box_dims[i], + quat, + label=labels[i], + score=scores[i]) + box_list.append(box) + return box_list + + +def lidar_lyft_box_to_global(info: dict, + boxes: List[LyftBox]) -> List[LyftBox]: + """Convert the box from ego to global coordinate. + + Args: + info (dict): Info for a specific sample data, including the calibration + information. + boxes (List[:obj:`LyftBox`]): List of predicted LyftBoxes. + + Returns: + List[:obj:`LyftBox`]: List of standard LyftBoxes in the global + coordinate. + """ + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + lidar2ego = np.array(info['lidar_points']['lidar2ego']) + box.rotate(Quaternion(matrix=lidar2ego, rtol=1e-05, atol=1e-07)) + box.translate(lidar2ego[:3, 3]) + # Move box to global coord system + ego2global = np.array(info['ego2global']) + box.rotate(Quaternion(matrix=ego2global, rtol=1e-05, atol=1e-07)) + box.translate(ego2global[:3, 3]) + box_list.append(box) + return box_list diff --git a/mmdet3d/evaluation/metrics/nuscenes_metric.py b/mmdet3d/evaluation/metrics/nuscenes_metric.py new file mode 100755 index 0000000..e30c0bc --- /dev/null +++ b/mmdet3d/evaluation/metrics/nuscenes_metric.py @@ -0,0 +1,788 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import tempfile +from os import path as osp +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmengine +import numpy as np +import pyquaternion +import torch +from mmengine import Config, load +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger +from nuscenes.eval.detection.config import config_factory +from nuscenes.eval.detection.data_classes import DetectionConfig +from nuscenes.utils.data_classes import Box as NuScenesBox + +from mmdet3d.models.layers import box3d_multiclass_nms +from mmdet3d.registry import METRICS +from mmdet3d.structures import (CameraInstance3DBoxes, LiDARInstance3DBoxes, + bbox3d2result, xywhr2xyxyr) + + +@METRICS.register_module() +class NuScenesMetric(BaseMetric): + """Nuscenes evaluation metric. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + metric (str or List[str]): Metrics to be evaluated. Defaults to 'bbox'. + modality (dict): Modality to specify the sensor data used as input. + Defaults to dict(use_camera=False, use_lidar=True). + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix will + be used instead. Defaults to None. + format_only (bool): Format the output results without perform + evaluation. It is useful when you want to format the result to a + specific format and submit it to the test server. + Defaults to False. + jsonfile_prefix (str, optional): The prefix of json files including the + file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Defaults to None. + eval_version (str): Configuration version of evaluation. 
+ Defaults to 'detection_cvpr_2019'. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + """ + NameMapping = { + 'movable_object.barrier': 'barrier', + 'vehicle.bicycle': 'bicycle', + 'vehicle.bus.bendy': 'bus', + 'vehicle.bus.rigid': 'bus', + 'vehicle.car': 'car', + 'vehicle.construction': 'construction_vehicle', + 'vehicle.motorcycle': 'motorcycle', + 'human.pedestrian.adult': 'pedestrian', + 'human.pedestrian.child': 'pedestrian', + 'human.pedestrian.construction_worker': 'pedestrian', + 'human.pedestrian.police_officer': 'pedestrian', + 'movable_object.trafficcone': 'traffic_cone', + 'vehicle.trailer': 'trailer', + 'vehicle.truck': 'truck' + } + DefaultAttribute = { + 'car': 'vehicle.parked', + 'pedestrian': 'pedestrian.moving', + 'trailer': 'vehicle.parked', + 'truck': 'vehicle.parked', + 'bus': 'vehicle.moving', + 'motorcycle': 'cycle.without_rider', + 'construction_vehicle': 'vehicle.parked', + 'bicycle': 'cycle.without_rider', + 'barrier': '', + 'traffic_cone': '', + } + # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa + ErrNameMapping = { + 'trans_err': 'mATE', + 'scale_err': 'mASE', + 'orient_err': 'mAOE', + 'vel_err': 'mAVE', + 'attr_err': 'mAAE' + } + + def __init__(self, + data_root: str, + ann_file: str, + metric: Union[str, List[str]] = 'bbox', + modality: dict = dict(use_camera=False, use_lidar=True), + prefix: Optional[str] = None, + format_only: bool = False, + jsonfile_prefix: Optional[str] = None, + eval_version: str = 'detection_cvpr_2019', + collect_device: str = 'cpu', + backend_args: Optional[dict] = None) -> None: + self.default_prefix = 'NuScenes metric' + super(NuScenesMetric, self).__init__( + collect_device=collect_device, prefix=prefix) + if modality is None: + modality = dict( + use_camera=False, + use_lidar=True, + ) + self.ann_file = ann_file + self.data_root = data_root + self.modality = modality + self.format_only = format_only + if self.format_only: + assert jsonfile_prefix is not None, 'jsonfile_prefix must be not ' + 'None when format_only is True, otherwise the result files will ' + 'be saved to a temp directory which will be cleanup at the end.' + + self.jsonfile_prefix = jsonfile_prefix + self.backend_args = backend_args + + self.metrics = metric if isinstance(metric, list) else [metric] + + self.eval_version = eval_version + self.eval_detection_configs = config_factory(self.eval_version) + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. + + The processed results should be stored in ``self.results``, which will + be used to compute the metrics when all batches have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. 
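The `eval_version` passed to `config_factory` above selects a nuScenes `DetectionConfig`; among other things it carries the per-class evaluation ranges that the box conversion helpers later use to drop far-away detections. A quick way to inspect it, assuming the nuscenes-devkit is installed:

```python
from nuscenes.eval.detection.config import config_factory

# 'detection_cvpr_2019' is the standard evaluation config used by this metric.
eval_cfg = config_factory('detection_cvpr_2019')
print(list(eval_cfg.class_names))     # detection classes of the benchmark
print(eval_cfg.class_range['car'])    # per-class max evaluation range in meters
```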
+ """ + for data_sample in data_samples: + result = dict() + pred_3d = data_sample['pred_instances_3d'] + pred_2d = data_sample['pred_instances'] + for attr_name in pred_3d: + pred_3d[attr_name] = pred_3d[attr_name].to('cpu') + result['pred_instances_3d'] = pred_3d + for attr_name in pred_2d: + pred_2d[attr_name] = pred_2d[attr_name].to('cpu') + result['pred_instances'] = pred_2d + sample_idx = data_sample['sample_idx'] + result['sample_idx'] = sample_idx + self.results.append(result) + + def compute_metrics(self, results: List[dict]) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (List[dict]): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + classes = self.dataset_meta['classes'] + self.version = self.dataset_meta['version'] + # load annotations + self.data_infos = load( + self.ann_file, backend_args=self.backend_args)['data_list'] + result_dict, tmp_dir = self.format_results(results, classes, + self.jsonfile_prefix) + + metric_dict = {} + + if self.format_only: + logger.info( + f'results are saved in {osp.basename(self.jsonfile_prefix)}') + return metric_dict + + for metric in self.metrics: + ap_dict = self.nus_evaluate( + result_dict, classes=classes, metric=metric, logger=logger) + for result in ap_dict: + metric_dict[result] = ap_dict[result] + + if tmp_dir is not None: + tmp_dir.cleanup() + return metric_dict + + def nus_evaluate(self, + result_dict: dict, + metric: str = 'bbox', + classes: Optional[List[str]] = None, + logger: Optional[MMLogger] = None) -> Dict[str, float]: + """Evaluation in Nuscenes protocol. + + Args: + result_dict (dict): Formatted results of the dataset. + metric (str): Metrics to be evaluated. Defaults to 'bbox'. + classes (List[str], optional): A list of class name. + Defaults to None. + logger (MMLogger, optional): Logger used for printing related + information during evaluation. Defaults to None. + + Returns: + Dict[str, float]: Results of each evaluation metric. + """ + metric_dict = dict() + for name in result_dict: + print(f'Evaluating bboxes of {name}') + ret_dict = self._evaluate_single( + result_dict[name], classes=classes, result_name=name) + metric_dict.update(ret_dict) + return metric_dict + + def _evaluate_single( + self, + result_path: str, + classes: Optional[List[str]] = None, + result_name: str = 'pred_instances_3d') -> Dict[str, float]: + """Evaluation for a single model in nuScenes protocol. + + Args: + result_path (str): Path of the result file. + classes (List[str], optional): A list of class name. + Defaults to None. + result_name (str): Result name in the metric prefix. + Defaults to 'pred_instances_3d'. + + Returns: + Dict[str, float]: Dictionary of evaluation details. 
+ """ + from nuscenes import NuScenes + from nuscenes.eval.detection.evaluate import NuScenesEval + + output_dir = osp.join(*osp.split(result_path)[:-1]) + nusc = NuScenes( + version=self.version, dataroot=self.data_root, verbose=False) + eval_set_map = { + 'v1.0-mini': 'mini_val', + 'v1.0-trainval': 'val', + } + nusc_eval = NuScenesEval( + nusc, + config=self.eval_detection_configs, + result_path=result_path, + eval_set=eval_set_map[self.version], + output_dir=output_dir, + verbose=False) + nusc_eval.main(render_curves=False) + + # record metrics + metrics = mmengine.load(osp.join(output_dir, 'metrics_summary.json')) + detail = dict() + metric_prefix = f'{result_name}_NuScenes' + for name in classes: + for k, v in metrics['label_aps'][name].items(): + val = float(f'{v:.4f}') + detail[f'{metric_prefix}/{name}_AP_dist_{k}'] = val + for k, v in metrics['label_tp_errors'][name].items(): + val = float(f'{v:.4f}') + detail[f'{metric_prefix}/{name}_{k}'] = val + for k, v in metrics['tp_errors'].items(): + val = float(f'{v:.4f}') + detail[f'{metric_prefix}/{self.ErrNameMapping[k]}'] = val + + detail[f'{metric_prefix}/NDS'] = metrics['nd_score'] + detail[f'{metric_prefix}/mAP'] = metrics['mean_ap'] + return detail + + def format_results( + self, + results: List[dict], + classes: Optional[List[str]] = None, + jsonfile_prefix: Optional[str] = None + ) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]: + """Format the mmdet3d results to standard NuScenes json file. + + Args: + results (List[dict]): Testing results of the dataset. + classes (List[str], optional): A list of class name. + Defaults to None. + jsonfile_prefix (str, optional): The prefix of json files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Defaults to None. + + Returns: + tuple: Returns (result_dict, tmp_dir), where ``result_dict`` is a + dict containing the json filepaths, ``tmp_dir`` is the temporal + directory created for saving json files when ``jsonfile_prefix`` is + not specified. + """ + assert isinstance(results, list), 'results must be a list' + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + result_dict = dict() + sample_idx_list = [result['sample_idx'] for result in results] + + for name in results[0]: + if 'pred' in name and '3d' in name and name[0] != '_': + print(f'\nFormating bboxes of {name}') + results_ = [out[name] for out in results] + tmp_file_ = osp.join(jsonfile_prefix, name) + box_type_3d = type(results_[0]['bboxes_3d']) + if box_type_3d == LiDARInstance3DBoxes: + result_dict[name] = self._format_lidar_bbox( + results_, sample_idx_list, classes, tmp_file_) + elif box_type_3d == CameraInstance3DBoxes: + result_dict[name] = self._format_camera_bbox( + results_, sample_idx_list, classes, tmp_file_) + + return result_dict, tmp_dir + + def get_attr_name(self, attr_idx: int, label_name: str) -> str: + """Get attribute from predicted index. + + This is a workaround to predict attribute when the predicted velocity + is not reliable. We map the predicted attribute index to the one in the + attribute set. If it is consistent with the category, we will keep it. + Otherwise, we will use the default attribute. + + Args: + attr_idx (int): Attribute index. + label_name (str): Predicted category name. + + Returns: + str: Predicted attribute name. 
+ """ + # TODO: Simplify the variable name + AttrMapping_rev2 = [ + 'cycle.with_rider', 'cycle.without_rider', 'pedestrian.moving', + 'pedestrian.standing', 'pedestrian.sitting_lying_down', + 'vehicle.moving', 'vehicle.parked', 'vehicle.stopped', 'None' + ] + if label_name == 'car' or label_name == 'bus' \ + or label_name == 'truck' or label_name == 'trailer' \ + or label_name == 'construction_vehicle': + if AttrMapping_rev2[attr_idx] == 'vehicle.moving' or \ + AttrMapping_rev2[attr_idx] == 'vehicle.parked' or \ + AttrMapping_rev2[attr_idx] == 'vehicle.stopped': + return AttrMapping_rev2[attr_idx] + else: + return self.DefaultAttribute[label_name] + elif label_name == 'pedestrian': + if AttrMapping_rev2[attr_idx] == 'pedestrian.moving' or \ + AttrMapping_rev2[attr_idx] == 'pedestrian.standing' or \ + AttrMapping_rev2[attr_idx] == \ + 'pedestrian.sitting_lying_down': + return AttrMapping_rev2[attr_idx] + else: + return self.DefaultAttribute[label_name] + elif label_name == 'bicycle' or label_name == 'motorcycle': + if AttrMapping_rev2[attr_idx] == 'cycle.with_rider' or \ + AttrMapping_rev2[attr_idx] == 'cycle.without_rider': + return AttrMapping_rev2[attr_idx] + else: + return self.DefaultAttribute[label_name] + else: + return self.DefaultAttribute[label_name] + + def _format_camera_bbox(self, + results: List[dict], + sample_idx_list: List[int], + classes: Optional[List[str]] = None, + jsonfile_prefix: Optional[str] = None) -> str: + """Convert the results to the standard format. + + Args: + results (List[dict]): Testing results of the dataset. + sample_idx_list (List[int]): List of result sample idx. + classes (List[str], optional): A list of class name. + Defaults to None. + jsonfile_prefix (str, optional): The prefix of the output jsonfile. + You can specify the output directory/filename by modifying the + jsonfile_prefix. Defaults to None. + + Returns: + str: Path of the output json file. 
+ """ + nusc_annos = {} + + print('Start to convert detection format...') + + # Camera types in Nuscenes datasets + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + + CAM_NUM = 6 + + for i, det in enumerate(mmengine.track_iter_progress(results)): + + sample_idx = sample_idx_list[i] + + frame_sample_idx = sample_idx // CAM_NUM + camera_type_id = sample_idx % CAM_NUM + + if camera_type_id == 0: + boxes_per_frame = [] + attrs_per_frame = [] + + # need to merge results from images of the same sample + annos = [] + boxes, attrs = output_to_nusc_box(det) + sample_token = self.data_infos[frame_sample_idx]['token'] + camera_type = camera_types[camera_type_id] + boxes, attrs = cam_nusc_box_to_global( + self.data_infos[frame_sample_idx], boxes, attrs, classes, + self.eval_detection_configs, camera_type) + boxes_per_frame.extend(boxes) + attrs_per_frame.extend(attrs) + # Remove redundant predictions caused by overlap of images + if (sample_idx + 1) % CAM_NUM != 0: + continue + boxes = global_nusc_box_to_cam(self.data_infos[frame_sample_idx], + boxes_per_frame, classes, + self.eval_detection_configs) + cam_boxes3d, scores, labels = nusc_box_to_cam_box3d(boxes) + # box nms 3d over 6 images in a frame + # TODO: move this global setting into config + nms_cfg = dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=4096, + nms_thr=0.05, + score_thr=0.01, + min_bbox_size=0, + max_per_frame=500) + nms_cfg = Config(nms_cfg) + cam_boxes3d_for_nms = xywhr2xyxyr(cam_boxes3d.bev) + boxes3d = cam_boxes3d.tensor + # generate attr scores from attr labels + attrs = labels.new_tensor([attr for attr in attrs_per_frame]) + boxes3d, scores, labels, attrs = box3d_multiclass_nms( + boxes3d, + cam_boxes3d_for_nms, + scores, + nms_cfg.score_thr, + nms_cfg.max_per_frame, + nms_cfg, + mlvl_attr_scores=attrs) + cam_boxes3d = CameraInstance3DBoxes(boxes3d, box_dim=9) + det = bbox3d2result(cam_boxes3d, scores, labels, attrs) + boxes, attrs = output_to_nusc_box(det) + boxes, attrs = cam_nusc_box_to_global( + self.data_infos[frame_sample_idx], boxes, attrs, classes, + self.eval_detection_configs) + + for i, box in enumerate(boxes): + name = classes[box.label] + attr = self.get_attr_name(attrs[i], name) + nusc_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + velocity=box.velocity[:2].tolist(), + detection_name=name, + detection_score=box.score, + attribute_name=attr) + annos.append(nusc_anno) + # other views results of the same frame should be concatenated + if sample_token in nusc_annos: + nusc_annos[sample_token].extend(annos) + else: + nusc_annos[sample_token] = annos + + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + } + + mmengine.mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print(f'Results writes to {res_path}') + mmengine.dump(nusc_submissions, res_path) + return res_path + + def _format_lidar_bbox(self, + results: List[dict], + sample_idx_list: List[int], + classes: Optional[List[str]] = None, + jsonfile_prefix: Optional[str] = None) -> str: + """Convert the results to the standard format. + + Args: + results (List[dict]): Testing results of the dataset. + sample_idx_list (List[int]): List of result sample idx. + classes (List[str], optional): A list of class name. + Defaults to None. + jsonfile_prefix (str, optional): The prefix of the output jsonfile. 
+ You can specify the output directory/filename by modifying the + jsonfile_prefix. Defaults to None. + + Returns: + str: Path of the output json file. + """ + nusc_annos = {} + + print('Start to convert detection format...') + for i, det in enumerate(mmengine.track_iter_progress(results)): + annos = [] + boxes, attrs = output_to_nusc_box(det) + sample_idx = sample_idx_list[i] + sample_token = self.data_infos[sample_idx]['token'] + boxes = lidar_nusc_box_to_global(self.data_infos[sample_idx], + boxes, classes, + self.eval_detection_configs) + for i, box in enumerate(boxes): + name = classes[box.label] + if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2: + if name in [ + 'car', + 'construction_vehicle', + 'bus', + 'truck', + 'trailer', + ]: + attr = 'vehicle.moving' + elif name in ['bicycle', 'motorcycle']: + attr = 'cycle.with_rider' + else: + attr = self.DefaultAttribute[name] + else: + if name in ['pedestrian']: + attr = 'pedestrian.standing' + elif name in ['bus']: + attr = 'vehicle.stopped' + else: + attr = self.DefaultAttribute[name] + + nusc_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + velocity=box.velocity[:2].tolist(), + detection_name=name, + detection_score=box.score, + attribute_name=attr) + annos.append(nusc_anno) + nusc_annos[sample_token] = annos + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + } + mmengine.mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print(f'Results writes to {res_path}') + mmengine.dump(nusc_submissions, res_path) + return res_path + + +def output_to_nusc_box( + detection: dict) -> Tuple[List[NuScenesBox], Union[np.ndarray, None]]: + """Convert the output to the box class in the nuScenes. + + Args: + detection (dict): Detection results. + + - bboxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. + - scores_3d (torch.Tensor): Detection scores. + - labels_3d (torch.Tensor): Predicted box labels. + + Returns: + Tuple[List[:obj:`NuScenesBox`], np.ndarray or None]: List of standard + NuScenesBoxes and attribute labels. 
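The attribute assignment in `_format_lidar_bbox` above is a simple speed rule: if the predicted planar speed exceeds 0.2 m/s the box gets a 'moving' / 'with_rider' attribute, otherwise a class-dependent default. Restated as a small standalone helper that mirrors the branch above (not a new API):

```python
import numpy as np

# Subset of the DefaultAttribute table defined on the metric class above.
DEFAULT_ATTR = {
    'car': 'vehicle.parked',
    'pedestrian': 'pedestrian.moving',
    'bus': 'vehicle.moving',
    'bicycle': 'cycle.without_rider',
}


def pick_attribute(name: str, velocity_xy) -> str:
    """Mirror of the speed-based attribute rule used in _format_lidar_bbox."""
    if np.linalg.norm(velocity_xy) > 0.2:
        if name in ('car', 'construction_vehicle', 'bus', 'truck', 'trailer'):
            return 'vehicle.moving'
        if name in ('bicycle', 'motorcycle'):
            return 'cycle.with_rider'
        return DEFAULT_ATTR[name]
    if name == 'pedestrian':
        return 'pedestrian.standing'
    if name == 'bus':
        return 'vehicle.stopped'
    return DEFAULT_ATTR[name]


print(pick_attribute('car', [3.0, 0.1]))         # vehicle.moving
print(pick_attribute('car', [0.05, 0.0]))        # vehicle.parked
print(pick_attribute('pedestrian', [0.0, 0.0]))  # pedestrian.standing
```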
+ """ + bbox3d = detection['bboxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + attrs = None + if 'attr_labels' in detection: + attrs = detection['attr_labels'].numpy() + + box_gravity_center = bbox3d.gravity_center.numpy() + box_dims = bbox3d.dims.numpy() + box_yaw = bbox3d.yaw.numpy() + + box_list = [] + + if isinstance(bbox3d, LiDARInstance3DBoxes): + # our LiDAR coordinate system -> nuScenes box coordinate system + nus_box_dims = box_dims[:, [1, 0, 2]] + for i in range(len(bbox3d)): + quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + velocity = (*bbox3d.tensor[i, 7:9], 0.0) + # velo_val = np.linalg.norm(box3d[i, 7:9]) + # velo_ori = box3d[i, 6] + # velocity = ( + # velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0) + box = NuScenesBox( + box_gravity_center[i], + nus_box_dims[i], + quat, + label=labels[i], + score=scores[i], + velocity=velocity) + box_list.append(box) + elif isinstance(bbox3d, CameraInstance3DBoxes): + # our Camera coordinate system -> nuScenes box coordinate system + # convert the dim/rot to nuscbox convention + nus_box_dims = box_dims[:, [2, 0, 1]] + nus_box_yaw = -box_yaw + for i in range(len(bbox3d)): + q1 = pyquaternion.Quaternion( + axis=[0, 0, 1], radians=nus_box_yaw[i]) + q2 = pyquaternion.Quaternion(axis=[1, 0, 0], radians=np.pi / 2) + quat = q2 * q1 + velocity = (bbox3d.tensor[i, 7], 0.0, bbox3d.tensor[i, 8]) + box = NuScenesBox( + box_gravity_center[i], + nus_box_dims[i], + quat, + label=labels[i], + score=scores[i], + velocity=velocity) + box_list.append(box) + else: + raise NotImplementedError( + f'Do not support convert {type(bbox3d)} bboxes ' + 'to standard NuScenesBoxes.') + + return box_list, attrs + + +def lidar_nusc_box_to_global( + info: dict, boxes: List[NuScenesBox], classes: List[str], + eval_configs: DetectionConfig) -> List[NuScenesBox]: + """Convert the box from ego to global coordinate. + + Args: + info (dict): Info for a specific sample data, including the calibration + information. + boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + classes (List[str]): Mapped classes in the evaluation. + eval_configs (:obj:`DetectionConfig`): Evaluation configuration object. + + Returns: + List[:obj:`DetectionConfig`]: List of standard NuScenesBoxes in the + global coordinate. + """ + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + lidar2ego = np.array(info['lidar_points']['lidar2ego']) + box.rotate( + pyquaternion.Quaternion(matrix=lidar2ego, rtol=1e-05, atol=1e-07)) + box.translate(lidar2ego[:3, 3]) + # filter det in ego. + cls_range_map = eval_configs.class_range + radius = np.linalg.norm(box.center[:2], 2) + det_range = cls_range_map[classes[box.label]] + if radius > det_range: + continue + # Move box to global coord system + ego2global = np.array(info['ego2global']) + box.rotate( + pyquaternion.Quaternion(matrix=ego2global, rtol=1e-05, atol=1e-07)) + box.translate(ego2global[:3, 3]) + box_list.append(box) + return box_list + + +def cam_nusc_box_to_global( + info: dict, + boxes: List[NuScenesBox], + attrs: np.ndarray, + classes: List[str], + eval_configs: DetectionConfig, + camera_type: str = 'CAM_FRONT', +) -> Tuple[List[NuScenesBox], List[int]]: + """Convert the box from camera to global coordinate. + + Args: + info (dict): Info for a specific sample data, including the calibration + information. + boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + attrs (np.ndarray): Predicted attributes. 
+ classes (List[str]): Mapped classes in the evaluation. + eval_configs (:obj:`DetectionConfig`): Evaluation configuration object. + camera_type (str): Type of camera. Defaults to 'CAM_FRONT'. + + Returns: + Tuple[List[:obj:`NuScenesBox`], List[int]]: List of standard + NuScenesBoxes in the global coordinate and attribute label. + """ + box_list = [] + attr_list = [] + for (box, attr) in zip(boxes, attrs): + # Move box to ego vehicle coord system + cam2ego = np.array(info['images'][camera_type]['cam2ego']) + box.rotate( + pyquaternion.Quaternion(matrix=cam2ego, rtol=1e-05, atol=1e-07)) + box.translate(cam2ego[:3, 3]) + # filter det in ego. + cls_range_map = eval_configs.class_range + radius = np.linalg.norm(box.center[:2], 2) + det_range = cls_range_map[classes[box.label]] + if radius > det_range: + continue + # Move box to global coord system + ego2global = np.array(info['ego2global']) + box.rotate( + pyquaternion.Quaternion(matrix=ego2global, rtol=1e-05, atol=1e-07)) + box.translate(ego2global[:3, 3]) + box_list.append(box) + attr_list.append(attr) + return box_list, attr_list + + +def global_nusc_box_to_cam(info: dict, boxes: List[NuScenesBox], + classes: List[str], + eval_configs: DetectionConfig) -> List[NuScenesBox]: + """Convert the box from global to camera coordinate. + + Args: + info (dict): Info for a specific sample data, including the calibration + information. + boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + classes (List[str]): Mapped classes in the evaluation. + eval_configs (:obj:`DetectionConfig`): Evaluation configuration object. + + Returns: + List[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in camera + coordinate. + """ + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + ego2global = np.array(info['ego2global']) + box.translate(-ego2global[:3, 3]) + box.rotate( + pyquaternion.Quaternion(matrix=ego2global, rtol=1e-05, + atol=1e-07).inverse) + # filter det in ego. + cls_range_map = eval_configs.class_range + radius = np.linalg.norm(box.center[:2], 2) + det_range = cls_range_map[classes[box.label]] + if radius > det_range: + continue + # Move box to camera coord system + cam2ego = np.array(info['images']['CAM_FRONT']['cam2ego']) + box.translate(-cam2ego[:3, 3]) + box.rotate( + pyquaternion.Quaternion(matrix=cam2ego, rtol=1e-05, + atol=1e-07).inverse) + box_list.append(box) + return box_list + + +def nusc_box_to_cam_box3d( + boxes: List[NuScenesBox] +) -> Tuple[CameraInstance3DBoxes, torch.Tensor, torch.Tensor]: + """Convert boxes from :obj:`NuScenesBox` to :obj:`CameraInstance3DBoxes`. + + Args: + boxes (:obj:`List[NuScenesBox]`): List of predicted NuScenesBoxes. + + Returns: + Tuple[:obj:`CameraInstance3DBoxes`, torch.Tensor, torch.Tensor]: + Converted 3D bounding boxes, scores and labels. 
+ """ + locs = torch.Tensor([b.center for b in boxes]).view(-1, 3) + dims = torch.Tensor([b.wlh for b in boxes]).view(-1, 3) + rots = torch.Tensor([b.orientation.yaw_pitch_roll[0] + for b in boxes]).view(-1, 1) + velocity = torch.Tensor([b.velocity[0::2] for b in boxes]).view(-1, 2) + + # convert nusbox to cambox convention + dims[:, [0, 1, 2]] = dims[:, [1, 2, 0]] + rots = -rots + + boxes_3d = torch.cat([locs, dims, rots, velocity], dim=1).cuda() + cam_boxes3d = CameraInstance3DBoxes( + boxes_3d, box_dim=9, origin=(0.5, 0.5, 0.5)) + scores = torch.Tensor([b.score for b in boxes]).cuda() + labels = torch.LongTensor([b.label for b in boxes]).cuda() + nms_scores = scores.new_zeros(scores.shape[0], 10 + 1) + indices = labels.new_tensor(list(range(scores.shape[0]))) + nms_scores[indices, labels] = scores + return cam_boxes3d, nms_scores, labels diff --git a/mmdet3d/evaluation/metrics/panoptic_seg_metric.py b/mmdet3d/evaluation/metrics/panoptic_seg_metric.py new file mode 100755 index 0000000..02e4d09 --- /dev/null +++ b/mmdet3d/evaluation/metrics/panoptic_seg_metric.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Dict, List, Optional + +from mmengine.logging import MMLogger + +from mmdet3d.evaluation import panoptic_seg_eval +from mmdet3d.registry import METRICS +from .seg_metric import SegMetric + + +@METRICS.register_module() +class PanopticSegMetric(SegMetric): + """3D Panoptic segmentation evaluation metric. + + Args: + thing_class_inds (list[int]): Indices of thing classes. + stuff_class_inds (list[int]): Indices of stuff classes. + min_num_points (int): Minimum number of points of an object to be + counted as ground truth in evaluation. + id_offset (int): Offset for instance ids to concat with + semantic labels. + collect_device (str, optional): Device name used for collecting + results from different ranks during distributed training. + Must be 'cpu' or 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Default to None. + pklfile_prefix (str, optional): The prefix of pkl files, including + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default to None. + submission_prefix (str, optional): The prefix of submission data. + If not specified, the submission data will not be generated. + Default to None. + """ + + def __init__(self, + thing_class_inds: List[int], + stuff_class_inds: List[int], + min_num_points: int, + id_offset: int, + collect_device: str = 'cpu', + prefix: Optional[str] = None, + pklfile_prefix: str = None, + submission_prefix: str = None, + **kwargs): + self.thing_class_inds = thing_class_inds + self.stuff_class_inds = stuff_class_inds + self.min_num_points = min_num_points + self.id_offset = id_offset + + super(PanopticSegMetric, self).__init__( + pklfile_prefix=pklfile_prefix, + submission_prefix=submission_prefix, + prefix=prefix, + collect_device=collect_device, + **kwargs) + + # TODO modify format_result for panoptic segmentation evaluation, \ + # different datasets have different needs. + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. 
The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + if self.submission_prefix: + self.format_results(results) + return None + + label2cat = self.dataset_meta['label2cat'] + ignore_index = self.dataset_meta['ignore_index'] + classes = self.dataset_meta['classes'] + thing_classes = [classes[i] for i in self.thing_class_inds] + stuff_classes = [classes[i] for i in self.stuff_class_inds] + + gt_labels = [] + seg_preds = [] + for eval_ann, sinlge_pred_results in results: + gt_labels.append(eval_ann) + seg_preds.append(sinlge_pred_results) + + ret_dict = panoptic_seg_eval(gt_labels, seg_preds, classes, + thing_classes, stuff_classes, + self.min_num_points, self.id_offset, + label2cat, [ignore_index], logger) + + return ret_dict diff --git a/mmdet3d/evaluation/metrics/seg_metric.py b/mmdet3d/evaluation/metrics/seg_metric.py new file mode 100755 index 0000000..0bd81e7 --- /dev/null +++ b/mmdet3d/evaluation/metrics/seg_metric.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import tempfile +from typing import Dict, Optional, Sequence + +import mmcv +import numpy as np +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger + +from mmdet3d.evaluation import seg_eval +from mmdet3d.registry import METRICS + + +@METRICS.register_module() +class SegMetric(BaseMetric): + """3D semantic segmentation evaluation metric. + + Args: + collect_device (str, optional): Device name used for collecting + results from different ranks during distributed training. + Must be 'cpu' or 'gpu'. Defaults to 'cpu'. + prefix (str): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Default: None. + pklfile_prefix (str, optional): The prefix of pkl files, including + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + submission_prefix (str, optional): The prefix of submission data. + If not specified, the submission data will not be generated. + Default: None. + """ + + def __init__(self, + collect_device: str = 'cpu', + prefix: Optional[str] = None, + pklfile_prefix: str = None, + submission_prefix: str = None, + **kwargs): + self.pklfile_prefix = pklfile_prefix + self.submission_prefix = submission_prefix + super(SegMetric, self).__init__( + prefix=prefix, collect_device=collect_device) + + def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. + + The processed results should be stored in ``self.results``, + which will be used to compute the metrics when all batches + have been processed. + + Args: + data_batch (dict): A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from + the model. + """ + for data_sample in data_samples: + pred_3d = data_sample['pred_pts_seg'] + eval_ann_info = data_sample['eval_ann_info'] + cpu_pred_3d = dict() + for k, v in pred_3d.items(): + if hasattr(v, 'to'): + cpu_pred_3d[k] = v.to('cpu').numpy() + else: + cpu_pred_3d[k] = v + self.results.append((eval_ann_info, cpu_pred_3d)) + + def format_results(self, results): + r"""Format the results to txt file. Refer to `ScanNet documentation + `_. + + Args: + outputs (list[dict]): Testing results of the dataset. 
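For reference, `PanopticSegMetric` above is configured per dataset, since the thing/stuff split, the minimum instance size and the id offset all depend on the label definition. A hypothetical snippet; the index lists and numbers below are illustrative placeholders, not values from this patch:

```python
# Hypothetical panoptic evaluator config; the class index lists, the minimum
# point count and the id offset are placeholders that must match the dataset.
val_evaluator = dict(
    type='PanopticSegMetric',
    thing_class_inds=[0, 1, 2, 3, 4, 5, 6, 7],
    stuff_class_inds=[8, 9, 10, 11, 12, 13, 14, 15],
    min_num_points=50,      # ignore GT instances with fewer points than this
    id_offset=2**16)        # offset used to concat instance ids with semantic labels
```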
+ + Returns: + tuple: (outputs, tmp_dir), outputs is the detection results, + tmp_dir is the temporal directory created for saving submission + files when ``submission_prefix`` is not specified. + """ + + submission_prefix = self.submission_prefix + if submission_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + submission_prefix = osp.join(tmp_dir.name, 'results') + mmcv.mkdir_or_exist(submission_prefix) + ignore_index = self.dataset_meta['ignore_index'] + # need to map network output to original label idx + cat2label = np.zeros(len(self.dataset_meta['label2cat'])).astype( + np.int64) + for original_label, output_idx in self.dataset_meta['label2cat'].items( + ): + if output_idx != ignore_index: + cat2label[output_idx] = original_label + + for i, (eval_ann, result) in enumerate(results): + sample_idx = eval_ann['point_cloud']['lidar_idx'] + pred_sem_mask = result['semantic_mask'].numpy().astype(np.int64) + pred_label = cat2label[pred_sem_mask] + curr_file = f'{submission_prefix}/{sample_idx}.txt' + np.savetxt(curr_file, pred_label, fmt='%d') + + def compute_metrics(self, results: list) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + + if self.submission_prefix: + self.format_results(results) + return None + + label2cat = self.dataset_meta['label2cat'] + ignore_index = self.dataset_meta['ignore_index'] + + gt_semantic_masks = [] + pred_semantic_masks = [] + + for eval_ann, sinlge_pred_results in results: + gt_semantic_masks.append(eval_ann['pts_semantic_mask']) + pred_semantic_masks.append( + sinlge_pred_results['pts_semantic_mask']) + + ret_dict = seg_eval( + gt_semantic_masks, + pred_semantic_masks, + label2cat, + ignore_index, + logger=logger) + + return ret_dict diff --git a/mmdet3d/evaluation/metrics/waymo_metric.py b/mmdet3d/evaluation/metrics/waymo_metric.py new file mode 100755 index 0000000..7b96167 --- /dev/null +++ b/mmdet3d/evaluation/metrics/waymo_metric.py @@ -0,0 +1,710 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import tempfile +from os import path as osp +from typing import Dict, List, Optional, Tuple, Union + +import mmengine +import numpy as np +import torch +from mmengine import Config, load +from mmengine.logging import MMLogger, print_log + +from mmdet3d.models.layers import box3d_multiclass_nms +from mmdet3d.registry import METRICS +from mmdet3d.structures import (Box3DMode, CameraInstance3DBoxes, + LiDARInstance3DBoxes, bbox3d2result, + points_cam2img, xywhr2xyxyr) +from .kitti_metric import KittiMetric + + +@METRICS.register_module() +class WaymoMetric(KittiMetric): + """Waymo evaluation metric. + + Args: + ann_file (str): The path of the annotation file in kitti format. + waymo_bin_file (str): The path of the annotation file in waymo format. + data_root (str): Path of dataset root. Used for storing waymo + evaluation programs. + split (str): The split of the evaluation set. Defaults to 'training'. + metric (str or List[str]): Metrics to be evaluated. Defaults to 'mAP'. + pcd_limit_range (List[float]): The range of point cloud used to filter + invalid predicted boxes. Defaults to [-85, -85, -5, 85, 85, 5]. + convert_kitti_format (bool): Whether to convert the results to kitti + format. Now, in order to be compatible with camera-based methods, + defaults to True. 
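The `cat2label` array built in `format_results` above simply inverts the dataset's `label2cat` mapping so that network output indices can be written back as the original category ids expected by the submission format. A toy illustration with a hypothetical mapping:

```python
import numpy as np

# Hypothetical mapping from original dataset label ids to contiguous network
# output indices, interpreted the same way as the loop above.
label2cat = {3: 0, 5: 1, 9: 2, 255: 3}   # here 255 plays the role of 'ignore'
ignore_index = 3

cat2label = np.zeros(len(label2cat), dtype=np.int64)
for original_label, output_idx in label2cat.items():
    if output_idx != ignore_index:
        cat2label[output_idx] = original_label

pred_sem_mask = np.array([0, 2, 1, 1, 0])   # network output indices
print(cat2label[pred_sem_mask])             # -> original ids [3 9 5 5 3]
```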
+        prefix (str, optional): The prefix that will be added in the metric
+            names to disambiguate homonymous metrics of different evaluators.
+            If prefix is not provided in the argument, self.default_prefix will
+            be used instead. Defaults to None.
+        format_only (bool): Format the output results without performing
+            evaluation. It is useful when you want to format the result to a
+            specific format and submit it to the test server.
+            Defaults to False.
+        pklfile_prefix (str, optional): The prefix of pkl files, including the
+            file path and the prefix of filename, e.g., "a/b/prefix". If not
+            specified, a temp file will be created. Defaults to None.
+        submission_prefix (str, optional): The prefix of submission data. If
+            not specified, the submission data will not be generated.
+            Defaults to None.
+        load_type (str): Type of loading mode during training.
+
+            - 'frame_based': Load all of the instances in the frame.
+            - 'mv_image_based': Load all of the instances in the frame and
+              convert them to the FOV-based data type to support image-based
+              detectors.
+            - 'fov_image_based': Only load the instances inside the default
+              camera and convert them to the FOV-based data type to support
+              image-based detectors.
+        default_cam_key (str): The default camera for the lidar-to-camera
+            conversion. By default, KITTI uses 'CAM2' and Waymo uses
+            'CAM_FRONT'. Defaults to 'CAM_FRONT'.
+        use_pred_sample_idx (bool): Whether to use the sample index from the
+            predictions rather than from the loaded annotations when formatting
+            results. By default this is True for KITTI and False for Waymo,
+            since the Waymo conversion process needs the sample index from the
+            loaded annotations. Defaults to False.
+        collect_device (str): Device name used for collecting results from
+            different ranks during distributed training. Must be 'cpu' or
+            'gpu'. Defaults to 'cpu'.
+        backend_args (dict, optional): Arguments to instantiate the
+            corresponding backend. Defaults to None.
+        idx2metainfo (str, optional): The file path of the metainfo in Waymo.
+            It stores the mapping from sample_idx to metainfo. The metainfo
+            must contain the keys: 'idx2contextname' and 'idx2timestamp'.
+            Defaults to None.
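As with the other metrics, `WaymoMetric` is meant to be used as an evaluator in a config; the `waymo_bin_file` ground truth and the kitti-format info file come from the Waymo conversion step. A hypothetical snippet, with placeholder paths:

```python
# Hypothetical Waymo evaluator config; all paths below are placeholders.
data_root = 'data/waymo/kitti_format/'
val_evaluator = dict(
    type='WaymoMetric',
    ann_file=data_root + 'waymo_infos_val.pkl',
    waymo_bin_file='./data/waymo/waymo_format/gt.bin',
    data_root=data_root,
    metric='mAP',
    convert_kitti_format=True)
```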
+ """ + num_cams = 5 + + def __init__(self, + ann_file: str, + waymo_bin_file: str, + data_root: str, + split: str = 'training', + metric: Union[str, List[str]] = 'mAP', + pcd_limit_range: List[float] = [-85, -85, -5, 85, 85, 5], + convert_kitti_format: bool = True, + prefix: Optional[str] = None, + format_only: bool = False, + pklfile_prefix: Optional[str] = None, + submission_prefix: Optional[str] = None, + load_type: str = 'frame_based', + default_cam_key: str = 'CAM_FRONT', + use_pred_sample_idx: bool = False, + collect_device: str = 'cpu', + backend_args: Optional[dict] = None, + idx2metainfo: Optional[str] = None) -> None: + self.waymo_bin_file = waymo_bin_file + self.data_root = data_root + self.split = split + self.load_type = load_type + self.use_pred_sample_idx = use_pred_sample_idx + self.convert_kitti_format = convert_kitti_format + + if idx2metainfo is not None: + self.idx2metainfo = mmengine.load(idx2metainfo) + else: + self.idx2metainfo = None + + super(WaymoMetric, self).__init__( + ann_file=ann_file, + metric=metric, + pcd_limit_range=pcd_limit_range, + prefix=prefix, + pklfile_prefix=pklfile_prefix, + submission_prefix=submission_prefix, + default_cam_key=default_cam_key, + collect_device=collect_device, + backend_args=backend_args) + self.format_only = format_only + if self.format_only: + assert pklfile_prefix is not None, 'pklfile_prefix must be not ' + 'None when format_only is True, otherwise the result files will ' + 'be saved to a temp directory which will be cleaned up at the end.' + + self.default_prefix = 'Waymo metric' + + def compute_metrics(self, results: List[dict]) -> Dict[str, float]: + """Compute the metrics from processed results. + + Args: + results (List[dict]): The processed results of the whole dataset. + + Returns: + Dict[str, float]: The computed metrics. The keys are the names of + the metrics, and the values are corresponding results. + """ + logger: MMLogger = MMLogger.get_current_instance() + self.classes = self.dataset_meta['classes'] + + # load annotations + self.data_infos = load(self.ann_file)['data_list'] + assert len(results) == len(self.data_infos), \ + 'invalid list length of network outputs' + # different from kitti, waymo do not need to convert the ann file + # handle the mv_image_based load_mode + if self.load_type == 'mv_image_based': + new_data_infos = [] + for info in self.data_infos: + height = info['images'][self.default_cam_key]['height'] + width = info['images'][self.default_cam_key]['width'] + for (cam_key, img_info) in info['images'].items(): + camera_info = dict() + camera_info['images'] = dict() + camera_info['images'][cam_key] = img_info + # TODO remove the check by updating the data info; + if 'height' not in img_info: + img_info['height'] = height + img_info['width'] = width + if 'cam_instances' in info \ + and cam_key in info['cam_instances']: + camera_info['instances'] = info['cam_instances'][ + cam_key] + else: + camera_info['instances'] = [] + camera_info['ego2global'] = info['ego2global'] + if 'image_sweeps' in info: + camera_info['image_sweeps'] = info['image_sweeps'] + + # TODO check if need to modify the sample idx + # TODO check when will use it except for evaluation. 
+ camera_info['sample_idx'] = info['sample_idx'] + new_data_infos.append(camera_info) + self.data_infos = new_data_infos + + if self.pklfile_prefix is None: + eval_tmp_dir = tempfile.TemporaryDirectory() + pklfile_prefix = osp.join(eval_tmp_dir.name, 'results') + else: + eval_tmp_dir = None + pklfile_prefix = self.pklfile_prefix + + result_dict, tmp_dir = self.format_results( + results, + pklfile_prefix=pklfile_prefix, + submission_prefix=self.submission_prefix, + classes=self.classes) + + metric_dict = {} + + if self.format_only: + logger.info('results are saved in ' + f'{osp.dirname(self.pklfile_prefix)}') + return metric_dict + + for metric in self.metrics: + ap_dict = self.waymo_evaluate( + pklfile_prefix, metric=metric, logger=logger) + metric_dict.update(ap_dict) + if eval_tmp_dir is not None: + eval_tmp_dir.cleanup() + + if tmp_dir is not None: + tmp_dir.cleanup() + return metric_dict + + def waymo_evaluate(self, + pklfile_prefix: str, + metric: Optional[str] = None, + logger: Optional[MMLogger] = None) -> Dict[str, float]: + """Evaluation in Waymo protocol. + + Args: + pklfile_prefix (str): The location that stored the prediction + results. + metric (str, optional): Metric to be evaluated. Defaults to None. + logger (MMLogger, optional): Logger used for printing related + information during evaluation. Defaults to None. + + Returns: + Dict[str, float]: Results of each evaluation metric. + """ + + import subprocess + + if metric == 'mAP': + eval_str = 'mmdet3d/evaluation/functional/waymo_utils/' + \ + f'compute_detection_metrics_main {pklfile_prefix}.bin ' + \ + f'{self.waymo_bin_file}' + print(eval_str) + ret_bytes = subprocess.check_output(eval_str, shell=True) + ret_texts = ret_bytes.decode('utf-8') + print_log(ret_texts, logger=logger) + + ap_dict = { + 'Vehicle/L1 mAP': 0, + 'Vehicle/L1 mAPH': 0, + 'Vehicle/L2 mAP': 0, + 'Vehicle/L2 mAPH': 0, + 'Pedestrian/L1 mAP': 0, + 'Pedestrian/L1 mAPH': 0, + 'Pedestrian/L2 mAP': 0, + 'Pedestrian/L2 mAPH': 0, + 'Sign/L1 mAP': 0, + 'Sign/L1 mAPH': 0, + 'Sign/L2 mAP': 0, + 'Sign/L2 mAPH': 0, + 'Cyclist/L1 mAP': 0, + 'Cyclist/L1 mAPH': 0, + 'Cyclist/L2 mAP': 0, + 'Cyclist/L2 mAPH': 0, + 'Overall/L1 mAP': 0, + 'Overall/L1 mAPH': 0, + 'Overall/L2 mAP': 0, + 'Overall/L2 mAPH': 0 + } + mAP_splits = ret_texts.split('mAP ') + mAPH_splits = ret_texts.split('mAPH ') + for idx, key in enumerate(ap_dict.keys()): + split_idx = int(idx / 2) + 1 + if idx % 2 == 0: # mAP + ap_dict[key] = float(mAP_splits[split_idx].split(']')[0]) + else: # mAPH + ap_dict[key] = float(mAPH_splits[split_idx].split(']')[0]) + ap_dict['Overall/L1 mAP'] = \ + (ap_dict['Vehicle/L1 mAP'] + ap_dict['Pedestrian/L1 mAP'] + + ap_dict['Cyclist/L1 mAP']) / 3 + ap_dict['Overall/L1 mAPH'] = \ + (ap_dict['Vehicle/L1 mAPH'] + ap_dict['Pedestrian/L1 mAPH'] + + ap_dict['Cyclist/L1 mAPH']) / 3 + ap_dict['Overall/L2 mAP'] = \ + (ap_dict['Vehicle/L2 mAP'] + ap_dict['Pedestrian/L2 mAP'] + + ap_dict['Cyclist/L2 mAP']) / 3 + ap_dict['Overall/L2 mAPH'] = \ + (ap_dict['Vehicle/L2 mAPH'] + ap_dict['Pedestrian/L2 mAPH'] + + ap_dict['Cyclist/L2 mAPH']) / 3 + elif metric == 'LET_mAP': + eval_str = 'mmdet3d/evaluation/functional/waymo_utils/' + \ + f'compute_detection_let_metrics_main {pklfile_prefix}.bin ' + \ + f'{self.waymo_bin_file}' + + print(eval_str) + ret_bytes = subprocess.check_output(eval_str, shell=True) + ret_texts = ret_bytes.decode('utf-8') + + print_log(ret_texts, logger=logger) + ap_dict = { + 'Vehicle mAPL': 0, + 'Vehicle mAP': 0, + 'Vehicle mAPH': 0, + 'Pedestrian mAPL': 0, + 
'Pedestrian mAP': 0, + 'Pedestrian mAPH': 0, + 'Sign mAPL': 0, + 'Sign mAP': 0, + 'Sign mAPH': 0, + 'Cyclist mAPL': 0, + 'Cyclist mAP': 0, + 'Cyclist mAPH': 0, + 'Overall mAPL': 0, + 'Overall mAP': 0, + 'Overall mAPH': 0 + } + mAPL_splits = ret_texts.split('mAPL ') + mAP_splits = ret_texts.split('mAP ') + mAPH_splits = ret_texts.split('mAPH ') + for idx, key in enumerate(ap_dict.keys()): + split_idx = int(idx / 3) + 1 + if idx % 3 == 0: # mAPL + ap_dict[key] = float(mAPL_splits[split_idx].split(']')[0]) + elif idx % 3 == 1: # mAP + ap_dict[key] = float(mAP_splits[split_idx].split(']')[0]) + else: # mAPH + ap_dict[key] = float(mAPH_splits[split_idx].split(']')[0]) + ap_dict['Overall mAPL'] = \ + (ap_dict['Vehicle mAPL'] + ap_dict['Pedestrian mAPL'] + + ap_dict['Cyclist mAPL']) / 3 + ap_dict['Overall mAP'] = \ + (ap_dict['Vehicle mAP'] + ap_dict['Pedestrian mAP'] + + ap_dict['Cyclist mAP']) / 3 + ap_dict['Overall mAPH'] = \ + (ap_dict['Vehicle mAPH'] + ap_dict['Pedestrian mAPH'] + + ap_dict['Cyclist mAPH']) / 3 + return ap_dict + + def format_results( + self, + results: List[dict], + pklfile_prefix: Optional[str] = None, + submission_prefix: Optional[str] = None, + classes: Optional[List[str]] = None + ) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]: + """Format the results to bin file. + + Args: + results (List[dict]): Testing results of the dataset. + pklfile_prefix (str, optional): The prefix of pkl files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Defaults to None. + submission_prefix (str, optional): The prefix of submitted files. + It includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Defaults to None. + classes (List[str], optional): A list of class name. + Defaults to None. + + Returns: + tuple: (result_dict, tmp_dir), result_dict is a dict containing the + formatted result, tmp_dir is the temporal directory created for + saving json files when jsonfile_prefix is not specified. + """ + waymo_save_tmp_dir = tempfile.TemporaryDirectory() + waymo_results_save_dir = waymo_save_tmp_dir.name + waymo_results_final_path = f'{pklfile_prefix}.bin' + + if self.convert_kitti_format: + results_kitti_format, tmp_dir = super().format_results( + results, pklfile_prefix, submission_prefix, classes) + final_results = results_kitti_format['pred_instances_3d'] + else: + final_results = results + for i, res in enumerate(final_results): + # Actually, `sample_idx` here is the filename without suffix. + # It's for identitying the sample in formating. 
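+            # ``limit_yaw(offset=0.5, period=2 * pi)`` below wraps every
+            # predicted heading into a single period ([-pi, pi)) before the
+            # boxes are handed to the Waymo ``.bin`` converter, e.g. a raw
+            # yaw of 3.5 rad becomes 3.5 - 2 * pi ~= -2.78 rad.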
+ res['sample_idx'] = self.data_infos[i]['sample_idx'] + res['pred_instances_3d']['bboxes_3d'].limit_yaw( + offset=0.5, period=np.pi * 2) + + waymo_root = self.data_root + if self.split == 'training': + waymo_tfrecords_dir = osp.join(waymo_root, 'validation') + prefix = '1' + elif self.split == 'testing': + waymo_tfrecords_dir = osp.join(waymo_root, 'testing') + prefix = '2' + else: + raise ValueError('Not supported split value.') + + from ..functional.waymo_utils.prediction_to_waymo import \ + Prediction2Waymo + converter = Prediction2Waymo( + final_results, + waymo_tfrecords_dir, + waymo_results_save_dir, + waymo_results_final_path, + prefix, + classes, + backend_args=self.backend_args, + from_kitti_format=self.convert_kitti_format, + idx2metainfo=self.idx2metainfo) + converter.convert() + waymo_save_tmp_dir.cleanup() + + return final_results, waymo_save_tmp_dir + + def merge_multi_view_boxes(self, box_dict_per_frame: List[dict], + cam0_info: dict) -> dict: + """Merge bounding boxes predicted from multi-view images. + + Args: + box_dict_per_frame (List[dict]): The results of prediction for each + camera. + cam0_info (dict): Store the sample idx for the given frame. + + Returns: + dict: Merged results. + """ + box_dict = dict() + # convert list[dict] to dict[list] + for key in box_dict_per_frame[0].keys(): + box_dict[key] = list() + for cam_idx in range(self.num_cams): + box_dict[key].append(box_dict_per_frame[cam_idx][key]) + # merge each elements + box_dict['sample_idx'] = cam0_info['image_id'] + for key in ['bbox', 'box3d_lidar', 'scores', 'label_preds']: + box_dict[key] = np.concatenate(box_dict[key]) + + # apply nms to box3d_lidar (box3d_camera are in different systems) + # TODO: move this global setting into config + nms_cfg = dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=500, + nms_thr=0.05, + score_thr=0.001, + min_bbox_size=0, + max_per_frame=100) + nms_cfg = Config(nms_cfg) + lidar_boxes3d = LiDARInstance3DBoxes( + torch.from_numpy(box_dict['box3d_lidar']).cuda()) + scores = torch.from_numpy(box_dict['scores']).cuda() + labels = torch.from_numpy(box_dict['label_preds']).long().cuda() + nms_scores = scores.new_zeros(scores.shape[0], len(self.classes) + 1) + indices = labels.new_tensor(list(range(scores.shape[0]))) + nms_scores[indices, labels] = scores + lidar_boxes3d_for_nms = xywhr2xyxyr(lidar_boxes3d.bev) + boxes3d = lidar_boxes3d.tensor + # generate attr scores from attr labels + boxes3d, scores, labels = box3d_multiclass_nms( + boxes3d, lidar_boxes3d_for_nms, nms_scores, nms_cfg.score_thr, + nms_cfg.max_per_frame, nms_cfg) + lidar_boxes3d = LiDARInstance3DBoxes(boxes3d) + det = bbox3d2result(lidar_boxes3d, scores, labels) + box_preds_lidar = det['bboxes_3d'] + scores = det['scores_3d'] + labels = det['labels_3d'] + # box_preds_camera is in the cam0 system + lidar2cam = cam0_info['images'][self.default_cam_key]['lidar2img'] + lidar2cam = np.array(lidar2cam).astype(np.float32) + box_preds_camera = box_preds_lidar.convert_to( + Box3DMode.CAM, lidar2cam, correct_yaw=True) + # Note: bbox is meaningless in final evaluation, set to 0 + merged_box_dict = dict( + bbox=np.zeros([box_preds_lidar.tensor.shape[0], 4]), + box3d_camera=box_preds_camera.tensor.numpy(), + box3d_lidar=box_preds_lidar.tensor.numpy(), + scores=scores.numpy(), + label_preds=labels.numpy(), + sample_idx=box_dict['sample_idx'], + ) + return merged_box_dict + + def bbox2result_kitti( + self, + net_outputs: List[dict], + sample_idx_list: List[int], + class_names: List[str], + pklfile_prefix: 
Optional[str] = None, + submission_prefix: Optional[str] = None) -> List[dict]: + """Convert 3D detection results to kitti format for evaluation and test + submission. + + Args: + net_outputs (List[dict]): List of dict storing the inferenced + bounding boxes and scores. + sample_idx_list (List[int]): List of input sample idx. + class_names (List[str]): A list of class names. + pklfile_prefix (str, optional): The prefix of pkl file. + Defaults to None. + submission_prefix (str, optional): The prefix of submission file. + Defaults to None. + + Returns: + List[dict]: A list of dictionaries with the kitti format. + """ + if submission_prefix is not None: + mmengine.mkdir_or_exist(submission_prefix) + + det_annos = [] + print('\nConverting prediction to KITTI format') + for idx, pred_dicts in enumerate( + mmengine.track_iter_progress(net_outputs)): + sample_idx = sample_idx_list[idx] + info = self.data_infos[sample_idx] + + if self.load_type == 'mv_image_based': + if idx % self.num_cams == 0: + box_dict_per_frame = [] + cam0_key = list(info['images'].keys())[0] + cam0_info = info + # Here in mono3d, we use the 'CAM_FRONT' "the first + # index in the camera" as the default image shape. + # If you want to another camera, please modify it. + image_shape = (info['images'][cam0_key]['height'], + info['images'][cam0_key]['width']) + box_dict = self.convert_valid_bboxes(pred_dicts, info) + else: + box_dict = self.convert_valid_bboxes(pred_dicts, info) + # Here default used 'CAM_FRONT' to compute metric. + # If you want to use another camera, please modify it. + image_shape = (info['images'][self.default_cam_key]['height'], + info['images'][self.default_cam_key]['width']) + if self.load_type == 'mv_image_based': + box_dict_per_frame.append(box_dict) + if (idx + 1) % self.num_cams != 0: + continue + box_dict = self.merge_multi_view_boxes(box_dict_per_frame, + cam0_info) + + anno = { + 'name': [], + 'truncated': [], + 'occluded': [], + 'alpha': [], + 'bbox': [], + 'dimensions': [], + 'location': [], + 'rotation_y': [], + 'score': [] + } + if len(box_dict['bbox']) > 0: + box_2d_preds = box_dict['bbox'] + box_preds = box_dict['box3d_camera'] + scores = box_dict['scores'] + box_preds_lidar = box_dict['box3d_lidar'] + label_preds = box_dict['label_preds'] + + for box, box_lidar, bbox, score, label in zip( + box_preds, box_preds_lidar, box_2d_preds, scores, + label_preds): + bbox[2:] = np.minimum(bbox[2:], image_shape[::-1]) + bbox[:2] = np.maximum(bbox[:2], [0, 0]) + anno['name'].append(class_names[int(label)]) + anno['truncated'].append(0.0) + anno['occluded'].append(0) + anno['alpha'].append( + -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6]) + anno['bbox'].append(bbox) + anno['dimensions'].append(box[3:6]) + anno['location'].append(box[:3]) + anno['rotation_y'].append(box[6]) + anno['score'].append(score) + + anno = {k: np.stack(v) for k, v in anno.items()} + else: + anno = { + 'name': np.array([]), + 'truncated': np.array([]), + 'occluded': np.array([]), + 'alpha': np.array([]), + 'bbox': np.zeros([0, 4]), + 'dimensions': np.zeros([0, 3]), + 'location': np.zeros([0, 3]), + 'rotation_y': np.array([]), + 'score': np.array([]), + } + + if submission_prefix is not None: + curr_file = f'{submission_prefix}/{sample_idx:06d}.txt' + with open(curr_file, 'w') as f: + bbox = anno['bbox'] + loc = anno['location'] + dims = anno['dimensions'] # lhw -> hwl + + for idx in range(len(bbox)): + print( + '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} ' + '{:.4f} {:.4f} {:.4f} ' + '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} 
{:.4f}'.format( + anno['name'][idx], anno['alpha'][idx], + bbox[idx][0], bbox[idx][1], bbox[idx][2], + bbox[idx][3], dims[idx][1], dims[idx][2], + dims[idx][0], loc[idx][0], loc[idx][1], + loc[idx][2], anno['rotation_y'][idx], + anno['score'][idx]), + file=f) + if self.use_pred_sample_idx: + save_sample_idx = sample_idx + else: + # use the sample idx in the info file + # In waymo validation sample_idx in prediction is 000xxx + # but in info file it is 1000xxx + save_sample_idx = box_dict['sample_idx'] + anno['sample_idx'] = np.array( + [save_sample_idx] * len(anno['score']), dtype=np.int64) + + det_annos.append(anno) + + if pklfile_prefix is not None: + if not pklfile_prefix.endswith(('.pkl', '.pickle')): + out = f'{pklfile_prefix}.pkl' + else: + out = pklfile_prefix + mmengine.dump(det_annos, out) + print(f'Result is saved to {out}.') + + return det_annos + + def convert_valid_bboxes(self, box_dict: dict, info: dict) -> dict: + """Convert the predicted boxes into valid ones. Should handle the + load_model (frame_based, mv_image_based, fov_image_based), separately. + + Args: + box_dict (dict): Box dictionaries to be converted. + + - bboxes_3d (:obj:`BaseInstance3DBoxes`): 3D bounding boxes. + - scores_3d (Tensor): Scores of boxes. + - labels_3d (Tensor): Class labels of boxes. + info (dict): Data info. + + Returns: + dict: Valid predicted boxes. + + - bbox (np.ndarray): 2D bounding boxes. + - box3d_camera (np.ndarray): 3D bounding boxes in camera + coordinate. + - box3d_lidar (np.ndarray): 3D bounding boxes in LiDAR coordinate. + - scores (np.ndarray): Scores of boxes. + - label_preds (np.ndarray): Class label predictions. + - sample_idx (int): Sample index. + """ + # TODO: refactor this function + box_preds = box_dict['bboxes_3d'] + scores = box_dict['scores_3d'] + labels = box_dict['labels_3d'] + sample_idx = info['sample_idx'] + box_preds.limit_yaw(offset=0.5, period=np.pi * 2) + + if len(box_preds) == 0: + return dict( + bbox=np.zeros([0, 4]), + box3d_camera=np.zeros([0, 7]), + box3d_lidar=np.zeros([0, 7]), + scores=np.zeros([0]), + label_preds=np.zeros([0, 4]), + sample_idx=sample_idx) + # Here default used 'CAM_FRONT' to compute metric. If you want to + # use another camera, please modify it. 
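+        # 'frame_based' and 'fov_image_based' infos share one set of cameras,
+        # so the configured default camera is used; in 'mv_image_based' mode
+        # each (flattened) info only carries a single camera, so its first key
+        # is taken. The chosen camera's ``lidar2cam`` / ``cam2img`` matrices
+        # are then used to project the 3D box corners into the image and
+        # derive the 2D boxes used for the in-image validity check below.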
+ if self.load_type in ['frame_based', 'fov_image_based']: + cam_key = self.default_cam_key + elif self.load_type == 'mv_image_based': + cam_key = list(info['images'].keys())[0] + else: + raise NotImplementedError + + lidar2cam = np.array(info['images'][cam_key]['lidar2cam']).astype( + np.float32) + P2 = np.array(info['images'][cam_key]['cam2img']).astype(np.float32) + img_shape = (info['images'][cam_key]['height'], + info['images'][cam_key]['width']) + P2 = box_preds.tensor.new_tensor(P2) + + if isinstance(box_preds, LiDARInstance3DBoxes): + box_preds_camera = box_preds.convert_to(Box3DMode.CAM, lidar2cam) + box_preds_lidar = box_preds + elif isinstance(box_preds, CameraInstance3DBoxes): + box_preds_camera = box_preds + box_preds_lidar = box_preds.convert_to(Box3DMode.LIDAR, + np.linalg.inv(lidar2cam)) + + box_corners = box_preds_camera.corners + box_corners_in_image = points_cam2img(box_corners, P2) + # box_corners_in_image: [N, 8, 2] + minxy = torch.min(box_corners_in_image, dim=1)[0] + maxxy = torch.max(box_corners_in_image, dim=1)[0] + box_2d_preds = torch.cat([minxy, maxxy], dim=1) + # Post-processing + # check box_preds_camera + image_shape = box_preds.tensor.new_tensor(img_shape) + valid_cam_inds = ((box_2d_preds[:, 0] < image_shape[1]) & + (box_2d_preds[:, 1] < image_shape[0]) & + (box_2d_preds[:, 2] > 0) & (box_2d_preds[:, 3] > 0)) + # check box_preds_lidar + if self.load_type in ['frame_based']: + limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range) + valid_pcd_inds = ((box_preds_lidar.center > limit_range[:3]) & + (box_preds_lidar.center < limit_range[3:])) + valid_inds = valid_pcd_inds.all(-1) + elif self.load_type in ['mv_image_based', 'fov_image_based']: + valid_inds = valid_cam_inds + + if valid_inds.sum() > 0: + return dict( + bbox=box_2d_preds[valid_inds, :].numpy(), + pred_box_type_3d=type(box_preds), + box3d_camera=box_preds_camera[valid_inds].tensor.numpy(), + box3d_lidar=box_preds_lidar[valid_inds].tensor.numpy(), + scores=scores[valid_inds].numpy(), + label_preds=labels[valid_inds].numpy(), + sample_idx=sample_idx) + else: + return dict( + bbox=np.zeros([0, 4]), + pred_box_type_3d=type(box_preds), + box3d_camera=np.zeros([0, 7]), + box3d_lidar=np.zeros([0, 7]), + scores=np.zeros([0]), + label_preds=np.zeros([0]), + sample_idx=sample_idx) diff --git a/mmdet3d/models/__init__.py b/mmdet3d/models/__init__.py new file mode 100755 index 0000000..56ab280 --- /dev/null +++ b/mmdet3d/models/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet3d.models.layers.fusion_layers import * # noqa: F401,F403 +from .backbones import * # noqa: F401,F403 +from .data_preprocessors import * # noqa: F401,F403 +from .decode_heads import * # noqa: F401,F403 +from .dense_heads import * # noqa: F401,F403 +from .detectors import * # noqa: F401,F403 +from .layers import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .middle_encoders import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 +from .roi_heads import * # noqa: F401,F403 +from .segmentors import * # noqa: F401,F403 +from .test_time_augs import * # noqa: F401,F403 +from .utils import * # noqa: F401,F403 +from .voxel_encoders import * # noqa: F401,F403 +from .language_models import * # noqa: F401,F403 diff --git a/mmdet3d/models/backbones/__init__.py b/mmdet3d/models/backbones/__init__.py new file mode 100755 index 0000000..5d64fea --- /dev/null +++ b/mmdet3d/models/backbones/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
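+# These backbones are exposed through the MODELS registry, so configs refer
+# to them by class name, e.g. ``backbone=dict(type='SECOND', ...)`` with
+# whatever keyword arguments the chosen class accepts.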
+from mmdet.models.backbones import SSDVGG, HRNet, ResNet, ResNetV1d, ResNeXt + +from .cylinder3d import Asymm3DSpconv +from .dgcnn import DGCNNBackbone +from .dla import DLANet +from .mink_resnet import MinkResNet +from .minkunet_backbone import MinkUNetBackbone +from .multi_backbone import MultiBackbone +from .nostem_regnet import NoStemRegNet +from .pointnet2_sa_msg import PointNet2SAMSG +from .pointnet2_sa_ssg import PointNet2SASSG +from .second import SECOND +from .spvcnn_backone import SPVCNNBackbone + +__all__ = [ + 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'NoStemRegNet', + 'SECOND', 'DGCNNBackbone', 'PointNet2SASSG', 'PointNet2SAMSG', + 'MultiBackbone', 'DLANet', 'MinkResNet', 'Asymm3DSpconv', + 'MinkUNetBackbone', 'SPVCNNBackbone' +] diff --git a/mmdet3d/models/backbones/base_pointnet.py b/mmdet3d/models/backbones/base_pointnet.py new file mode 100755 index 0000000..7cfc2b2 --- /dev/null +++ b/mmdet3d/models/backbones/base_pointnet.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from abc import ABCMeta + +from mmengine.model import BaseModule + + +class BasePointNet(BaseModule, metaclass=ABCMeta): + """Base class for PointNet.""" + + def __init__(self, init_cfg=None, pretrained=None): + super(BasePointNet, self).__init__(init_cfg) + self.fp16_enabled = False + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + @staticmethod + def _split_point_feats(points): + """Split coordinates and features of input points. + + Args: + points (torch.Tensor): Point coordinates with features, + with shape (B, N, 3 + input_feature_dim). + + Returns: + torch.Tensor: Coordinates of input points. + torch.Tensor: Features of input points. + """ + xyz = points[..., 0:3].contiguous() + if points.size(-1) > 3: + features = points[..., 3:].transpose(1, 2).contiguous() + else: + features = None + + return xyz, features diff --git a/mmdet3d/models/backbones/cylinder3d.py b/mmdet3d/models/backbones/cylinder3d.py new file mode 100755 index 0000000..7736542 --- /dev/null +++ b/mmdet3d/models/backbones/cylinder3d.py @@ -0,0 +1,480 @@ +# Copyright (c) OpenMMLab. All rights reserved. +r"""Modified from Cylinder3D. + +Please refer to `Cylinder3D github page +`_ for details +""" + +from typing import List, Optional + +import numpy as np +import torch +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmcv.ops import (SparseConv3d, SparseConvTensor, SparseInverseConv3d, + SubMConv3d) +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType + + +class AsymmResBlock(BaseModule): + """Asymmetrical Residual Block. + + Args: + in_channels (int): Input channels of the block. + out_channels (int): Output channels of the block. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for + normalization layer. + act_cfg (:obj:`ConfigDict` or dict): Config dict of activation layers. + Defaults to dict(type='LeakyReLU'). + indice_key (str, optional): Name of indice tables. Defaults to None. 
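+    Note:
+        The two branches apply transposed asymmetric kernels, (1, 3, 3)
+        followed by (3, 1, 3) on one path and (3, 1, 3) followed by (1, 3, 3)
+        on the other, approximating a full 3x3x3 convolution with fewer
+        parameters before the branch outputs are summed.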
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + norm_cfg: ConfigType, + act_cfg: ConfigType = dict(type='LeakyReLU'), + indice_key: Optional[str] = None): + super().__init__() + + self.conv0_0 = SubMConv3d( + in_channels, + out_channels, + kernel_size=(1, 3, 3), + padding=1, + bias=False, + indice_key=indice_key + 'bef') + self.act0_0 = build_activation_layer(act_cfg) + self.bn0_0 = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv0_1 = SubMConv3d( + out_channels, + out_channels, + kernel_size=(3, 1, 3), + padding=1, + bias=False, + indice_key=indice_key + 'bef') + self.act0_1 = build_activation_layer(act_cfg) + self.bn0_1 = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv1_0 = SubMConv3d( + in_channels, + out_channels, + kernel_size=(3, 1, 3), + padding=1, + bias=False, + indice_key=indice_key + 'bef') + self.act1_0 = build_activation_layer(act_cfg) + self.bn1_0 = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv1_1 = SubMConv3d( + out_channels, + out_channels, + kernel_size=(1, 3, 3), + padding=1, + bias=False, + indice_key=indice_key + 'bef') + self.act1_1 = build_activation_layer(act_cfg) + self.bn1_1 = build_norm_layer(norm_cfg, out_channels)[1] + + def forward(self, x: SparseConvTensor) -> SparseConvTensor: + """Forward pass.""" + shortcut = self.conv0_0(x) + + shortcut.features = self.act0_0(shortcut.features) + shortcut.features = self.bn0_0(shortcut.features) + + shortcut = self.conv0_1(shortcut) + shortcut.features = self.act0_1(shortcut.features) + shortcut.features = self.bn0_1(shortcut.features) + + res = self.conv1_0(x) + res.features = self.act1_0(res.features) + res.features = self.bn1_0(res.features) + + res = self.conv1_1(res) + res.features = self.act1_1(res.features) + res.features = self.bn1_1(res.features) + + res.features = res.features + shortcut.features + + return res + + +class AsymmeDownBlock(BaseModule): + """Asymmetrical DownSample Block. + + Args: + in_channels (int): Input channels of the block. + out_channels (int): Output channels of the block. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for + normalization layer. + act_cfg (:obj:`ConfigDict` or dict): Config dict of activation layers. + Defaults to dict(type='LeakyReLU'). + pooling (bool): Whether pooling features at the end of + block. Defaults: True. + height_pooling (bool): Whether pooling features at + the height dimension. Defaults: False. + indice_key (str, optional): Name of indice tables. Defaults to None. 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + norm_cfg: ConfigType, + act_cfg: ConfigType = dict(type='LeakyReLU'), + pooling: bool = True, + height_pooling: bool = False, + indice_key: Optional[str] = None): + super().__init__() + self.pooling = pooling + + self.conv0_0 = SubMConv3d( + in_channels, + out_channels, + kernel_size=(3, 1, 3), + padding=1, + bias=False, + indice_key=indice_key + 'bef') + self.act0_0 = build_activation_layer(act_cfg) + self.bn0_0 = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv0_1 = SubMConv3d( + out_channels, + out_channels, + kernel_size=(1, 3, 3), + padding=1, + bias=False, + indice_key=indice_key + 'bef') + self.act0_1 = build_activation_layer(act_cfg) + self.bn0_1 = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv1_0 = SubMConv3d( + in_channels, + out_channels, + kernel_size=(1, 3, 3), + padding=1, + bias=False, + indice_key=indice_key + 'bef') + self.act1_0 = build_activation_layer(act_cfg) + self.bn1_0 = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv1_1 = SubMConv3d( + out_channels, + out_channels, + kernel_size=(3, 1, 3), + padding=1, + bias=False, + indice_key=indice_key + 'bef') + self.act1_1 = build_activation_layer(act_cfg) + self.bn1_1 = build_norm_layer(norm_cfg, out_channels)[1] + + if pooling: + if height_pooling: + self.pool = SparseConv3d( + out_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + indice_key=indice_key, + bias=False) + else: + self.pool = SparseConv3d( + out_channels, + out_channels, + kernel_size=3, + stride=(2, 2, 1), + padding=1, + indice_key=indice_key, + bias=False) + + def forward(self, x: SparseConvTensor) -> SparseConvTensor: + """Forward pass.""" + shortcut = self.conv0_0(x) + shortcut.features = self.act0_0(shortcut.features) + shortcut.features = self.bn0_0(shortcut.features) + + shortcut = self.conv0_1(shortcut) + shortcut.features = self.act0_1(shortcut.features) + shortcut.features = self.bn0_1(shortcut.features) + + res = self.conv1_0(x) + res.features = self.act1_0(res.features) + res.features = self.bn1_0(res.features) + + res = self.conv1_1(res) + res.features = self.act1_1(res.features) + res.features = self.bn1_1(res.features) + + res.features = res.features + shortcut.features + + if self.pooling: + pooled_res = self.pool(res) + return pooled_res, res + else: + return res + + +class AsymmeUpBlock(BaseModule): + """Asymmetrical UpSample Block. + + Args: + in_channels (int): Input channels of the block. + out_channels (int): Output channels of the block. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for + normalization layer. + act_cfg (:obj:`ConfigDict` or dict): Config dict of activation layers. + Defaults to dict(type='LeakyReLU'). + indice_key (str, optional): Name of indice tables. Defaults to None. + up_key (str, optional): Name of indice tables used in + SparseInverseConv3d. Defaults to None. 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + norm_cfg: ConfigType, + act_cfg: ConfigType = dict(type='LeakyReLU'), + indice_key: Optional[str] = None, + up_key: Optional[str] = None): + super().__init__() + + self.trans_conv = SubMConv3d( + in_channels, + out_channels, + kernel_size=(3, 3, 3), + padding=1, + bias=False, + indice_key=indice_key + 'new_up') + self.trans_act = build_activation_layer(act_cfg) + self.trans_bn = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv1 = SubMConv3d( + out_channels, + out_channels, + kernel_size=(1, 3, 3), + padding=1, + bias=False, + indice_key=indice_key) + self.act1 = build_activation_layer(act_cfg) + self.bn1 = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv2 = SubMConv3d( + out_channels, + out_channels, + kernel_size=(3, 1, 3), + padding=1, + bias=False, + indice_key=indice_key) + self.act2 = build_activation_layer(act_cfg) + self.bn2 = build_norm_layer(norm_cfg, out_channels)[1] + + self.conv3 = SubMConv3d( + out_channels, + out_channels, + kernel_size=(3, 3, 3), + padding=1, + bias=False, + indice_key=indice_key) + self.act3 = build_activation_layer(act_cfg) + self.bn3 = build_norm_layer(norm_cfg, out_channels)[1] + + self.up_subm = SparseInverseConv3d( + out_channels, + out_channels, + kernel_size=3, + indice_key=up_key, + bias=False) + + def forward(self, x: SparseConvTensor, + skip: SparseConvTensor) -> SparseConvTensor: + """Forward pass.""" + x_trans = self.trans_conv(x) + x_trans.features = self.trans_act(x_trans.features) + x_trans.features = self.trans_bn(x_trans.features) + + # upsample + up = self.up_subm(x_trans) + + up.features = up.features + skip.features + + up = self.conv1(up) + up.features = self.act1(up.features) + up.features = self.bn1(up.features) + + up = self.conv2(up) + up.features = self.act2(up.features) + up.features = self.bn2(up.features) + + up = self.conv3(up) + up.features = self.act3(up.features) + up.features = self.bn3(up.features) + + return up + + +class DDCMBlock(BaseModule): + """Dimension-Decomposition based Context Modeling. + + Args: + in_channels (int): Input channels of the block. + out_channels (int): Output channels of the block. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for + normalization layer. + act_cfg (:obj:`ConfigDict` or dict): Config dict of activation layers. + Defaults to dict(type='Sigmoid'). + indice_key (str, optional): Name of indice tables. Defaults to None. 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + norm_cfg: ConfigType, + act_cfg: ConfigType = dict(type='Sigmoid'), + indice_key: Optional[str] = None): + super().__init__() + + self.conv1 = SubMConv3d( + in_channels, + out_channels, + kernel_size=(3, 1, 1), + padding=1, + bias=False, + indice_key=indice_key) + self.bn1 = build_norm_layer(norm_cfg, out_channels)[1] + self.act1 = build_activation_layer(act_cfg) + + self.conv2 = SubMConv3d( + in_channels, + out_channels, + kernel_size=(1, 3, 1), + padding=1, + bias=False, + indice_key=indice_key) + self.bn2 = build_norm_layer(norm_cfg, out_channels)[1] + self.act2 = build_activation_layer(act_cfg) + + self.conv3 = SubMConv3d( + in_channels, + out_channels, + kernel_size=(1, 1, 3), + padding=1, + bias=False, + indice_key=indice_key) + self.bn3 = build_norm_layer(norm_cfg, out_channels)[1] + self.act3 = build_activation_layer(act_cfg) + + def forward(self, x: SparseConvTensor) -> SparseConvTensor: + """Forward pass.""" + shortcut = self.conv1(x) + shortcut.features = self.bn1(shortcut.features) + shortcut.features = self.act1(shortcut.features) + + shortcut2 = self.conv2(x) + shortcut2.features = self.bn2(shortcut2.features) + shortcut2.features = self.act2(shortcut2.features) + + shortcut3 = self.conv3(x) + shortcut3.features = self.bn3(shortcut3.features) + shortcut3.features = self.act3(shortcut3.features) + shortcut.features = shortcut.features + \ + shortcut2.features + shortcut3.features + + shortcut.features = shortcut.features * x.features + + return shortcut + + +@MODELS.register_module() +class Asymm3DSpconv(BaseModule): + """Asymmetrical 3D convolution networks. + + Args: + grid_size (int): Size of voxel grids. + input_channels (int): Input channels of the block. + base_channels (int): Initial size of feature channels before + feeding into Encoder-Decoder structure. Defaults to 16. + backbone_depth (int): The depth of backbone. The backbone contains + downblocks and upblocks with the number of backbone_depth. + height_pooing (List[bool]): List indicating which downblocks perform + height pooling. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01)). + init_cfg (dict, optional): Initialization config. + Defaults to None. 
+ """ + + def __init__(self, + grid_size: int, + input_channels: int, + base_channels: int = 16, + backbone_depth: int = 4, + height_pooing: List[bool] = [True, True, False, False], + norm_cfg: ConfigType = dict( + type='BN1d', eps=1e-3, momentum=0.01), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.grid_size = grid_size + self.backbone_depth = backbone_depth + self.down_context = AsymmResBlock( + input_channels, base_channels, indice_key='pre', norm_cfg=norm_cfg) + + self.down_block_list = torch.nn.ModuleList() + self.up_block_list = torch.nn.ModuleList() + for i in range(self.backbone_depth): + self.down_block_list.append( + AsymmeDownBlock( + 2**i * base_channels, + 2**(i + 1) * base_channels, + height_pooling=height_pooing[i], + indice_key='down' + str(i), + norm_cfg=norm_cfg)) + if i == self.backbone_depth - 1: + self.up_block_list.append( + AsymmeUpBlock( + 2**(i + 1) * base_channels, + 2**(i + 1) * base_channels, + up_key='down' + str(i), + indice_key='up' + str(self.backbone_depth - 1 - i), + norm_cfg=norm_cfg)) + else: + self.up_block_list.append( + AsymmeUpBlock( + 2**(i + 2) * base_channels, + 2**(i + 1) * base_channels, + up_key='down' + str(i), + indice_key='up' + str(self.backbone_depth - 1 - i), + norm_cfg=norm_cfg)) + + self.ddcm = DDCMBlock( + 2 * base_channels, + 2 * base_channels, + indice_key='ddcm', + norm_cfg=norm_cfg) + + def forward(self, voxel_features: torch.Tensor, coors: torch.Tensor, + batch_size: int) -> SparseConvTensor: + """Forward pass.""" + coors = coors.int() + ret = SparseConvTensor(voxel_features, coors, np.array(self.grid_size), + batch_size) + ret = self.down_context(ret) + + down_skip_list = [] + down_pool = ret + for i in range(self.backbone_depth): + down_pool, down_skip = self.down_block_list[i](down_pool) + down_skip_list.append(down_skip) + + up = down_pool + for i in range(self.backbone_depth - 1, -1, -1): + up = self.up_block_list[i](up, down_skip_list[i]) + + ddcm = self.ddcm(up) + ddcm.features = torch.cat((ddcm.features, up.features), 1) + + return ddcm diff --git a/mmdet3d/models/backbones/dgcnn.py b/mmdet3d/models/backbones/dgcnn.py new file mode 100755 index 0000000..3713d1d --- /dev/null +++ b/mmdet3d/models/backbones/dgcnn.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.model import BaseModule +from torch import nn as nn + +from mmdet3d.models.layers import DGCNNFAModule, DGCNNGFModule +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class DGCNNBackbone(BaseModule): + """Backbone network for DGCNN. + + Args: + in_channels (int): Input channels of point cloud. + num_samples (tuple[int], optional): The number of samples for knn or + ball query in each graph feature (GF) module. + Defaults to (20, 20, 20). + knn_modes (tuple[str], optional): Mode of KNN of each knn module. + Defaults to ('D-KNN', 'F-KNN', 'F-KNN'). + radius (tuple[float], optional): Sampling radii of each GF module. + Defaults to (None, None, None). + gf_channels (tuple[tuple[int]], optional): Out channels of each mlp in + GF module. Defaults to ((64, 64), (64, 64), (64, )). + fa_channels (tuple[int], optional): Out channels of each mlp in FA + module. Defaults to (1024, ). + act_cfg (dict, optional): Config of activation layer. + Defaults to dict(type='ReLU'). + init_cfg (dict, optional): Initialization config. + Defaults to None. 
+ """ + + def __init__(self, + in_channels, + num_samples=(20, 20, 20), + knn_modes=('D-KNN', 'F-KNN', 'F-KNN'), + radius=(None, None, None), + gf_channels=((64, 64), (64, 64), (64, )), + fa_channels=(1024, ), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.num_gf = len(gf_channels) + + assert len(num_samples) == len(knn_modes) == len(radius) == len( + gf_channels), 'Num_samples, knn_modes, radius and gf_channels \ + should have the same length.' + + self.GF_modules = nn.ModuleList() + gf_in_channel = in_channels * 2 + skip_channel_list = [gf_in_channel] # input channel list + + for gf_index in range(self.num_gf): + cur_gf_mlps = list(gf_channels[gf_index]) + cur_gf_mlps = [gf_in_channel] + cur_gf_mlps + gf_out_channel = cur_gf_mlps[-1] + + self.GF_modules.append( + DGCNNGFModule( + mlp_channels=cur_gf_mlps, + num_sample=num_samples[gf_index], + knn_mode=knn_modes[gf_index], + radius=radius[gf_index], + act_cfg=act_cfg)) + skip_channel_list.append(gf_out_channel) + gf_in_channel = gf_out_channel * 2 + + fa_in_channel = sum(skip_channel_list[1:]) + cur_fa_mlps = list(fa_channels) + cur_fa_mlps = [fa_in_channel] + cur_fa_mlps + + self.FA_module = DGCNNFAModule( + mlp_channels=cur_fa_mlps, act_cfg=act_cfg) + + def forward(self, points): + """Forward pass. + + Args: + points (torch.Tensor): point coordinates with features, + with shape (B, N, in_channels). + + Returns: + dict[str, list[torch.Tensor]]: Outputs after graph feature (GF) and + feature aggregation (FA) modules. + + - gf_points (list[torch.Tensor]): Outputs after each GF module. + - fa_points (torch.Tensor): Outputs after FA module. + """ + gf_points = [points] + + for i in range(self.num_gf): + cur_points = self.GF_modules[i](gf_points[i]) + gf_points.append(cur_points) + + fa_points = self.FA_module(gf_points) + + out = dict(gf_points=gf_points, fa_points=fa_points) + return out diff --git a/mmdet3d/models/backbones/dla.py b/mmdet3d/models/backbones/dla.py new file mode 100755 index 0000000..6fd9927 --- /dev/null +++ b/mmdet3d/models/backbones/dla.py @@ -0,0 +1,446 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule +from torch import nn + +from mmdet3d.registry import MODELS + + +def dla_build_norm_layer(cfg, num_features): + """Build normalization layer specially designed for DLANet. + + Args: + cfg (dict): The norm layer config, which should contain: + + - type (str): Layer type. + - layer args: Args needed to instantiate a norm layer. + - requires_grad (bool, optional): Whether stop gradient updates. + num_features (int): Number of input channels. + + + Returns: + Function: Build normalization layer in mmcv. + """ + cfg_ = cfg.copy() + if cfg_['type'] == 'GN': + if num_features % 32 == 0: + return build_norm_layer(cfg_, num_features) + else: + assert 'num_groups' in cfg_ + cfg_['num_groups'] = cfg_['num_groups'] // 2 + return build_norm_layer(cfg_, num_features) + else: + return build_norm_layer(cfg_, num_features) + + +class BasicBlock(BaseModule): + """BasicBlock in DLANet. + + Args: + in_channels (int): Input feature channel. + out_channels (int): Output feature channel. + norm_cfg (dict): Dictionary to construct and config + norm layer. + conv_cfg (dict): Dictionary to construct and config + conv layer. + stride (int, optional): Conv stride. Default: 1. + dilation (int, optional): Conv dilation. Default: 1. 
+ init_cfg (dict, optional): Initialization config. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + norm_cfg, + conv_cfg, + stride=1, + dilation=1, + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg) + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.norm1 = dla_build_norm_layer(norm_cfg, out_channels)[1] + self.relu = nn.ReLU(inplace=True) + self.conv2 = build_conv_layer( + conv_cfg, + out_channels, + out_channels, + 3, + stride=1, + padding=dilation, + dilation=dilation, + bias=False) + self.norm2 = dla_build_norm_layer(norm_cfg, out_channels)[1] + self.stride = stride + + def forward(self, x, identity=None): + """Forward function.""" + + if identity is None: + identity = x + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + out = self.conv2(out) + out = self.norm2(out) + out += identity + out = self.relu(out) + + return out + + +class Root(BaseModule): + """Root in DLANet. + + Args: + in_channels (int): Input feature channel. + out_channels (int): Output feature channel. + norm_cfg (dict): Dictionary to construct and config + norm layer. + conv_cfg (dict): Dictionary to construct and config + conv layer. + kernel_size (int): Size of convolution kernel. + add_identity (bool): Whether to add identity in root. + init_cfg (dict, optional): Initialization config. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + norm_cfg, + conv_cfg, + kernel_size, + add_identity, + init_cfg=None): + super(Root, self).__init__(init_cfg) + self.conv = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + 1, + stride=1, + padding=(kernel_size - 1) // 2, + bias=False) + self.norm = dla_build_norm_layer(norm_cfg, out_channels)[1] + self.relu = nn.ReLU(inplace=True) + self.add_identity = add_identity + + def forward(self, feat_list): + """Forward function. + + Args: + feat_list (list[torch.Tensor]): Output features from + multiple layers. + """ + children = feat_list + x = self.conv(torch.cat(feat_list, 1)) + x = self.norm(x) + if self.add_identity: + x += children[0] + x = self.relu(x) + + return x + + +class Tree(BaseModule): + """Tree in DLANet. + + Args: + levels (int): The level of the tree. + block (nn.Module): The block module in tree. + in_channels: Input feature channel. + out_channels: Output feature channel. + norm_cfg (dict): Dictionary to construct and config + norm layer. + conv_cfg (dict): Dictionary to construct and config + conv layer. + stride (int, optional): Convolution stride. + Default: 1. + level_root (bool, optional): whether belongs to the + root layer. + root_dim (int, optional): Root input feature channel. + root_kernel_size (int, optional): Size of root + convolution kernel. Default: 1. + dilation (int, optional): Conv dilation. Default: 1. + add_identity (bool, optional): Whether to add + identity in root. Default: False. + init_cfg (dict, optional): Initialization config. + Default: None. 
+ """ + + def __init__(self, + levels, + block, + in_channels, + out_channels, + norm_cfg, + conv_cfg, + stride=1, + level_root=False, + root_dim=None, + root_kernel_size=1, + dilation=1, + add_identity=False, + init_cfg=None): + super(Tree, self).__init__(init_cfg) + if root_dim is None: + root_dim = 2 * out_channels + if level_root: + root_dim += in_channels + if levels == 1: + self.root = Root(root_dim, out_channels, norm_cfg, conv_cfg, + root_kernel_size, add_identity) + self.tree1 = block( + in_channels, + out_channels, + norm_cfg, + conv_cfg, + stride, + dilation=dilation) + self.tree2 = block( + out_channels, + out_channels, + norm_cfg, + conv_cfg, + 1, + dilation=dilation) + else: + self.tree1 = Tree( + levels - 1, + block, + in_channels, + out_channels, + norm_cfg, + conv_cfg, + stride, + root_dim=None, + root_kernel_size=root_kernel_size, + dilation=dilation, + add_identity=add_identity) + self.tree2 = Tree( + levels - 1, + block, + out_channels, + out_channels, + norm_cfg, + conv_cfg, + root_dim=root_dim + out_channels, + root_kernel_size=root_kernel_size, + dilation=dilation, + add_identity=add_identity) + self.level_root = level_root + self.root_dim = root_dim + self.downsample = None + self.project = None + self.levels = levels + if stride > 1: + self.downsample = nn.MaxPool2d(stride, stride=stride) + if in_channels != out_channels: + self.project = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + 1, + stride=1, + bias=False), + dla_build_norm_layer(norm_cfg, out_channels)[1]) + + def forward(self, x, identity=None, children=None): + children = [] if children is None else children + bottom = self.downsample(x) if self.downsample else x + identity = self.project(bottom) if self.project else bottom + if self.level_root: + children.append(bottom) + x1 = self.tree1(x, identity) + if self.levels == 1: + x2 = self.tree2(x1) + feat_list = [x2, x1] + children + x = self.root(feat_list) + else: + children.append(x1) + x = self.tree2(x1, children=children) + return x + + +@MODELS.register_module() +class DLANet(BaseModule): + r"""`DLA backbone `_. + + Args: + depth (int): Depth of DLA. Default: 34. + in_channels (int, optional): Number of input image channels. + Default: 3. + norm_cfg (dict, optional): Dictionary to construct and config + norm layer. Default: None. + conv_cfg (dict, optional): Dictionary to construct and config + conv layer. Default: None. + layer_with_level_root (list[bool], optional): Whether to apply + level_root in each DLA layer, this is only used for + tree levels. Default: (False, True, True, True). + with_identity_root (bool, optional): Whether to add identity + in root layer. Default: False. + pretrained (str, optional): model pretrained path. + Default: None. + init_cfg (dict or list[dict], optional): Initialization + config dict. 
Default: None + """ + arch_settings = { + 34: (BasicBlock, (1, 1, 1, 2, 2, 1), (16, 32, 64, 128, 256, 512)), + } + + def __init__(self, + depth, + in_channels=3, + out_indices=(0, 1, 2, 3, 4, 5), + frozen_stages=-1, + norm_cfg=None, + conv_cfg=None, + layer_with_level_root=(False, True, True, True), + with_identity_root=False, + pretrained=None, + init_cfg=None): + super(DLANet, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalida depth {depth} for DLA') + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + + block, levels, channels = self.arch_settings[depth] + self.channels = channels + self.num_levels = len(levels) + self.frozen_stages = frozen_stages + self.out_indices = out_indices + assert max(out_indices) < self.num_levels + self.base_layer = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + channels[0], + 7, + stride=1, + padding=3, + bias=False), + dla_build_norm_layer(norm_cfg, channels[0])[1], + nn.ReLU(inplace=True)) + + # DLANet first uses two conv layers then uses several + # Tree layers + for i in range(2): + level_layer = self._make_conv_level( + channels[0], + channels[i], + levels[i], + norm_cfg, + conv_cfg, + stride=i + 1) + layer_name = f'level{i}' + self.add_module(layer_name, level_layer) + + for i in range(2, self.num_levels): + dla_layer = Tree( + levels[i], + block, + channels[i - 1], + channels[i], + norm_cfg, + conv_cfg, + 2, + level_root=layer_with_level_root[i - 2], + add_identity=with_identity_root) + layer_name = f'level{i}' + self.add_module(layer_name, dla_layer) + + self._freeze_stages() + + def _make_conv_level(self, + in_channels, + out_channels, + num_convs, + norm_cfg, + conv_cfg, + stride=1, + dilation=1): + """Conv modules. + + Args: + in_channels (int): Input feature channel. + out_channels (int): Output feature channel. + num_convs (int): Number of Conv module. + norm_cfg (dict): Dictionary to construct and config + norm layer. + conv_cfg (dict): Dictionary to construct and config + conv layer. + stride (int, optional): Conv stride. Default: 1. + dilation (int, optional): Conv dilation. Default: 1. 
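+        Returns:
+            nn.Sequential: ``num_convs`` stacked ``conv -> norm -> ReLU``
+            blocks.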
+ """ + modules = [] + for i in range(num_convs): + modules.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + 3, + stride=stride if i == 0 else 1, + padding=dilation, + bias=False, + dilation=dilation), + dla_build_norm_layer(norm_cfg, out_channels)[1], + nn.ReLU(inplace=True) + ]) + in_channels = out_channels + return nn.Sequential(*modules) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.base_layer.eval() + for param in self.base_layer.parameters(): + param.requires_grad = False + + for i in range(2): + m = getattr(self, f'level{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'level{i+1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + outs = [] + x = self.base_layer(x) + for i in range(self.num_levels): + x = getattr(self, 'level{}'.format(i))(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/mmdet3d/models/backbones/mink_resnet.py b/mmdet3d/models/backbones/mink_resnet.py new file mode 100755 index 0000000..91ff10c --- /dev/null +++ b/mmdet3d/models/backbones/mink_resnet.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Follow https://github.com/NVIDIA/MinkowskiEngine/blob/master/examples/resnet.py # noqa +# and mmcv.cnn.ResNet +from typing import List, Union + +try: + import MinkowskiEngine as ME + from MinkowskiEngine import SparseTensor + from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck +except ImportError: + # blocks are used in the static part of MinkResNet + ME = BasicBlock = Bottleneck = SparseTensor = None + +import torch.nn as nn +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class MinkResNet(BaseModule): + r"""Minkowski ResNet backbone. See `4D Spatio-Temporal ConvNets + `_ for more details. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input channels, 3 for RGB. + num_stages (int): Resnet stages. Defaults to 4. + pool (bool): Whether to add max pooling after first conv. + Defaults to True. + """ + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth: int, + in_channels: int, + num_stages: int = 4, + pool: bool = True): + super(MinkResNet, self).__init__() + if ME is None: + raise ImportError( + 'Please follow `get_started.md` to install MinkowskiEngine.`') + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + assert 4 >= num_stages >= 1 + block, stage_blocks = self.arch_settings[depth] + stage_blocks = stage_blocks[:num_stages] + self.num_stages = num_stages + self.pool = pool + + self.inplanes = 64 + self.conv1 = ME.MinkowskiConvolution( + in_channels, self.inplanes, kernel_size=3, stride=2, dimension=3) + # May be BatchNorm is better, but we follow original implementation. 
+ self.norm1 = ME.MinkowskiInstanceNorm(self.inplanes) + self.relu = ME.MinkowskiReLU(inplace=True) + if self.pool: + self.maxpool = ME.MinkowskiMaxPooling( + kernel_size=2, stride=2, dimension=3) + + for i in range(len(stage_blocks)): + setattr( + self, f'layer{i + 1}', + self._make_layer(block, 64 * 2**i, stage_blocks[i], stride=2)) + + def init_weights(self): + """Initialize weights.""" + for m in self.modules(): + if isinstance(m, ME.MinkowskiConvolution): + ME.utils.kaiming_normal_( + m.kernel, mode='fan_out', nonlinearity='relu') + + if isinstance(m, ME.MinkowskiBatchNorm): + nn.init.constant_(m.bn.weight, 1) + nn.init.constant_(m.bn.bias, 0) + + def _make_layer(self, block: Union[BasicBlock, Bottleneck], planes: int, + blocks: int, stride: int) -> nn.Module: + """Make single level of residual blocks. + + Args: + block (BasicBlock | Bottleneck): Residual block class. + planes (int): Number of convolution filters. + blocks (int): Number of blocks in the layers. + stride (int): Stride of the first convolutional layer. + + Returns: + nn.Module: With residual blocks. + """ + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + ME.MinkowskiConvolution( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + dimension=3), + ME.MinkowskiBatchNorm(planes * block.expansion)) + layers = [] + layers.append( + block( + self.inplanes, + planes, + stride=stride, + downsample=downsample, + dimension=3)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, stride=1, dimension=3)) + return nn.Sequential(*layers) + + def forward(self, x: SparseTensor) -> List[SparseTensor]: + """Forward pass of ResNet. + + Args: + x (ME.SparseTensor): Input sparse tensor. + + Returns: + list[ME.SparseTensor]: Output sparse tensors. + """ + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + if self.pool: + x = self.maxpool(x) + outs = [] + for i in range(self.num_stages): + x = getattr(self, f'layer{i + 1}')(x) + outs.append(x) + return outs diff --git a/mmdet3d/models/backbones/minkunet_backbone.py b/mmdet3d/models/backbones/minkunet_backbone.py new file mode 100755 index 0000000..22a725c --- /dev/null +++ b/mmdet3d/models/backbones/minkunet_backbone.py @@ -0,0 +1,121 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from mmengine.model import BaseModule +from mmengine.registry import MODELS +from torch import Tensor, nn + +from mmdet3d.models.layers import (TorchSparseConvModule, + TorchSparseResidualBlock) +from mmdet3d.models.layers.torchsparse import IS_TORCHSPARSE_AVAILABLE +from mmdet3d.utils import OptMultiConfig + +if IS_TORCHSPARSE_AVAILABLE: + import torchsparse + from torchsparse.tensor import SparseTensor +else: + SparseTensor = None + + +@MODELS.register_module() +class MinkUNetBackbone(BaseModule): + r"""MinkUNet backbone with TorchSparse backend. + + Refer to `implementation code `_. + + Args: + in_channels (int): Number of input voxel feature channels. + Defaults to 4. + base_channels (int): The input channels for first encoder layer. + Defaults to 32. + encoder_channels (List[int]): Convolutional channels of each encode + layer. Defaults to [32, 64, 128, 256]. + decoder_channels (List[int]): Convolutional channels of each decode + layer. Defaults to [256, 128, 96, 96]. + num_stages (int): Number of stages in encoder and decoder. + Defaults to 4. 
+ init_cfg (dict or :obj:`ConfigDict` or List[dict or :obj:`ConfigDict`] + , optional): Initialization config dict. + """ + + def __init__(self, + in_channels: int = 4, + base_channels: int = 32, + encoder_channels: List[int] = [32, 64, 128, 256], + decoder_channels: List[int] = [256, 128, 96, 96], + num_stages: int = 4, + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg) + assert num_stages == len(encoder_channels) == len(decoder_channels) + self.num_stages = num_stages + self.conv_input = nn.Sequential( + TorchSparseConvModule(in_channels, base_channels, kernel_size=3), + TorchSparseConvModule(base_channels, base_channels, kernel_size=3)) + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + encoder_channels.insert(0, base_channels) + decoder_channels.insert(0, encoder_channels[-1]) + for i in range(num_stages): + self.encoder.append( + nn.Sequential( + TorchSparseConvModule( + encoder_channels[i], + encoder_channels[i], + kernel_size=2, + stride=2), + TorchSparseResidualBlock( + encoder_channels[i], + encoder_channels[i + 1], + kernel_size=3), + TorchSparseResidualBlock( + encoder_channels[i + 1], + encoder_channels[i + 1], + kernel_size=3))) + + self.decoder.append( + nn.ModuleList([ + TorchSparseConvModule( + decoder_channels[i], + decoder_channels[i + 1], + kernel_size=2, + stride=2, + transposed=True), + nn.Sequential( + TorchSparseResidualBlock( + decoder_channels[i + 1] + encoder_channels[-2 - i], + decoder_channels[i + 1], + kernel_size=3), + TorchSparseResidualBlock( + decoder_channels[i + 1], + decoder_channels[i + 1], + kernel_size=3)) + ])) + + def forward(self, voxel_features: Tensor, coors: Tensor) -> SparseTensor: + """Forward function. + + Args: + voxel_features (Tensor): Voxel features in shape (N, C). + coors (Tensor): Coordinates in shape (N, 4), + the columns in the order of (x_idx, y_idx, z_idx, batch_idx). + + Returns: + SparseTensor: Backbone features. + """ + x = torchsparse.SparseTensor(voxel_features, coors) + x = self.conv_input(x) + laterals = [x] + for encoder_layer in self.encoder: + x = encoder_layer(x) + laterals.append(x) + laterals = laterals[:-1][::-1] + + decoder_outs = [] + for i, decoder_layer in enumerate(self.decoder): + x = decoder_layer[0](x) + x = torchsparse.cat((x, laterals[i])) + x = decoder_layer[1](x) + decoder_outs.append(x) + + return decoder_outs[-1] diff --git a/mmdet3d/models/backbones/multi_backbone.py b/mmdet3d/models/backbones/multi_backbone.py new file mode 100755 index 0000000..5f77c25 --- /dev/null +++ b/mmdet3d/models/backbones/multi_backbone.py @@ -0,0 +1,127 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +import torch +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import nn as nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class MultiBackbone(BaseModule): + """MultiBackbone with different configs. + + Args: + num_streams (int): The number of backbones. + backbones (list or dict): A list of backbone configs. + aggregation_mlp_channels (list[int]): Specify the mlp layers + for feature aggregation. + conv_cfg (dict): Config dict of convolutional layers. + norm_cfg (dict): Config dict of normalization layers. + act_cfg (dict): Config dict of activation layers. + suffixes (list): A list of suffixes to rename the return dict + for each backbone. 
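+    Note:
+        With the default ``suffixes=('net0', 'net1')`` the returned dict
+        holds per-backbone keys such as ``fp_features_net0`` and
+        ``fp_features_net1``, plus the aggregated ``hd_feature`` produced by
+        the shared aggregation MLP.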
+ """ + + def __init__(self, + num_streams, + backbones, + aggregation_mlp_channels=None, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-5, momentum=0.01), + act_cfg=dict(type='ReLU'), + suffixes=('net0', 'net1'), + init_cfg=None, + pretrained=None, + **kwargs): + super().__init__(init_cfg=init_cfg) + assert isinstance(backbones, dict) or isinstance(backbones, list) + if isinstance(backbones, dict): + backbones_list = [] + for ind in range(num_streams): + backbones_list.append(copy.deepcopy(backbones)) + backbones = backbones_list + + assert len(backbones) == num_streams + assert len(suffixes) == num_streams + + self.backbone_list = nn.ModuleList() + # Rename the ret_dict with different suffixs. + self.suffixes = suffixes + + out_channels = 0 + + for backbone_cfg in backbones: + out_channels += backbone_cfg['fp_channels'][-1][-1] + self.backbone_list.append(MODELS.build(backbone_cfg)) + + # Feature aggregation layers + if aggregation_mlp_channels is None: + aggregation_mlp_channels = [ + out_channels, out_channels // 2, + out_channels // len(self.backbone_list) + ] + else: + aggregation_mlp_channels.insert(0, out_channels) + + self.aggregation_layers = nn.Sequential() + for i in range(len(aggregation_mlp_channels) - 1): + self.aggregation_layers.add_module( + f'layer{i}', + ConvModule( + aggregation_mlp_channels[i], + aggregation_mlp_channels[i + 1], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=True, + inplace=True)) + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + def forward(self, points): + """Forward pass. + + Args: + points (torch.Tensor): point coordinates with features, + with shape (B, N, 3 + input_feature_dim). + + Returns: + dict[str, list[torch.Tensor]]: Outputs from multiple backbones. + + - fp_xyz[suffix] (list[torch.Tensor]): The coordinates of + each fp features. + - fp_features[suffix] (list[torch.Tensor]): The features + from each Feature Propagate Layers. + - fp_indices[suffix] (list[torch.Tensor]): Indices of the + input points. + - hd_feature (torch.Tensor): The aggregation feature + from multiple backbones. + """ + ret = {} + fp_features = [] + for ind in range(len(self.backbone_list)): + cur_ret = self.backbone_list[ind](points) + cur_suffix = self.suffixes[ind] + fp_features.append(cur_ret['fp_features'][-1]) + cur_ret_new = dict() + if cur_suffix != '': + for k in cur_ret.keys(): + cur_ret_new[k + '_' + cur_suffix] = cur_ret[k] + ret.update(cur_ret_new) + + # Combine the features here + hd_feature = torch.cat(fp_features, dim=1) + hd_feature = self.aggregation_layers(hd_feature) + ret['hd_feature'] = hd_feature + return ret diff --git a/mmdet3d/models/backbones/nostem_regnet.py b/mmdet3d/models/backbones/nostem_regnet.py new file mode 100755 index 0000000..24a572c --- /dev/null +++ b/mmdet3d/models/backbones/nostem_regnet.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet.models.backbones import RegNet + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class NoStemRegNet(RegNet): + """RegNet backbone without Stem for 3D detection. + + More details can be found in `paper `_ . + + Args: + arch (dict): The parameter of RegNets. + - w0 (int): Initial width. + - wa (float): Slope of width. 
+ - wm (float): Quantization parameter to quantize the width. + - depth (int): Depth of the backbone. + - group_w (int): Width of group. + - bot_mul (float): Bottleneck ratio, i.e. expansion of bottleneck. + strides (Sequence[int]): Strides of the first block of each stage. + base_channels (int): Base channels after stem layer. + in_channels (int): Number of input image channels. Normally 3. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + + Example: + >>> from mmdet3d.models import NoStemRegNet + >>> import torch + >>> self = NoStemRegNet( + arch=dict( + w0=88, + wa=26.31, + wm=2.25, + group_w=48, + depth=25, + bot_mul=1.0)) + >>> self.eval() + >>> inputs = torch.rand(1, 64, 16, 16) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 96, 8, 8) + (1, 192, 4, 4) + (1, 432, 2, 2) + (1, 1008, 1, 1) + """ + + def __init__(self, arch, init_cfg=None, **kwargs): + super(NoStemRegNet, self).__init__(arch, init_cfg=init_cfg, **kwargs) + + def _make_stem_layer(self, in_channels, base_channels): + """Override the original function that do not initialize a stem layer + since 3D detector's voxel encoder works like a stem layer.""" + return + + def forward(self, x): + """Forward function of backbone. + + Args: + x (torch.Tensor): Features in shape (N, C, H, W). + + Returns: + tuple[torch.Tensor]: Multi-scale features. + """ + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/mmdet3d/models/backbones/pointnet2_sa_msg.py b/mmdet3d/models/backbones/pointnet2_sa_msg.py new file mode 100755 index 0000000..6675f28 --- /dev/null +++ b/mmdet3d/models/backbones/pointnet2_sa_msg.py @@ -0,0 +1,191 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +import torch +from mmcv.cnn import ConvModule +from torch import nn as nn + +from mmdet3d.models.layers.pointnet_modules import build_sa_module +from mmdet3d.registry import MODELS +from mmdet3d.utils import OptConfigType +from .base_pointnet import BasePointNet + +ThreeTupleIntType = Tuple[Tuple[Tuple[int, int, int]]] +TwoTupleIntType = Tuple[Tuple[int, int, int]] +TwoTupleStrType = Tuple[Tuple[str]] + + +@MODELS.register_module() +class PointNet2SAMSG(BasePointNet): + """PointNet2 with Multi-scale grouping. + + Args: + in_channels (int): Input channels of point cloud. + num_points (tuple[int]): The number of points which each SA + module samples. + radii (tuple[float]): Sampling radii of each SA module. + num_samples (tuple[int]): The number of samples for ball + query in each SA module. 
+ sa_channels (tuple[tuple[int]]): Out channels of each mlp in SA module. + aggregation_channels (tuple[int]): Out channels of aggregation + multi-scale grouping features. + fps_mods Sequence[Tuple[str]]: Mod of FPS for each SA module. + fps_sample_range_lists (tuple[tuple[int]]): The number of sampling + points which each SA module samples. + dilated_group (tuple[bool]): Whether to use dilated ball query for + out_indices (Sequence[int]): Output from which stages. + norm_cfg (dict): Config of normalization layer. + sa_cfg (dict): Config of set abstraction module, which may contain + the following keys and values: + + - pool_mod (str): Pool method ('max' or 'avg') for SA modules. + - use_xyz (bool): Whether to use xyz as a part of features. + - normalize_xyz (bool): Whether to normalize xyz with radii in + each SA module. + """ + + def __init__(self, + in_channels: int, + num_points: Tuple[int] = (2048, 1024, 512, 256), + radii: Tuple[Tuple[float, float, float]] = ( + (0.2, 0.4, 0.8), + (0.4, 0.8, 1.6), + (1.6, 3.2, 4.8), + ), + num_samples: TwoTupleIntType = ((32, 32, 64), (32, 32, 64), + (32, 32, 32)), + sa_channels: ThreeTupleIntType = (((16, 16, 32), (16, 16, 32), + (32, 32, 64)), + ((64, 64, 128), + (64, 64, 128), (64, 96, + 128)), + ((128, 128, 256), + (128, 192, 256), (128, 256, + 256))), + aggregation_channels: Tuple[int] = (64, 128, 256), + fps_mods: TwoTupleStrType = (('D-FPS'), ('FS'), ('F-FPS', + 'D-FPS')), + fps_sample_range_lists: TwoTupleIntType = ((-1), (-1), (512, + -1)), + dilated_group: Tuple[bool] = (True, True, True), + out_indices: Tuple[int] = (2, ), + norm_cfg: dict = dict(type='BN2d'), + sa_cfg: dict = dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False), + init_cfg: OptConfigType = None): + super().__init__(init_cfg=init_cfg) + self.num_sa = len(sa_channels) + self.out_indices = out_indices + assert max(out_indices) < self.num_sa + assert len(num_points) == len(radii) == len(num_samples) == len( + sa_channels) + if aggregation_channels is not None: + assert len(sa_channels) == len(aggregation_channels) + else: + aggregation_channels = [None] * len(sa_channels) + + self.SA_modules = nn.ModuleList() + self.aggregation_mlps = nn.ModuleList() + sa_in_channel = in_channels - 3 # number of channels without xyz + skip_channel_list = [sa_in_channel] + + for sa_index in range(self.num_sa): + cur_sa_mlps = list(sa_channels[sa_index]) + sa_out_channel = 0 + for radius_index in range(len(radii[sa_index])): + cur_sa_mlps[radius_index] = [sa_in_channel] + list( + cur_sa_mlps[radius_index]) + sa_out_channel += cur_sa_mlps[radius_index][-1] + + if isinstance(fps_mods[sa_index], tuple): + cur_fps_mod = list(fps_mods[sa_index]) + else: + cur_fps_mod = list([fps_mods[sa_index]]) + + if isinstance(fps_sample_range_lists[sa_index], tuple): + cur_fps_sample_range_list = list( + fps_sample_range_lists[sa_index]) + else: + cur_fps_sample_range_list = list( + [fps_sample_range_lists[sa_index]]) + + self.SA_modules.append( + build_sa_module( + num_point=num_points[sa_index], + radii=radii[sa_index], + sample_nums=num_samples[sa_index], + mlp_channels=cur_sa_mlps, + fps_mod=cur_fps_mod, + fps_sample_range_list=cur_fps_sample_range_list, + dilated_group=dilated_group[sa_index], + norm_cfg=norm_cfg, + cfg=sa_cfg, + bias=True)) + skip_channel_list.append(sa_out_channel) + + cur_aggregation_channel = aggregation_channels[sa_index] + if cur_aggregation_channel is None: + self.aggregation_mlps.append(None) + sa_in_channel = sa_out_channel + else: + 
self.aggregation_mlps.append( + ConvModule( + sa_out_channel, + cur_aggregation_channel, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + kernel_size=1, + bias=True)) + sa_in_channel = cur_aggregation_channel + + def forward(self, points: torch.Tensor): + """Forward pass. + + Args: + points (torch.Tensor): point coordinates with features, + with shape (B, N, 3 + input_feature_dim). + + Returns: + dict[str, torch.Tensor]: Outputs of the last SA module. + + - sa_xyz (torch.Tensor): The coordinates of sa features. + - sa_features (torch.Tensor): The features from the + last Set Aggregation Layers. + - sa_indices (torch.Tensor): Indices of the + input points. + """ + xyz, features = self._split_point_feats(points) + + batch, num_points = xyz.shape[:2] + indices = xyz.new_tensor(range(num_points)).unsqueeze(0).repeat( + batch, 1).long() + + sa_xyz = [xyz] + sa_features = [features] + sa_indices = [indices] + + out_sa_xyz = [xyz] + out_sa_features = [features] + out_sa_indices = [indices] + + for i in range(self.num_sa): + cur_xyz, cur_features, cur_indices = self.SA_modules[i]( + sa_xyz[i], sa_features[i]) + if self.aggregation_mlps[i] is not None: + cur_features = self.aggregation_mlps[i](cur_features) + sa_xyz.append(cur_xyz) + sa_features.append(cur_features) + sa_indices.append( + torch.gather(sa_indices[-1], 1, cur_indices.long())) + if i in self.out_indices: + out_sa_xyz.append(sa_xyz[-1]) + out_sa_features.append(sa_features[-1]) + out_sa_indices.append(sa_indices[-1]) + + return dict( + sa_xyz=out_sa_xyz, + sa_features=out_sa_features, + sa_indices=out_sa_indices) diff --git a/mmdet3d/models/backbones/pointnet2_sa_ssg.py b/mmdet3d/models/backbones/pointnet2_sa_ssg.py new file mode 100755 index 0000000..28c7812 --- /dev/null +++ b/mmdet3d/models/backbones/pointnet2_sa_ssg.py @@ -0,0 +1,141 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn as nn + +from mmdet3d.models.layers import PointFPModule, build_sa_module +from mmdet3d.registry import MODELS +from .base_pointnet import BasePointNet + + +@MODELS.register_module() +class PointNet2SASSG(BasePointNet): + """PointNet2 with Single-scale grouping. + + Args: + in_channels (int): Input channels of point cloud. + num_points (tuple[int]): The number of points which each SA + module samples. + radius (tuple[float]): Sampling radii of each SA module. + num_samples (tuple[int]): The number of samples for ball + query in each SA module. + sa_channels (tuple[tuple[int]]): Out channels of each mlp in SA module. + fp_channels (tuple[tuple[int]]): Out channels of each mlp in FP module. + norm_cfg (dict): Config of normalization layer. + sa_cfg (dict): Config of set abstraction module, which may contain + the following keys and values: + + - pool_mod (str): Pool method ('max' or 'avg') for SA modules. + - use_xyz (bool): Whether to use xyz as a part of features. + - normalize_xyz (bool): Whether to normalize xyz with radii in + each SA module. 
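+
+ Example:
+ A minimal construction sketch; the numbers are illustrative and
+ ``forward`` itself relies on the compiled point ops:
+ >>> from mmdet3d.models import PointNet2SASSG
+ >>> self = PointNet2SASSG(
+ in_channels=4,
+ num_points=(256, 64),
+ radius=(0.2, 0.4),
+ num_samples=(32, 16),
+ sa_channels=((64, 64, 128), (128, 128, 256)),
+ fp_channels=((256, 256), ))
+ >>> # ret = self(points) with points of shape (B, N, 4) returns a
+ >>> # dict with the 'fp_xyz', 'fp_features' and 'fp_indices' lists
+ >>> # as well as the intermediate 'sa_*' outputs.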
+ """ + + def __init__(self, + in_channels, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.num_sa = len(sa_channels) + self.num_fp = len(fp_channels) + + assert len(num_points) == len(radius) == len(num_samples) == len( + sa_channels) + assert len(sa_channels) >= len(fp_channels) + + self.SA_modules = nn.ModuleList() + sa_in_channel = in_channels - 3 # number of channels without xyz + skip_channel_list = [sa_in_channel] + + for sa_index in range(self.num_sa): + cur_sa_mlps = list(sa_channels[sa_index]) + cur_sa_mlps = [sa_in_channel] + cur_sa_mlps + sa_out_channel = cur_sa_mlps[-1] + + self.SA_modules.append( + build_sa_module( + num_point=num_points[sa_index], + radius=radius[sa_index], + num_sample=num_samples[sa_index], + mlp_channels=cur_sa_mlps, + norm_cfg=norm_cfg, + cfg=sa_cfg)) + skip_channel_list.append(sa_out_channel) + sa_in_channel = sa_out_channel + + self.FP_modules = nn.ModuleList() + + fp_source_channel = skip_channel_list.pop() + fp_target_channel = skip_channel_list.pop() + for fp_index in range(len(fp_channels)): + cur_fp_mlps = list(fp_channels[fp_index]) + cur_fp_mlps = [fp_source_channel + fp_target_channel] + cur_fp_mlps + self.FP_modules.append(PointFPModule(mlp_channels=cur_fp_mlps)) + if fp_index != len(fp_channels) - 1: + fp_source_channel = cur_fp_mlps[-1] + fp_target_channel = skip_channel_list.pop() + + def forward(self, points): + """Forward pass. + + Args: + points (torch.Tensor): point coordinates with features, + with shape (B, N, 3 + input_feature_dim). + + Returns: + dict[str, list[torch.Tensor]]: Outputs after SA and FP modules. + + - fp_xyz (list[torch.Tensor]): The coordinates of + each fp features. + - fp_features (list[torch.Tensor]): The features + from each Feature Propagate Layers. + - fp_indices (list[torch.Tensor]): Indices of the + input points. + """ + xyz, features = self._split_point_feats(points) + + batch, num_points = xyz.shape[:2] + indices = xyz.new_tensor(range(num_points)).unsqueeze(0).repeat( + batch, 1).long() + + sa_xyz = [xyz] + sa_features = [features] + sa_indices = [indices] + + for i in range(self.num_sa): + cur_xyz, cur_features, cur_indices = self.SA_modules[i]( + sa_xyz[i], sa_features[i]) + sa_xyz.append(cur_xyz) + sa_features.append(cur_features) + sa_indices.append( + torch.gather(sa_indices[-1], 1, cur_indices.long())) + + fp_xyz = [sa_xyz[-1]] + fp_features = [sa_features[-1]] + fp_indices = [sa_indices[-1]] + + for i in range(self.num_fp): + fp_features.append(self.FP_modules[i]( + sa_xyz[self.num_sa - i - 1], sa_xyz[self.num_sa - i], + sa_features[self.num_sa - i - 1], fp_features[-1])) + fp_xyz.append(sa_xyz[self.num_sa - i - 1]) + fp_indices.append(sa_indices[self.num_sa - i - 1]) + + ret = dict( + fp_xyz=fp_xyz, + fp_features=fp_features, + fp_indices=fp_indices, + sa_xyz=sa_xyz, + sa_features=sa_features, + sa_indices=sa_indices) + return ret diff --git a/mmdet3d/models/backbones/second.py b/mmdet3d/models/backbones/second.py new file mode 100755 index 0000000..bb1748a --- /dev/null +++ b/mmdet3d/models/backbones/second.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import warnings + +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule +from torch import nn as nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class SECOND(BaseModule): + """Backbone network for SECOND/PointPillars/PartA2/MVXNet. + + Args: + in_channels (int): Input channels. + out_channels (list[int]): Output channels for multi-scale feature maps. + layer_nums (list[int]): Number of layers in each stage. + layer_strides (list[int]): Strides of each stage. + norm_cfg (dict): Config dict of normalization layers. + conv_cfg (dict): Config dict of convolutional layers. + """ + + def __init__(self, + in_channels=128, + out_channels=[128, 128, 256], + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False), + init_cfg=None, + pretrained=None): + super(SECOND, self).__init__(init_cfg=init_cfg) + assert len(layer_strides) == len(layer_nums) + assert len(out_channels) == len(layer_nums) + + in_filters = [in_channels, *out_channels[:-1]] + # note that when stride > 1, conv2d with same padding isn't + # equal to pad-conv2d. we should use pad-conv2d. + blocks = [] + for i, layer_num in enumerate(layer_nums): + block = [ + build_conv_layer( + conv_cfg, + in_filters[i], + out_channels[i], + 3, + stride=layer_strides[i], + padding=1), + build_norm_layer(norm_cfg, out_channels[i])[1], + nn.ReLU(inplace=True), + ] + for j in range(layer_num): + block.append( + build_conv_layer( + conv_cfg, + out_channels[i], + out_channels[i], + 3, + padding=1)) + block.append(build_norm_layer(norm_cfg, out_channels[i])[1]) + block.append(nn.ReLU(inplace=True)) + + block = nn.Sequential(*block) + blocks.append(block) + + self.blocks = nn.ModuleList(blocks) + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + else: + self.init_cfg = dict(type='Kaiming', layer='Conv2d') + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): Input with shape (N, C, H, W). + + Returns: + tuple[torch.Tensor]: Multi-scale features. + """ + outs = [] + for i in range(len(self.blocks)): + x = self.blocks[i](x) + outs.append(x) + return tuple(outs) diff --git a/mmdet3d/models/backbones/spvcnn_backone.py b/mmdet3d/models/backbones/spvcnn_backone.py new file mode 100755 index 0000000..fbdb94c --- /dev/null +++ b/mmdet3d/models/backbones/spvcnn_backone.py @@ -0,0 +1,237 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence + +import torch +from mmengine.registry import MODELS +from torch import Tensor, nn + +from mmdet3d.models.layers.torchsparse import IS_TORCHSPARSE_AVAILABLE +from mmdet3d.utils import OptMultiConfig +from .minkunet_backbone import MinkUNetBackbone + +if IS_TORCHSPARSE_AVAILABLE: + import torchsparse + import torchsparse.nn.functional as F + from torchsparse.nn.utils import get_kernel_offsets + from torchsparse.tensor import PointTensor, SparseTensor +else: + PointTensor = SparseTensor = None + + +@MODELS.register_module() +class SPVCNNBackbone(MinkUNetBackbone): + """SPVCNN backbone with torchsparse backend. + + More details can be found in `paper `_ . + + Args: + in_channels (int): Number of input voxel feature channels. + Defaults to 4. 
+ base_channels (int): The input channels for first encoder layer. + Defaults to 32. + encoder_channels (List[int]): Convolutional channels of each encode + layer. Defaults to [32, 64, 128, 256]. + decoder_channels (List[int]): Convolutional channels of each decode + layer. Defaults to [256, 128, 96, 96]. + num_stages (int): Number of stages in encoder and decoder. + Defaults to 4. + drop_ratio (float): Dropout ratio of voxel features. Defaults to 0.3. + init_cfg (dict or :obj:`ConfigDict` or list[dict or :obj:`ConfigDict`] + , optional): Initialization config dict. Defaults to None. + """ + + def __init__(self, + in_channels: int = 4, + base_channels: int = 32, + encoder_channels: Sequence[int] = [32, 64, 128, 256], + decoder_channels: Sequence[int] = [256, 128, 96, 96], + num_stages: int = 4, + drop_ratio: float = 0.3, + init_cfg: OptMultiConfig = None) -> None: + super().__init__( + in_channels=in_channels, + base_channels=base_channels, + encoder_channels=encoder_channels, + decoder_channels=decoder_channels, + num_stages=num_stages, + init_cfg=init_cfg) + + self.point_transforms = nn.ModuleList([ + nn.Sequential( + nn.Linear(base_channels, encoder_channels[-1]), + nn.BatchNorm1d(encoder_channels[-1]), nn.ReLU(True)), + nn.Sequential( + nn.Linear(encoder_channels[-1], decoder_channels[2]), + nn.BatchNorm1d(decoder_channels[2]), nn.ReLU(True)), + nn.Sequential( + nn.Linear(decoder_channels[2], decoder_channels[4]), + nn.BatchNorm1d(decoder_channels[4]), nn.ReLU(True)) + ]) + self.dropout = nn.Dropout(drop_ratio, True) + + def forward(self, voxel_features: Tensor, coors: Tensor) -> PointTensor: + """Forward function. + + Args: + voxel_features (Tensor): Voxel features in shape (N, C). + coors (Tensor): Coordinates in shape (N, 4), + the columns in the order of (x_idx, y_idx, z_idx, batch_idx). + + Returns: + PointTensor: Backbone features. + """ + voxels = SparseTensor(voxel_features, coors) + points = PointTensor(voxels.F, voxels.C.float()) + voxels = self.initial_voxelize(points) + + voxels = self.conv_input(voxels) + points = self.voxel_to_point(voxels, points) + voxels = self.point_to_voxel(voxels, points) + laterals = [voxels] + for encoder in self.encoder: + voxels = encoder(voxels) + laterals.append(voxels) + laterals = laterals[:-1][::-1] + + points = self.voxel_to_point(voxels, points, self.point_transforms[0]) + voxels = self.point_to_voxel(voxels, points) + voxels.F = self.dropout(voxels.F) + + decoder_outs = [] + for i, decoder in enumerate(self.decoder): + voxels = decoder[0](voxels) + voxels = torchsparse.cat((voxels, laterals[i])) + voxels = decoder[1](voxels) + decoder_outs.append(voxels) + if i == 1: + points = self.voxel_to_point(voxels, points, + self.point_transforms[1]) + voxels = self.point_to_voxel(voxels, points) + voxels.F = self.dropout(voxels.F) + + points = self.voxel_to_point(voxels, points, self.point_transforms[2]) + return points + + def initial_voxelize(self, points: PointTensor) -> SparseTensor: + """Voxelization again based on input PointTensor. + + Args: + points (PointTensor): Input points after voxelization. + + Returns: + SparseTensor: New voxels. 
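+
+ Note:
+ The point coordinates are hashed with ``F.sphash``, duplicate
+ hashes are merged via ``F.sphashquery`` and ``F.spcount``, and
+ the per-voxel average of coordinates and features is taken with
+ ``F.spvoxelize``. The resulting ``idx_query`` and ``counts`` are
+ cached in ``points.additional_features`` for the later
+ point/voxel conversions.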
+ """ + pc_hash = F.sphash(torch.floor(points.C).int()) + sparse_hash = torch.unique(pc_hash) + idx_query = F.sphashquery(pc_hash, sparse_hash) + counts = F.spcount(idx_query.int(), len(sparse_hash)) + + inserted_coords = F.spvoxelize( + torch.floor(points.C), idx_query, counts) + inserted_coords = torch.round(inserted_coords).int() + inserted_feat = F.spvoxelize(points.F, idx_query, counts) + + new_tensor = SparseTensor(inserted_feat, inserted_coords, 1) + new_tensor.cmaps.setdefault(new_tensor.stride, new_tensor.coords) + points.additional_features['idx_query'][1] = idx_query + points.additional_features['counts'][1] = counts + return new_tensor + + def voxel_to_point(self, + voxels: SparseTensor, + points: PointTensor, + point_transform: Optional[nn.Module] = None, + nearest: bool = False) -> PointTensor: + """Feed voxel features to points. + + Args: + voxels (SparseTensor): Input voxels. + points (PointTensor): Input points. + point_transform (nn.Module, optional): Point transform module + for input point features. Defaults to None. + nearest (bool): Whether to use nearest neighbor interpolation. + Defaults to False. + + Returns: + PointTensor: Points with new features. + """ + if points.idx_query is None or points.weights is None or \ + points.idx_query.get(voxels.s) is None or \ + points.weights.get(voxels.s) is None: + offsets = get_kernel_offsets( + 2, voxels.s, 1, device=points.F.device) + old_hash = F.sphash( + torch.cat([ + torch.floor(points.C[:, :3] / voxels.s[0]).int() * + voxels.s[0], points.C[:, -1].int().view(-1, 1) + ], 1), offsets) + pc_hash = F.sphash(voxels.C.to(points.F.device)) + idx_query = F.sphashquery(old_hash, pc_hash) + weights = F.calc_ti_weights( + points.C, idx_query, + scale=voxels.s[0]).transpose(0, 1).contiguous() + idx_query = idx_query.transpose(0, 1).contiguous() + if nearest: + weights[:, 1:] = 0. + idx_query[:, 1:] = -1 + new_features = F.spdevoxelize(voxels.F, idx_query, weights) + new_tensor = PointTensor( + new_features, + points.C, + idx_query=points.idx_query, + weights=points.weights) + new_tensor.additional_features = points.additional_features + new_tensor.idx_query[voxels.s] = idx_query + new_tensor.weights[voxels.s] = weights + points.idx_query[voxels.s] = idx_query + points.weights[voxels.s] = weights + else: + new_features = F.spdevoxelize(voxels.F, + points.idx_query.get(voxels.s), + points.weights.get(voxels.s)) + new_tensor = PointTensor( + new_features, + points.C, + idx_query=points.idx_query, + weights=points.weights) + new_tensor.additional_features = points.additional_features + + if point_transform is not None: + new_tensor.F = new_tensor.F + point_transform(points.F) + + return new_tensor + + def point_to_voxel(self, voxels: SparseTensor, + points: PointTensor) -> SparseTensor: + """Feed point features to voxels. + + Args: + voxels (SparseTensor): Input voxels. + points (PointTensor): Input points. + + Returns: + SparseTensor: Voxels with new features. 
+ """ + if points.additional_features is None or \ + points.additional_features.get('idx_query') is None or \ + points.additional_features['idx_query'].get(voxels.s) is None: + pc_hash = F.sphash( + torch.cat([ + torch.floor(points.C[:, :3] / voxels.s[0]).int() * + voxels.s[0], points.C[:, -1].int().view(-1, 1) + ], 1)) + sparse_hash = F.sphash(voxels.C) + idx_query = F.sphashquery(pc_hash, sparse_hash) + counts = F.spcount(idx_query.int(), voxels.C.shape[0]) + points.additional_features['idx_query'][voxels.s] = idx_query + points.additional_features['counts'][voxels.s] = counts + else: + idx_query = points.additional_features['idx_query'][voxels.s] + counts = points.additional_features['counts'][voxels.s] + + inserted_features = F.spvoxelize(points.F, idx_query, counts) + new_tensor = SparseTensor(inserted_features, voxels.C, voxels.s) + new_tensor.cmaps = voxels.cmaps + new_tensor.kmaps = voxels.kmaps + + return new_tensor diff --git a/mmdet3d/models/data_preprocessors/__init__.py b/mmdet3d/models/data_preprocessors/__init__.py new file mode 100755 index 0000000..8bf69f2 --- /dev/null +++ b/mmdet3d/models/data_preprocessors/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .data_preprocessor import Det3DDataPreprocessor + +__all__ = ['Det3DDataPreprocessor'] diff --git a/mmdet3d/models/data_preprocessors/data_preprocessor.py b/mmdet3d/models/data_preprocessors/data_preprocessor.py new file mode 100755 index 0000000..85286c9 --- /dev/null +++ b/mmdet3d/models/data_preprocessors/data_preprocessor.py @@ -0,0 +1,524 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from numbers import Number +from typing import Dict, List, Optional, Sequence, Union + +import numpy as np +import torch +from mmdet.models import DetDataPreprocessor +from mmengine.model import stack_batch +from mmengine.utils import is_list_of +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils import OptConfigType +from .utils import multiview_img_stack_batch +from .voxelize import VoxelizationByGridShape, dynamic_scatter_3d + + +@MODELS.register_module() +class Det3DDataPreprocessor(DetDataPreprocessor): + """Points / Image pre-processor for point clouds / vision-only / multi- + modality 3D detection tasks. + + It provides the data pre-processing as follows + + - Collate and move image and point cloud data to the target device. + + - 1) For image data: + - Pad images in inputs to the maximum size of current batch with defined + ``pad_value``. The padding size can be divisible by a defined + ``pad_size_divisor``. + - Stack images in inputs to batch_imgs. + - Convert images in inputs from bgr to rgb if the shape of input is + (3, H, W). + - Normalize images in inputs with defined std and mean. + - Do batch augmentations during training. + + - 2) For point cloud data: + - If no voxelization, directly return list of point cloud data. + - If voxelization is applied, voxelize point cloud according to + ``voxel_type`` and obtain ``voxels``. + + Args: + voxel (bool): Whether to apply voxelization to point cloud. + Defaults to False. + voxel_type (str): Voxelization type. Two voxelization types are + provided: 'hard' and 'dynamic', respectively for hard + voxelization and dynamic voxelization. Defaults to 'hard'. + voxel_layer (dict or :obj:`ConfigDict`, optional): Voxelization layer + config. Defaults to None. + mean (Sequence[Number], optional): The pixel mean of R, G, B channels. 
+ Defaults to None. + std (Sequence[Number], optional): The pixel standard deviation of + R, G, B channels. Defaults to None. + pad_size_divisor (int): The size of padded image should be + divisible by ``pad_size_divisor``. Defaults to 1. + pad_value (Number): The padded pixel value. Defaults to 0. + pad_mask (bool): Whether to pad instance masks. Defaults to False. + mask_pad_value (int): The padded pixel value for instance masks. + Defaults to 0. + pad_seg (bool): Whether to pad semantic segmentation maps. + Defaults to False. + seg_pad_value (int): The padded pixel value for semantic + segmentation maps. Defaults to 255. + bgr_to_rgb (bool): Whether to convert image from BGR to RGB. + Defaults to False. + rgb_to_bgr (bool): Whether to convert image from RGB to BGR. + Defaults to False. + boxtype2tensor (bool): Whether to keep the ``BaseBoxes`` type of + bboxes data or not. Defaults to True. + batch_augments (List[dict], optional): Batch-level augmentations. + Defaults to None. + """ + + def __init__(self, + voxel: bool = False, + voxel_type: str = 'hard', + voxel_layer: OptConfigType = None, + mean: Sequence[Number] = None, + std: Sequence[Number] = None, + pad_size_divisor: int = 1, + pad_value: Union[float, int] = 0, + pad_mask: bool = False, + mask_pad_value: int = 0, + pad_seg: bool = False, + seg_pad_value: int = 255, + bgr_to_rgb: bool = False, + rgb_to_bgr: bool = False, + boxtype2tensor: bool = True, + batch_augments: Optional[List[dict]] = None) -> None: + super(Det3DDataPreprocessor, self).__init__( + mean=mean, + std=std, + pad_size_divisor=pad_size_divisor, + pad_value=pad_value, + pad_mask=pad_mask, + mask_pad_value=mask_pad_value, + pad_seg=pad_seg, + seg_pad_value=seg_pad_value, + bgr_to_rgb=bgr_to_rgb, + rgb_to_bgr=rgb_to_bgr, + batch_augments=batch_augments) + self.voxel = voxel + self.voxel_type = voxel_type + if voxel: + self.voxel_layer = VoxelizationByGridShape(**voxel_layer) + + def forward(self, + data: Union[dict, List[dict]], + training: bool = False) -> Union[dict, List[dict]]: + """Perform normalization, padding and bgr2rgb conversion based on + ``BaseDataPreprocessor``. + + Args: + data (dict or List[dict]): Data from dataloader. + The dict contains the whole batch data, when it is + a list[dict], the list indicate test time augmentation. + training (bool): Whether to enable training time augmentation. + Defaults to False. + + Returns: + dict or List[dict]: Data in the same format as the model input. + """ + if isinstance(data, list): + num_augs = len(data) + aug_batch_data = [] + for aug_id in range(num_augs): + single_aug_batch_data = self.simple_process( + data[aug_id], training) + aug_batch_data.append(single_aug_batch_data) + return aug_batch_data + + else: + return self.simple_process(data, training) + + def simple_process(self, data: dict, training: bool = False) -> dict: + """Perform normalization, padding and bgr2rgb conversion for img data + based on ``BaseDataPreprocessor``, and voxelize point cloud if `voxel` + is set to be True. + + Args: + data (dict): Data sampled from dataloader. + training (bool): Whether to enable training time augmentation. + Defaults to False. + + Returns: + dict: Data in the same format as the model input. 
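+
+ Example:
+ A rough sketch of the point-cloud-only case (the shapes and the
+ ``Det3DDataSample`` instances are placeholders):
+ >>> data = dict(
+ inputs=dict(points=[torch.rand(1000, 4)]),
+ data_samples=[Det3DDataSample()])
+ >>> # out = self.simple_process(data) returns the collated points
+ >>> # under out['inputs']['points'] (plus out['inputs']['voxels']
+ >>> # when ``voxel=True``) together with the data samples.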
+ """ + if 'img' in data['inputs']: + batch_pad_shape = self._get_pad_shape(data) + + data = self.collate_data(data) + inputs, data_samples = data['inputs'], data['data_samples'] + batch_inputs = dict() + + if 'points' in inputs: + batch_inputs['points'] = inputs['points'] + + if self.voxel: + voxel_dict = self.voxelize(inputs['points'], data_samples) + batch_inputs['voxels'] = voxel_dict + + if 'imgs' in inputs: + imgs = inputs['imgs'] + + if data_samples is not None: + # NOTE the batched image size information may be useful, e.g. + # in DETR, this is needed for the construction of masks, which + # is then used for the transformer_head. + batch_input_shape = tuple(imgs[0].size()[-2:]) + for data_sample, pad_shape in zip(data_samples, + batch_pad_shape): + data_sample.set_metainfo({ + 'batch_input_shape': batch_input_shape, + 'pad_shape': pad_shape + }) + + if hasattr(self, 'boxtype2tensor') and self.boxtype2tensor: + from mmdet.models.utils.misc import \ + samplelist_boxtype2tensor + samplelist_boxtype2tensor(data_samples) + elif hasattr(self, 'boxlist2tensor') and self.boxlist2tensor: + from mmdet.models.utils.misc import \ + samplelist_boxlist2tensor + samplelist_boxlist2tensor(data_samples) + if self.pad_mask: + self.pad_gt_masks(data_samples) + + if self.pad_seg: + self.pad_gt_sem_seg(data_samples) + + if training and self.batch_augments is not None: + for batch_aug in self.batch_augments: + imgs, data_samples = batch_aug(imgs, data_samples) + batch_inputs['imgs'] = imgs + + return {'inputs': batch_inputs, 'data_samples': data_samples} + + def preprocess_img(self, _batch_img: torch.Tensor) -> torch.Tensor: + # channel transform + if self._channel_conversion: + _batch_img = _batch_img[[2, 1, 0], ...] + # Convert to float after channel conversion to ensure + # efficiency + _batch_img = _batch_img.float() + # Normalization. + if self._enable_normalize: + if self.mean.shape[0] == 3: + assert _batch_img.dim() == 3 and _batch_img.shape[0] == 3, ( + 'If the mean has 3 values, the input tensor ' + 'should in shape of (3, H, W), but got the ' + f'tensor with shape {_batch_img.shape}') + _batch_img = (_batch_img - self.mean) / self.std + return _batch_img + + def collate_data(self, data: dict) -> dict: + """Copying data to the target device and Performs normalization, + padding and bgr2rgb conversion and stack based on + ``BaseDataPreprocessor``. + + Collates the data sampled from dataloader into a list of dict and + list of labels, and then copies tensor to the target device. + + Args: + data (dict): Data sampled from dataloader. + + Returns: + dict: Data in the same format as the model input. + """ + data = self.cast_data(data) # type: ignore + + if 'img' in data['inputs']: + _batch_imgs = data['inputs']['img'] + # Process data with `pseudo_collate`. + if is_list_of(_batch_imgs, torch.Tensor): + batch_imgs = [] + img_dim = _batch_imgs[0].dim() + for _batch_img in _batch_imgs: + if img_dim == 3: # standard img + _batch_img = self.preprocess_img(_batch_img) + elif img_dim == 4: + _batch_img = [ + self.preprocess_img(_img) for _img in _batch_img + ] + + _batch_img = torch.stack(_batch_img, dim=0) + + batch_imgs.append(_batch_img) + + # Pad and stack Tensor. + if img_dim == 3: + batch_imgs = stack_batch(batch_imgs, self.pad_size_divisor, + self.pad_value) + elif img_dim == 4: + batch_imgs = multiview_img_stack_batch( + batch_imgs, self.pad_size_divisor, self.pad_value) + + # Process data with `default_collate`. 
+ elif isinstance(_batch_imgs, torch.Tensor): + assert _batch_imgs.dim() == 4, ( + 'The input of `ImgDataPreprocessor` should be a NCHW ' + 'tensor or a list of tensor, but got a tensor with ' + f'shape: {_batch_imgs.shape}') + if self._channel_conversion: + _batch_imgs = _batch_imgs[:, [2, 1, 0], ...] + # Convert to float after channel conversion to ensure + # efficiency + _batch_imgs = _batch_imgs.float() + if self._enable_normalize: + _batch_imgs = (_batch_imgs - self.mean) / self.std + h, w = _batch_imgs.shape[2:] + target_h = math.ceil( + h / self.pad_size_divisor) * self.pad_size_divisor + target_w = math.ceil( + w / self.pad_size_divisor) * self.pad_size_divisor + pad_h = target_h - h + pad_w = target_w - w + batch_imgs = F.pad(_batch_imgs, (0, pad_w, 0, pad_h), + 'constant', self.pad_value) + else: + raise TypeError( + 'Output of `cast_data` should be a list of dict ' + 'or a tuple with inputs and data_samples, but got' + f'{type(data)}: {data}') + + data['inputs']['imgs'] = batch_imgs + + data.setdefault('data_samples', None) + + return data + + def _get_pad_shape(self, data: dict) -> List[tuple]: + """Get the pad_shape of each image based on data and + pad_size_divisor.""" + # rewrite `_get_pad_shape` for obtaining image inputs. + _batch_inputs = data['inputs']['img'] + # Process data with `pseudo_collate`. + if is_list_of(_batch_inputs, torch.Tensor): + batch_pad_shape = [] + for ori_input in _batch_inputs: + if ori_input.dim() == 4: + # mean multiview input, select one of the + # image to calculate the pad shape + ori_input = ori_input[0] + pad_h = int( + np.ceil(ori_input.shape[1] / + self.pad_size_divisor)) * self.pad_size_divisor + pad_w = int( + np.ceil(ori_input.shape[2] / + self.pad_size_divisor)) * self.pad_size_divisor + batch_pad_shape.append((pad_h, pad_w)) + # Process data with `default_collate`. + elif isinstance(_batch_inputs, torch.Tensor): + assert _batch_inputs.dim() == 4, ( + 'The input of `ImgDataPreprocessor` should be a NCHW tensor ' + 'or a list of tensor, but got a tensor with shape: ' + f'{_batch_inputs.shape}') + pad_h = int( + np.ceil(_batch_inputs.shape[1] / + self.pad_size_divisor)) * self.pad_size_divisor + pad_w = int( + np.ceil(_batch_inputs.shape[2] / + self.pad_size_divisor)) * self.pad_size_divisor + batch_pad_shape = [(pad_h, pad_w)] * _batch_inputs.shape[0] + else: + raise TypeError('Output of `cast_data` should be a list of dict ' + 'or a tuple with inputs and data_samples, but got ' + f'{type(data)}: {data}') + return batch_pad_shape + + @torch.no_grad() + def voxelize(self, points: List[torch.Tensor], + data_samples: SampleList) -> Dict[str, torch.Tensor]: + """Apply voxelization to point cloud. + + Args: + points (List[Tensor]): Point cloud in one data batch. + data_samples: (list[:obj:`Det3DDataSample`]): The annotation data + of every samples. Add voxel-wise annotation for segmentation. + + Returns: + Dict[str, Tensor]: Voxelization information. + + - voxels (Tensor): Features of voxels, shape is MxNxC for hard + voxelization, NxC for dynamic voxelization. + - coors (Tensor): Coordinates of voxels, shape is Nx(1+NDim), + where 1 represents the batch index. + - num_points (Tensor, optional): Number of points in each voxel. + - voxel_centers (Tensor, optional): Centers of voxels. 
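+
+ Note:
+ With ``voxel_type='hard'`` the dict additionally contains
+ ``num_points`` and ``voxel_centers``. 'dynamic' keeps the points
+ unchanged and only produces ``coors``; 'cylindrical' keeps one
+ row per point but augments the features with polar coordinates
+ and (during training) writes voxel-wise segmentation labels to
+ the data samples; 'minkunet' deduplicates coordinates by sparse
+ quantization and stores a ``voxel2point_map`` on each data
+ sample.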
+ """ + + voxel_dict = dict() + + if self.voxel_type == 'hard': + voxels, coors, num_points, voxel_centers = [], [], [], [] + for i, res in enumerate(points): + res_voxels, res_coors, res_num_points = self.voxel_layer(res) + res_voxel_centers = ( + res_coors[:, [2, 1, 0]] + 0.5) * res_voxels.new_tensor( + self.voxel_layer.voxel_size) + res_voxels.new_tensor( + self.voxel_layer.point_cloud_range[0:3]) + res_coors = F.pad(res_coors, (1, 0), mode='constant', value=i) + voxels.append(res_voxels) + coors.append(res_coors) + num_points.append(res_num_points) + voxel_centers.append(res_voxel_centers) + + voxels = torch.cat(voxels, dim=0) + coors = torch.cat(coors, dim=0) + num_points = torch.cat(num_points, dim=0) + voxel_centers = torch.cat(voxel_centers, dim=0) + + voxel_dict['num_points'] = num_points + voxel_dict['voxel_centers'] = voxel_centers + elif self.voxel_type == 'dynamic': + coors = [] + # dynamic voxelization only provide a coors mapping + for i, res in enumerate(points): + res_coors = self.voxel_layer(res) + res_coors = F.pad(res_coors, (1, 0), mode='constant', value=i) + coors.append(res_coors) + voxels = torch.cat(points, dim=0) + coors = torch.cat(coors, dim=0) + elif self.voxel_type == 'cylindrical': + voxels, coors = [], [] + for i, (res, data_sample) in enumerate(zip(points, data_samples)): + rho = torch.sqrt(res[:, 0]**2 + res[:, 1]**2) + phi = torch.atan2(res[:, 1], res[:, 0]) + polar_res = torch.stack((rho, phi, res[:, 2]), dim=-1) + min_bound = polar_res.new_tensor( + self.voxel_layer.point_cloud_range[:3]) + max_bound = polar_res.new_tensor( + self.voxel_layer.point_cloud_range[3:]) + try: # only support PyTorch >= 1.9.0 + polar_res_clamp = torch.clamp(polar_res, min_bound, + max_bound) + except TypeError: + polar_res_clamp = polar_res.clone() + for coor_idx in range(3): + polar_res_clamp[:, coor_idx][ + polar_res[:, coor_idx] > + max_bound[coor_idx]] = max_bound[coor_idx] + polar_res_clamp[:, coor_idx][ + polar_res[:, coor_idx] < + min_bound[coor_idx]] = min_bound[coor_idx] + res_coors = torch.floor( + (polar_res_clamp - min_bound) / polar_res_clamp.new_tensor( + self.voxel_layer.voxel_size)).int() + self.get_voxel_seg(res_coors, data_sample) + res_coors = F.pad(res_coors, (1, 0), mode='constant', value=i) + res_voxels = torch.cat((polar_res, res[:, :2], res[:, 3:]), + dim=-1) + voxels.append(res_voxels) + coors.append(res_coors) + voxels = torch.cat(voxels, dim=0) + coors = torch.cat(coors, dim=0) + elif self.voxel_type == 'minkunet': + voxels, coors = [], [] + voxel_size = points[0].new_tensor(self.voxel_layer.voxel_size) + for i, (res, data_sample) in enumerate(zip(points, data_samples)): + res_coors = torch.round(res[:, :3] / voxel_size).int() + res_coors -= res_coors.min(0)[0] + + res_coors_numpy = res_coors.cpu().numpy() + inds, voxel2point_map = self.sparse_quantize( + res_coors_numpy, return_index=True, return_inverse=True) + voxel2point_map = torch.from_numpy(voxel2point_map).cuda() + if self.training: + if len(inds) > 80000: + inds = np.random.choice(inds, 80000, replace=False) + inds = torch.from_numpy(inds).cuda() + data_sample.gt_pts_seg.voxel_semantic_mask \ + = data_sample.gt_pts_seg.pts_semantic_mask[inds] + res_voxel_coors = res_coors[inds] + res_voxels = res[inds] + res_voxel_coors = F.pad( + res_voxel_coors, (0, 1), mode='constant', value=i) + data_sample.voxel2point_map = voxel2point_map.long() + voxels.append(res_voxels) + coors.append(res_voxel_coors) + voxels = torch.cat(voxels, dim=0) + coors = torch.cat(coors, dim=0) + + else: + raise 
ValueError(f'Invalid voxelization type {self.voxel_type}') + + voxel_dict['voxels'] = voxels + voxel_dict['coors'] = coors + + return voxel_dict + + def get_voxel_seg(self, res_coors: torch.Tensor, data_sample: SampleList): + """Get voxel-wise segmentation label and point2voxel map. + + Args: + res_coors (Tensor): The voxel coordinates of points, Nx3. + data_sample: (:obj:`Det3DDataSample`): The annotation data of + every samples. Add voxel-wise annotation forsegmentation. + """ + + if self.training: + pts_semantic_mask = data_sample.gt_pts_seg.pts_semantic_mask + voxel_semantic_mask, _, point2voxel_map = dynamic_scatter_3d( + F.one_hot(pts_semantic_mask.long()).float(), res_coors, 'mean', + True) + voxel_semantic_mask = torch.argmax(voxel_semantic_mask, dim=-1) + data_sample.gt_pts_seg.voxel_semantic_mask = voxel_semantic_mask + data_sample.gt_pts_seg.point2voxel_map = point2voxel_map + else: + pseudo_tensor = res_coors.new_ones([res_coors.shape[0], 1]).float() + _, _, point2voxel_map = dynamic_scatter_3d(pseudo_tensor, + res_coors, 'mean', True) + data_sample.gt_pts_seg.point2voxel_map = point2voxel_map + + def ravel_hash(self, x: np.ndarray) -> np.ndarray: + """Get voxel coordinates hash for np.unique(). + + Args: + x (np.ndarray): The voxel coordinates of points, Nx3. + + Returns: + np.ndarray: Voxels coordinates hash. + """ + assert x.ndim == 2, x.shape + + x = x - np.min(x, axis=0) + x = x.astype(np.uint64, copy=False) + xmax = np.max(x, axis=0).astype(np.uint64) + 1 + + h = np.zeros(x.shape[0], dtype=np.uint64) + for k in range(x.shape[1] - 1): + h += x[:, k] + h *= xmax[k + 1] + h += x[:, -1] + return h + + def sparse_quantize(self, + coords: np.ndarray, + return_index: bool = False, + return_inverse: bool = False) -> List[np.ndarray]: + """Sparse Quantization for voxel coordinates used in Minkunet. + + Args: + coords (np.ndarray): The voxel coordinates of points, Nx3. + return_index (bool): Whether to return the indices of the + unique coords, shape (M,). + return_inverse (bool): Whether to return the indices of the + original coords shape (N,). + + Returns: + List[np.ndarray] or None: Return index and inverse map if + return_index and return_inverse is True. + """ + _, indices, inverse_indices = np.unique( + self.ravel_hash(coords), return_index=True, return_inverse=True) + coords = coords[indices] + + outputs = [] + if return_index: + outputs += [indices] + if return_inverse: + outputs += [inverse_indices] + return outputs diff --git a/mmdet3d/models/data_preprocessors/utils.py b/mmdet3d/models/data_preprocessors/utils.py new file mode 100755 index 0000000..e69df3b --- /dev/null +++ b/mmdet3d/models/data_preprocessors/utils.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Union + +import torch +import torch.nn.functional as F + + +def multiview_img_stack_batch( + tensor_list: List[torch.Tensor], + pad_size_divisor: int = 1, + pad_value: Union[int, float] = 0) -> torch.Tensor: + """ + Compared to the stack_batch in mmengine.model.utils, + multiview_img_stack_batch further handle the multiview images. + see diff of padded_sizes[:, :-2] = 0 vs padded_sizes[:, 0] = 0 in line 47 + Stack multiple tensors to form a batch and pad the tensor to the max + shape use the right bottom padding mode in these images. If + ``pad_size_divisor > 0``, add padding to ensure the shape of each dim is + divisible by ``pad_size_divisor``. + + Args: + tensor_list (List[Tensor]): A list of tensors with the same dim. 
+ pad_size_divisor (int): If ``pad_size_divisor > 0``, add padding + to ensure the shape of each dim is divisible by + ``pad_size_divisor``. This depends on the model, and many + models need to be divisible by 32. Defaults to 1. + pad_value (int or float): The padding value. Defaults to 0. + + Returns: + Tensor: The n dim tensor. + """ + assert isinstance( + tensor_list, + list), f'Expected input type to be list, but got {type(tensor_list)}' + assert tensor_list, '`tensor_list` could not be an empty list' + assert len({ + tensor.ndim + for tensor in tensor_list + }) == 1, ('Expected the dimensions of all tensors must be the same, ' + f'but got {[tensor.ndim for tensor in tensor_list]}') + + dim = tensor_list[0].dim() + num_img = len(tensor_list) + all_sizes: torch.Tensor = torch.Tensor( + [tensor.shape for tensor in tensor_list]) + max_sizes = torch.ceil( + torch.max(all_sizes, dim=0)[0] / pad_size_divisor) * pad_size_divisor + padded_sizes = max_sizes - all_sizes + # The first dim normally means channel, which should not be padded. + padded_sizes[:, :-2] = 0 + if padded_sizes.sum() == 0: + return torch.stack(tensor_list) + # `pad` is the second arguments of `F.pad`. If pad is (1, 2, 3, 4), + # it means that padding the last dim with 1(left) 2(right), padding the + # penultimate dim to 3(top) 4(bottom). The order of `pad` is opposite of + # the `padded_sizes`. Therefore, the `padded_sizes` needs to be reversed, + # and only odd index of pad should be assigned to keep padding "right" and + # "bottom". + pad = torch.zeros(num_img, 2 * dim, dtype=torch.int) + pad[:, 1::2] = padded_sizes[:, range(dim - 1, -1, -1)] + batch_tensor = [] + for idx, tensor in enumerate(tensor_list): + batch_tensor.append( + F.pad(tensor, tuple(pad[idx].tolist()), value=pad_value)) + return torch.stack(batch_tensor) diff --git a/mmdet3d/models/data_preprocessors/voxelize.py b/mmdet3d/models/data_preprocessors/voxelize.py new file mode 100755 index 0000000..25cd5bf --- /dev/null +++ b/mmdet3d/models/data_preprocessors/voxelize.py @@ -0,0 +1,326 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Any, List, Optional, Tuple, Union + +import torch +from mmcv.utils import ext_loader +from torch import nn +from torch.autograd import Function +from torch.nn import functional as F +from torch.nn.modules.utils import _pair + +ext_module = ext_loader.load_ext('_ext', [ + 'dynamic_voxelize_forward', 'hard_voxelize_forward', + 'dynamic_point_to_voxel_forward', 'dynamic_point_to_voxel_backward' +]) + + +class _Voxelization(Function): + + @staticmethod + def forward( + ctx: Any, + points: torch.Tensor, + voxel_size: Union[tuple, float], + coors_range: Union[tuple, float], + max_points: int = 35, + max_voxels: int = 20000, + deterministic: bool = True) -> Union[Tuple[torch.Tensor], Tuple]: + """Convert kitti points(N, >=3) to voxels. + + Args: + points (torch.Tensor): [N, ndim]. Points[:, :3] contain xyz points + and points[:, 3:] contain other information like reflectivity. + voxel_size (tuple or float): The size of voxel with the shape of + [3]. + coors_range (tuple or float): The coordinate range of voxel with + the shape of [6]. + max_points (int, optional): maximum points contained in a voxel. if + max_points=-1, it means using dynamic_voxelize. Default: 35. + max_voxels (int, optional): maximum voxels this function create. + for second, 20000 is a good choice. Users should shuffle points + before call this function because max_voxels may drop points. + Default: 20000. + deterministic: bool. 
whether to invoke the non-deterministic + version of hard-voxelization implementations. non-deterministic + version is considerablly fast but is not deterministic. only + affects hard voxelization. default True. for more information + of this argument and the implementation insights, please refer + to the following links: + https://github.com/open-mmlab/mmdetection3d/issues/894 + https://github.com/open-mmlab/mmdetection3d/pull/904 + it is an experimental feature and we will appreciate it if + you could share with us the failing cases. + + Returns: + tuple[torch.Tensor]: tuple[torch.Tensor]: A tuple contains three + elements. The first one is the output voxels with the shape of + [M, max_points, n_dim], which only contain points and returned + when max_points != -1. The second is the voxel coordinates with + shape of [M, 3]. The last is number of point per voxel with the + shape of [M], which only returned when max_points != -1. + """ + if max_points == -1 or max_voxels == -1: + coors = points.new_zeros(size=(points.size(0), 3), dtype=torch.int) + ext_module.dynamic_voxelize_forward( + points, + torch.tensor(voxel_size, dtype=torch.float), + torch.tensor(coors_range, dtype=torch.float), + coors, + NDim=3) + return coors + else: + voxels = points.new_zeros( + size=(max_voxels, max_points, points.size(1))) + coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int) + num_points_per_voxel = points.new_zeros( + size=(max_voxels, ), dtype=torch.int) + voxel_num = torch.zeros(size=(), dtype=torch.long) + ext_module.hard_voxelize_forward( + points, + torch.tensor(voxel_size, dtype=torch.float), + torch.tensor(coors_range, dtype=torch.float), + voxels, + coors, + num_points_per_voxel, + voxel_num, + max_points=max_points, + max_voxels=max_voxels, + NDim=3, + deterministic=deterministic) + # select the valid voxels + voxels_out = voxels[:voxel_num] + coors_out = coors[:voxel_num] + num_points_per_voxel_out = num_points_per_voxel[:voxel_num] + return voxels_out, coors_out, num_points_per_voxel_out + + +voxelization = _Voxelization.apply + + +class VoxelizationByGridShape(nn.Module): + """Voxelization that allows inferring voxel size automatically based on + grid shape. + + Please refer to `Point-Voxel CNN for Efficient 3D Deep Learning + `_ for more details. + + Args: + point_cloud_range (list): + [x_min, y_min, z_min, x_max, y_max, z_max] + max_num_points (int): max number of points per voxel + voxel_size (list): list [x, y, z] or [rho, phi, z] + size of single voxel. + grid_shape (list): [L, W, H], grid shape of voxelization. + max_voxels (tuple or int): max number of voxels in + (training, testing) time + deterministic: bool. whether to invoke the non-deterministic + version of hard-voxelization implementations. non-deterministic + version is considerablly fast but is not deterministic. only + affects hard voxelization. default True. for more information + of this argument and the implementation insights, please refer + to the following links: + https://github.com/open-mmlab/mmdetection3d/issues/894 + https://github.com/open-mmlab/mmdetection3d/pull/904 + it is an experimental feature and we will appreciate it if + you could share with us the failing cases. 
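+
+ Example:
+ A minimal sketch with illustrative KITTI-like values (not a
+ recommended configuration); the call itself relies on the
+ compiled voxelization ops loaded above:
+ >>> voxel_layer = VoxelizationByGridShape(
+ point_cloud_range=[0, -40, -3, 70.4, 40, 1],
+ max_num_points=32,
+ voxel_size=[0.05, 0.05, 0.1],
+ max_voxels=(16000, 40000))
+ >>> # voxels, coors, num_points = voxel_layer(points) for a
+ >>> # points tensor of shape (N, >=3).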
+ """ + + def __init__(self, + point_cloud_range: List, + max_num_points: int, + voxel_size: List = [], + grid_shape: List[int] = [], + max_voxels: Union[tuple, int] = 20000, + deterministic: bool = True): + super().__init__() + if voxel_size and grid_shape: + raise ValueError('voxel_size is mutually exclusive grid_shape') + self.point_cloud_range = point_cloud_range + self.max_num_points = max_num_points + if isinstance(max_voxels, tuple): + self.max_voxels = max_voxels + else: + self.max_voxels = _pair(max_voxels) + self.deterministic = deterministic + + point_cloud_range = torch.tensor( + point_cloud_range, dtype=torch.float32) + if voxel_size: + self.voxel_size = voxel_size + voxel_size = torch.tensor(voxel_size, dtype=torch.float32) + grid_shape = (point_cloud_range[3:] - + point_cloud_range[:3]) / voxel_size + grid_shape = torch.round(grid_shape).long().tolist() + self.grid_shape = grid_shape + elif grid_shape: + grid_shape = torch.tensor(grid_shape, dtype=torch.float32) + voxel_size = (point_cloud_range[3:] - point_cloud_range[:3]) / ( + grid_shape - 1) + voxel_size = voxel_size.tolist() + self.voxel_size = voxel_size + else: + raise ValueError('must assign a value to voxel_size or grid_shape') + + def forward(self, input: torch.Tensor) -> torch.Tensor: + if self.training: + max_voxels = self.max_voxels[0] + else: + max_voxels = self.max_voxels[1] + + return voxelization(input, self.voxel_size, self.point_cloud_range, + self.max_num_points, max_voxels, + self.deterministic) + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += 'voxel_size=' + str(self.voxel_size) + s += ', grid_shape=' + str(self.grid_shape) + s += ', point_cloud_range=' + str(self.point_cloud_range) + s += ', max_num_points=' + str(self.max_num_points) + s += ', max_voxels=' + str(self.max_voxels) + s += ', deterministic=' + str(self.deterministic) + s += ')' + return s + + +class _DynamicScatter(Function): + """Different from the mmcv implementation, here it is allowed to return + point2voxel_map.""" + + @staticmethod + def forward(ctx: Any, + feats: torch.Tensor, + coors: torch.Tensor, + reduce_type: str = 'max', + return_map: str = False) -> Tuple[torch.Tensor, torch.Tensor]: + """convert kitti points(N, >=3) to voxels. + + Args: + feats (torch.Tensor): [N, C]. Points features to be reduced + into voxels. + coors (torch.Tensor): [N, ndim]. Corresponding voxel coordinates + (specifically multi-dim voxel index) of each points. + reduce_type (str, optional): Reduce op. support 'max', 'sum' and + 'mean'. Default: 'max'. + return_map (str, optional): Whether to return point2voxel_map. + + Returns: + tuple[torch.Tensor]: A tuple contains two elements. The first one + is the voxel features with shape [M, C] which are respectively + reduced from input features that share the same voxel coordinates. + The second is voxel coordinates with shape [M, ndim]. 
+ """ + results = ext_module.dynamic_point_to_voxel_forward( + feats, coors, reduce_type) + (voxel_feats, voxel_coors, point2voxel_map, + voxel_points_count) = results + ctx.reduce_type = reduce_type + ctx.save_for_backward(feats, voxel_feats, point2voxel_map, + voxel_points_count) + ctx.mark_non_differentiable(voxel_coors) + if return_map: + return voxel_feats, voxel_coors, point2voxel_map + else: + return voxel_feats, voxel_coors + + @staticmethod + def backward(ctx: Any, + grad_voxel_feats: torch.Tensor, + grad_voxel_coors: Optional[torch.Tensor] = None) -> tuple: + (feats, voxel_feats, point2voxel_map, + voxel_points_count) = ctx.saved_tensors + grad_feats = torch.zeros_like(feats) + # TODO: whether to use index put or use cuda_backward + # To use index put, need point to voxel index + ext_module.dynamic_point_to_voxel_backward( + grad_feats, grad_voxel_feats.contiguous(), feats, voxel_feats, + point2voxel_map, voxel_points_count, ctx.reduce_type) + return grad_feats, None, None + + +dynamic_scatter_3d = _DynamicScatter.apply + + +class DynamicScatter3D(nn.Module): + """Scatters points into voxels, used in the voxel encoder with dynamic + voxelization. + + Note: + The CPU and GPU implementation get the same output, but have numerical + difference after summation and division (e.g., 5e-7). + + Args: + voxel_size (list): list [x, y, z] size of three dimension. + point_cloud_range (list): The coordinate range of points, [x_min, + y_min, z_min, x_max, y_max, z_max]. + average_points (bool): whether to use avg pooling to scatter points + into voxel. + """ + + def __init__(self, voxel_size: List, point_cloud_range: List, + average_points: bool): + super().__init__() + + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.average_points = average_points + + def forward_single( + self, points: torch.Tensor, + coors: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Scatters points into voxels. + + Args: + points (torch.Tensor): Points to be reduced into voxels. + coors (torch.Tensor): Corresponding voxel coordinates (specifically + multi-dim voxel index) of each points. + + Returns: + tuple[torch.Tensor]: A tuple contains two elements. The first one + is the voxel features with shape [M, C] which are respectively + reduced from input features that share the same voxel coordinates. + The second is voxel coordinates with shape [M, ndim]. + """ + reduce = 'mean' if self.average_points else 'max' + return dynamic_scatter_3d(points.contiguous(), coors.contiguous(), + reduce) + + def forward(self, points: torch.Tensor, + coors: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Scatters points/features into voxels. + + Args: + points (torch.Tensor): Points to be reduced into voxels. + coors (torch.Tensor): Corresponding voxel coordinates (specifically + multi-dim voxel index) of each points. + + Returns: + tuple[torch.Tensor]: A tuple contains two elements. The first one + is the voxel features with shape [M, C] which are respectively + reduced from input features that share the same voxel coordinates. + The second is voxel coordinates with shape [M, ndim]. 
+ """ + if coors.size(-1) == 3: + return self.forward_single(points, coors) + else: + batch_size = coors[-1, 0] + 1 + voxels, voxel_coors = [], [] + for i in range(batch_size): + inds = torch.where(coors[:, 0] == i) + voxel, voxel_coor = self.forward_single( + points[inds], coors[inds][:, 1:]) + coor_pad = F.pad(voxel_coor, (1, 0), mode='constant', value=i) + voxel_coors.append(coor_pad) + voxels.append(voxel) + features = torch.cat(voxels, dim=0) + feature_coors = torch.cat(voxel_coors, dim=0) + + return features, feature_coors + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += 'voxel_size=' + str(self.voxel_size) + s += ', point_cloud_range=' + str(self.point_cloud_range) + s += ', average_points=' + str(self.average_points) + s += ')' + return s diff --git a/mmdet3d/models/decode_heads/__init__.py b/mmdet3d/models/decode_heads/__init__.py new file mode 100755 index 0000000..f7560e5 --- /dev/null +++ b/mmdet3d/models/decode_heads/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .cylinder3d_head import Cylinder3DHead +from .dgcnn_head import DGCNNHead +from .minkunet_head import MinkUNetHead +from .paconv_head import PAConvHead +from .pointnet2_head import PointNet2Head + +__all__ = [ + 'PointNet2Head', 'DGCNNHead', 'PAConvHead', 'Cylinder3DHead', + 'MinkUNetHead' +] diff --git a/mmdet3d/models/decode_heads/cylinder3d_head.py b/mmdet3d/models/decode_heads/cylinder3d_head.py new file mode 100755 index 0000000..26c621c --- /dev/null +++ b/mmdet3d/models/decode_heads/cylinder3d_head.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch +from mmcv.ops import SparseConvTensor, SparseModule, SubMConv3d + +from mmdet3d.registry import MODELS +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils import OptMultiConfig +from mmdet3d.utils.typing_utils import ConfigType +from .decode_head import Base3DDecodeHead + + +@MODELS.register_module() +class Cylinder3DHead(Base3DDecodeHead): + """Cylinder3D decoder head. + + Decoder head used in `Cylinder3D `_. + Refer to the + `official code `_. + + Args: + channels (int): Channels after modules, before conv_seg. + num_classes (int): Number of classes. + dropout_ratio (float): Ratio of dropout layer. Defaults to 0. + conv_cfg (dict or :obj:`ConfigDict`): Config of conv layers. + Defaults to dict(type='Conv1d'). + norm_cfg (dict or :obj:`ConfigDict`): Config of norm layers. + Defaults to dict(type='BN1d'). + act_cfg (dict or :obj:`ConfigDict`): Config of activation layers. + Defaults to dict(type='ReLU'). + loss_ce (dict or :obj:`ConfigDict`): Config of CrossEntropy loss. + Defaults to dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, + loss_weight=1.0). + loss_lovasz (dict or :obj:`ConfigDict`): Config of Lovasz loss. + Defaults to dict(type='LovaszLoss', loss_weight=1.0). + conv_seg_kernel_size (int): The kernel size used in conv_seg. + Defaults to 3. + ignore_index (int): The label index to be ignored. When using masked + BCE loss, ignore_index should be set to None. Defaults to 19. + init_cfg (dict or :obj:`ConfigDict` or list[dict or :obj:`ConfigDict`], + optional): Initialization config dict. Defaults to None. 
+ """ + + def __init__(self, + channels: int, + num_classes: int, + dropout_ratio: float = 0, + conv_cfg: ConfigType = dict(type='Conv1d'), + norm_cfg: ConfigType = dict(type='BN1d'), + act_cfg: ConfigType = dict(type='ReLU'), + loss_ce: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, + loss_weight=1.0), + loss_lovasz: ConfigType = dict( + type='LovaszLoss', loss_weight=1.0), + conv_seg_kernel_size: int = 3, + ignore_index: int = 19, + init_cfg: OptMultiConfig = None) -> None: + super(Cylinder3DHead, self).__init__( + channels=channels, + num_classes=num_classes, + dropout_ratio=dropout_ratio, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + conv_seg_kernel_size=conv_seg_kernel_size, + init_cfg=init_cfg) + + self.loss_lovasz = MODELS.build(loss_lovasz) + self.loss_ce = MODELS.build(loss_ce) + self.ignore_index = ignore_index + + def build_conv_seg(self, channels: int, num_classes: int, + kernel_size: int) -> SparseModule: + return SubMConv3d( + channels, + num_classes, + indice_key='logit', + kernel_size=kernel_size, + stride=1, + padding=1, + bias=True) + + def forward(self, sparse_voxels: SparseConvTensor) -> SparseConvTensor: + """Forward function.""" + sparse_logits = self.cls_seg(sparse_voxels) + return sparse_logits + + def loss_by_feat(self, seg_logit: SparseConvTensor, + batch_data_samples: SampleList) -> dict: + """Compute semantic segmentation loss. + + Args: + seg_logit (SparseConvTensor): Predicted per-voxel + segmentation logits of shape [num_voxels, num_classes] + stored in SparseConvTensor. + batch_data_samples (List[:obj:`Det3DDataSample`]): The seg + data samples. It usually includes information such + as `metainfo` and `gt_pts_seg`. + + Returns: + Dict[str, Tensor]: A dictionary of loss components. + """ + + gt_semantic_segs = [ + data_sample.gt_pts_seg.voxel_semantic_mask + for data_sample in batch_data_samples + ] + seg_label = torch.cat(gt_semantic_segs) + seg_logit_feat = seg_logit.features + loss = dict() + loss['loss_ce'] = self.loss_ce( + seg_logit_feat, seg_label, ignore_index=self.ignore_index) + loss['loss_lovasz'] = self.loss_lovasz( + seg_logit_feat, seg_label, ignore_index=self.ignore_index) + + return loss + + def predict( + self, + inputs: SparseConvTensor, + batch_inputs_dict: dict, + batch_data_samples: SampleList, + ) -> torch.Tensor: + """Forward function for testing. + + Args: + inputs (SparseConvTensor): Feature from backbone. + batch_inputs_dict (dict): Input sample dict which includes 'points' + and 'voxels' keys. + + - points (List[Tensor]): Point cloud of each sample. + - voxels (dict): Dict of voxelized voxels and the corresponding + coordinates. + batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. We use `point2voxel_map` in this function. + + Returns: + List[torch.Tensor]: List of point-wise segmentation logits. 
+ """ + seg_logits = self.forward(inputs).features + + seg_pred_list = [] + coors = batch_inputs_dict['voxels']['voxel_coors'] + for batch_idx in range(len(batch_data_samples)): + seg_logits_sample = seg_logits[coors[:, 0] == batch_idx] + point2voxel_map = batch_data_samples[ + batch_idx].gt_pts_seg.point2voxel_map.long() + point_seg_predicts = seg_logits_sample[point2voxel_map] + seg_pred_list.append(point_seg_predicts) + + return seg_pred_list diff --git a/mmdet3d/models/decode_heads/decode_head.py b/mmdet3d/models/decode_heads/decode_head.py new file mode 100755 index 0000000..58688d8 --- /dev/null +++ b/mmdet3d/models/decode_heads/decode_head.py @@ -0,0 +1,178 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Dict, List + +import torch +from mmengine.model import BaseModule, normal_init +from torch import Tensor +from torch import nn as nn + +from mmdet3d.registry import MODELS +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils.typing_utils import ConfigType, OptMultiConfig + + +class Base3DDecodeHead(BaseModule, metaclass=ABCMeta): + """Base class for BaseDecodeHead. + + 1. The ``init_weights`` method is used to initialize decode_head's + model parameters. After segmentor initialization, ``init_weights`` + is triggered when ``segmentor.init_weights()`` is called externally. + + 2. The ``loss`` method is used to calculate the loss of decode_head, + which includes two steps: (1) the decode_head model performs forward + propagation to obtain the feature maps (2) The ``loss_by_feat`` method + is called based on the feature maps to calculate the loss. + + .. code:: text + + loss(): forward() -> loss_by_feat() + + 3. The ``predict`` method is used to predict segmentation results, + which includes two steps: (1) the decode_head model performs forward + propagation to obtain the feature maps (2) The ``predict_by_feat`` method + is called based on the feature maps to predict segmentation results + including post-processing. + + .. code:: text + + predict(): forward() -> predict_by_feat() + + Args: + channels (int): Channels after modules, before conv_seg. + num_classes (int): Number of classes. + dropout_ratio (float): Ratio of dropout layer. Defaults to 0.5. + conv_cfg (dict or :obj:`ConfigDict`): Config of conv layers. + Defaults to dict(type='Conv1d'). + norm_cfg (dict or :obj:`ConfigDict`): Config of norm layers. + Defaults to dict(type='BN1d'). + act_cfg (dict or :obj:`ConfigDict`): Config of activation layers. + Defaults to dict(type='ReLU'). + loss_decode (dict or :obj:`ConfigDict`): Config of decode loss. + Defaults to dict(type='mmdet.CrossEntropyLoss', use_sigmoid=False, + class_weight=None, loss_weight=1.0). + conv_seg_kernel_size (int): The kernel size used in conv_seg. + Defaults to 1. + ignore_index (int): The label index to be ignored. When using masked + BCE loss, ignore_index should be set to None. Defaults to 255. + init_cfg (dict or :obj:`ConfigDict` or list[dict or :obj:`ConfigDict`], + optional): Initialization config dict. Defaults to None. 
+ """ + + def __init__(self, + channels: int, + num_classes: int, + dropout_ratio: float = 0.5, + conv_cfg: ConfigType = dict(type='Conv1d'), + norm_cfg: ConfigType = dict(type='BN1d'), + act_cfg: ConfigType = dict(type='ReLU'), + loss_decode: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, + loss_weight=1.0), + conv_seg_kernel_size: int = 1, + ignore_index: int = 255, + init_cfg: OptMultiConfig = None) -> None: + super(Base3DDecodeHead, self).__init__(init_cfg=init_cfg) + self.channels = channels + self.num_classes = num_classes + self.dropout_ratio = dropout_ratio + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.loss_decode = MODELS.build(loss_decode) + self.ignore_index = ignore_index + + self.conv_seg = self.build_conv_seg( + channels=channels, + num_classes=num_classes, + kernel_size=conv_seg_kernel_size) + if dropout_ratio > 0: + self.dropout = nn.Dropout(dropout_ratio) + else: + self.dropout = None + + def init_weights(self) -> None: + """Initialize weights of classification layer.""" + super().init_weights() + normal_init(self.conv_seg, mean=0, std=0.01) + + @abstractmethod + def forward(self, feats_dict: dict) -> Tensor: + """Placeholder of forward function.""" + pass + + def build_conv_seg(self, channels: int, num_classes: int, + kernel_size: int) -> nn.Module: + """Build Convolutional Segmentation Layers.""" + return nn.Conv1d(channels, num_classes, kernel_size=kernel_size) + + def cls_seg(self, feat: Tensor) -> Tensor: + """Classify each points.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.conv_seg(feat) + return output + + def loss(self, inputs: dict, batch_data_samples: SampleList, + train_cfg: ConfigType) -> Dict[str, Tensor]: + """Forward function for training. + + Args: + inputs (dict): Feature dict from backbone. + batch_data_samples (List[:obj:`Det3DDataSample`]): The seg data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + train_cfg (dict or :obj:`ConfigDict`): The training config. + + Returns: + Dict[str, Tensor]: A dictionary of loss components. + """ + seg_logits = self.forward(inputs) + losses = self.loss_by_feat(seg_logits, batch_data_samples) + return losses + + def predict(self, inputs: dict, batch_input_metas: List[dict], + test_cfg: ConfigType) -> Tensor: + """Forward function for testing. + + Args: + inputs (dict): Feature dict from backbone. + batch_input_metas (List[dict]): Meta information of a batch of + samples. + test_cfg (dict or :obj:`ConfigDict`): The testing config. + + Returns: + Tensor: Output segmentation map. + """ + seg_logits = self.forward(inputs) + + return seg_logits + + def _stack_batch_gt(self, batch_data_samples: SampleList) -> Tensor: + gt_semantic_segs = [ + data_sample.gt_pts_seg.pts_semantic_mask + for data_sample in batch_data_samples + ] + return torch.stack(gt_semantic_segs, dim=0) + + def loss_by_feat(self, seg_logit: Tensor, + batch_data_samples: SampleList) -> Dict[str, Tensor]: + """Compute semantic segmentation loss. + + Args: + seg_logit (Tensor): Predicted per-point segmentation logits of + shape [B, num_classes, N]. + batch_data_samples (List[:obj:`Det3DDataSample`]): The seg data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + + Returns: + Dict[str, Tensor]: A dictionary of loss components. 
+ """ + seg_label = self._stack_batch_gt(batch_data_samples) + loss = dict() + loss['loss_sem_seg'] = self.loss_decode( + seg_logit, seg_label, ignore_index=self.ignore_index) + return loss diff --git a/mmdet3d/models/decode_heads/dgcnn_head.py b/mmdet3d/models/decode_heads/dgcnn_head.py new file mode 100755 index 0000000..b64d2b8 --- /dev/null +++ b/mmdet3d/models/decode_heads/dgcnn_head.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +from mmcv.cnn.bricks import ConvModule +from torch import Tensor + +from mmdet3d.models.layers import DGCNNFPModule +from mmdet3d.registry import MODELS +from .decode_head import Base3DDecodeHead + + +@MODELS.register_module() +class DGCNNHead(Base3DDecodeHead): + r"""DGCNN decoder head. + + Decoder head used in `DGCNN `_. + Refer to the + `reimplementation code `_. + + Args: + fp_channels (Sequence[int]): Tuple of mlp channels in feature + propagation (FP) modules. Defaults to (1216, 512). + """ + + def __init__(self, fp_channels: Sequence[int] = (1216, 512), + **kwargs) -> None: + super(DGCNNHead, self).__init__(**kwargs) + + self.FP_module = DGCNNFPModule( + mlp_channels=fp_channels, act_cfg=self.act_cfg) + + # https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_sem_seg.py#L40 + self.pre_seg_conv = ConvModule( + fp_channels[-1], + self.channels, + kernel_size=1, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _extract_input(self, feat_dict: dict) -> Tensor: + """Extract inputs from features dictionary. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + torch.Tensor: Points for decoder. + """ + fa_points = feat_dict['fa_points'] + + return fa_points + + def forward(self, feat_dict: dict) -> Tensor: + """Forward pass. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + Tensor: Segmentation map of shape [B, num_classes, N]. + """ + fa_points = self._extract_input(feat_dict) + + fp_points = self.FP_module(fa_points) + fp_points = fp_points.transpose(1, 2).contiguous() + output = self.pre_seg_conv(fp_points) + output = self.cls_seg(output) + + return output diff --git a/mmdet3d/models/decode_heads/minkunet_head.py b/mmdet3d/models/decode_heads/minkunet_head.py new file mode 100755 index 0000000..97d8fdf --- /dev/null +++ b/mmdet3d/models/decode_heads/minkunet_head.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.layers.torchsparse import IS_TORCHSPARSE_AVAILABLE +from mmdet3d.registry import MODELS +from mmdet3d.structures.det3d_data_sample import SampleList +from .decode_head import Base3DDecodeHead + +if IS_TORCHSPARSE_AVAILABLE: + from torchsparse import SparseTensor +else: + SparseTensor = None + + +@MODELS.register_module() +class MinkUNetHead(Base3DDecodeHead): + r"""MinkUNet decoder head with TorchSparse backend. + + Refer to `implementation code `_. + + Args: + channels (int): The input channel of conv_seg. + num_classes (int): Number of classes. 
+ """ + + def __init__(self, channels: int, num_classes: int, **kwargs) -> None: + super().__init__(channels, num_classes, **kwargs) + + def build_conv_seg(self, channels: int, num_classes: int, + kernel_size: int) -> nn.Module: + """Build Convolutional Segmentation Layers.""" + return nn.Linear(channels, num_classes) + + def _stack_batch_gt(self, batch_data_samples: SampleList) -> Tensor: + """Concat voxel-wise Groud Truth.""" + gt_semantic_segs = [ + data_sample.gt_pts_seg.voxel_semantic_mask + for data_sample in batch_data_samples + ] + return torch.cat(gt_semantic_segs) + + def predict(self, inputs: SparseTensor, + batch_data_samples: SampleList) -> List[Tensor]: + """Forward function for testing. + + Args: + inputs (SparseTensor): Features from backone. + batch_data_samples (List[:obj:`Det3DDataSample`]): The seg + data samples. + + Returns: + List[Tensor]: The segmentation prediction mask of each batch. + """ + seg_logits = self.forward(inputs) + + batch_idx = inputs.C[:, -1] + seg_logit_list = [] + for i, data_sample in enumerate(batch_data_samples): + seg_logit = seg_logits[batch_idx == i] + seg_logit = seg_logit[data_sample.voxel2point_map] + seg_logit_list.append(seg_logit) + + return seg_logit_list + + def forward(self, x: SparseTensor) -> Tensor: + """Forward function. + + Args: + x (SparseTensor): Features from backbone. + + Returns: + Tensor: Segmentation map of shape [N, C]. + Note that output contains all points from each batch. + """ + output = self.cls_seg(x.F) + return output diff --git a/mmdet3d/models/decode_heads/paconv_head.py b/mmdet3d/models/decode_heads/paconv_head.py new file mode 100755 index 0000000..6ae20b8 --- /dev/null +++ b/mmdet3d/models/decode_heads/paconv_head.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +from mmcv.cnn.bricks import ConvModule +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.utils.typing_utils import ConfigType +from .pointnet2_head import PointNet2Head + + +@MODELS.register_module() +class PAConvHead(PointNet2Head): + r"""PAConv decoder head. + + Decoder head used in `PAConv `_. + Refer to the `official code `_. + + Args: + fp_channels (Sequence[Sequence[int]]): Tuple of mlp channels in FP + modules. Defaults to ((768, 256, 256), (384, 256, 256), + (320, 256, 128), (128 + 6, 128, 128, 128)). + fp_norm_cfg (dict or :obj:`ConfigDict`): Config of norm layers used in + FP modules. Defaults to dict(type='BN2d'). + """ + + def __init__(self, + fp_channels: Sequence[Sequence[int]] = ((768, 256, 256), + (384, 256, 256), + (320, 256, + 128), (128 + 6, 128, + 128, 128)), + fp_norm_cfg: ConfigType = dict(type='BN2d'), + **kwargs) -> None: + super(PAConvHead, self).__init__( + fp_channels=fp_channels, fp_norm_cfg=fp_norm_cfg, **kwargs) + + # https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/pointnet2/pointnet2_paconv_seg.py#L53 + # PointNet++'s decoder conv has bias while PAConv's doesn't have + # so we need to rebuild it here + self.pre_seg_conv = ConvModule( + fp_channels[-1][-1], + self.channels, + kernel_size=1, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, feat_dict: dict) -> Tensor: + """Forward pass. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + torch.Tensor: Segmentation map of shape [B, num_classes, N]. 
+ """ + sa_xyz, sa_features = self._extract_input(feat_dict) + + # PointNet++ doesn't use the first level of `sa_features` as input + # while PAConv inputs it through skip-connection + fp_feature = sa_features[-1] + + for i in range(self.num_fp): + # consume the points in a bottom-up manner + fp_feature = self.FP_modules[i](sa_xyz[-(i + 2)], sa_xyz[-(i + 1)], + sa_features[-(i + 2)], fp_feature) + + output = self.pre_seg_conv(fp_feature) + output = self.cls_seg(output) + + return output diff --git a/mmdet3d/models/decode_heads/pointnet2_head.py b/mmdet3d/models/decode_heads/pointnet2_head.py new file mode 100755 index 0000000..2a762cd --- /dev/null +++ b/mmdet3d/models/decode_heads/pointnet2_head.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Sequence, Tuple + +from mmcv.cnn.bricks import ConvModule +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.layers import PointFPModule +from mmdet3d.registry import MODELS +from mmdet3d.utils.typing_utils import ConfigType +from .decode_head import Base3DDecodeHead + + +@MODELS.register_module() +class PointNet2Head(Base3DDecodeHead): + r"""PointNet2 decoder head. + + Decoder head used in `PointNet++ `_. + Refer to the `official code `_. + + Args: + fp_channels (Sequence[Sequence[int]]): Tuple of mlp channels in FP + modules. Defaults to ((768, 256, 256), (384, 256, 256), + (320, 256, 128), (128, 128, 128, 128)). + fp_norm_cfg (dict or :obj:`ConfigDict`): Config of norm layers used + in FP modules. Defaults to dict(type='BN2d'). + """ + + def __init__(self, + fp_channels: Sequence[Sequence[int]] = ((768, 256, 256), + (384, 256, 256), + (320, 256, 128), + (128, 128, 128, 128)), + fp_norm_cfg: ConfigType = dict(type='BN2d'), + **kwargs) -> None: + super(PointNet2Head, self).__init__(**kwargs) + + self.num_fp = len(fp_channels) + self.FP_modules = nn.ModuleList() + for cur_fp_mlps in fp_channels: + self.FP_modules.append( + PointFPModule(mlp_channels=cur_fp_mlps, norm_cfg=fp_norm_cfg)) + + # https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_sem_seg.py#L40 + self.pre_seg_conv = ConvModule( + fp_channels[-1][-1], + self.channels, + kernel_size=1, + bias=True, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _extract_input(self, + feat_dict: dict) -> Tuple[List[Tensor], List[Tensor]]: + """Extract inputs from features dictionary. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + Tuple[List[Tensor], List[Tensor]]: Coordinates and features of + multiple levels of points. + """ + sa_xyz = feat_dict['sa_xyz'] + sa_features = feat_dict['sa_features'] + assert len(sa_xyz) == len(sa_features) + + return sa_xyz, sa_features + + def forward(self, feat_dict: dict) -> Tensor: + """Forward pass. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + Tensor: Segmentation map of shape [B, num_classes, N]. 
+ """ + sa_xyz, sa_features = self._extract_input(feat_dict) + + # https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_sem_seg.py#L24 + sa_features[0] = None + + fp_feature = sa_features[-1] + + for i in range(self.num_fp): + # consume the points in a bottom-up manner + fp_feature = self.FP_modules[i](sa_xyz[-(i + 2)], sa_xyz[-(i + 1)], + sa_features[-(i + 2)], fp_feature) + output = self.pre_seg_conv(fp_feature) + output = self.cls_seg(output) + + return output diff --git a/mmdet3d/models/dense_heads/__init__.py b/mmdet3d/models/dense_heads/__init__.py new file mode 100755 index 0000000..2503ee8 --- /dev/null +++ b/mmdet3d/models/dense_heads/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .anchor3d_head import Anchor3DHead +from .anchor_free_mono3d_head import AnchorFreeMono3DHead +from .base_3d_dense_head import Base3DDenseHead +from .base_conv_bbox_head import BaseConvBboxHead +from .base_mono3d_dense_head import BaseMono3DDenseHead +from .centerpoint_head import CenterHead +from .fcaf3d_head import FCAF3DHead +from .fcos_mono3d_head import FCOSMono3DHead +from .free_anchor3d_head import FreeAnchor3DHead +from .groupfree3d_head import GroupFree3DHead +from .imvoxel_head import ImVoxelHead +from .monoflex_head import MonoFlexHead +from .parta2_rpn_head import PartA2RPNHead +from .pgd_head import PGDHead +from .point_rpn_head import PointRPNHead +from .shape_aware_head import ShapeAwareHead +from .smoke_mono3d_head import SMOKEMono3DHead +from .ssd_3d_head import SSD3DHead +from .vote_head import VoteHead + +__all__ = [ + 'Anchor3DHead', 'FreeAnchor3DHead', 'PartA2RPNHead', 'VoteHead', + 'SSD3DHead', 'BaseConvBboxHead', 'CenterHead', 'ShapeAwareHead', + 'BaseMono3DDenseHead', 'AnchorFreeMono3DHead', 'FCOSMono3DHead', + 'GroupFree3DHead', 'PointRPNHead', 'SMOKEMono3DHead', 'PGDHead', + 'MonoFlexHead', 'Base3DDenseHead', 'FCAF3DHead', 'ImVoxelHead' +] diff --git a/mmdet3d/models/dense_heads/anchor3d_head.py b/mmdet3d/models/dense_heads/anchor3d_head.py new file mode 100755 index 0000000..aa7a2d5 --- /dev/null +++ b/mmdet3d/models/dense_heads/anchor3d_head.py @@ -0,0 +1,427 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import List, Tuple + +import numpy as np +import torch +from mmdet.models.utils import multi_apply +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.task_modules import PseudoSampler +from mmdet3d.models.test_time_augs import merge_aug_bboxes_3d +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.utils.typing_utils import (ConfigType, InstanceList, + OptConfigType, OptInstanceList) +from .base_3d_dense_head import Base3DDenseHead +from .train_mixins import AnchorTrainMixin + + +@MODELS.register_module() +class Anchor3DHead(Base3DDenseHead, AnchorTrainMixin): + """Anchor-based head for SECOND/PointPillars/MVXNet/PartA2. + + Args: + num_classes (int): Number of classes. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of channels of the feature map. + use_direction_classifier (bool): Whether to add a direction classifier. + anchor_generator(dict): Config dict of anchor generator. + assigner_per_size (bool): Whether to do assignment for each separate + anchor size. + assign_per_class (bool): Whether to do assignment for each class. + diff_rad_by_sin (bool): Whether to change the difference into sin + difference for box regression loss. + dir_offset (float | int): The offset of BEV rotation angles. 
+ (TODO: may be moved into box coder) + dir_limit_offset (float | int): The limited range of BEV + rotation angles. (TODO: may be moved into box coder) + bbox_coder (dict): Config dict of box coders. + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + loss_dir (dict): Config of direction classifier loss. + train_cfg (dict): Train configs. + test_cfg (dict): Test configs. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_classes: int, + in_channels: int, + feat_channels: int = 256, + use_direction_classifier: bool = True, + anchor_generator: ConfigType = dict( + type='Anchor3DRangeGenerator', + range=[0, -39.68, -1.78, 69.12, 39.68, -1.78], + strides=[2], + sizes=[[3.9, 1.6, 1.56]], + rotations=[0, 1.57], + custom_values=[], + reshape_out=False), + assigner_per_size: bool = False, + assign_per_class: bool = False, + diff_rad_by_sin: bool = True, + dir_offset: float = -np.pi / 2, + dir_limit_offset: int = 0, + bbox_coder: ConfigType = dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + loss_weight=2.0), + loss_dir: ConfigType = dict( + type='mmdet.CrossEntropyLoss', loss_weight=0.2), + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptConfigType = None) -> None: + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.diff_rad_by_sin = diff_rad_by_sin + self.use_direction_classifier = use_direction_classifier + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.assigner_per_size = assigner_per_size + self.assign_per_class = assign_per_class + self.dir_offset = dir_offset + self.dir_limit_offset = dir_limit_offset + warnings.warn( + 'dir_offset and dir_limit_offset will be depressed and be ' + 'incorporated into box coder in the future') + self.fp16_enabled = False + + # build anchor generator + self.prior_generator = TASK_UTILS.build(anchor_generator) + # In 3D detection, the anchor stride is connected with anchor size + self.num_anchors = self.prior_generator.num_base_anchors + # build box coder + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.box_code_size = self.bbox_coder.code_size + + # build loss function + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + self.sampling = loss_cls['type'] not in [ + 'mmdet.FocalLoss', 'mmdet.GHMC' + ] + if not self.use_sigmoid_cls: + self.num_classes += 1 + self.loss_cls = MODELS.build(loss_cls) + self.loss_bbox = MODELS.build(loss_bbox) + self.loss_dir = MODELS.build(loss_dir) + self.fp16_enabled = False + + self._init_layers() + self._init_assigner_sampler() + + if init_cfg is None: + self.init_cfg = dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)) + + def _init_assigner_sampler(self): + """Initialize the target assigner and sampler of the head.""" + if self.train_cfg is None: + return + + if self.sampling: + self.bbox_sampler = TASK_UTILS.build(self.train_cfg.sampler) + else: + self.bbox_sampler = PseudoSampler() + if isinstance(self.train_cfg.assigner, dict): + self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner) + elif isinstance(self.train_cfg.assigner, list): + self.bbox_assigner = [ + TASK_UTILS.build(res) for res in 
self.train_cfg.assigner + ] + + def _init_layers(self): + """Initialize neural network layers of the head.""" + self.cls_out_channels = self.num_anchors * self.num_classes + self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) + self.conv_reg = nn.Conv2d(self.feat_channels, + self.num_anchors * self.box_code_size, 1) + if self.use_direction_classifier: + self.conv_dir_cls = nn.Conv2d(self.feat_channels, + self.num_anchors * 2, 1) + + def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor]: + """Forward function on a single-scale feature map. + + Args: + x (Tensor): Features of a single scale level. + + Returns: + tuple: + cls_score (Tensor): Cls scores for a single scale level + the channels number is num_base_priors * num_classes. + bbox_pred (Tensor): Box energies / deltas for a single scale + level, the channels number is num_base_priors * C. + dir_cls_pred (Tensor | None): Direction classification + prediction for a single scale level, the channels + number is num_base_priors * 2. + """ + cls_score = self.conv_cls(x) + bbox_pred = self.conv_reg(x) + dir_cls_pred = None + if self.use_direction_classifier: + dir_cls_pred = self.conv_dir_cls(x) + return cls_score, bbox_pred, dir_cls_pred + + def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: + """Forward pass. + + Args: + x (tuple[Tensor]): Features from the upstream network, + each is a 4D-tensor. + + Returns: + tuple: A tuple of classification scores, bbox and direction + classification prediction. + + - cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, the channels number + is num_base_priors * num_classes. + - bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, the channels number + is num_base_priors * C. + - dir_cls_preds (list[Tensor|None]): Direction classification + predictions for all scale levels, each is a 4D-tensor, + the channels number is num_base_priors * 2. + """ + return multi_apply(self.forward_single, x) + + # TODO: Support augmentation test + def aug_test(self, + aug_batch_feats, + aug_batch_input_metas, + rescale=False, + **kwargs): + aug_bboxes = [] + # only support aug_test for one sample + for x, input_meta in zip(aug_batch_feats, aug_batch_input_metas): + outs = self.forward(x) + bbox_list = self.get_results(*outs, [input_meta], rescale=rescale) + bbox_dict = dict( + bboxes_3d=bbox_list[0].bboxes_3d, + scores_3d=bbox_list[0].scores_3d, + labels_3d=bbox_list[0].labels_3d) + aug_bboxes.append(bbox_dict) + # after merging, bboxes will be rescaled to the original image size + merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, aug_batch_input_metas, + self.test_cfg) + return [merged_bboxes] + + def get_anchors(self, + featmap_sizes: List[tuple], + input_metas: List[dict], + device: str = 'cuda') -> list: + """Get anchors according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + input_metas (list[dict]): contain pcd and img's meta info. + device (str): device of current module. + + Returns: + list[list[torch.Tensor]]: Anchors of each image, valid flags + of each image. 
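# The 1x1 conv widths in _init_layers follow directly from the anchor layout:
# num_anchors * num_classes for classification, num_anchors * box_code_size for
# regression and num_anchors * 2 for the direction bins. A quick shape check with
# made-up numbers (2 anchors per location, 3 classes, 7-dim boxes):
import torch
from torch import nn

feat_channels, num_anchors, num_classes, box_code_size = 256, 2, 3, 7
conv_cls = nn.Conv2d(feat_channels, num_anchors * num_classes, 1)
conv_reg = nn.Conv2d(feat_channels, num_anchors * box_code_size, 1)
conv_dir = nn.Conv2d(feat_channels, num_anchors * 2, 1)

x = torch.rand(4, feat_channels, 200, 176)                 # one BEV feature map
cls_score, bbox_pred, dir_cls_pred = conv_cls(x), conv_reg(x), conv_dir(x)
assert (cls_score.shape[1], bbox_pred.shape[1], dir_cls_pred.shape[1]) == (6, 14, 4)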
+ """ + num_imgs = len(input_metas) + # since feature map sizes of all images are the same, we only compute + # anchors for one time + multi_level_anchors = self.prior_generator.grid_anchors( + featmap_sizes, device=device) + anchor_list = [multi_level_anchors for _ in range(num_imgs)] + return anchor_list + + def _loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, + dir_cls_pred: Tensor, labels: Tensor, + label_weights: Tensor, bbox_targets: Tensor, + bbox_weights: Tensor, dir_targets: Tensor, + dir_weights: Tensor, num_total_samples: int): + """Calculate loss of Single-level results. + + Args: + cls_score (Tensor): Class score in single-level. + bbox_pred (Tensor): Bbox prediction in single-level. + dir_cls_pred (Tensor): Predictions of direction class + in single-level. + labels (Tensor): Labels of class. + label_weights (Tensor): Weights of class loss. + bbox_targets (Tensor): Targets of bbox predictions. + bbox_weights (Tensor): Weights of bbox loss. + dir_targets (Tensor): Targets of direction predictions. + dir_weights (Tensor): Weights of direction loss. + num_total_samples (int): The number of valid samples. + + Returns: + tuple[torch.Tensor]: Losses of class, bbox + and direction, respectively. + """ + # classification loss + if num_total_samples is None: + num_total_samples = int(cls_score.shape[0]) + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.num_classes) + assert labels.max().item() <= self.num_classes + loss_cls = self.loss_cls( + cls_score, labels, label_weights, avg_factor=num_total_samples) + + # regression loss + bbox_pred = bbox_pred.permute(0, 2, 3, + 1).reshape(-1, self.box_code_size) + bbox_targets = bbox_targets.reshape(-1, self.box_code_size) + bbox_weights = bbox_weights.reshape(-1, self.box_code_size) + + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero( + as_tuple=False).reshape(-1) + num_pos = len(pos_inds) + + pos_bbox_pred = bbox_pred[pos_inds] + pos_bbox_targets = bbox_targets[pos_inds] + pos_bbox_weights = bbox_weights[pos_inds] + + # dir loss + if self.use_direction_classifier: + dir_cls_pred = dir_cls_pred.permute(0, 2, 3, 1).reshape(-1, 2) + dir_targets = dir_targets.reshape(-1) + dir_weights = dir_weights.reshape(-1) + pos_dir_cls_pred = dir_cls_pred[pos_inds] + pos_dir_targets = dir_targets[pos_inds] + pos_dir_weights = dir_weights[pos_inds] + + if num_pos > 0: + code_weight = self.train_cfg.get('code_weight', None) + if code_weight: + pos_bbox_weights = pos_bbox_weights * bbox_weights.new_tensor( + code_weight) + if self.diff_rad_by_sin: + pos_bbox_pred, pos_bbox_targets = self.add_sin_difference( + pos_bbox_pred, pos_bbox_targets) + loss_bbox = self.loss_bbox( + pos_bbox_pred, + pos_bbox_targets, + pos_bbox_weights, + avg_factor=num_total_samples) + + # direction classification loss + loss_dir = None + if self.use_direction_classifier: + loss_dir = self.loss_dir( + pos_dir_cls_pred, + pos_dir_targets, + pos_dir_weights, + avg_factor=num_total_samples) + else: + loss_bbox = pos_bbox_pred.sum() + if self.use_direction_classifier: + loss_dir = pos_dir_cls_pred.sum() + + return loss_cls, loss_bbox, loss_dir + + @staticmethod + def add_sin_difference(boxes1: Tensor, boxes2: Tensor) -> tuple: + """Convert the rotation difference to difference in sine function. + + Args: + boxes1 (torch.Tensor): Original Boxes in shape (NxC), where C>=7 + and the 7th dimension is rotation dimension. 
+ boxes2 (torch.Tensor): Target boxes in shape (NxC), where C>=7 and + the 7th dimension is rotation dimension. + + Returns: + tuple[torch.Tensor]: ``boxes1`` and ``boxes2`` whose 7th + dimensions are changed. + """ + rad_pred_encoding = torch.sin(boxes1[..., 6:7]) * torch.cos( + boxes2[..., 6:7]) + rad_tg_encoding = torch.cos(boxes1[..., 6:7]) * torch.sin(boxes2[..., + 6:7]) + boxes1 = torch.cat( + [boxes1[..., :6], rad_pred_encoding, boxes1[..., 7:]], dim=-1) + boxes2 = torch.cat([boxes2[..., :6], rad_tg_encoding, boxes2[..., 7:]], + dim=-1) + return boxes1, boxes2 + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_input_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (list[torch.Tensor]): Multi-level class scores. + bbox_preds (list[torch.Tensor]): Multi-level bbox predictions. + dir_cls_preds (list[torch.Tensor]): Multi-level direction + class predictions. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` + and ``labels_3d`` attributes. + batch_input_metas (list[dict]): Contain pcd and img's meta info. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, list[torch.Tensor]]: Classification, bbox, and + direction losses of each level. + + - loss_cls (list[torch.Tensor]): Classification losses. + - loss_bbox (list[torch.Tensor]): Box regression losses. + - loss_dir (list[torch.Tensor]): Direction classification + losses. + """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + device = cls_scores[0].device + anchor_list = self.get_anchors( + featmap_sizes, batch_input_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.anchor_target_3d( + anchor_list, + batch_gt_instances_3d, + batch_input_metas, + batch_gt_instances_ignore=batch_gt_instances_ignore, + num_classes=self.num_classes, + label_channels=label_channels, + sampling=self.sampling) + + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + dir_targets_list, dir_weights_list, num_total_pos, + num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + + # num_total_samples = None + losses_cls, losses_bbox, losses_dir = multi_apply( + self._loss_by_feat_single, + cls_scores, + bbox_preds, + dir_cls_preds, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + dir_targets_list, + dir_weights_list, + num_total_samples=num_total_samples) + return dict( + loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dir=losses_dir) diff --git a/mmdet3d/models/dense_heads/anchor_free_mono3d_head.py b/mmdet3d/models/dense_heads/anchor_free_mono3d_head.py new file mode 100755 index 0000000..d4c2585 --- /dev/null +++ b/mmdet3d/models/dense_heads/anchor_free_mono3d_head.py @@ -0,0 +1,480 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
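# add_sin_difference relies on sin(a - b) = sin(a)cos(b) - cos(a)sin(b): after the
# swap above, subtracting the two encoded rotation entries inside the regression loss
# is equivalent to regressing the sine of the yaw difference, which removes the
# periodic ambiguity of raw angles. A quick numerical check with random angles:
import torch

a = torch.rand(5) * 6.28 - 3.14                            # predicted yaw
b = torch.rand(5) * 6.28 - 3.14                            # target yaw
enc_pred = torch.sin(a) * torch.cos(b)                     # boxes1[..., 6:7] after encoding
enc_target = torch.cos(a) * torch.sin(b)                   # boxes2[..., 6:7] after encoding
assert torch.allclose(enc_pred - enc_target, torch.sin(a - b), atol=1e-5)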
+from abc import abstractmethod +from typing import Any, List, Sequence, Tuple, Union + +import torch +from mmcv.cnn import ConvModule +from mmdet.models.utils import multi_apply +from mmengine.model import bias_init_with_prob, normal_init +from torch import Tensor +from torch import nn as nn + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType, InstanceList, OptConfigType +from .base_mono3d_dense_head import BaseMono3DDenseHead + + +@MODELS.register_module() +class AnchorFreeMono3DHead(BaseMono3DDenseHead): + """Anchor-free head for monocular 3D object detection. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels. + Used in child classes. Defaults to 256. + stacked_convs (int): Number of stacking convs of the head. + strides (Sequence[int] or Sequence[Tuple[int, int]]): Downsample + factor of each feature map. + dcn_on_last_conv (bool): If true, use dcn in the last + layer of towers. Default: False. + conv_bias (bool or str): If specified as `auto`, it will be + decided by the norm_cfg. Bias of conv will be set as True + if `norm_cfg` is None, otherwise False. Default: 'auto'. + background_label (bool, Optional): Label ID of background, + set as 0 for RPN and num_classes for other heads. + It will automatically set as `num_classes` if None is given. + use_direction_classifier (bool): + Whether to add a direction classifier. + diff_rad_by_sin (bool): Whether to change the difference + into sin difference for box regression loss. Defaults to True. + dir_offset (float): Parameter used in direction + classification. Defaults to 0. + dir_limit_offset (float): Parameter used in direction + classification. Defaults to 0. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + loss_dir (:obj:`ConfigDict` or dict): Config of direction classifier + loss. + loss_attr (:obj:`ConfigDict` or dict): Config of attribute classifier + loss, which is only active when `pred_attrs=True`. + bbox_code_size (int): Dimensions of predicted bounding boxes. + pred_attrs (bool): Whether to predict attributes. + Defaults to False. + num_attrs (int): The number of attributes to be predicted. + Default: 9. + pred_velo (bool): Whether to predict velocity. + Defaults to False. + pred_bbox2d (bool): Whether to predict 2D boxes. + Defaults to False. + group_reg_dims (tuple[int], optional): The dimension of each regression + target group. Default: (2, 1, 3, 1, 2). + cls_branch (tuple[int], optional): Channels for classification branch. + Default: (128, 64). + reg_branch (tuple[tuple], optional): Channels for regression branch. + Default: ( + (128, 64), # offset + (128, 64), # depth + (64, ), # size + (64, ), # rot + () # velo + ), + dir_branch (Sequence[int]): Channels for direction + classification branch. Default: (64, ). + attr_branch (Sequence[int]): Channels for classification branch. + Default: (64, ). + conv_cfg (:obj:`ConfigDict` or dict, Optional): Config dict for + convolution layer. Default: None. + norm_cfg (:obj:`ConfigDict` or dict, Optional): Config dict for + normalization layer. Default: None. + train_cfg (:obj:`ConfigDict` or dict, Optional): Training config + of anchor head. + test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of + anchor head. 
+ init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ + dict]): Initialization config dict. + """ # noqa: W605 + + _version = 1 + + def __init__( + self, + num_classes: int, + in_channels: int, + feat_channels: int = 256, + stacked_convs: int = 4, + strides: Sequence[int] = (4, 8, 16, 32, 64), + dcn_on_last_conv: bool = False, + conv_bias: Union[bool, str] = 'auto', + background_label: bool = None, + use_direction_classifier: bool = True, + diff_rad_by_sin: bool = True, + dir_offset: int = 0, + dir_limit_offset: int = 0, + loss_cls: ConfigType = dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_attr: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + bbox_code_size: int = 9, # For nuscenes + pred_attrs: bool = False, + num_attrs: int = 9, # For nuscenes + pred_velo: bool = False, + pred_bbox2d: bool = False, + group_reg_dims: Sequence[int] = ( + 2, 1, 3, 1, 2), # offset, depth, size, rot, velo, + cls_branch: Sequence[int] = (128, 64), + reg_branch: Sequence[Tuple[int, int]] = ( + (128, 64), # offset + (128, 64), # depth + (64, ), # size + (64, ), # rot + () # velo + ), + dir_branch: Sequence[int] = (64, ), + attr_branch: Sequence[int] = (64, ), + conv_cfg: OptConfigType = None, + norm_cfg: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptConfigType = None) -> None: + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.cls_out_channels = num_classes + self.in_channels = in_channels + self.feat_channels = feat_channels + self.stacked_convs = stacked_convs + self.strides = strides + self.dcn_on_last_conv = dcn_on_last_conv + assert conv_bias == 'auto' or isinstance(conv_bias, bool) + self.conv_bias = conv_bias + self.use_direction_classifier = use_direction_classifier + self.diff_rad_by_sin = diff_rad_by_sin + self.dir_offset = dir_offset + self.dir_limit_offset = dir_limit_offset + self.loss_cls = MODELS.build(loss_cls) + self.loss_bbox = MODELS.build(loss_bbox) + self.loss_dir = MODELS.build(loss_dir) + self.bbox_code_size = bbox_code_size + self.group_reg_dims = list(group_reg_dims) + self.cls_branch = cls_branch + self.reg_branch = reg_branch + assert len(reg_branch) == len(group_reg_dims), 'The number of '\ + 'element in reg_branch and group_reg_dims should be the same.' 
+ self.pred_velo = pred_velo + self.pred_bbox2d = pred_bbox2d + self.out_channels = [] + for reg_branch_channels in reg_branch: + if len(reg_branch_channels) > 0: + self.out_channels.append(reg_branch_channels[-1]) + else: + self.out_channels.append(-1) + self.dir_branch = dir_branch + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.fp16_enabled = False + self.background_label = ( + num_classes if background_label is None else background_label) + # background_label should be either 0 or num_classes + assert (self.background_label == 0 + or self.background_label == num_classes) + self.pred_attrs = pred_attrs + self.attr_background_label = -1 + self.num_attrs = num_attrs + if self.pred_attrs: + self.attr_background_label = num_attrs + self.loss_attr = MODELS.build(loss_attr) + self.attr_branch = attr_branch + + self._init_layers() + + def _init_layers(self): + """Initialize layers of the head.""" + self._init_cls_convs() + self._init_reg_convs() + self._init_predictor() + + def _init_cls_convs(self): + """Initialize classification conv layers of the head.""" + self.cls_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + if self.dcn_on_last_conv and i == self.stacked_convs - 1: + conv_cfg = dict(type='DCNv2') + else: + conv_cfg = self.conv_cfg + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.conv_bias)) + + def _init_reg_convs(self): + """Initialize bbox regression conv layers of the head.""" + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + if self.dcn_on_last_conv and i == self.stacked_convs - 1: + conv_cfg = dict(type='DCNv2') + else: + conv_cfg = self.conv_cfg + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.conv_bias)) + + def _init_branch(self, conv_channels=(64), conv_strides=(1)): + """Initialize conv layers as a prediction branch.""" + conv_before_pred = nn.ModuleList() + if isinstance(conv_channels, int): + conv_channels = [self.feat_channels] + [conv_channels] + conv_strides = [conv_strides] + else: + conv_channels = [self.feat_channels] + list(conv_channels) + conv_strides = list(conv_strides) + for i in range(len(conv_strides)): + conv_before_pred.append( + ConvModule( + conv_channels[i], + conv_channels[i + 1], + 3, + stride=conv_strides[i], + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.conv_bias)) + + return conv_before_pred + + def _init_predictor(self): + """Initialize predictor layers of the head.""" + self.conv_cls_prev = self._init_branch( + conv_channels=self.cls_branch, + conv_strides=(1, ) * len(self.cls_branch)) + self.conv_cls = nn.Conv2d(self.cls_branch[-1], self.cls_out_channels, + 1) + self.conv_reg_prevs = nn.ModuleList() + self.conv_regs = nn.ModuleList() + for i in range(len(self.group_reg_dims)): + reg_dim = self.group_reg_dims[i] + reg_branch_channels = self.reg_branch[i] + out_channel = self.out_channels[i] + if len(reg_branch_channels) > 0: + self.conv_reg_prevs.append( + self._init_branch( + conv_channels=reg_branch_channels, + conv_strides=(1, ) * len(reg_branch_channels))) + self.conv_regs.append(nn.Conv2d(out_channel, reg_dim, 1)) + else: + self.conv_reg_prevs.append(None) + 
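# The regression predictor is assembled per target group: each entry of
# group_reg_dims gets its own small conv tower (channels taken from reg_branch)
# followed by a 1x1 conv of that group's width, and an empty reg_branch entry means
# the 1x1 conv reads the shared feature directly. A minimal sketch of that
# construction (plain Conv2d towers instead of ConvModule, made-up channels):
import torch
from torch import nn

feat_channels = 256
group_reg_dims = (2, 1, 3, 1, 2)                           # offset, depth, size, rot, velo
reg_branch = ((128, 64), (128, 64), (64,), (64,), ())

conv_reg_prevs, conv_regs = nn.ModuleList(), nn.ModuleList()
for reg_dim, branch in zip(group_reg_dims, reg_branch):
    if branch:
        chans = (feat_channels, ) + branch
        conv_reg_prevs.append(nn.Sequential(*[
            nn.Conv2d(c_in, c_out, 3, padding=1)
            for c_in, c_out in zip(chans[:-1], chans[1:])
        ]))
        conv_regs.append(nn.Conv2d(branch[-1], reg_dim, 1))
    else:                                                  # e.g. the velocity group
        conv_reg_prevs.append(None)
        conv_regs.append(nn.Conv2d(feat_channels, reg_dim, 1))

reg_feat = torch.rand(2, feat_channels, 32, 32)
preds = [head(prev(reg_feat) if prev is not None else reg_feat)
         for prev, head in zip(conv_reg_prevs, conv_regs)]
bbox_pred = torch.cat(preds, dim=1)
assert bbox_pred.shape[1] == sum(group_reg_dims)           # 9 regression channels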
self.conv_regs.append( + nn.Conv2d(self.feat_channels, reg_dim, 1)) + if self.use_direction_classifier: + self.conv_dir_cls_prev = self._init_branch( + conv_channels=self.dir_branch, + conv_strides=(1, ) * len(self.dir_branch)) + self.conv_dir_cls = nn.Conv2d(self.dir_branch[-1], 2, 1) + if self.pred_attrs: + self.conv_attr_prev = self._init_branch( + conv_channels=self.attr_branch, + conv_strides=(1, ) * len(self.attr_branch)) + self.conv_attr = nn.Conv2d(self.attr_branch[-1], self.num_attrs, 1) + + def init_weights(self): + """Initialize weights of the head. + + We currently still use the customized defined init_weights because the + default init of DCN triggered by the init_cfg will init + conv_offset.weight, which mistakenly affects the training stability. + """ + for modules in [self.cls_convs, self.reg_convs, self.conv_cls_prev]: + for m in modules: + if isinstance(m.conv, nn.Conv2d): + normal_init(m.conv, std=0.01) + for conv_reg_prev in self.conv_reg_prevs: + if conv_reg_prev is None: + continue + for m in conv_reg_prev: + if isinstance(m.conv, nn.Conv2d): + normal_init(m.conv, std=0.01) + if self.use_direction_classifier: + for m in self.conv_dir_cls_prev: + if isinstance(m.conv, nn.Conv2d): + normal_init(m.conv, std=0.01) + if self.pred_attrs: + for m in self.conv_attr_prev: + if isinstance(m.conv, nn.Conv2d): + normal_init(m.conv, std=0.01) + bias_cls = bias_init_with_prob(0.01) + normal_init(self.conv_cls, std=0.01, bias=bias_cls) + for conv_reg in self.conv_regs: + normal_init(conv_reg, std=0.01) + if self.use_direction_classifier: + normal_init(self.conv_dir_cls, std=0.01, bias=bias_cls) + if self.pred_attrs: + normal_init(self.conv_attr, std=0.01, bias=bias_cls) + + def forward( + self, x: Tuple[Tensor] + ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]: + """Forward features from the upstream network. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually contain classification scores, bbox predictions, + and direction class predictions. + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * 2. (bin = 2) + attr_preds (list[Tensor]): Attribute scores for each scale + level, each is a 4D-tensor, the channel number is + num_points * num_attrs. + """ + return multi_apply(self.forward_single, x)[:5] + + def forward_single(self, x: Tensor) -> Tuple[Tensor, ...]: + """Forward features of a single scale level. + + Args: + x (Tensor): FPN feature maps of the specified stride. + + Returns: + tuple: Scores for each class, bbox predictions, direction class, + and attributes, features after classification and regression + conv layers, some models needs these features like FCOS. 
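# The classification bias above is initialised with bias_init_with_prob(0.01), which
# is assumed here to be the usual focal-loss prior trick: choose the bias b so that
# sigmoid(b) equals the prior probability, keeping early training stable.
import math

prior_prob = 0.01
bias_cls = -math.log((1 - prior_prob) / prior_prob)
assert abs(bias_cls + 4.595) < 1e-2                        # untrained logits start near 1%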
+ """ + cls_feat = x + reg_feat = x + + for cls_layer in self.cls_convs: + cls_feat = cls_layer(cls_feat) + # clone the cls_feat for reusing the feature map afterwards + clone_cls_feat = cls_feat.clone() + for conv_cls_prev_layer in self.conv_cls_prev: + clone_cls_feat = conv_cls_prev_layer(clone_cls_feat) + cls_score = self.conv_cls(clone_cls_feat) + + for reg_layer in self.reg_convs: + reg_feat = reg_layer(reg_feat) + bbox_pred = [] + for i in range(len(self.group_reg_dims)): + # clone the reg_feat for reusing the feature map afterwards + clone_reg_feat = reg_feat.clone() + if len(self.reg_branch[i]) > 0: + for conv_reg_prev_layer in self.conv_reg_prevs[i]: + clone_reg_feat = conv_reg_prev_layer(clone_reg_feat) + bbox_pred.append(self.conv_regs[i](clone_reg_feat)) + bbox_pred = torch.cat(bbox_pred, dim=1) + + dir_cls_pred = None + if self.use_direction_classifier: + clone_reg_feat = reg_feat.clone() + for conv_dir_cls_prev_layer in self.conv_dir_cls_prev: + clone_reg_feat = conv_dir_cls_prev_layer(clone_reg_feat) + dir_cls_pred = self.conv_dir_cls(clone_reg_feat) + + attr_pred = None + if self.pred_attrs: + # clone the cls_feat for reusing the feature map afterwards + clone_cls_feat = cls_feat.clone() + for conv_attr_prev_layer in self.conv_attr_prev: + clone_cls_feat = conv_attr_prev_layer(clone_cls_feat) + attr_pred = self.conv_attr(clone_cls_feat) + + return cls_score, bbox_pred, dir_cls_pred, attr_pred, cls_feat, \ + reg_feat + + @abstractmethod + def get_targets(self, points: List[Tensor], + batch_gt_instances: InstanceList) -> Any: + """Compute regression, classification and centerss targets for points + in multiple images. + + Args: + points (list[Tensor]): Points of each fpn level, each has shape + (num_points, 2). + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes``、``labels`` + 、``bboxes_3d``、``labels_3d``、``depths``、``centers_2d`` + and attributes. + """ + raise NotImplementedError + + # TODO: Refactor using MlvlPointGenerator in MMDet. + def _get_points_single(self, + featmap_size: Tuple[int], + stride: int, + dtype: torch.dtype, + device: torch.device, + flatten: bool = False) -> Tuple[Tensor, Tensor]: + """Get points of a single scale level. + + Args: + featmap_size (tuple[int]): Single scale level feature map + size. + stride (int): Downsample factor of the feature map. + dtype (torch.dtype): Type of points. + device (torch.device): Device of points. + flatten (bool): Whether to flatten the tensor. + Defaults to False. + + Returns: + tuple: points of each image. + """ + h, w = featmap_size + x_range = torch.arange(w, dtype=dtype, device=device) + y_range = torch.arange(h, dtype=dtype, device=device) + y, x = torch.meshgrid(y_range, x_range) + if flatten: + y = y.flatten() + x = x.flatten() + return y, x + + # TODO: Refactor using MlvlPointGenerator in MMDet. + def get_points(self, + featmap_sizes: List[Tuple[int]], + dtype: torch.dtype, + device: torch.device, + flatten: bool = False) -> List[Tuple[Tensor, Tensor]]: + """Get points according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + dtype (torch.dtype): Type of points. + device (torch.device): Device of points. + flatten (bool): Whether to flatten the tensor. + Defaults to False. + + Returns: + list[tuple]: points of each image. 
+ """ + mlvl_points = [] + for i in range(len(featmap_sizes)): + mlvl_points.append( + self._get_points_single(featmap_sizes[i], self.strides[i], + dtype, device, flatten)) + return mlvl_points diff --git a/mmdet3d/models/dense_heads/base_3d_dense_head.py b/mmdet3d/models/dense_heads/base_3d_dense_head.py new file mode 100755 index 0000000..a38695a --- /dev/null +++ b/mmdet3d/models/dense_heads/base_3d_dense_head.py @@ -0,0 +1,381 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import List, Optional, Tuple + +import numpy as np +import torch +from mmdet.models.utils import select_single_mlvl +from mmengine.config import ConfigDict +from mmengine.model import BaseModule, constant_init +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.models.layers import box3d_multiclass_nms +from mmdet3d.structures import limit_period, xywhr2xyxyr +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils.typing_utils import InstanceList, OptMultiConfig + + +class Base3DDenseHead(BaseModule, metaclass=ABCMeta): + """Base class for 3D DenseHeads. + + 1. The ``init_weights`` method is used to initialize densehead's + model parameters. After detector initialization, ``init_weights`` + is triggered when ``detector.init_weights()`` is called externally. + + 2. The ``loss`` method is used to calculate the loss of densehead, + which includes two steps: (1) the densehead model performs forward + propagation to obtain the feature maps (2) The ``loss_by_feat`` method + is called based on the feature maps to calculate the loss. + + .. code:: text + + loss(): forward() -> loss_by_feat() + + 3. The ``predict`` method is used to predict detection results, + which includes two steps: (1) the densehead model performs forward + propagation to obtain the feature maps (2) The ``predict_by_feat`` method + is called based on the feature maps to predict detection results including + post-processing. + + .. code:: text + + predict(): forward() -> predict_by_feat() + + 4. The ``loss_and_predict`` method is used to return loss and detection + results at the same time. It will call densehead's ``forward``, + ``loss_by_feat`` and ``predict_by_feat`` methods in order. If one-stage is + used as RPN, the densehead needs to return both losses and predictions. + This predictions is used as the proposal of roihead. + + .. code:: text + + loss_and_predict(): forward() -> loss_by_feat() -> predict_by_feat() + """ + + def __init__(self, init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) + + def init_weights(self) -> None: + """Initialize the weights.""" + super().init_weights() + # avoid init_cfg overwrite the initialization of `conv_offset` + for m in self.modules(): + # DeformConv2dPack, ModulatedDeformConv2dPack + if hasattr(m, 'conv_offset'): + constant_init(m.conv_offset, 0) + + def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList, + **kwargs) -> dict: + """Perform forward propagation and loss calculation of the detection + head on the features of the upstream network. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + + Returns: + dict: A dictionary of loss components. 
+ """ + outs = self(x) + + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + + loss_inputs = outs + (batch_gt_instances_3d, batch_input_metas, + batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + return losses + + @abstractmethod + def loss_by_feat(self, **kwargs) -> dict: + """Calculate the loss based on the features extracted by the detection + head.""" + pass + + def loss_and_predict(self, + x: Tuple[Tensor], + batch_data_samples: SampleList, + proposal_cfg: Optional[ConfigDict] = None, + **kwargs) -> Tuple[dict, InstanceList]: + """Perform forward propagation of the head, then calculate loss and + predictions from the features and data samples. + + Args: + x (tuple[Tensor]): Features from FPN. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each image and + corresponding annotations. + proposal_cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + + Returns: + tuple: the return value is a tuple contains: + + - losses: (dict[str, Tensor]): A dictionary of loss components. + - predictions (list[:obj:`InstanceData`]): Detection + results of each image after the post process. + """ + batch_gt_instances = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instances.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + + outs = self(x) + + loss_inputs = outs + (batch_gt_instances, batch_input_metas, + batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + + predictions = self.predict_by_feat( + *outs, batch_input_metas=batch_input_metas, cfg=proposal_cfg) + return losses, predictions + + def predict(self, + x: Tuple[Tensor], + batch_data_samples: SampleList, + rescale: bool = False) -> InstanceList: + """Perform forward propagation of the 3D detection head and predict + detection results on the features of the upstream network. + + Args: + x (tuple[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_pts_panoptic_seg` and + `gt_pts_sem_seg`. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. 
+ """ + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + outs = self(x) + predictions = self.predict_by_feat( + *outs, batch_input_metas=batch_input_metas, rescale=rescale) + return predictions + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + batch_input_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = False, + **kwargs) -> InstanceList: + """Transform a batch of output features extracted from the head into + bbox results. + + Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + score_factors (list[Tensor], optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, num_priors * 1, H, W). Defaults to None. + batch_input_metas (list[dict], Optional): Batch inputs meta info. + Defaults to None. + cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + assert len(cls_scores) == len(bbox_preds) + assert len(cls_scores) == len(dir_cls_preds) + num_levels = len(cls_scores) + featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] + mlvl_priors = self.prior_generator.grid_anchors( + featmap_sizes, device=cls_scores[0].device) + mlvl_priors = [ + prior.reshape(-1, self.box_code_size) for prior in mlvl_priors + ] + + result_list = [] + + for input_id in range(len(batch_input_metas)): + + input_meta = batch_input_metas[input_id] + cls_score_list = select_single_mlvl(cls_scores, input_id) + bbox_pred_list = select_single_mlvl(bbox_preds, input_id) + dir_cls_pred_list = select_single_mlvl(dir_cls_preds, input_id) + + results = self._predict_by_feat_single( + cls_score_list=cls_score_list, + bbox_pred_list=bbox_pred_list, + dir_cls_pred_list=dir_cls_pred_list, + mlvl_priors=mlvl_priors, + input_meta=input_meta, + cfg=cfg, + rescale=rescale, + **kwargs) + result_list.append(results) + return result_list + + def _predict_by_feat_single(self, + cls_score_list: List[Tensor], + bbox_pred_list: List[Tensor], + dir_cls_pred_list: List[Tensor], + mlvl_priors: List[Tensor], + input_meta: dict, + cfg: ConfigDict, + rescale: bool = False, + **kwargs) -> InstanceData: + """Transform a single points sample's features extracted from the head + into bbox results. + + Args: + cls_score_list (list[Tensor]): Box scores from all scale + levels of a single point cloud sample, each item has shape + (num_priors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas from + all scale levels of a single point cloud sample, each item + has shape (num_priors * C, H, W). 
+ dir_cls_pred_list (list[Tensor]): Predictions of direction class + from all scale levels of a single point cloud sample, each + item has shape (num_priors * 2, H, W). + mlvl_priors (list[Tensor]): Each element in the list is + the priors of a single level in feature pyramid. In all + anchor-based methods, it has shape (num_priors, 4). In + all anchor-free methods, it has shape (num_priors, 2) + when `with_stride=True`, otherwise it still has shape + (num_priors, 4). + input_meta (dict): Contain point clouds and image meta info. + cfg (:obj:`ConfigDict`): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + + Returns: + :obj:`InstanceData`: Detection results of each image + after the post process. + Each item usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). + """ + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_priors) + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_dir_scores = [] + for cls_score, bbox_pred, dir_cls_pred, priors in zip( + cls_score_list, bbox_pred_list, dir_cls_pred_list, + mlvl_priors): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + assert cls_score.size()[-2:] == dir_cls_pred.size()[-2:] + dir_cls_pred = dir_cls_pred.permute(1, 2, 0).reshape(-1, 2) + dir_cls_score = torch.max(dir_cls_pred, dim=-1)[1] + + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.num_classes) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(1, 2, + 0).reshape(-1, self.box_code_size) + + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + if self.use_sigmoid_cls: + max_scores, _ = scores.max(dim=1) + else: + max_scores, _ = scores[:, :-1].max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + priors = priors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + dir_cls_score = dir_cls_score[topk_inds] + + bboxes = self.bbox_coder.decode(priors, bbox_pred) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_dir_scores.append(dir_cls_score) + + mlvl_bboxes = torch.cat(mlvl_bboxes) + mlvl_bboxes_for_nms = xywhr2xyxyr(input_meta['box_type_3d']( + mlvl_bboxes, box_dim=self.box_code_size).bev) + mlvl_scores = torch.cat(mlvl_scores) + mlvl_dir_scores = torch.cat(mlvl_dir_scores) + + if self.use_sigmoid_cls: + # Add a dummy background class to the front when using sigmoid + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) + + score_thr = cfg.get('score_thr', 0) + results = box3d_multiclass_nms(mlvl_bboxes, mlvl_bboxes_for_nms, + mlvl_scores, score_thr, cfg.max_num, + cfg, mlvl_dir_scores) + bboxes, scores, labels, dir_scores = results + if bboxes.shape[0] > 0: + dir_rot = limit_period(bboxes[..., 6] - self.dir_offset, + self.dir_limit_offset, np.pi) + bboxes[..., 6] = ( + dir_rot + self.dir_offset + + np.pi * dir_scores.to(bboxes.dtype)) + bboxes = input_meta['box_type_3d'](bboxes, box_dim=self.box_code_size) + results = InstanceData() + results.bboxes_3d = bboxes + results.scores_3d = scores + results.labels_3d = labels + + return results + + # TODO: Support augmentation 
test + def aug_test(self, + aug_batch_feats, + aug_batch_input_metas, + rescale=False, + with_ori_nms=False, + **kwargs): + pass diff --git a/mmdet3d/models/dense_heads/base_conv_bbox_head.py b/mmdet3d/models/dense_heads/base_conv_bbox_head.py new file mode 100755 index 0000000..4f4b875 --- /dev/null +++ b/mmdet3d/models/dense_heads/base_conv_bbox_head.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks import build_conv_layer +from mmengine.model import BaseModule +from torch import nn as nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class BaseConvBboxHead(BaseModule): + r"""More general bbox head, with shared conv layers and two optional + separated branches. + + .. code-block:: none + + /-> cls convs -> cls_score + shared convs + \-> reg convs -> bbox_pred + """ + + def __init__(self, + in_channels=0, + shared_conv_channels=(), + cls_conv_channels=(), + num_cls_out_channels=0, + reg_conv_channels=(), + num_reg_out_channels=0, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + bias='auto', + init_cfg=None, + *args, + **kwargs): + super(BaseConvBboxHead, self).__init__( + init_cfg=init_cfg, *args, **kwargs) + assert in_channels > 0 + assert num_cls_out_channels > 0 + assert num_reg_out_channels > 0 + self.in_channels = in_channels + self.shared_conv_channels = shared_conv_channels + self.cls_conv_channels = cls_conv_channels + self.num_cls_out_channels = num_cls_out_channels + self.reg_conv_channels = reg_conv_channels + self.num_reg_out_channels = num_reg_out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.bias = bias + + # add shared convs + if len(self.shared_conv_channels) > 0: + self.shared_convs = self._add_conv_branch( + self.in_channels, self.shared_conv_channels) + out_channels = self.shared_conv_channels[-1] + else: + out_channels = self.in_channels + + # add cls specific branch + prev_channel = out_channels + if len(self.cls_conv_channels) > 0: + self.cls_convs = self._add_conv_branch(prev_channel, + self.cls_conv_channels) + prev_channel = self.cls_conv_channels[-1] + + self.conv_cls = build_conv_layer( + conv_cfg, + in_channels=prev_channel, + out_channels=num_cls_out_channels, + kernel_size=1) + # add reg specific branch + prev_channel = out_channels + if len(self.reg_conv_channels) > 0: + self.reg_convs = self._add_conv_branch(prev_channel, + self.reg_conv_channels) + prev_channel = self.reg_conv_channels[-1] + + self.conv_reg = build_conv_layer( + conv_cfg, + in_channels=prev_channel, + out_channels=num_reg_out_channels, + kernel_size=1) + + def _add_conv_branch(self, in_channels, conv_channels): + """Add shared or separable branch.""" + conv_spec = [in_channels] + list(conv_channels) + # add branch specific conv layers + conv_layers = nn.Sequential() + for i in range(len(conv_spec) - 1): + conv_layers.add_module( + f'layer{i}', + ConvModule( + conv_spec[i], + conv_spec[i + 1], + kernel_size=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.bias, + inplace=True)) + return conv_layers + + def forward(self, feats): + """Forward. 
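+
+ Applies the shared conv layers (if any) and then the separate
+ classification and regression branches to the input features.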
+ + Args: + feats (Tensor): Input features + + Returns: + Tensor: Class scores predictions + Tensor: Regression predictions + """ + # shared part + if len(self.shared_conv_channels) > 0: + x = self.shared_convs(feats) + + # separate branches + x_cls = x + x_reg = x + + if len(self.cls_conv_channels) > 0: + x_cls = self.cls_convs(x_cls) + cls_score = self.conv_cls(x_cls) + + if len(self.reg_conv_channels) > 0: + x_reg = self.reg_convs(x_reg) + bbox_pred = self.conv_reg(x_reg) + + return cls_score, bbox_pred diff --git a/mmdet3d/models/dense_heads/base_mono3d_dense_head.py b/mmdet3d/models/dense_heads/base_mono3d_dense_head.py new file mode 100755 index 0000000..5627ce1 --- /dev/null +++ b/mmdet3d/models/dense_heads/base_mono3d_dense_head.py @@ -0,0 +1,186 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Optional, Tuple + +from mmengine.config import ConfigDict +from mmengine.model import BaseModule +from torch import Tensor + +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils import InstanceList, OptMultiConfig + + +class BaseMono3DDenseHead(BaseModule, metaclass=ABCMeta): + """Base class for Monocular 3D DenseHeads. + + 1. The ``loss`` method is used to calculate the loss of densehead, + which includes two steps: (1) the densehead model performs forward + propagation to obtain the feature maps (2) The ``loss_by_feat`` method + is called based on the feature maps to calculate the loss. + + .. code:: text + + loss(): forward() -> loss_by_feat() + + 2. The ``predict`` method is used to predict detection results, + which includes two steps: (1) the densehead model performs forward + propagation to obtain the feature maps (2) The ``predict_by_feat`` method + is called based on the feature maps to predict detection results including + post-processing. + + .. code:: text + + predict(): forward() -> predict_by_feat() + + 3. The ``loss_and_predict`` method is used to return loss and detection + results at the same time. It will call densehead's ``forward``, + ``loss_by_feat`` and ``predict_by_feat`` methods in order. If one-stage is + used as RPN, the densehead needs to return both losses and predictions. + This predictions is used as the proposal of roihead. + + .. code:: text + + loss_and_predict(): forward() -> loss_by_feat() -> predict_by_feat() + """ + + def __init__(self, init_cfg: OptMultiConfig = None) -> None: + super(BaseMono3DDenseHead, self).__init__(init_cfg=init_cfg) + + def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList, + **kwargs) -> dict: + """ + Args: + x (list[Tensor]): Features from FPN. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each image and corresponding + annotations. + + Returns: + tuple or Tensor: When `proposal_cfg` is None, the detector is a \ + normal one-stage detector, The return value is the losses. + + - losses: (dict[str, Tensor]): A dictionary of loss components. + + When the `proposal_cfg` is not None, the head is used as a + `rpn_head`, the return value is a tuple contains: + + - losses: (dict[str, Tensor]): A dictionary of loss components. + - results_list (list[:obj:`InstanceData`]): Detection + results of each image after the post process. + Each item usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). 
+ - bboxes (:obj:`BaseInstance3DBoxes`): Contains a tensor + with shape (num_instances, C), the last dimension C of a + 3D box is (x, y, z, x_size, y_size, z_size, yaw, ...), where + C >= 7. C = 7 for kitti and C = 9 for nuscenes with extra 2 + dims of velocity. + """ + + outs = self(x) + batch_gt_instances_3d = [] + batch_gt_instances = [] + batch_gt_instances_ignore = [] + batch_img_metas = [] + for data_sample in batch_data_samples: + batch_img_metas.append(data_sample.metainfo) + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + batch_gt_instances.append(data_sample.gt_instances) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + + loss_inputs = outs + (batch_gt_instances_3d, batch_gt_instances, + batch_img_metas, batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + + return losses + + @abstractmethod + def loss_by_feat(self, **kwargs) -> dict: + """Calculate the loss based on the features extracted by the detection + head.""" + pass + + def loss_and_predict(self, + x: Tuple[Tensor], + batch_data_samples: SampleList, + proposal_cfg: Optional[ConfigDict] = None, + **kwargs) -> Tuple[dict, InstanceList]: + """Perform forward propagation of the head, then calculate loss and + predictions from the features and data samples. + + Args: + x (tuple[Tensor]): Features from FPN. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each image and + corresponding annotations. + proposal_cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + + Returns: + tuple: the return value is a tuple contains: + + - losses: (dict[str, Tensor]): A dictionary of loss components. + - predictions (list[:obj:`InstanceData`]): Detection + results of each image after the post process. + """ + batch_gt_instances_3d = [] + batch_gt_instances = [] + batch_gt_instances_ignore = [] + batch_img_metas = [] + for data_sample in batch_data_samples: + batch_img_metas.append(data_sample.metainfo) + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + batch_gt_instances.append(data_sample.gt_instances) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + + outs = self(x) + + loss_inputs = outs + (batch_gt_instances_3d, batch_gt_instances, + batch_img_metas, batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + + predictions = self.predict_by_feat( + *outs, batch_img_metas=batch_img_metas, cfg=proposal_cfg) + + return losses, predictions + + def predict(self, + x: Tuple[Tensor], + batch_data_samples: SampleList, + rescale: bool = False) -> InstanceList: + """Perform forward propagation of the detection head and predict + detection results on the features of the upstream network. + + Args: + x (tuple[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_pts_panoptic_seg` and `gt_pts_sem_seg`. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[obj:`InstanceData`]: Detection results of each image + after the post process. 
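+
+ Each ``InstanceData`` typically contains ``bboxes_3d``, ``scores_3d``
+ and ``labels_3d`` of the predicted instances.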
+ """ + batch_img_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + outs = self(x) + predictions = self.predict_by_feat( + *outs, batch_img_metas=batch_img_metas, rescale=rescale) + + return predictions + + @abstractmethod + def predict_by_feat(self, **kwargs) -> InstanceList: + """Transform a batch of output features extracted from the head into + bbox results.""" + pass diff --git a/mmdet3d/models/dense_heads/centerpoint_head.py b/mmdet3d/models/dense_heads/centerpoint_head.py new file mode 100755 index 0000000..72f8f74 --- /dev/null +++ b/mmdet3d/models/dense_heads/centerpoint_head.py @@ -0,0 +1,926 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Dict, List, Optional, Tuple, Union + +import torch +from mmcv.cnn import ConvModule, build_conv_layer +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor, nn + +from mmdet3d.models.utils import (clip_sigmoid, draw_heatmap_gaussian, + gaussian_radius) +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures import Det3DDataSample, xywhr2xyxyr +from ..layers import circle_nms, nms_bev + + +@MODELS.register_module() +class SeparateHead(BaseModule): + """SeparateHead for CenterHead. + + Args: + in_channels (int): Input channels for conv_layer. + heads (dict): Conv information. + head_conv (int, optional): Output channels. + Default: 64. + final_kernel (int, optional): Kernel size for the last conv layer. + Default: 1. + init_bias (float, optional): Initial bias. Default: -2.19. + conv_cfg (dict, optional): Config of conv layer. + Default: dict(type='Conv2d') + norm_cfg (dict, optional): Config of norm layer. + Default: dict(type='BN2d'). + bias (str, optional): Type of bias. Default: 'auto'. + """ + + def __init__(self, + in_channels, + heads, + head_conv=64, + final_kernel=1, + init_bias=-2.19, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict(type='BN2d'), + bias='auto', + init_cfg=None, + **kwargs): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(SeparateHead, self).__init__(init_cfg=init_cfg) + self.heads = heads + self.init_bias = init_bias + for head in self.heads: + classes, num_conv = self.heads[head] + conv_layers = [] + c_in = in_channels + for i in range(num_conv - 1): + conv_layers.append( + ConvModule( + c_in, + head_conv, + kernel_size=final_kernel, + stride=1, + padding=final_kernel // 2, + bias=bias, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + c_in = head_conv + + conv_layers.append( + build_conv_layer( + conv_cfg, + head_conv, + classes, + kernel_size=final_kernel, + stride=1, + padding=final_kernel // 2, + bias=True)) + conv_layers = nn.Sequential(*conv_layers) + + self.__setattr__(head, conv_layers) + + if init_cfg is None: + self.init_cfg = dict(type='Kaiming', layer='Conv2d') + + def init_weights(self): + """Initialize weights.""" + super().init_weights() + for head in self.heads: + if head == 'heatmap': + self.__getattr__(head)[-1].bias.data.fill_(self.init_bias) + + def forward(self, x): + """Forward function for SepHead. + + Args: + x (torch.Tensor): Input feature map with the shape of + [B, 512, 128, 128]. + + Returns: + dict[str: torch.Tensor]: contains the following keys: + + -reg (torch.Tensor): 2D regression value with the + shape of [B, 2, H, W]. + -height (torch.Tensor): Height value with the + shape of [B, 1, H, W]. 
+ -dim (torch.Tensor): Size value with the shape + of [B, 3, H, W]. + -rot (torch.Tensor): Rotation value with the + shape of [B, 2, H, W]. + -vel (torch.Tensor): Velocity value with the + shape of [B, 2, H, W]. + -heatmap (torch.Tensor): Heatmap with the shape of + [B, N, H, W]. + """ + ret_dict = dict() + for head in self.heads: + ret_dict[head] = self.__getattr__(head)(x) + + return ret_dict + + +@MODELS.register_module() +class DCNSeparateHead(BaseModule): + r"""DCNSeparateHead for CenterHead. + + .. code-block:: none + /-----> DCN for heatmap task -----> heatmap task. + feature + \-----> DCN for regression tasks -----> regression tasks + + Args: + in_channels (int): Input channels for conv_layer. + num_cls (int): Number of classes. + heads (dict): Conv information. + dcn_config (dict): Config of dcn layer. + head_conv (int, optional): Output channels. + Default: 64. + final_kernel (int, optional): Kernel size for the last conv + layer. Default: 1. + init_bias (float, optional): Initial bias. Default: -2.19. + conv_cfg (dict, optional): Config of conv layer. + Default: dict(type='Conv2d') + norm_cfg (dict, optional): Config of norm layer. + Default: dict(type='BN2d'). + bias (str, optional): Type of bias. Default: 'auto'. + """ # noqa: W605 + + def __init__(self, + in_channels, + num_cls, + heads, + dcn_config, + head_conv=64, + final_kernel=1, + init_bias=-2.19, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict(type='BN2d'), + bias='auto', + init_cfg=None, + **kwargs): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(DCNSeparateHead, self).__init__(init_cfg=init_cfg) + if 'heatmap' in heads: + heads.pop('heatmap') + # feature adaptation with dcn + # use separate features for classification / regression + self.feature_adapt_cls = build_conv_layer(dcn_config) + + self.feature_adapt_reg = build_conv_layer(dcn_config) + + # heatmap prediction head + cls_head = [ + ConvModule( + in_channels, + head_conv, + kernel_size=3, + padding=1, + conv_cfg=conv_cfg, + bias=bias, + norm_cfg=norm_cfg), + build_conv_layer( + conv_cfg, + head_conv, + num_cls, + kernel_size=3, + stride=1, + padding=1, + bias=bias) + ] + self.cls_head = nn.Sequential(*cls_head) + self.init_bias = init_bias + # other regression target + self.task_head = SeparateHead( + in_channels, + heads, + head_conv=head_conv, + final_kernel=final_kernel, + bias=bias) + if init_cfg is None: + self.init_cfg = dict(type='Kaiming', layer='Conv2d') + + def init_weights(self): + """Initialize weights.""" + super().init_weights() + self.cls_head[-1].bias.data.fill_(self.init_bias) + + def forward(self, x): + """Forward function for DCNSepHead. + + Args: + x (torch.Tensor): Input feature map with the shape of + [B, 512, 128, 128]. + + Returns: + dict[str: torch.Tensor]: contains the following keys: + + -reg (torch.Tensor): 2D regression value with the + shape of [B, 2, H, W]. + -height (torch.Tensor): Height value with the + shape of [B, 1, H, W]. + -dim (torch.Tensor): Size value with the shape + of [B, 3, H, W]. + -rot (torch.Tensor): Rotation value with the + shape of [B, 2, H, W]. + -vel (torch.Tensor): Velocity value with the + shape of [B, 2, H, W]. + -heatmap (torch.Tensor): Heatmap with the shape of + [B, N, H, W]. 
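+
+ Note that the heatmap is produced by the DCN classification branch,
+ while the remaining regression outputs come from the wrapped
+ ``SeparateHead``.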
+ """ + center_feat = self.feature_adapt_cls(x) + reg_feat = self.feature_adapt_reg(x) + + cls_score = self.cls_head(center_feat) + ret = self.task_head(reg_feat) + ret['heatmap'] = cls_score + + return ret + + +@MODELS.register_module() +class CenterHead(BaseModule): + """CenterHead for CenterPoint. + + Args: + in_channels (list[int] | int, optional): Channels of the input + feature map. Default: [128]. + tasks (list[dict], optional): Task information including class number + and class names. Default: None. + bbox_coder (dict, optional): Bbox coder configs. Default: None. + common_heads (dict, optional): Conv information for common heads. + Default: dict(). + loss_cls (dict, optional): Config of classification loss function. + Default: dict(type='GaussianFocalLoss', reduction='mean'). + loss_bbox (dict, optional): Config of regression loss function. + Default: dict(type='L1Loss', reduction='none'). + separate_head (dict, optional): Config of separate head. Default: dict( + type='SeparateHead', init_bias=-2.19, final_kernel=3) + share_conv_channel (int, optional): Output channels for share_conv + layer. Default: 64. + num_heatmap_convs (int, optional): Number of conv layers for heatmap + conv layer. Default: 2. + conv_cfg (dict, optional): Config of conv layer. + Default: dict(type='Conv2d') + norm_cfg (dict, optional): Config of norm layer. + Default: dict(type='BN2d'). + bias (str): Type of bias. Default: 'auto'. + norm_bbox (bool): Whether normalize the bbox predictions. + Defaults to True. + train_cfg (dict, optional): Train-time configs. Default: None. + test_cfg (dict, optional): Test-time configs. Default: None. + init_cfg (dict, optional): Config for initialization. + """ + + def __init__(self, + in_channels: Union[List[int], int] = [128], + tasks: Optional[List[dict]] = None, + bbox_coder: Optional[dict] = None, + common_heads: dict = dict(), + loss_cls: dict = dict( + type='mmdet.GaussianFocalLoss', reduction='mean'), + loss_bbox: dict = dict( + type='mmdet.L1Loss', reduction='none', loss_weight=0.25), + separate_head: dict = dict( + type='mmdet.SeparateHead', + init_bias=-2.19, + final_kernel=3), + share_conv_channel: int = 64, + num_heatmap_convs: int = 2, + conv_cfg: dict = dict(type='Conv2d'), + norm_cfg: dict = dict(type='BN2d'), + bias: str = 'auto', + norm_bbox: bool = True, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None, + **kwargs): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(CenterHead, self).__init__(init_cfg=init_cfg, **kwargs) + + # TODO we should rename this variable, + # for example num_classes_per_task ? 
+ # {'num_class': 2, 'class_names': ['pedestrian', 'traffic_cone']}] + # TODO seems num_classes is useless + num_classes = [len(t['class_names']) for t in tasks] + self.class_names = [t['class_names'] for t in tasks] + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.in_channels = in_channels + self.num_classes = num_classes + self.norm_bbox = norm_bbox + + self.loss_cls = MODELS.build(loss_cls) + self.loss_bbox = MODELS.build(loss_bbox) + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.num_anchor_per_locs = [n for n in num_classes] + self.fp16_enabled = False + + # a shared convolution + self.shared_conv = ConvModule( + in_channels, + share_conv_channel, + kernel_size=3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=bias) + + self.task_heads = nn.ModuleList() + + for num_cls in num_classes: + heads = copy.deepcopy(common_heads) + heads.update(dict(heatmap=(num_cls, num_heatmap_convs))) + separate_head.update( + in_channels=share_conv_channel, heads=heads, num_cls=num_cls) + self.task_heads.append(MODELS.build(separate_head)) + + def forward_single(self, x: Tensor) -> dict: + """Forward function for CenterPoint. + + Args: + x (torch.Tensor): Input feature map with the shape of + [B, 512, 128, 128]. + + Returns: + list[dict]: Output results for tasks. + """ + ret_dicts = [] + + x = self.shared_conv(x) + + for task in self.task_heads: + ret_dicts.append(task(x)) + + return ret_dicts + + def forward(self, feats: List[Tensor]) -> Tuple[List[Tensor]]: + """Forward pass. + + Args: + feats (list[torch.Tensor]): Multi-level features, e.g., + features produced by FPN. + + Returns: + tuple(list[dict]): Output results for tasks. + """ + return multi_apply(self.forward_single, feats) + + def _gather_feat(self, feat, ind, mask=None): + """Gather feature map. + + Given feature map and index, return indexed feature map. + + Args: + feat (torch.tensor): Feature map with the shape of [B, H*W, 10]. + ind (torch.Tensor): Index of the ground truth boxes with the + shape of [B, max_obj]. + mask (torch.Tensor, optional): Mask of the feature map with the + shape of [B, max_obj]. Default: None. + + Returns: + torch.Tensor: Feature map after gathering with the shape + of [B, max_obj, 10]. + """ + dim = feat.size(2) + ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) + feat = feat.gather(1, ind) + if mask is not None: + mask = mask.unsqueeze(2).expand_as(feat) + feat = feat[mask] + feat = feat.view(-1, dim) + return feat + + def get_targets( + self, + batch_gt_instances_3d: List[InstanceData], + ) -> Tuple[List[Tensor]]: + """Generate targets. + + How each output is transformed: + + Each nested list is transposed so that all same-index elements in + each sub-list (1, ..., N) become the new sub-lists. + [ [a0, a1, a2, ... ], [b0, b1, b2, ... ], ... ] + ==> [ [a0, b0, ... ], [a1, b1, ... ], [a2, b2, ... ] ] + + The new transposed nested list is converted into a list of N + tensors generated by concatenating tensors in the new sub-lists. + [ tensor0, tensor1, tensor2, ... ] + + Args: + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and\ + ``labels_3d`` attributes. + + Returns: + Returns: + tuple[list[torch.Tensor]]: Tuple of target including + the following results in order. + + - list[torch.Tensor]: Heatmap scores. + - list[torch.Tensor]: Ground truth boxes. + - list[torch.Tensor]: Indexes indicating the + position of the valid boxes. + - list[torch.Tensor]: Masks indicating which + boxes are valid. 
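+
+ The length of each returned list equals the number of tasks, and
+ every tensor is stacked along the batch dimension.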
+ """ + heatmaps, anno_boxes, inds, masks = multi_apply( + self.get_targets_single, batch_gt_instances_3d) + # Transpose heatmaps + heatmaps = list(map(list, zip(*heatmaps))) + heatmaps = [torch.stack(hms_) for hms_ in heatmaps] + # Transpose anno_boxes + anno_boxes = list(map(list, zip(*anno_boxes))) + anno_boxes = [torch.stack(anno_boxes_) for anno_boxes_ in anno_boxes] + # Transpose inds + inds = list(map(list, zip(*inds))) + inds = [torch.stack(inds_) for inds_ in inds] + # Transpose inds + masks = list(map(list, zip(*masks))) + masks = [torch.stack(masks_) for masks_ in masks] + return heatmaps, anno_boxes, inds, masks + + def get_targets_single(self, + gt_instances_3d: InstanceData) -> Tuple[Tensor]: + """Generate training targets for a single sample. + + Args: + gt_instances_3d (:obj:`InstanceData`): Gt_instances of + single data sample. It usually includes + ``bboxes_3d`` and ``labels_3d`` attributes. + + Returns: + tuple[list[torch.Tensor]]: Tuple of target including + the following results in order. + + - list[torch.Tensor]: Heatmap scores. + - list[torch.Tensor]: Ground truth boxes. + - list[torch.Tensor]: Indexes indicating the position + of the valid boxes. + - list[torch.Tensor]: Masks indicating which boxes + are valid. + """ + gt_labels_3d = gt_instances_3d.labels_3d + gt_bboxes_3d = gt_instances_3d.bboxes_3d + device = gt_labels_3d.device + gt_bboxes_3d = torch.cat( + (gt_bboxes_3d.gravity_center, gt_bboxes_3d.tensor[:, 3:]), + dim=1).to(device) + max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg'] + grid_size = torch.tensor(self.train_cfg['grid_size']).to(device) + pc_range = torch.tensor(self.train_cfg['point_cloud_range']) + voxel_size = torch.tensor(self.train_cfg['voxel_size']) + + feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor'] + + # reorganize the gt_dict by tasks + task_masks = [] + flag = 0 + for class_name in self.class_names: + task_masks.append([ + torch.where(gt_labels_3d == class_name.index(i) + flag) + for i in class_name + ]) + flag += len(class_name) + + task_boxes = [] + task_classes = [] + flag2 = 0 + for idx, mask in enumerate(task_masks): + task_box = [] + task_class = [] + for m in mask: + task_box.append(gt_bboxes_3d[m]) + # 0 is background for each task, so we need to add 1 here. + task_class.append(gt_labels_3d[m] + 1 - flag2) + task_boxes.append(torch.cat(task_box, axis=0).to(device)) + task_classes.append(torch.cat(task_class).long().to(device)) + flag2 += len(mask) + draw_gaussian = draw_heatmap_gaussian + heatmaps, anno_boxes, inds, masks = [], [], [], [] + + for idx, task_head in enumerate(self.task_heads): + heatmap = gt_bboxes_3d.new_zeros( + (len(self.class_names[idx]), feature_map_size[1], + feature_map_size[0])) + + anno_box = gt_bboxes_3d.new_zeros((max_objs, 10), + dtype=torch.float32) + + ind = gt_labels_3d.new_zeros((max_objs), dtype=torch.int64) + mask = gt_bboxes_3d.new_zeros((max_objs), dtype=torch.uint8) + + num_objs = min(task_boxes[idx].shape[0], max_objs) + + for k in range(num_objs): + cls_id = task_classes[idx][k] - 1 + + length = task_boxes[idx][k][3] + width = task_boxes[idx][k][4] + length = length / voxel_size[0] / self.train_cfg[ + 'out_size_factor'] + width = width / voxel_size[1] / self.train_cfg[ + 'out_size_factor'] + + if width > 0 and length > 0: + radius = gaussian_radius( + (width, length), + min_overlap=self.train_cfg['gaussian_overlap']) + radius = max(self.train_cfg['min_radius'], int(radius)) + + # be really careful for the coordinate system of + # your box annotation. 
+ x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][ + 1], task_boxes[idx][k][2] + + coor_x = ( + x - pc_range[0] + ) / voxel_size[0] / self.train_cfg['out_size_factor'] + coor_y = ( + y - pc_range[1] + ) / voxel_size[1] / self.train_cfg['out_size_factor'] + + center = torch.tensor([coor_x, coor_y], + dtype=torch.float32, + device=device) + center_int = center.to(torch.int32) + + # throw out not in range objects to avoid out of array + # area when creating the heatmap + if not (0 <= center_int[0] < feature_map_size[0] + and 0 <= center_int[1] < feature_map_size[1]): + continue + + draw_gaussian(heatmap[cls_id], center_int, radius) + + new_idx = k + x, y = center_int[0], center_int[1] + + assert (y * feature_map_size[0] + x < + feature_map_size[0] * feature_map_size[1]) + + ind[new_idx] = y * feature_map_size[0] + x + mask[new_idx] = 1 + # TODO: support other outdoor dataset + vx, vy = task_boxes[idx][k][7:] + rot = task_boxes[idx][k][6] + box_dim = task_boxes[idx][k][3:6] + if self.norm_bbox: + box_dim = box_dim.log() + anno_box[new_idx] = torch.cat([ + center - torch.tensor([x, y], device=device), + z.unsqueeze(0), box_dim, + torch.sin(rot).unsqueeze(0), + torch.cos(rot).unsqueeze(0), + vx.unsqueeze(0), + vy.unsqueeze(0) + ]) + + heatmaps.append(heatmap) + anno_boxes.append(anno_box) + masks.append(mask) + inds.append(ind) + return heatmaps, anno_boxes, inds, masks + + def loss(self, pts_feats: List[Tensor], + batch_data_samples: List[Det3DDataSample], *args, + **kwargs) -> Dict[str, Tensor]: + """Forward function for point cloud branch. + + Args: + pts_feats (list[torch.Tensor]): Features of point cloud branch + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, . + + Returns: + dict: Losses of each branch. + """ + outs = self(pts_feats) + batch_gt_instance_3d = [] + for data_sample in batch_data_samples: + batch_gt_instance_3d.append(data_sample.gt_instances_3d) + losses = self.loss_by_feat(outs, batch_gt_instance_3d) + return losses + + def loss_by_feat(self, preds_dicts: Tuple[List[dict]], + batch_gt_instances_3d: List[InstanceData], *args, + **kwargs): + """Loss function for CenterHead. + + Args: + preds_dicts (tuple[list[dict]]): Prediction results of + multiple tasks. The outer tuple indicate different + tasks head, and the internal list indicate different + FPN level. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and\ + ``labels_3d`` attributes. + + Returns: + dict[str,torch.Tensor]: Loss of heatmap and bbox of each task. 
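+
+ The keys follow the ``task{i}.loss_heatmap`` / ``task{i}.loss_bbox``
+ naming pattern, one pair per task head.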
+ """ + + heatmaps, anno_boxes, inds, masks = self.get_targets( + batch_gt_instances_3d) + loss_dict = dict() + for task_id, preds_dict in enumerate(preds_dicts): + # heatmap focal loss + preds_dict[0]['heatmap'] = clip_sigmoid(preds_dict[0]['heatmap']) + num_pos = heatmaps[task_id].eq(1).float().sum().item() + loss_heatmap = self.loss_cls( + preds_dict[0]['heatmap'], + heatmaps[task_id], + avg_factor=max(num_pos, 1)) + target_box = anno_boxes[task_id] + # reconstruct the anno_box from multiple reg heads + preds_dict[0]['anno_box'] = torch.cat( + (preds_dict[0]['reg'], preds_dict[0]['height'], + preds_dict[0]['dim'], preds_dict[0]['rot'], + preds_dict[0]['vel']), + dim=1) + + # Regression loss for dimension, offset, height, rotation + ind = inds[task_id] + num = masks[task_id].float().sum() + pred = preds_dict[0]['anno_box'].permute(0, 2, 3, 1).contiguous() + pred = pred.view(pred.size(0), -1, pred.size(3)) + pred = self._gather_feat(pred, ind) + mask = masks[task_id].unsqueeze(2).expand_as(target_box).float() + isnotnan = (~torch.isnan(target_box)).float() + mask *= isnotnan + + code_weights = self.train_cfg.get('code_weights', None) + bbox_weights = mask * mask.new_tensor(code_weights) + loss_bbox = self.loss_bbox( + pred, target_box, bbox_weights, avg_factor=(num + 1e-4)) + loss_dict[f'task{task_id}.loss_heatmap'] = loss_heatmap + loss_dict[f'task{task_id}.loss_bbox'] = loss_bbox + return loss_dict + + def predict(self, + pts_feats: Dict[str, torch.Tensor], + batch_data_samples: List[Det3DDataSample], + rescale=True, + **kwargs) -> List[InstanceData]: + """ + Args: + pts_feats (dict): Point features.. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes meta information of data. + rescale (bool): Whether rescale the resutls to + the original scale. + + Returns: + list[:obj:`InstanceData`]: List of processed predictions. Each + InstanceData contains 3d Bounding boxes and corresponding + scores and labels. + """ + preds_dict = self(pts_feats) + batch_size = len(batch_data_samples) + batch_input_metas = [] + for batch_index in range(batch_size): + metainfo = batch_data_samples[batch_index].metainfo + batch_input_metas.append(metainfo) + + results_list = self.predict_by_feat( + preds_dict, batch_input_metas, rescale=rescale, **kwargs) + return results_list + + def predict_by_feat(self, preds_dicts: Tuple[List[dict]], + batch_input_metas: List[dict], *args, + **kwargs) -> List[InstanceData]: + """Generate bboxes from bbox head predictions. + + Args: + preds_dicts (tuple[list[dict]]): Prediction results of + multiple tasks. The outer tuple indicate different + tasks head, and the internal list indicate different + FPN level. + batch_input_metas (list[dict]): Meta info of multiple + inputs. + + Returns: + list[:obj:`InstanceData`]: Instance prediction + results of each sample after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (:obj:`LiDARInstance3DBoxes`): Prediction + of bboxes, contains a tensor with shape + (num_instances, 7) or (num_instances, 9), and + the last 2 dimensions of 9 is + velocity. 
+ """ + rets = [] + for task_id, preds_dict in enumerate(preds_dicts): + num_class_with_bg = self.num_classes[task_id] + batch_size = preds_dict[0]['heatmap'].shape[0] + batch_heatmap = preds_dict[0]['heatmap'].sigmoid() + + batch_reg = preds_dict[0]['reg'] + batch_hei = preds_dict[0]['height'] + + if self.norm_bbox: + batch_dim = torch.exp(preds_dict[0]['dim']) + else: + batch_dim = preds_dict[0]['dim'] + + batch_rots = preds_dict[0]['rot'][:, 0].unsqueeze(1) + batch_rotc = preds_dict[0]['rot'][:, 1].unsqueeze(1) + + if 'vel' in preds_dict[0]: + batch_vel = preds_dict[0]['vel'] + else: + batch_vel = None + temp = self.bbox_coder.decode( + batch_heatmap, + batch_rots, + batch_rotc, + batch_hei, + batch_dim, + batch_vel, + reg=batch_reg, + task_id=task_id) + assert self.test_cfg['nms_type'] in ['circle', 'rotate'] + batch_reg_preds = [box['bboxes'] for box in temp] + batch_cls_preds = [box['scores'] for box in temp] + batch_cls_labels = [box['labels'] for box in temp] + if self.test_cfg['nms_type'] == 'circle': + ret_task = [] + for i in range(batch_size): + boxes3d = temp[i]['bboxes'] + scores = temp[i]['scores'] + labels = temp[i]['labels'] + centers = boxes3d[:, [0, 1]] + boxes = torch.cat([centers, scores.view(-1, 1)], dim=1) + keep = torch.tensor( + circle_nms( + boxes.detach().cpu().numpy(), + self.test_cfg['min_radius'][task_id], + post_max_size=self.test_cfg['post_max_size']), + dtype=torch.long, + device=boxes.device) + + boxes3d = boxes3d[keep] + scores = scores[keep] + labels = labels[keep] + ret = dict(bboxes=boxes3d, scores=scores, labels=labels) + ret_task.append(ret) + rets.append(ret_task) + else: + rets.append( + self.get_task_detections(num_class_with_bg, + batch_cls_preds, batch_reg_preds, + batch_cls_labels, + batch_input_metas)) + + # Merge branches results + num_samples = len(rets[0]) + + ret_list = [] + for i in range(num_samples): + temp_instances = InstanceData() + for k in rets[0][i].keys(): + if k == 'bboxes': + bboxes = torch.cat([ret[i][k] for ret in rets]) + bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + bboxes = batch_input_metas[i]['box_type_3d']( + bboxes, self.bbox_coder.code_size) + elif k == 'scores': + scores = torch.cat([ret[i][k] for ret in rets]) + elif k == 'labels': + flag = 0 + for j, num_class in enumerate(self.num_classes): + rets[j][i][k] += flag + flag += num_class + labels = torch.cat([ret[i][k].int() for ret in rets]) + temp_instances.bboxes_3d = bboxes + temp_instances.scores_3d = scores + temp_instances.labels_3d = labels + ret_list.append(temp_instances) + return ret_list + + def get_task_detections(self, num_class_with_bg, batch_cls_preds, + batch_reg_preds, batch_cls_labels, img_metas): + """Rotate nms for each task. + + Args: + num_class_with_bg (int): Number of classes for the current task. + batch_cls_preds (list[torch.Tensor]): Prediction score with the + shape of [N]. + batch_reg_preds (list[torch.Tensor]): Prediction bbox with the + shape of [N, 9]. + batch_cls_labels (list[torch.Tensor]): Prediction label with the + shape of [N]. + img_metas (list[dict]): Meta information of each sample. + + Returns: + list[dict[str: torch.Tensor]]: contains the following keys: + + -bboxes (torch.Tensor): Prediction bboxes after nms with the + shape of [N, 9]. + -scores (torch.Tensor): Prediction scores after nms with the + shape of [N]. + -labels (torch.Tensor): Prediction labels after nms with the + shape of [N]. 
+ """ + predictions_dicts = [] + post_center_range = self.test_cfg['post_center_limit_range'] + if len(post_center_range) > 0: + post_center_range = torch.tensor( + post_center_range, + dtype=batch_reg_preds[0].dtype, + device=batch_reg_preds[0].device) + + for i, (box_preds, cls_preds, cls_labels) in enumerate( + zip(batch_reg_preds, batch_cls_preds, batch_cls_labels)): + + # Apply NMS in bird eye view + + # get the highest score per prediction, then apply nms + # to remove overlapped box. + if num_class_with_bg == 1: + top_scores = cls_preds.squeeze(-1) + top_labels = torch.zeros( + cls_preds.shape[0], + device=cls_preds.device, + dtype=torch.long) + + else: + top_labels = cls_labels.long() + top_scores = cls_preds.squeeze(-1) + + if self.test_cfg['score_threshold'] > 0.0: + thresh = torch.tensor( + [self.test_cfg['score_threshold']], + device=cls_preds.device).type_as(cls_preds) + top_scores_keep = top_scores >= thresh + top_scores = top_scores.masked_select(top_scores_keep) + + if top_scores.shape[0] != 0: + if self.test_cfg['score_threshold'] > 0.0: + box_preds = box_preds[top_scores_keep] + top_labels = top_labels[top_scores_keep] + + boxes_for_nms = xywhr2xyxyr(img_metas[i]['box_type_3d']( + box_preds[:, :], self.bbox_coder.code_size).bev) + # the nms in 3d detection just remove overlap boxes. + + selected = nms_bev( + boxes_for_nms, + top_scores, + thresh=self.test_cfg['nms_thr'], + pre_max_size=self.test_cfg['pre_max_size'], + post_max_size=self.test_cfg['post_max_size']) + else: + selected = [] + + # if selected is not None: + selected_boxes = box_preds[selected] + selected_labels = top_labels[selected] + selected_scores = top_scores[selected] + + # finally generate predictions. + if selected_boxes.shape[0] != 0: + box_preds = selected_boxes + scores = selected_scores + label_preds = selected_labels + final_box_preds = box_preds + final_scores = scores + final_labels = label_preds + if post_center_range is not None: + mask = (final_box_preds[:, :3] >= + post_center_range[:3]).all(1) + mask &= (final_box_preds[:, :3] <= + post_center_range[3:]).all(1) + predictions_dict = dict( + bboxes=final_box_preds[mask], + scores=final_scores[mask], + labels=final_labels[mask]) + else: + predictions_dict = dict( + bboxes=final_box_preds, + scores=final_scores, + labels=final_labels) + else: + dtype = batch_reg_preds[0].dtype + device = batch_reg_preds[0].device + predictions_dict = dict( + bboxes=torch.zeros([0, self.bbox_coder.code_size], + dtype=dtype, + device=device), + scores=torch.zeros([0], dtype=dtype, device=device), + labels=torch.zeros([0], + dtype=top_labels.dtype, + device=device)) + + predictions_dicts.append(predictions_dict) + return predictions_dicts diff --git a/mmdet3d/models/dense_heads/fcaf3d_head.py b/mmdet3d/models/dense_heads/fcaf3d_head.py new file mode 100755 index 0000000..ed53b7d --- /dev/null +++ b/mmdet3d/models/dense_heads/fcaf3d_head.py @@ -0,0 +1,696 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/dense_heads/fcaf3d_neck_with_head.py # noqa +from typing import List, Optional, Tuple + +try: + import MinkowskiEngine as ME + from MinkowskiEngine import SparseTensor +except ImportError: + # Please follow get_started.md to install MinkowskiEngine. 
+ ME = SparseTensor = None + pass + +import torch +from mmcv.cnn import Scale +from mmcv.ops import nms3d, nms3d_normal +from mmdet.utils import reduce_mean +from mmengine.model import bias_init_with_prob +from mmengine.structures import InstanceData +from torch import Tensor, nn + +from mmdet3d.registry import MODELS +from mmdet3d.structures import BaseInstance3DBoxes, rotation_3d_in_axis +from mmdet3d.utils import InstanceList, OptInstanceList +from .base_3d_dense_head import Base3DDenseHead + + +@MODELS.register_module() +class FCAF3DHead(Base3DDenseHead): + r"""Bbox head of `FCAF3D `_. + + Actually here we store both the sparse 3D FPN and a head. The neck and + the head can not be simply separated as pruning score on the i-th level + of FPN requires classification scores from i+1-th level of the head. + + Args: + num_classes (int): Number of classes. + in_channels (tuple(int)): Number of channels in input tensors. + out_channels (int): Number of channels in the neck output tensors. + num_reg_outs (int): Number of regression layer channels. + voxel_size (float): Voxel size in meters. + pts_prune_threshold (int): Pruning threshold on each feature level. + pts_assign_threshold (int): Box to location assigner parameter. + Assigner selects the maximum feature level with more locations + inside the box than pts_assign_threshold. + pts_center_threshold (int): Box to location assigner parameter. + After feature level for the box is determined, assigner selects + pts_center_threshold locations closest to the box center. + center_loss (dict): Config of centerness loss. Defaults to + dict(type='mmdet.CrossEntropyLoss', use_sigmoid=True). + bbox_loss (dict): Config of bbox loss. Defaults to + dict(type='AxisAlignedIoULoss'). + cls_loss (dict): Config of classification loss. Defaults to + dict = dict(type='mmdet.FocalLoss'). + train_cfg (dict, optional): Config for train stage. Defaults to None. + test_cfg (dict, optional): Config for test stage. Defaults to None. + init_cfg (dict, optional): Config for weight initialization. + Defaults to None. + """ + + def __init__(self, + num_classes: int, + in_channels: Tuple[int], + out_channels: int, + num_reg_outs: int, + voxel_size: float, + pts_prune_threshold: int, + pts_assign_threshold: int, + pts_center_threshold: int, + center_loss: dict = dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True), + bbox_loss: dict = dict(type='AxisAlignedIoULoss'), + cls_loss: dict = dict(type='mmdet.FocalLoss'), + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super(FCAF3DHead, self).__init__(init_cfg) + if ME is None: + raise ImportError( + 'Please follow `get_started.md` to install MinkowskiEngine.`') + self.voxel_size = voxel_size + self.pts_prune_threshold = pts_prune_threshold + self.pts_assign_threshold = pts_assign_threshold + self.pts_center_threshold = pts_center_threshold + self.center_loss = MODELS.build(center_loss) + self.bbox_loss = MODELS.build(bbox_loss) + self.cls_loss = MODELS.build(cls_loss) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self._init_layers(in_channels, out_channels, num_reg_outs, num_classes) + + @staticmethod + def _make_block(in_channels: int, out_channels: int) -> nn.Module: + """Construct Conv-Norm-Act block. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + + Returns: + torch.nn.Module: With corresponding layers. 
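+
+ Concretely: MinkowskiConvolution -> MinkowskiBatchNorm -> MinkowskiELU.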
+ """ + return nn.Sequential( + ME.MinkowskiConvolution( + in_channels, out_channels, kernel_size=3, dimension=3), + ME.MinkowskiBatchNorm(out_channels), ME.MinkowskiELU()) + + @staticmethod + def _make_up_block(in_channels: int, out_channels: int) -> nn.Module: + """Construct DeConv-Norm-Act-Conv-Norm-Act block. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + + Returns: + torch.nn.Module: With corresponding layers. + """ + return nn.Sequential( + ME.MinkowskiGenerativeConvolutionTranspose( + in_channels, + out_channels, + kernel_size=2, + stride=2, + dimension=3), ME.MinkowskiBatchNorm(out_channels), + ME.MinkowskiELU(), + ME.MinkowskiConvolution( + out_channels, out_channels, kernel_size=3, dimension=3), + ME.MinkowskiBatchNorm(out_channels), ME.MinkowskiELU()) + + def _init_layers(self, in_channels: Tuple[int], out_channels: int, + num_reg_outs: int, num_classes: int): + """Initialize layers. + + Args: + in_channels (tuple[int]): Number of channels in input tensors. + out_channels (int): Number of channels in the neck output tensors. + num_reg_outs (int): Number of regression layer channels. + num_classes (int): Number of classes. + """ + # neck layers + self.pruning = ME.MinkowskiPruning() + for i in range(len(in_channels)): + if i > 0: + self.__setattr__( + f'up_block_{i}', + self._make_up_block(in_channels[i], in_channels[i - 1])) + self.__setattr__(f'out_block_{i}', + self._make_block(in_channels[i], out_channels)) + + # head layers + self.conv_center = ME.MinkowskiConvolution( + out_channels, 1, kernel_size=1, dimension=3) + self.conv_reg = ME.MinkowskiConvolution( + out_channels, num_reg_outs, kernel_size=1, dimension=3) + self.conv_cls = ME.MinkowskiConvolution( + out_channels, num_classes, kernel_size=1, bias=True, dimension=3) + self.scales = nn.ModuleList( + [Scale(1.) for _ in range(len(in_channels))]) + + def init_weights(self): + """Initialize weights.""" + nn.init.normal_(self.conv_center.kernel, std=.01) + nn.init.normal_(self.conv_reg.kernel, std=.01) + nn.init.normal_(self.conv_cls.kernel, std=.01) + nn.init.constant_(self.conv_cls.bias, bias_init_with_prob(.01)) + + def forward(self, x: List[Tensor]) -> Tuple[List[Tensor], ...]: + """Forward pass. + + Args: + x (list[Tensor]): Features from the backbone. + + Returns: + Tuple[List[Tensor], ...]: Predictions of the head. + """ + center_preds, bbox_preds, cls_preds, points = [], [], [], [] + inputs = x + x = inputs[-1] + prune_score = None + for i in range(len(inputs) - 1, -1, -1): + if i < len(inputs) - 1: + x = self.__getattr__(f'up_block_{i + 1}')(x) + x = inputs[i] + x + x = self._prune(x, prune_score) + + out = self.__getattr__(f'out_block_{i}')(x) + center_pred, bbox_pred, cls_pred, point, prune_score = \ + self._forward_single(out, self.scales[i]) + center_preds.append(center_pred) + bbox_preds.append(bbox_pred) + cls_preds.append(cls_pred) + points.append(point) + return center_preds[::-1], bbox_preds[::-1], cls_preds[::-1], \ + points[::-1] + + def _prune(self, x: SparseTensor, scores: SparseTensor) -> SparseTensor: + """Prunes the tensor by score thresholding. + + Args: + x (SparseTensor): Tensor to be pruned. + scores (SparseTensor): Scores for thresholding. + + Returns: + SparseTensor: Pruned tensor. 
+ """ + with torch.no_grad(): + coordinates = x.C.float() + interpolated_scores = scores.features_at_coordinates(coordinates) + prune_mask = interpolated_scores.new_zeros( + (len(interpolated_scores)), dtype=torch.bool) + for permutation in x.decomposition_permutations: + score = interpolated_scores[permutation] + mask = score.new_zeros((len(score)), dtype=torch.bool) + topk = min(len(score), self.pts_prune_threshold) + ids = torch.topk(score.squeeze(1), topk, sorted=False).indices + mask[ids] = True + prune_mask[permutation[mask]] = True + x = self.pruning(x, prune_mask) + return x + + def _forward_single(self, x: SparseTensor, + scale: Scale) -> Tuple[Tensor, ...]: + """Forward pass per level. + + Args: + x (SparseTensor): Per level neck output tensor. + scale (mmcv.cnn.Scale): Per level multiplication weight. + + Returns: + tuple[Tensor]: Per level head predictions. + """ + center_pred = self.conv_center(x).features + scores = self.conv_cls(x) + cls_pred = scores.features + prune_scores = ME.SparseTensor( + scores.features.max(dim=1, keepdim=True).values, + coordinate_map_key=scores.coordinate_map_key, + coordinate_manager=scores.coordinate_manager) + reg_final = self.conv_reg(x).features + reg_distance = torch.exp(scale(reg_final[:, :6])) + reg_angle = reg_final[:, 6:] + bbox_pred = torch.cat((reg_distance, reg_angle), dim=1) + + center_preds, bbox_preds, cls_preds, points = [], [], [], [] + for permutation in x.decomposition_permutations: + center_preds.append(center_pred[permutation]) + bbox_preds.append(bbox_pred[permutation]) + cls_preds.append(cls_pred[permutation]) + + points = x.decomposed_coordinates + for i in range(len(points)): + points[i] = points[i] * self.voxel_size + + return center_preds, bbox_preds, cls_preds, points, prune_scores + + def _loss_by_feat_single(self, center_preds: List[Tensor], + bbox_preds: List[Tensor], cls_preds: List[Tensor], + points: List[Tensor], + gt_bboxes: BaseInstance3DBoxes, gt_labels: Tensor, + input_meta: dict) -> Tuple[Tensor, ...]: + """Loss function of single sample. + + Args: + center_preds (list[Tensor]): Centerness predictions for all levels. + bbox_preds (list[Tensor]): Bbox predictions for all levels. + cls_preds (list[Tensor]): Classification predictions for all + levels. + points (list[Tensor]): Final location coordinates for all levels. + gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes. + gt_labels (Tensor): Ground truth labels. + input_meta (dict): Scene meta info. + + Returns: + tuple[Tensor, ...]: Centerness, bbox, and classification loss + values. + """ + center_targets, bbox_targets, cls_targets = self.get_targets( + points, gt_bboxes, gt_labels) + + center_preds = torch.cat(center_preds) + bbox_preds = torch.cat(bbox_preds) + cls_preds = torch.cat(cls_preds) + points = torch.cat(points) + + # cls loss + pos_inds = torch.nonzero(cls_targets >= 0).squeeze(1) + n_pos = points.new_tensor(len(pos_inds)) + n_pos = max(reduce_mean(n_pos), 1.) 
+ cls_loss = self.cls_loss(cls_preds, cls_targets, avg_factor=n_pos) + + # bbox and centerness losses + pos_center_preds = center_preds[pos_inds] + pos_bbox_preds = bbox_preds[pos_inds] + pos_center_targets = center_targets[pos_inds].unsqueeze(1) + pos_bbox_targets = bbox_targets[pos_inds] + # reduce_mean is outside if / else block to prevent deadlock + center_denorm = max( + reduce_mean(pos_center_targets.sum().detach()), 1e-6) + if len(pos_inds) > 0: + pos_points = points[pos_inds] + center_loss = self.center_loss( + pos_center_preds, pos_center_targets, avg_factor=n_pos) + + bbox_loss = self.bbox_loss( + self._bbox_to_loss( + self._bbox_pred_to_bbox(pos_points, pos_bbox_preds)), + self._bbox_to_loss(pos_bbox_targets), + weight=pos_center_targets.squeeze(1), + avg_factor=center_denorm) + else: + center_loss = pos_center_preds.sum() + bbox_loss = pos_bbox_preds.sum() + return center_loss, bbox_loss, cls_loss + + def loss_by_feat(self, + center_preds: List[List[Tensor]], + bbox_preds: List[List[Tensor]], + cls_preds: List[List[Tensor]], + points: List[List[Tensor]], + batch_gt_instances_3d: InstanceList, + batch_input_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None, + **kwargs) -> dict: + """Loss function about feature. + + Args: + center_preds (list[list[Tensor]]): Centerness predictions for + all scenes. The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + bbox_preds (list[list[Tensor]]): Bbox predictions for all scenes. + The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + cls_preds (list[list[Tensor]]): Classification predictions for all + scenes. The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + points (list[list[Tensor]]): Final location coordinates for all + scenes. The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、` + `labels_3d``、``depths``、``centers_2d`` and attributes. + batch_input_metas (list[dict]): Meta information of each input, + e.g., image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict: Centerness, bbox, and classification losses. + """ + center_losses, bbox_losses, cls_losses = [], [], [] + for i in range(len(batch_input_metas)): + center_loss, bbox_loss, cls_loss = self._loss_by_feat_single( + center_preds=[x[i] for x in center_preds], + bbox_preds=[x[i] for x in bbox_preds], + cls_preds=[x[i] for x in cls_preds], + points=[x[i] for x in points], + input_meta=batch_input_metas[i], + gt_bboxes=batch_gt_instances_3d[i].bboxes_3d, + gt_labels=batch_gt_instances_3d[i].labels_3d) + center_losses.append(center_loss) + bbox_losses.append(bbox_loss) + cls_losses.append(cls_loss) + return dict( + center_loss=torch.mean(torch.stack(center_losses)), + bbox_loss=torch.mean(torch.stack(bbox_losses)), + cls_loss=torch.mean(torch.stack(cls_losses))) + + def _predict_by_feat_single(self, center_preds: List[Tensor], + bbox_preds: List[Tensor], + cls_preds: List[Tensor], points: List[Tensor], + input_meta: dict) -> InstanceData: + """Generate boxes for single sample. 
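+
+ Scores are computed as sigmoid(cls_pred) * sigmoid(center_pred),
+ the top ``nms_pre`` candidates per level are kept, and multi-class
+ NMS is applied to the concatenated levels.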
+ + Args: + center_preds (list[Tensor]): Centerness predictions for all levels. + bbox_preds (list[Tensor]): Bbox predictions for all levels. + cls_preds (list[Tensor]): Classification predictions for all + levels. + points (list[Tensor]): Final location coordinates for all levels. + input_meta (dict): Scene meta info. + + Returns: + InstanceData: Predicted bounding boxes, scores and labels. + """ + mlvl_bboxes, mlvl_scores = [], [] + for center_pred, bbox_pred, cls_pred, point in zip( + center_preds, bbox_preds, cls_preds, points): + scores = cls_pred.sigmoid() * center_pred.sigmoid() + max_scores, _ = scores.max(dim=1) + + if len(scores) > self.test_cfg.nms_pre > 0: + _, ids = max_scores.topk(self.test_cfg.nms_pre) + bbox_pred = bbox_pred[ids] + scores = scores[ids] + point = point[ids] + + bboxes = self._bbox_pred_to_bbox(point, bbox_pred) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + + bboxes = torch.cat(mlvl_bboxes) + scores = torch.cat(mlvl_scores) + bboxes, scores, labels = self._single_scene_multiclass_nms( + bboxes, scores, input_meta) + + bboxes = input_meta['box_type_3d']( + bboxes, + box_dim=bboxes.shape[1], + with_yaw=bboxes.shape[1] == 7, + origin=(.5, .5, .5)) + + results = InstanceData() + results.bboxes_3d = bboxes + results.scores_3d = scores + results.labels_3d = labels + return results + + def predict_by_feat(self, center_preds: List[List[Tensor]], + bbox_preds: List[List[Tensor]], cls_preds, + points: List[List[Tensor]], + batch_input_metas: List[dict], + **kwargs) -> List[InstanceData]: + """Generate boxes for all scenes. + + Args: + center_preds (list[list[Tensor]]): Centerness predictions for + all scenes. + bbox_preds (list[list[Tensor]]): Bbox predictions for all scenes. + cls_preds (list[list[Tensor]]): Classification predictions for all + scenes. + points (list[list[Tensor]]): Final location coordinates for all + scenes. + batch_input_metas (list[dict]): Meta infos for all scenes. + + Returns: + list[InstanceData]: Predicted bboxes, scores, and labels for + all scenes. + """ + results = [] + for i in range(len(batch_input_metas)): + result = self._predict_by_feat_single( + center_preds=[x[i] for x in center_preds], + bbox_preds=[x[i] for x in bbox_preds], + cls_preds=[x[i] for x in cls_preds], + points=[x[i] for x in points], + input_meta=batch_input_metas[i]) + results.append(result) + return results + + @staticmethod + def _bbox_to_loss(bbox: Tensor) -> Tensor: + """Transform box to the axis-aligned or rotated iou loss format. + + Args: + bbox (Tensor): 3D box of shape (N, 6) or (N, 7). + + Returns: + Tensor: Transformed 3D box of shape (N, 6) or (N, 7). + """ + # rotated iou loss accepts (x, y, z, w, h, l, heading) + if bbox.shape[-1] != 6: + return bbox + + # axis-aligned case: x, y, z, w, h, l -> x1, y1, z1, x2, y2, z2 + return torch.stack( + (bbox[..., 0] - bbox[..., 3] / 2, bbox[..., 1] - bbox[..., 4] / 2, + bbox[..., 2] - bbox[..., 5] / 2, bbox[..., 0] + bbox[..., 3] / 2, + bbox[..., 1] + bbox[..., 4] / 2, bbox[..., 2] + bbox[..., 5] / 2), + dim=-1) + + @staticmethod + def _bbox_pred_to_bbox(points: Tensor, bbox_pred: Tensor) -> Tensor: + """Transform predicted bbox parameters to bbox. + + Args: + points (Tensor): Final locations of shape (N, 3) + bbox_pred (Tensor): Predicted bbox parameters of shape (N, 6) + or (N, 8). + + Returns: + Tensor: Transformed 3D box of shape (N, 6) or (N, 7). 
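The axis-aligned branch of `_bbox_to_loss` above rewrites a `(cx, cy, cz, w, h, l)` box as the corner pair `(x1, y1, z1, x2, y2, z2)` expected by an axis-aligned IoU loss. A quick numeric check on a toy box:

import torch

def center_size_to_corners(bbox: torch.Tensor) -> torch.Tensor:
    return torch.stack(
        (bbox[..., 0] - bbox[..., 3] / 2, bbox[..., 1] - bbox[..., 4] / 2,
         bbox[..., 2] - bbox[..., 5] / 2, bbox[..., 0] + bbox[..., 3] / 2,
         bbox[..., 1] + bbox[..., 4] / 2, bbox[..., 2] + bbox[..., 5] / 2),
        dim=-1)

box = torch.tensor([[1.0, 2.0, 0.5, 4.0, 2.0, 1.0]])  # cx, cy, cz, w, h, l
print(center_size_to_corners(box))
# tensor([[-1.,  1.,  0.,  3.,  3.,  1.]])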
+ """ + if bbox_pred.shape[0] == 0: + return bbox_pred + + x_center = points[:, 0] + (bbox_pred[:, 1] - bbox_pred[:, 0]) / 2 + y_center = points[:, 1] + (bbox_pred[:, 3] - bbox_pred[:, 2]) / 2 + z_center = points[:, 2] + (bbox_pred[:, 5] - bbox_pred[:, 4]) / 2 + + # dx_min, dx_max, dy_min, dy_max, dz_min, dz_max -> x, y, z, w, l, h + base_bbox = torch.stack([ + x_center, + y_center, + z_center, + bbox_pred[:, 0] + bbox_pred[:, 1], + bbox_pred[:, 2] + bbox_pred[:, 3], + bbox_pred[:, 4] + bbox_pred[:, 5], + ], -1) + + # axis-aligned case + if bbox_pred.shape[1] == 6: + return base_bbox + + # rotated case: ..., sin(2a)ln(q), cos(2a)ln(q) + scale = bbox_pred[:, 0] + bbox_pred[:, 1] + \ + bbox_pred[:, 2] + bbox_pred[:, 3] + q = torch.exp( + torch.sqrt( + torch.pow(bbox_pred[:, 6], 2) + torch.pow(bbox_pred[:, 7], 2))) + alpha = 0.5 * torch.atan2(bbox_pred[:, 6], bbox_pred[:, 7]) + return torch.stack( + (x_center, y_center, z_center, scale / (1 + q), scale / + (1 + q) * q, bbox_pred[:, 5] + bbox_pred[:, 4], alpha), + dim=-1) + + @staticmethod + def _get_face_distances(points: Tensor, boxes: Tensor) -> Tensor: + """Calculate distances from point to box faces. + + Args: + points (Tensor): Final locations of shape (N_points, N_boxes, 3). + boxes (Tensor): 3D boxes of shape (N_points, N_boxes, 7) + + Returns: + Tensor: Face distances of shape (N_points, N_boxes, 6), + (dx_min, dx_max, dy_min, dy_max, dz_min, dz_max). + """ + shift = torch.stack( + (points[..., 0] - boxes[..., 0], points[..., 1] - boxes[..., 1], + points[..., 2] - boxes[..., 2]), + dim=-1).permute(1, 0, 2) + shift = rotation_3d_in_axis( + shift, -boxes[0, :, 6], axis=2).permute(1, 0, 2) + centers = boxes[..., :3] + shift + dx_min = centers[..., 0] - boxes[..., 0] + boxes[..., 3] / 2 + dx_max = boxes[..., 0] + boxes[..., 3] / 2 - centers[..., 0] + dy_min = centers[..., 1] - boxes[..., 1] + boxes[..., 4] / 2 + dy_max = boxes[..., 1] + boxes[..., 4] / 2 - centers[..., 1] + dz_min = centers[..., 2] - boxes[..., 2] + boxes[..., 5] / 2 + dz_max = boxes[..., 2] + boxes[..., 5] / 2 - centers[..., 2] + return torch.stack((dx_min, dx_max, dy_min, dy_max, dz_min, dz_max), + dim=-1) + + @staticmethod + def _get_centerness(face_distances: Tensor) -> Tensor: + """Compute point centerness w.r.t containing box. + + Args: + face_distances (Tensor): Face distances of shape (B, N, 6), + (dx_min, dx_max, dy_min, dy_max, dz_min, dz_max). + + Returns: + Tensor: Centerness of shape (B, N). + """ + x_dims = face_distances[..., [0, 1]] + y_dims = face_distances[..., [2, 3]] + z_dims = face_distances[..., [4, 5]] + centerness_targets = x_dims.min(dim=-1)[0] / x_dims.max(dim=-1)[0] * \ + y_dims.min(dim=-1)[0] / y_dims.max(dim=-1)[0] * \ + z_dims.min(dim=-1)[0] / z_dims.max(dim=-1)[0] + return torch.sqrt(centerness_targets) + + @torch.no_grad() + def get_targets(self, points: Tensor, gt_bboxes: BaseInstance3DBoxes, + gt_labels: Tensor) -> Tuple[Tensor, ...]: + """Compute targets for final locations for a single scene. + + Args: + points (list[Tensor]): Final locations for all levels. + gt_bboxes (BaseInstance3DBoxes): Ground truth boxes. + gt_labels (Tensor): Ground truth labels. + + Returns: + tuple[Tensor, ...]: Centerness, bbox and classification + targets for all locations. 
+ """ + float_max = points[0].new_tensor(1e8) + n_levels = len(points) + levels = torch.cat([ + points[i].new_tensor(i).expand(len(points[i])) + for i in range(len(points)) + ]) + points = torch.cat(points) + gt_bboxes = gt_bboxes.to(points.device) + n_points = len(points) + n_boxes = len(gt_bboxes) + volumes = gt_bboxes.volume.unsqueeze(0).expand(n_points, n_boxes) + + # condition 1: point inside box + boxes = torch.cat((gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), + dim=1) + boxes = boxes.expand(n_points, n_boxes, 7) + points = points.unsqueeze(1).expand(n_points, n_boxes, 3) + face_distances = self._get_face_distances(points, boxes) + inside_box_condition = face_distances.min(dim=-1).values > 0 + + # condition 2: positive points per level >= limit + # calculate positive points per scale + n_pos_points_per_level = [] + for i in range(n_levels): + n_pos_points_per_level.append( + torch.sum(inside_box_condition[levels == i], dim=0)) + # find best level + n_pos_points_per_level = torch.stack(n_pos_points_per_level, dim=0) + lower_limit_mask = n_pos_points_per_level < self.pts_assign_threshold + lower_index = torch.argmax(lower_limit_mask.int(), dim=0) - 1 + lower_index = torch.where(lower_index < 0, 0, lower_index) + all_upper_limit_mask = torch.all( + torch.logical_not(lower_limit_mask), dim=0) + best_level = torch.where(all_upper_limit_mask, n_levels - 1, + lower_index) + # keep only points with best level + best_level = best_level.expand(n_points, n_boxes) + levels = torch.unsqueeze(levels, 1).expand(n_points, n_boxes) + level_condition = best_level == levels + + # condition 3: limit topk points per box by centerness + centerness = self._get_centerness(face_distances) + centerness = torch.where(inside_box_condition, centerness, + torch.ones_like(centerness) * -1) + centerness = torch.where(level_condition, centerness, + torch.ones_like(centerness) * -1) + top_centerness = torch.topk( + centerness, + min(self.pts_center_threshold + 1, len(centerness)), + dim=0).values[-1] + topk_condition = centerness > top_centerness.unsqueeze(0) + + # condition 4: min volume box per point + volumes = torch.where(inside_box_condition, volumes, float_max) + volumes = torch.where(level_condition, volumes, float_max) + volumes = torch.where(topk_condition, volumes, float_max) + min_volumes, min_inds = volumes.min(dim=1) + + center_targets = centerness[torch.arange(n_points), min_inds] + bbox_targets = boxes[torch.arange(n_points), min_inds] + if not gt_bboxes.with_yaw: + bbox_targets = bbox_targets[:, :-1] + cls_targets = gt_labels[min_inds] + cls_targets = torch.where(min_volumes == float_max, -1, cls_targets) + return center_targets, bbox_targets, cls_targets + + def _single_scene_multiclass_nms(self, bboxes: Tensor, scores: Tensor, + input_meta: dict) -> Tuple[Tensor, ...]: + """Multi-class nms for a single scene. + + Args: + bboxes (Tensor): Predicted boxes of shape (N_boxes, 6) or + (N_boxes, 7). + scores (Tensor): Predicted scores of shape (N_boxes, N_classes). + input_meta (dict): Scene meta data. + + Returns: + tuple[Tensor, ...]: Predicted bboxes, scores and labels. 
+ """ + num_classes = scores.shape[1] + with_yaw = bboxes.shape[1] == 7 + nms_bboxes, nms_scores, nms_labels = [], [], [] + for i in range(num_classes): + ids = scores[:, i] > self.test_cfg.score_thr + if not ids.any(): + continue + + class_scores = scores[ids, i] + class_bboxes = bboxes[ids] + if with_yaw: + nms_function = nms3d + else: + class_bboxes = torch.cat( + (class_bboxes, torch.zeros_like(class_bboxes[:, :1])), + dim=1) + nms_function = nms3d_normal + + nms_ids = nms_function(class_bboxes, class_scores, + self.test_cfg.iou_thr) + nms_bboxes.append(class_bboxes[nms_ids]) + nms_scores.append(class_scores[nms_ids]) + nms_labels.append( + bboxes.new_full( + class_scores[nms_ids].shape, i, dtype=torch.long)) + + if len(nms_bboxes): + nms_bboxes = torch.cat(nms_bboxes, dim=0) + nms_scores = torch.cat(nms_scores, dim=0) + nms_labels = torch.cat(nms_labels, dim=0) + else: + nms_bboxes = bboxes.new_zeros((0, bboxes.shape[1])) + nms_scores = bboxes.new_zeros((0, )) + nms_labels = bboxes.new_zeros((0, )) + + if not with_yaw: + nms_bboxes = nms_bboxes[:, :6] + + return nms_bboxes, nms_scores, nms_labels diff --git a/mmdet3d/models/dense_heads/fcos_mono3d_head.py b/mmdet3d/models/dense_heads/fcos_mono3d_head.py new file mode 100755 index 0000000..1816431 --- /dev/null +++ b/mmdet3d/models/dense_heads/fcos_mono3d_head.py @@ -0,0 +1,958 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Tuple + +import numpy as np +import torch +from mmcv.cnn import Scale +from mmdet.models.utils import multi_apply, select_single_mlvl +from mmengine.model import normal_init +from mmengine.structures import InstanceData +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.layers import box3d_multiclass_nms +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures import limit_period, points_img2cam, xywhr2xyxyr +from mmdet3d.utils import (ConfigType, InstanceList, OptConfigType, + OptInstanceList) +from .anchor_free_mono3d_head import AnchorFreeMono3DHead + +RangeType = Sequence[Tuple[int, int]] + +INF = 1e8 + + +@MODELS.register_module() +class FCOSMono3DHead(AnchorFreeMono3DHead): + """Anchor-free head used in FCOS3D. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple + level points. + center_sampling (bool): If true, use center sampling. Default: True. + center_sample_radius (float): Radius of center sampling. Default: 1.5. + norm_on_bbox (bool): If true, normalize the regression targets + with FPN strides. Default: True. + centerness_on_reg (bool): If true, position centerness on the + regress branch. Please refer to + https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. + Default: True. + centerness_alpha (float): Parameter used to adjust the intensity + attenuation from the center to the periphery. Default: 2.5. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + loss_dir (:obj:`ConfigDict` or dict): Config of direction classification loss. + loss_attr (:obj:`ConfigDict` or dict): Config of attribute classification loss. + loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss. + norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and config norm layer. 
+ Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). + centerness_branch (tuple[int]): Channels for centerness branch. + Default: (64, ). + init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ + dict]): Initialization config dict. + """ # noqa: E501 + + def __init__(self, + regress_ranges: RangeType = ((-1, 48), (48, 96), (96, 192), + (192, 384), (384, INF)), + center_sampling: bool = True, + center_sample_radius: float = 1.5, + norm_on_bbox: bool = True, + centerness_on_reg: bool = True, + centerness_alpha: float = 2.5, + loss_cls: ConfigType = dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + loss_weight=1.0), + loss_dir: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_attr: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_centerness: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + bbox_coder: ConfigType = dict( + type='FCOS3DBBoxCoder', code_size=9), + norm_cfg: ConfigType = dict( + type='GN', num_groups=32, requires_grad=True), + centerness_branch: Tuple[int] = (64, ), + init_cfg: OptConfigType = None, + **kwargs) -> None: + self.regress_ranges = regress_ranges + self.center_sampling = center_sampling + self.center_sample_radius = center_sample_radius + self.norm_on_bbox = norm_on_bbox + self.centerness_on_reg = centerness_on_reg + self.centerness_alpha = centerness_alpha + self.centerness_branch = centerness_branch + super().__init__( + loss_cls=loss_cls, + loss_bbox=loss_bbox, + loss_dir=loss_dir, + loss_attr=loss_attr, + norm_cfg=norm_cfg, + init_cfg=init_cfg, + **kwargs) + self.loss_centerness = MODELS.build(loss_centerness) + bbox_coder['code_size'] = self.bbox_code_size + self.bbox_coder = TASK_UTILS.build(bbox_coder) + + def _init_layers(self): + """Initialize layers of the head.""" + super()._init_layers() + self.conv_centerness_prev = self._init_branch( + conv_channels=self.centerness_branch, + conv_strides=(1, ) * len(self.centerness_branch)) + self.conv_centerness = nn.Conv2d(self.centerness_branch[-1], 1, 1) + self.scale_dim = 3 # only for offset, depth and size regression + self.scales = nn.ModuleList([ + nn.ModuleList([Scale(1.0) for _ in range(self.scale_dim)]) + for _ in self.strides + ]) + + def init_weights(self): + """Initialize weights of the head. + + We currently still use the customized init_weights because the default + init of DCN triggered by the init_cfg will init conv_offset.weight, + which mistakenly affects the training stability. + """ + super().init_weights() + for m in self.conv_centerness_prev: + if isinstance(m.conv, nn.Conv2d): + normal_init(m.conv, std=0.01) + normal_init(self.conv_centerness, std=0.01) + + def forward( + self, x: Tuple[Tensor] + ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor], + List[Tensor]]: + """Forward features from the upstream network. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. 
+ dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * 2. (bin = 2). + attr_preds (list[Tensor]): Attribute scores for each scale + level, each is a 4D-tensor, the channel number is + num_points * num_attrs. + centernesses (list[Tensor]): Centerness for each scale level, + each is a 4D-tensor, the channel number is num_points * 1. + """ + # Note: we use [:5] to filter feats and only return predictions + return multi_apply(self.forward_single, x, self.scales, + self.strides)[:5] + + def forward_single(self, x: Tensor, scale: Scale, + stride: int) -> Tuple[Tensor, ...]: + """Forward features of a single scale level. + + Args: + x (Tensor): FPN feature maps of the specified stride. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + stride (int): The corresponding stride for feature maps, only + used to normalize the bbox prediction when self.norm_on_bbox + is True. + + Returns: + tuple: scores for each class, bbox and direction class + predictions, centerness predictions of input feature maps. + """ + cls_score, bbox_pred, dir_cls_pred, attr_pred, cls_feat, reg_feat = \ + super().forward_single(x) + + if self.centerness_on_reg: + clone_reg_feat = reg_feat.clone() + for conv_centerness_prev_layer in self.conv_centerness_prev: + clone_reg_feat = conv_centerness_prev_layer(clone_reg_feat) + centerness = self.conv_centerness(clone_reg_feat) + else: + clone_cls_feat = cls_feat.clone() + for conv_centerness_prev_layer in self.conv_centerness_prev: + clone_cls_feat = conv_centerness_prev_layer(clone_cls_feat) + centerness = self.conv_centerness(clone_cls_feat) + + bbox_pred = self.bbox_coder.decode(bbox_pred, scale, stride, + self.training, cls_score) + + return cls_score, bbox_pred, dir_cls_pred, attr_pred, centerness, \ + cls_feat, reg_feat + + @staticmethod + def add_sin_difference(boxes1: Tensor, + boxes2: Tensor) -> Tuple[Tensor, Tensor]: + """Convert the rotation difference to difference in sine function. + + Args: + boxes1 (torch.Tensor): Original Boxes in shape (NxC), where C>=7 + and the 7th dimension is rotation dimension. + boxes2 (torch.Tensor): Target boxes in shape (NxC), where C>=7 and + the 7th dimension is rotation dimension. + + Returns: + tuple[torch.Tensor]: ``boxes1`` and ``boxes2`` whose 7th + dimensions are changed. + """ + rad_pred_encoding = torch.sin(boxes1[..., 6:7]) * torch.cos( + boxes2[..., 6:7]) + rad_tg_encoding = torch.cos(boxes1[..., 6:7]) * torch.sin(boxes2[..., + 6:7]) + boxes1 = torch.cat( + [boxes1[..., :6], rad_pred_encoding, boxes1[..., 7:]], dim=-1) + boxes2 = torch.cat([boxes2[..., :6], rad_tg_encoding, boxes2[..., 7:]], + dim=-1) + return boxes1, boxes2 + + @staticmethod + def get_direction_target(reg_targets: Tensor, + dir_offset: int = 0, + dir_limit_offset: float = 0.0, + num_bins: int = 2, + one_hot: bool = True) -> Tensor: + """Encode direction to 0 ~ num_bins-1. + + Args: + reg_targets (torch.Tensor): Bbox regression targets. + dir_offset (int, optional): Direction offset. Default to 0. + dir_limit_offset (float, optional): Offset to set the direction + range. Default to 0.0. + num_bins (int, optional): Number of bins to divide 2*PI. + Default to 2. + one_hot (bool, optional): Whether to encode as one hot. + Default to True. + + Returns: + torch.Tensor: Encoded direction targets. 
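`add_sin_difference` above encodes the two yaws as `sin(a)cos(b)` and `cos(a)sin(b)` so that a regression loss on their difference effectively penalizes `sin(a - b)`, which stays smooth across the +/- pi wrap-around. Numeric check:

import torch

a = torch.tensor(3.0)   # predicted yaw (rad)
b = torch.tensor(-3.1)  # target yaw (rad)
encoded_pred = torch.sin(a) * torch.cos(b)
encoded_target = torch.cos(a) * torch.sin(b)
print(encoded_pred - encoded_target)  # ~ -0.1822
print(torch.sin(a - b))               # same value: sin(a - b)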
+ """ + rot_gt = reg_targets[..., 6] + offset_rot = limit_period(rot_gt - dir_offset, dir_limit_offset, + 2 * np.pi) + dir_cls_targets = torch.floor(offset_rot / + (2 * np.pi / num_bins)).long() + dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1) + if one_hot: + dir_targets = torch.zeros( + *list(dir_cls_targets.shape), + num_bins, + dtype=reg_targets.dtype, + device=dir_cls_targets.device) + dir_targets.scatter_(dir_cls_targets.unsqueeze(dim=-1).long(), 1.0) + dir_cls_targets = dir_targets + return dir_cls_targets + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + attr_preds: List[Tensor], + centernesses: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_gt_instacnes: InstanceList, + batch_img_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * 2. (bin = 2) + attr_preds (list[Tensor]): Attribute scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_attrs. + centernesses (list[Tensor]): Centerness for each scale level, each + is a 4D-tensor, the channel number is num_points * 1. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、` + `labels_3d``、``depths``、``centers_2d`` and attributes. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes``、``labels``. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + assert len(cls_scores) == len(bbox_preds) == len(centernesses) == len( + attr_preds) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, + bbox_preds[0].device) + labels_3d, bbox_targets_3d, centerness_targets, attr_targets = \ + self.get_targets(all_level_points, batch_gt_instances_3d, + batch_gt_instacnes) + + num_imgs = cls_scores[0].size(0) + # flatten cls_scores, bbox_preds, dir_cls_preds and centerness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(-1, sum(self.group_reg_dims)) + for bbox_pred in bbox_preds + ] + flatten_dir_cls_preds = [ + dir_cls_pred.permute(0, 2, 3, 1).reshape(-1, 2) + for dir_cls_pred in dir_cls_preds + ] + flatten_centerness = [ + centerness.permute(0, 2, 3, 1).reshape(-1) + for centerness in centernesses + ] + flatten_cls_scores = torch.cat(flatten_cls_scores) + flatten_bbox_preds = torch.cat(flatten_bbox_preds) + flatten_dir_cls_preds = torch.cat(flatten_dir_cls_preds) + flatten_centerness = torch.cat(flatten_centerness) + flatten_labels_3d = torch.cat(labels_3d) + flatten_bbox_targets_3d = torch.cat(bbox_targets_3d) + flatten_centerness_targets = torch.cat(centerness_targets) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((flatten_labels_3d >= 0) + & (flatten_labels_3d < bg_class_ind)).nonzero().reshape(-1) + num_pos = len(pos_inds) + + loss_cls = self.loss_cls( + flatten_cls_scores, + flatten_labels_3d, + avg_factor=num_pos + num_imgs) # avoid num_pos is 0 + + pos_bbox_preds = flatten_bbox_preds[pos_inds] + pos_dir_cls_preds = flatten_dir_cls_preds[pos_inds] + pos_centerness = flatten_centerness[pos_inds] + + if self.pred_attrs: + flatten_attr_preds = [ + attr_pred.permute(0, 2, 3, 1).reshape(-1, self.num_attrs) + for attr_pred in attr_preds + ] + flatten_attr_preds = torch.cat(flatten_attr_preds) + flatten_attr_targets = torch.cat(attr_targets) + pos_attr_preds = flatten_attr_preds[pos_inds] + + if num_pos > 0: + pos_bbox_targets_3d = flatten_bbox_targets_3d[pos_inds] + pos_centerness_targets = flatten_centerness_targets[pos_inds] + if self.pred_attrs: + pos_attr_targets = flatten_attr_targets[pos_inds] + bbox_weights = pos_centerness_targets.new_ones( + len(pos_centerness_targets), sum(self.group_reg_dims)) + equal_weights = pos_centerness_targets.new_ones( + pos_centerness_targets.shape) + + code_weight = self.train_cfg.get('code_weight', None) + if code_weight: + assert len(code_weight) == sum(self.group_reg_dims) + bbox_weights = bbox_weights * bbox_weights.new_tensor( + code_weight) + + if self.use_direction_classifier: + pos_dir_cls_targets = self.get_direction_target( + pos_bbox_targets_3d, + self.dir_offset, + self.dir_limit_offset, + one_hot=False) + + if self.diff_rad_by_sin: + pos_bbox_preds, pos_bbox_targets_3d = self.add_sin_difference( + pos_bbox_preds, pos_bbox_targets_3d) + + loss_offset = self.loss_bbox( + pos_bbox_preds[:, :2], + pos_bbox_targets_3d[:, :2], + weight=bbox_weights[:, :2], + avg_factor=equal_weights.sum()) + loss_depth = self.loss_bbox( + pos_bbox_preds[:, 2], + pos_bbox_targets_3d[:, 2], + weight=bbox_weights[:, 2], + avg_factor=equal_weights.sum()) + loss_size = self.loss_bbox( + pos_bbox_preds[:, 3:6], + pos_bbox_targets_3d[:, 3:6], + weight=bbox_weights[:, 3:6], + avg_factor=equal_weights.sum()) + loss_rotsin = 
self.loss_bbox( + pos_bbox_preds[:, 6], + pos_bbox_targets_3d[:, 6], + weight=bbox_weights[:, 6], + avg_factor=equal_weights.sum()) + loss_velo = None + if self.pred_velo: + loss_velo = self.loss_bbox( + pos_bbox_preds[:, 7:9], + pos_bbox_targets_3d[:, 7:9], + weight=bbox_weights[:, 7:9], + avg_factor=equal_weights.sum()) + + loss_centerness = self.loss_centerness(pos_centerness, + pos_centerness_targets) + + # direction classification loss + loss_dir = None + # TODO: add more check for use_direction_classifier + if self.use_direction_classifier: + loss_dir = self.loss_dir( + pos_dir_cls_preds, + pos_dir_cls_targets, + equal_weights, + avg_factor=equal_weights.sum()) + + # attribute classification loss + loss_attr = None + if self.pred_attrs: + loss_attr = self.loss_attr( + pos_attr_preds, + pos_attr_targets, + pos_centerness_targets, + avg_factor=pos_centerness_targets.sum()) + + else: + # need absolute due to possible negative delta x/y + loss_offset = pos_bbox_preds[:, :2].sum() + loss_depth = pos_bbox_preds[:, 2].sum() + loss_size = pos_bbox_preds[:, 3:6].sum() + loss_rotsin = pos_bbox_preds[:, 6].sum() + loss_velo = None + if self.pred_velo: + loss_velo = pos_bbox_preds[:, 7:9].sum() + loss_centerness = pos_centerness.sum() + loss_dir = None + if self.use_direction_classifier: + loss_dir = pos_dir_cls_preds.sum() + loss_attr = None + if self.pred_attrs: + loss_attr = pos_attr_preds.sum() + + loss_dict = dict( + loss_cls=loss_cls, + loss_offset=loss_offset, + loss_depth=loss_depth, + loss_size=loss_size, + loss_rotsin=loss_rotsin, + loss_centerness=loss_centerness) + + if loss_velo is not None: + loss_dict['loss_velo'] = loss_velo + + if loss_dir is not None: + loss_dict['loss_dir'] = loss_dir + + if loss_attr is not None: + loss_dict['loss_attr'] = loss_attr + + return loss_dict + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + attr_preds: List[Tensor], + centernesses: List[Tensor], + batch_img_metas: Optional[List[dict]] = None, + cfg: OptConfigType = None, + rescale: bool = False) -> InstanceList: + """Transform network output for a batch into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_points * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_points * 4, H, W) + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * 2. (bin = 2) + attr_preds (list[Tensor]): Attribute scores for each scale level + Has shape (N, num_points * num_attrs, H, W) + centernesses (list[Tensor]): Centerness for each scale level with + shape (N, num_points * 1, H, W) + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Object detection results of each image + after the post process. Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C), where C >= 7. 
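The regression loss above is applied group-wise: the code-size-9 prediction is sliced into offset, depth, size, rotation-sine and (optionally) velocity parts, each normalized by the number of positives. A plain-L1 stand-in on made-up positives (the slice layout mirrors the head; everything else is illustrative):

import torch

pos_preds = torch.rand(5, 9)    # (dx, dy, depth, sizes..., rot_sin, vx, vy)
pos_targets = torch.rand(5, 9)
equal_weights = torch.ones(5)

def l1(pred, target):
    return (pred - target).abs().sum() / equal_weights.sum()

losses = dict(
    loss_offset=l1(pos_preds[:, :2], pos_targets[:, :2]),
    loss_depth=l1(pos_preds[:, 2], pos_targets[:, 2]),
    loss_size=l1(pos_preds[:, 3:6], pos_targets[:, 3:6]),
    loss_rotsin=l1(pos_preds[:, 6], pos_targets[:, 6]),
    loss_velo=l1(pos_preds[:, 7:9], pos_targets[:, 7:9]))
print({k: round(v.item(), 3) for k, v in losses.items()})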
+ """ + assert len(cls_scores) == len(bbox_preds) == len(dir_cls_preds) == \ + len(centernesses) == len(attr_preds) + num_levels = len(cls_scores) + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + # TODO: refactor using prior_generator + mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, + bbox_preds[0].device) + result_list = [] + for img_id in range(len(batch_img_metas)): + img_meta = batch_img_metas[img_id] + cls_score_list = select_single_mlvl(cls_scores, img_id) + bbox_pred_list = select_single_mlvl(bbox_preds, img_id) + + if self.use_direction_classifier: + dir_cls_pred_list = select_single_mlvl(dir_cls_preds, img_id) + else: + dir_cls_pred_list = [ + cls_scores[i][img_id].new_full( + [2, *cls_scores[i][img_id].shape[1:]], 0).detach() + for i in range(num_levels) + ] + + if self.pred_attrs: + attr_pred_list = select_single_mlvl(attr_preds, img_id) + else: + attr_pred_list = [ + cls_scores[i][img_id].new_full( + [self.num_attrs, *cls_scores[i][img_id].shape[1:]], + self.attr_background_label).detach() + for i in range(num_levels) + ] + + centerness_pred_list = select_single_mlvl(centernesses, img_id) + results = self._predict_by_feat_single( + cls_score_list=cls_score_list, + bbox_pred_list=bbox_pred_list, + dir_cls_pred_list=dir_cls_pred_list, + attr_pred_list=attr_pred_list, + centerness_pred_list=centerness_pred_list, + mlvl_points=mlvl_points, + img_meta=img_meta, + cfg=cfg, + rescale=rescale) + result_list.append(results) + result_list_2d = None + return result_list, result_list_2d + + def _predict_by_feat_single(self, + cls_score_list: List[Tensor], + bbox_pred_list: List[Tensor], + dir_cls_pred_list: List[Tensor], + attr_pred_list: List[Tensor], + centerness_pred_list: List[Tensor], + mlvl_points: Tensor, + img_meta: dict, + cfg: ConfigType, + rescale: bool = False) -> InstanceData: + """Transform outputs for a single batch item into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores for a single scale level + Has shape (num_points * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for a single scale + level with shape (num_points * bbox_code_size, H, W). + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on a single scale level with shape + (num_points * 2, H, W) + attr_preds (list[Tensor]): Attribute scores for each scale level + Has shape (N, num_points * num_attrs, H, W) + centernesses (list[Tensor]): Centerness for a single scale level + with shape (num_points, H, W). + mlvl_points (list[Tensor]): Box reference for a single scale level + with shape (num_total_points, 2). + img_meta (dict): Metadata of input image. + cfg (mmengine.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + + Returns: + :obj:`InstanceData`: 3D Detection results of each image + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C), where C >= 7. 
+ """ + view = np.array(img_meta['cam2img']) + scale_factor = img_meta['scale_factor'] + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_points) + mlvl_centers_2d = [] + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_dir_scores = [] + mlvl_attr_scores = [] + mlvl_centerness = [] + + for cls_score, bbox_pred, dir_cls_pred, attr_pred, centerness, \ + points in zip(cls_score_list, bbox_pred_list, + dir_cls_pred_list, attr_pred_list, + centerness_pred_list, mlvl_points): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + scores = cls_score.permute(1, 2, 0).reshape( + -1, self.cls_out_channels).sigmoid() + dir_cls_pred = dir_cls_pred.permute(1, 2, 0).reshape(-1, 2) + dir_cls_score = torch.max(dir_cls_pred, dim=-1)[1] + attr_pred = attr_pred.permute(1, 2, 0).reshape(-1, self.num_attrs) + attr_score = torch.max(attr_pred, dim=-1)[1] + centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid() + + bbox_pred = bbox_pred.permute(1, 2, + 0).reshape(-1, + sum(self.group_reg_dims)) + bbox_pred = bbox_pred[:, :self.bbox_code_size] + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + max_scores, _ = (scores * centerness[:, None]).max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + points = points[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + dir_cls_pred = dir_cls_pred[topk_inds, :] + centerness = centerness[topk_inds] + dir_cls_score = dir_cls_score[topk_inds] + attr_score = attr_score[topk_inds] + # change the offset to actual center predictions + bbox_pred[:, :2] = points - bbox_pred[:, :2] + if rescale: + bbox_pred[:, :2] /= bbox_pred[:, :2].new_tensor(scale_factor) + pred_center2d = bbox_pred[:, :3].clone() + bbox_pred[:, :3] = points_img2cam(bbox_pred[:, :3], view) + mlvl_centers_2d.append(pred_center2d) + mlvl_bboxes.append(bbox_pred) + mlvl_scores.append(scores) + mlvl_dir_scores.append(dir_cls_score) + mlvl_attr_scores.append(attr_score) + mlvl_centerness.append(centerness) + + mlvl_centers_2d = torch.cat(mlvl_centers_2d) + mlvl_bboxes = torch.cat(mlvl_bboxes) + mlvl_dir_scores = torch.cat(mlvl_dir_scores) + + # change local yaw to global yaw for 3D nms + cam2img = mlvl_centers_2d.new_zeros((4, 4)) + cam2img[:view.shape[0], :view.shape[1]] = \ + mlvl_centers_2d.new_tensor(view) + mlvl_bboxes = self.bbox_coder.decode_yaw(mlvl_bboxes, mlvl_centers_2d, + mlvl_dir_scores, + self.dir_offset, cam2img) + + mlvl_bboxes_for_nms = xywhr2xyxyr(img_meta['box_type_3d']( + mlvl_bboxes, box_dim=self.bbox_code_size, + origin=(0.5, 0.5, 0.5)).bev) + + mlvl_scores = torch.cat(mlvl_scores) + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 + # BG cat_id: num_class + mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) + mlvl_attr_scores = torch.cat(mlvl_attr_scores) + mlvl_centerness = torch.cat(mlvl_centerness) + # no scale_factors in box3d_multiclass_nms + # Then we multiply it from outside + mlvl_nms_scores = mlvl_scores * mlvl_centerness[:, None] + results = box3d_multiclass_nms(mlvl_bboxes, mlvl_bboxes_for_nms, + mlvl_nms_scores, cfg.score_thr, + cfg.max_per_img, cfg, mlvl_dir_scores, + mlvl_attr_scores) + bboxes, scores, labels, dir_scores, attrs = results + attrs = attrs.to(labels.dtype) # change data type to int + bboxes = img_meta['box_type_3d']( + bboxes, box_dim=self.bbox_code_size, origin=(0.5, 0.5, 0.5)) + # Note that the predictions use origin (0.5, 0.5, 0.5) + # Due to the ground 
truth centers_2d are the gravity center of objects + # v0.10.0 fix inplace operation to the input tensor of cam_box3d + # So here we also need to add origin=(0.5, 0.5, 0.5) + + results = InstanceData() + results.bboxes_3d = bboxes + results.scores_3d = scores + results.labels_3d = labels + if self.pred_attrs and attrs is not None: + results.attr_labels = attrs + + return results + + def _get_points_single(self, + featmap_size: Tuple[int], + stride: int, + dtype: torch.dtype, + device: torch.device, + flatten: bool = False) -> Tensor: + """Get points of a single scale level. + + Args: + featmap_size (tuple[int]): Single scale level feature map size. + stride (int): Downsample factor of the feature map. + dtype (torch.dtype): Type of points. + device (torch.device): Device of points. + flatten (bool): Whether to flatten the tensor. + Defaults to False. + + Returns: + Tensor: points of each image. + """ + y, x = super()._get_points_single(featmap_size, stride, dtype, device) + points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), + dim=-1) + stride // 2 + return points + + def get_targets( + self, + points: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_gt_instances: InstanceList, + ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]: + """Compute regression, classification and centerss targets for points + in multiple images. + + Args: + points (list[Tensor]): Points of each fpn level, each has shape + (num_points, 2). + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、 + ``labels_3d``、``depths``、``centers_2d`` and attributes. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes``、``labels``. + + Returns: + tuple: Targets of each level. + + - concat_lvl_labels_3d (list[Tensor]): 3D Labels of each level. + - concat_lvl_bbox_targets_3d (list[Tensor]): 3D BBox targets of + each level. + - concat_lvl_centerness_targets (list[Tensor]): Centerness targets + of each level. + - concat_lvl_attr_targets (list[Tensor]): Attribute targets of + each level. 
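The decoding above lifts the predicted `(u, v, depth)` center into camera space with `points_img2cam` before yaw decoding and 3D NMS. A simplified sketch of that back-projection for an ideal 3x3 pinhole intrinsic (no skew or distortion; the real helper also accepts 4x4 matrices), using an invented camera matrix:

import torch

def img2cam(points_uvd: torch.Tensor, cam2img: torch.Tensor) -> torch.Tensor:
    fx, fy = cam2img[0, 0], cam2img[1, 1]
    cx, cy = cam2img[0, 2], cam2img[1, 2]
    u, v, d = points_uvd.unbind(-1)
    return torch.stack(((u - cx) * d / fx, (v - cy) * d / fy, d), dim=-1)

cam2img = torch.tensor([[1000., 0., 800.],
                        [0., 1000., 450.],
                        [0., 0., 1.]])
uvd = torch.tensor([[900., 500., 20.]])  # pixel (900, 500) at 20 m depth
print(img2cam(uvd, cam2img))
# tensor([[ 2.,  1., 20.]])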
+ """ + assert len(points) == len(self.regress_ranges) + num_levels = len(points) + # expand regress ranges to align with points + expanded_regress_ranges = [ + points[i].new_tensor(self.regress_ranges[i])[None].expand_as( + points[i]) for i in range(num_levels) + ] + # concat all levels points and regress ranges + concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) + concat_points = torch.cat(points, dim=0) + + # the number of points per img, per lvl + num_points = [center.size(0) for center in points] + + if 'attr_labels' not in batch_gt_instances_3d[0]: + for gt_instances_3d in batch_gt_instances_3d: + gt_instances_3d.attr_labels = \ + gt_instances_3d.labels_3d.new_full( + gt_instances_3d.labels_3d.shape, + self.attr_background_label + ) + + # get labels and bbox_targets of each image + _, _, labels_3d_list, bbox_targets_3d_list, centerness_targets_list, \ + attr_targets_list = multi_apply( + self._get_target_single, + batch_gt_instances_3d, + batch_gt_instances, + points=concat_points, + regress_ranges=concat_regress_ranges, + num_points_per_lvl=num_points) + + # split to per img, per level + labels_3d_list = [ + labels_3d.split(num_points, 0) for labels_3d in labels_3d_list + ] + bbox_targets_3d_list = [ + bbox_targets_3d.split(num_points, 0) + for bbox_targets_3d in bbox_targets_3d_list + ] + centerness_targets_list = [ + centerness_targets.split(num_points, 0) + for centerness_targets in centerness_targets_list + ] + attr_targets_list = [ + attr_targets.split(num_points, 0) + for attr_targets in attr_targets_list + ] + + # concat per level image + concat_lvl_labels_3d = [] + concat_lvl_bbox_targets_3d = [] + concat_lvl_centerness_targets = [] + concat_lvl_attr_targets = [] + for i in range(num_levels): + concat_lvl_labels_3d.append( + torch.cat([labels[i] for labels in labels_3d_list])) + concat_lvl_centerness_targets.append( + torch.cat([ + centerness_targets[i] + for centerness_targets in centerness_targets_list + ])) + bbox_targets_3d = torch.cat([ + bbox_targets_3d[i] for bbox_targets_3d in bbox_targets_3d_list + ]) + concat_lvl_attr_targets.append( + torch.cat( + [attr_targets[i] for attr_targets in attr_targets_list])) + if self.norm_on_bbox: + bbox_targets_3d[:, : + 2] = bbox_targets_3d[:, :2] / self.strides[i] + concat_lvl_bbox_targets_3d.append(bbox_targets_3d) + return concat_lvl_labels_3d, concat_lvl_bbox_targets_3d, \ + concat_lvl_centerness_targets, concat_lvl_attr_targets + + def _get_target_single( + self, gt_instances_3d: InstanceData, gt_instances: InstanceData, + points: Tensor, regress_ranges: Tensor, + num_points_per_lvl: List[int]) -> Tuple[Tensor, ...]: + """Compute regression and classification targets for a single image.""" + num_points = points.size(0) + num_gts = len(gt_instances_3d) + gt_bboxes = gt_instances.bboxes + gt_labels = gt_instances.labels + gt_bboxes_3d = gt_instances_3d.bboxes_3d + gt_labels_3d = gt_instances_3d.labels_3d + centers_2d = gt_instances_3d.centers_2d + depths = gt_instances_3d.depths + attr_labels = gt_instances_3d.attr_labels + + if not isinstance(gt_bboxes_3d, torch.Tensor): + gt_bboxes_3d = gt_bboxes_3d.tensor.to(gt_bboxes.device) + if num_gts == 0: + return gt_labels.new_full((num_points,), self.background_label), \ + gt_bboxes.new_zeros((num_points, 4)), \ + gt_labels_3d.new_full( + (num_points,), self.background_label), \ + gt_bboxes_3d.new_zeros((num_points, self.bbox_code_size)), \ + gt_bboxes_3d.new_zeros((num_points,)), \ + attr_labels.new_full( + (num_points,), self.attr_background_label) + + # change 
orientation to local yaw + gt_bboxes_3d[..., 6] = -torch.atan2( + gt_bboxes_3d[..., 0], gt_bboxes_3d[..., 2]) + gt_bboxes_3d[..., 6] + + areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1]) + areas = areas[None].repeat(num_points, 1) + regress_ranges = regress_ranges[:, None, :].expand( + num_points, num_gts, 2) + gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) + centers_2d = centers_2d[None].expand(num_points, num_gts, 2) + gt_bboxes_3d = gt_bboxes_3d[None].expand(num_points, num_gts, + self.bbox_code_size) + depths = depths[None, :, None].expand(num_points, num_gts, 1) + xs, ys = points[:, 0], points[:, 1] + xs = xs[:, None].expand(num_points, num_gts) + ys = ys[:, None].expand(num_points, num_gts) + + delta_xs = (xs - centers_2d[..., 0])[..., None] + delta_ys = (ys - centers_2d[..., 1])[..., None] + bbox_targets_3d = torch.cat( + (delta_xs, delta_ys, depths, gt_bboxes_3d[..., 3:]), dim=-1) + + left = xs - gt_bboxes[..., 0] + right = gt_bboxes[..., 2] - xs + top = ys - gt_bboxes[..., 1] + bottom = gt_bboxes[..., 3] - ys + bbox_targets = torch.stack((left, top, right, bottom), -1) + + assert self.center_sampling is True, 'Setting center_sampling to '\ + 'False has not been implemented for FCOS3D.' + # condition1: inside a `center bbox` + radius = self.center_sample_radius + center_xs = centers_2d[..., 0] + center_ys = centers_2d[..., 1] + center_gts = torch.zeros_like(gt_bboxes) + stride = center_xs.new_zeros(center_xs.shape) + + # project the points on current lvl back to the `original` sizes + lvl_begin = 0 + for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): + lvl_end = lvl_begin + num_points_lvl + stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius + lvl_begin = lvl_end + + center_gts[..., 0] = center_xs - stride + center_gts[..., 1] = center_ys - stride + center_gts[..., 2] = center_xs + stride + center_gts[..., 3] = center_ys + stride + + cb_dist_left = xs - center_gts[..., 0] + cb_dist_right = center_gts[..., 2] - xs + cb_dist_top = ys - center_gts[..., 1] + cb_dist_bottom = center_gts[..., 3] - ys + center_bbox = torch.stack( + (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) + inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 + + # condition2: limit the regression range for each location + max_regress_distance = bbox_targets.max(-1)[0] + inside_regress_range = ( + (max_regress_distance >= regress_ranges[..., 0]) + & (max_regress_distance <= regress_ranges[..., 1])) + + # center-based criterion to deal with ambiguity + dists = torch.sqrt(torch.sum(bbox_targets_3d[..., :2]**2, dim=-1)) + dists[inside_gt_bbox_mask == 0] = INF + dists[inside_regress_range == 0] = INF + min_dist, min_dist_inds = dists.min(dim=1) + + labels = gt_labels[min_dist_inds] + labels_3d = gt_labels_3d[min_dist_inds] + attr_labels = attr_labels[min_dist_inds] + labels[min_dist == INF] = self.background_label # set as BG + labels_3d[min_dist == INF] = self.background_label # set as BG + attr_labels[min_dist == INF] = self.attr_background_label + + bbox_targets = bbox_targets[range(num_points), min_dist_inds] + bbox_targets_3d = bbox_targets_3d[range(num_points), min_dist_inds] + relative_dists = torch.sqrt( + torch.sum(bbox_targets_3d[..., :2]**2, + dim=-1)) / (1.414 * stride[:, 0]) + # [N, 1] / [N, 1] + centerness_targets = torch.exp(-self.centerness_alpha * relative_dists) + + return labels, bbox_targets, labels_3d, bbox_targets_3d, \ + centerness_targets, attr_labels diff --git a/mmdet3d/models/dense_heads/free_anchor3d_head.py 
b/mmdet3d/models/dense_heads/free_anchor3d_head.py new file mode 100755 index 0000000..94cf712 --- /dev/null +++ b/mmdet3d/models/dense_heads/free_anchor3d_head.py @@ -0,0 +1,291 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List + +import torch +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.structures import bbox_overlaps_nearest_3d +from mmdet3d.utils import InstanceList, OptInstanceList +from .anchor3d_head import Anchor3DHead +from .train_mixins import get_direction_target + + +@MODELS.register_module() +class FreeAnchor3DHead(Anchor3DHead): + r"""`FreeAnchor `_ head for 3D detection. + + Note: + This implementation is directly modified from the `mmdet implementation + `_. + We find it also works on 3D detection with minor modification, i.e., + different hyper-parameters and a additional direction classifier. + + Args: + pre_anchor_topk (int): Number of boxes that be token in each bag. + bbox_thr (float): The threshold of the saturated linear function. It is + usually the same with the IoU threshold used in NMS. + gamma (float): Gamma parameter in focal loss. + alpha (float): Alpha parameter in focal loss. + kwargs (dict): Other arguments are the same as those in :class:`Anchor3DHead`. + """ # noqa: E501 + + def __init__(self, + pre_anchor_topk: int = 50, + bbox_thr: float = 0.6, + gamma: float = 2.0, + alpha: float = 0.5, + init_cfg: dict = None, + **kwargs) -> None: + super().__init__(init_cfg=init_cfg, **kwargs) + self.pre_anchor_topk = pre_anchor_topk + self.bbox_thr = bbox_thr + self.gamma = gamma + self.alpha = alpha + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_input_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> Dict: + """Calculate loss of FreeAnchor head. + + Args: + cls_scores (list[torch.Tensor]): Classification scores of + different samples. + bbox_preds (list[torch.Tensor]): Box predictions of + different samples + dir_cls_preds (list[torch.Tensor]): Direction predictions of + different samples + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_input_metas (list[dict]): Contain pcd and img's meta info. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, torch.Tensor]: Loss items. + + - positive_bag_loss (torch.Tensor): Loss of positive samples. + - negative_bag_loss (torch.Tensor): Loss of negative samples. 
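A numeric illustration of the saturated linear function mentioned above: IoUs at or below `bbox_thr` map to probability 0, the per-object best IoU maps to 1, and everything in between is interpolated linearly (toy IoUs only):

import torch

bbox_thr = 0.6
object_box_iou = torch.tensor([[0.2, 0.5, 0.7, 0.8]])  # 1 gt x 4 anchors
t1 = bbox_thr
t2 = object_box_iou.max(dim=1, keepdim=True).values.clamp(min=t1 + 1e-6)
object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp(min=0, max=1)
print(object_box_prob)
# tensor([[0.0000, 0.0000, 0.5000, 1.0000]])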
+ """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + anchor_list = self.get_anchors(featmap_sizes, batch_input_metas) + mlvl_anchors = [torch.cat(anchor) for anchor in anchor_list] + + # concatenate each level + cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape( + cls_score.size(0), -1, self.num_classes) + for cls_score in cls_scores + ] + bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape( + bbox_pred.size(0), -1, self.box_code_size) + for bbox_pred in bbox_preds + ] + dir_cls_preds = [ + dir_cls_pred.permute(0, 2, 3, + 1).reshape(dir_cls_pred.size(0), -1, 2) + for dir_cls_pred in dir_cls_preds + ] + + cls_scores = torch.cat(cls_scores, dim=1) + bbox_preds = torch.cat(bbox_preds, dim=1) + dir_cls_preds = torch.cat(dir_cls_preds, dim=1) + + cls_probs = torch.sigmoid(cls_scores) + box_prob = [] + num_pos = 0 + positive_losses = [] + for _, (anchors, gt_instance_3d, cls_prob, bbox_pred, + dir_cls_pred) in enumerate( + zip(mlvl_anchors, batch_gt_instances_3d, cls_probs, + bbox_preds, dir_cls_preds)): + + gt_bboxes = gt_instance_3d.bboxes_3d.tensor.to(anchors.device) + gt_labels = gt_instance_3d.labels_3d.to(anchors.device) + with torch.no_grad(): + # box_localization: a_{j}^{loc}, shape: [j, 4] + pred_boxes = self.bbox_coder.decode(anchors, bbox_pred) + + # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] + object_box_iou = bbox_overlaps_nearest_3d( + gt_bboxes, pred_boxes) + + # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] + t1 = self.bbox_thr + t2 = object_box_iou.max( + dim=1, keepdim=True).values.clamp(min=t1 + 1e-6) + object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp( + min=0, max=1) + + # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] + num_obj = gt_labels.size(0) + indices = torch.stack( + [torch.arange(num_obj).type_as(gt_labels), gt_labels], + dim=0) + + object_cls_box_prob = torch.sparse_coo_tensor( + indices, object_box_prob) + + # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] + """ + from "start" to "end" implement: + image_box_iou = torch.sparse.max(object_cls_box_prob, + dim=0).t() + + """ + # start + box_cls_prob = torch.sparse.sum( + object_cls_box_prob, dim=0).to_dense() + + indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() + if indices.numel() == 0: + image_box_prob = torch.zeros( + anchors.size(0), + self.num_classes).type_as(object_box_prob) + else: + nonzero_box_prob = torch.where( + (gt_labels.unsqueeze(dim=-1) == indices[0]), + object_box_prob[:, indices[1]], + torch.tensor( + [0]).type_as(object_box_prob)).max(dim=0).values + + # upmap to shape [j, c] + image_box_prob = torch.sparse_coo_tensor( + indices.flip([0]), + nonzero_box_prob, + size=(anchors.size(0), self.num_classes)).to_dense() + # end + + box_prob.append(image_box_prob) + + # construct bags for objects + match_quality_matrix = bbox_overlaps_nearest_3d(gt_bboxes, anchors) + _, matched = torch.topk( + match_quality_matrix, + self.pre_anchor_topk, + dim=1, + sorted=False) + del match_quality_matrix + + # matched_cls_prob: P_{ij}^{cls} + matched_cls_prob = torch.gather( + cls_prob[matched], 2, + gt_labels.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, + 1)).squeeze(2) + + # matched_box_prob: P_{ij}^{loc} + matched_anchors = anchors[matched] + matched_object_targets = self.bbox_coder.encode( + matched_anchors, + gt_bboxes.unsqueeze(dim=1).expand_as(matched_anchors)) + + # direction classification loss + loss_dir = None + if self.use_direction_classifier: + # also calculate direction 
prob: P_{ij}^{dir} + matched_dir_targets = get_direction_target( + matched_anchors, + matched_object_targets, + self.dir_offset, + self.dir_limit_offset, + one_hot=False) + loss_dir = self.loss_dir( + dir_cls_pred[matched].transpose(-2, -1), + matched_dir_targets, + reduction_override='none') + + # generate bbox weights + if self.diff_rad_by_sin: + bbox_preds_clone = bbox_pred.clone() + bbox_preds_clone[matched], matched_object_targets = \ + self.add_sin_difference( + bbox_preds_clone[matched], matched_object_targets) + bbox_weights = matched_anchors.new_ones(matched_anchors.size()) + # Use pop is not right, check performance + code_weight = self.train_cfg.get('code_weight', None) + if code_weight: + bbox_weights = bbox_weights * bbox_weights.new_tensor( + code_weight) + loss_bbox = self.loss_bbox( + bbox_preds_clone[matched], + matched_object_targets, + bbox_weights, + reduction_override='none').sum(-1) + + if loss_dir is not None: + loss_bbox += loss_dir + matched_box_prob = torch.exp(-loss_bbox) + + # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} + num_pos += len(gt_bboxes) + positive_losses.append( + self.positive_bag_loss(matched_cls_prob, matched_box_prob)) + + positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) + + # box_prob: P{a_{j} \in A_{+}} + box_prob = torch.stack(box_prob, dim=0) + + # negative_loss: + # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| + negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max( + 1, num_pos * self.pre_anchor_topk) + + losses = { + 'positive_bag_loss': positive_loss, + 'negative_bag_loss': negative_loss + } + return losses + + def positive_bag_loss(self, matched_cls_prob: Tensor, + matched_box_prob: Tensor) -> Tensor: + """Generate positive bag loss. + + Args: + matched_cls_prob (torch.Tensor): Classification probability + of matched positive samples. + matched_box_prob (torch.Tensor): Bounding box probability + of matched positive samples. + + Returns: + torch.Tensor: Loss of positive samples. + """ + # bag_prob = Mean-max(matched_prob) + matched_prob = matched_cls_prob * matched_box_prob + weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) + weight /= weight.sum(dim=1).unsqueeze(dim=-1) + bag_prob = (weight * matched_prob).sum(dim=1) + # positive_bag_loss = -self.alpha * log(bag_prob) + bag_prob = bag_prob.clamp(0, 1) # to avoid bug of BCE, check + return self.alpha * F.binary_cross_entropy( + bag_prob, torch.ones_like(bag_prob), reduction='none') + + def negative_bag_loss(self, cls_prob: Tensor, box_prob: Tensor) -> Tensor: + """Generate negative bag loss. + + Args: + cls_prob (torch.Tensor): Classification probability + of negative samples. + box_prob (torch.Tensor): Bounding box probability + of negative samples. + + Returns: + torch.Tensor: Loss of negative samples. + """ + prob = cls_prob * (1 - box_prob) + prob = prob.clamp(0, 1) # to avoid bug of BCE, check + negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( + prob, torch.zeros_like(prob), reduction='none') + return (1 - self.alpha) * negative_bag_loss diff --git a/mmdet3d/models/dense_heads/groupfree3d_head.py b/mmdet3d/models/dense_heads/groupfree3d_head.py new file mode 100755 index 0000000..2c5de94 --- /dev/null +++ b/mmdet3d/models/dense_heads/groupfree3d_head.py @@ -0,0 +1,1110 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
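A small numeric sketch of the mean-max used by `positive_bag_loss` above: the weight `1 / (1 - p)` lets the best anchors in a bag dominate the bag probability, which is then pushed towards 1 with a binary cross-entropy. Here `matched_prob` stands for the product of classification and localization probabilities; the values are invented:

import torch
import torch.nn.functional as F

def positive_bag_loss(matched_prob: torch.Tensor, alpha: float = 0.5):
    weight = 1 / torch.clamp(1 - matched_prob, min=1e-12)
    weight = weight / weight.sum(dim=1, keepdim=True)
    bag_prob = (weight * matched_prob).sum(dim=1).clamp(0, 1)
    return alpha * F.binary_cross_entropy(
        bag_prob, torch.ones_like(bag_prob), reduction='none')

probs = torch.tensor([[0.9, 0.2, 0.1]])  # one object, three bagged anchors
print(positive_bag_loss(probs))  # ~ tensor([0.1390]), i.e. -0.5 * log(0.757)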
+import copy +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer) +from mmcv.ops import PointsSampler as Points_Sampler +from mmcv.ops import gather_points +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule, xavier_init +from mmengine.structures import InstanceData +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.models.layers import aligned_3d_nms +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures import BaseInstance3DBoxes, Det3DDataSample +from mmdet3d.structures.det3d_data_sample import SampleList +from .base_conv_bbox_head import BaseConvBboxHead + +EPS = 1e-6 + + +class PointsObjClsModule(BaseModule): + """object candidate point prediction from seed point features. + + Args: + in_channel (int): number of channels of seed point features. + num_convs (int, optional): number of conv layers. + Default: 3. + conv_cfg (dict, optional): Config of convolution. + Default: dict(type='Conv1d'). + norm_cfg (dict, optional): Config of normalization. + Default: dict(type='BN1d'). + act_cfg (dict, optional): Config of activation. + Default: dict(type='ReLU'). + """ + + def __init__(self, + in_channel: int, + num_convs: int = 3, + conv_cfg: dict = dict(type='Conv1d'), + norm_cfg: dict = dict(type='BN1d'), + act_cfg: dict = dict(type='ReLU'), + init_cfg: Optional[dict] = None): + super().__init__(init_cfg=init_cfg) + conv_channels = [in_channel for _ in range(num_convs - 1)] + conv_channels.append(1) + + self.mlp = nn.Sequential() + prev_channels = in_channel + for i in range(num_convs): + self.mlp.add_module( + f'layer{i}', + ConvModule( + prev_channels, + conv_channels[i], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if i < num_convs - 1 else None, + act_cfg=act_cfg if i < num_convs - 1 else None, + bias=True, + inplace=True)) + prev_channels = conv_channels[i] + + def forward(self, seed_features): + """Forward pass. + + Args: + seed_features (torch.Tensor): seed features, dims: + (batch_size, feature_dim, num_seed) + + Returns: + torch.Tensor: objectness logits, dim: + (batch_size, 1, num_seed) + """ + return self.mlp(seed_features) + + +class GeneralSamplingModule(nn.Module): + """Sampling Points. + + Sampling points with given index. + """ + + def forward(self, xyz: Tensor, features: Tensor, + sample_inds: Tensor) -> Tuple[Tensor]: + """Forward pass. + + Args: + xyz (Tensor): (B, N, 3) the coordinates of the features. + features (Tensor): (B, C, N) features to sample. + sample_inds (Tensor): (B, M) the given index, + where M is the number of points. + + Returns: + Tensor: (B, M, 3) coordinates of sampled features + Tensor: (B, C, M) the sampled features. + Tensor: (B, M) the given index. + """ + xyz_t = xyz.transpose(1, 2).contiguous() + new_xyz = gather_points(xyz_t, sample_inds).transpose(1, + 2).contiguous() + new_features = gather_points(features, sample_inds).contiguous() + + return new_xyz, new_features, sample_inds + + +@MODELS.register_module() +class GroupFree3DHead(BaseModule): + r"""Bbox head of `Group-Free 3D `_. + + Args: + num_classes (int): The number of class. + in_channels (int): The dims of input features from backbone. + bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for encoding and + decoding boxes. + num_decoder_layers (int): The number of transformer decoder layers. 
+ transformerlayers (dict): Config for transformer decoder. + train_cfg (dict, optional): Config for training. + test_cfg (dict, optional): Config for testing. + num_proposal (int): The number of initial sampling candidates. + pred_layer_cfg (dict, optional): Config of classfication and regression + prediction layers. + size_cls_agnostic (bool): Whether the predicted size is class-agnostic. + gt_per_seed (int): the number of candidate instance each point belongs + to. + sampling_objectness_loss (dict, optional): Config of initial sampling + objectness loss. + objectness_loss (dict, optional): Config of objectness loss. + center_loss (dict, optional): Config of center loss. + dir_class_loss (dict, optional): Config of direction classification + loss. + dir_res_loss (dict, optional): Config of direction residual + regression loss. + size_class_loss (dict, optional): Config of size classification loss. + size_res_loss (dict, optional): Config of size residual + regression loss. + size_reg_loss (dict, optional): Config of class-agnostic size + regression loss. + semantic_loss (dict, optional): Config of point-wise semantic + segmentation loss. + """ + + def __init__(self, + num_classes: int, + in_channels: int, + bbox_coder: dict, + num_decoder_layers: int, + transformerlayers: dict, + decoder_self_posembeds: dict = dict( + type='ConvBNPositionalEncoding', + input_channel=6, + num_pos_feats=288), + decoder_cross_posembeds: dict = dict( + type='ConvBNPositionalEncoding', + input_channel=3, + num_pos_feats=288), + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + num_proposal: int = 128, + pred_layer_cfg: Optional[dict] = None, + size_cls_agnostic: bool = True, + gt_per_seed: int = 3, + sampling_objectness_loss: Optional[dict] = None, + objectness_loss: Optional[dict] = None, + center_loss: Optional[dict] = None, + dir_class_loss: Optional[dict] = None, + dir_res_loss: Optional[dict] = None, + size_class_loss: Optional[dict] = None, + size_res_loss: Optional[dict] = None, + size_reg_loss: Optional[dict] = None, + semantic_loss: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super(GroupFree3DHead, self).__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.num_proposal = num_proposal + self.in_channels = in_channels + self.num_decoder_layers = num_decoder_layers + self.size_cls_agnostic = size_cls_agnostic + self.gt_per_seed = gt_per_seed + + # Transformer decoder layers + if isinstance(transformerlayers, dict): + transformerlayers = [ + copy.deepcopy(transformerlayers) + for _ in range(num_decoder_layers) + ] + else: + assert isinstance(transformerlayers, list) and \ + len(transformerlayers) == num_decoder_layers + self.decoder_layers = nn.ModuleList() + for i in range(self.num_decoder_layers): + self.decoder_layers.append( + build_transformer_layer(transformerlayers[i])) + self.embed_dims = self.decoder_layers[0].embed_dims + assert self.embed_dims == decoder_self_posembeds['num_pos_feats'] + assert self.embed_dims == decoder_cross_posembeds['num_pos_feats'] + + # bbox_coder + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.num_sizes = self.bbox_coder.num_sizes + self.num_dir_bins = self.bbox_coder.num_dir_bins + + # Initial object candidate sampling + self.gsample_module = GeneralSamplingModule() + self.fps_module = Points_Sampler([self.num_proposal]) + self.points_obj_cls = PointsObjClsModule(self.in_channels) + + self.fp16_enabled = False + + # initial candidate prediction + 
self.conv_pred = BaseConvBboxHead( + **pred_layer_cfg, + num_cls_out_channels=self._get_cls_out_channels(), + num_reg_out_channels=self._get_reg_out_channels()) + + # query proj and key proj + self.decoder_query_proj = nn.Conv1d( + self.embed_dims, self.embed_dims, kernel_size=1) + self.decoder_key_proj = nn.Conv1d( + self.embed_dims, self.embed_dims, kernel_size=1) + + # query position embed + self.decoder_self_posembeds = nn.ModuleList() + for _ in range(self.num_decoder_layers): + self.decoder_self_posembeds.append( + build_positional_encoding(decoder_self_posembeds)) + # key position embed + self.decoder_cross_posembeds = nn.ModuleList() + for _ in range(self.num_decoder_layers): + self.decoder_cross_posembeds.append( + build_positional_encoding(decoder_cross_posembeds)) + + # Prediction Head + self.prediction_heads = nn.ModuleList() + for i in range(self.num_decoder_layers): + self.prediction_heads.append( + BaseConvBboxHead( + **pred_layer_cfg, + num_cls_out_channels=self._get_cls_out_channels(), + num_reg_out_channels=self._get_reg_out_channels())) + + self.loss_sampling_objectness = MODELS.build(sampling_objectness_loss) + self.loss_objectness = MODELS.build(objectness_loss) + self.loss_center = MODELS.build(center_loss) + self.loss_dir_res = MODELS.build(dir_res_loss) + self.loss_dir_class = MODELS.build(dir_class_loss) + self.loss_semantic = MODELS.build(semantic_loss) + if self.size_cls_agnostic: + self.loss_size_reg = MODELS.build(size_reg_loss) + else: + self.loss_size_res = MODELS.build(size_res_loss) + self.loss_size_class = MODELS.build(size_class_loss) + + def init_weights(self): + """Initialize weights of transformer decoder in GroupFree3DHead.""" + # initialize transformer + for m in self.decoder_layers.parameters(): + if m.dim() > 1: + xavier_init(m, distribution='uniform') + for m in self.decoder_self_posembeds.parameters(): + if m.dim() > 1: + xavier_init(m, distribution='uniform') + for m in self.decoder_cross_posembeds.parameters(): + if m.dim() > 1: + xavier_init(m, distribution='uniform') + + def _get_cls_out_channels(self): + """Return the channel number of classification outputs.""" + # Class numbers (k) + objectness (1) + return self.num_classes + 1 + + def _get_reg_out_channels(self): + """Return the channel number of regression outputs.""" + # center residual (3), + # heading class+residual (num_dir_bins*2), + # size class+residual(num_sizes*4 or 3) + if self.size_cls_agnostic: + return 6 + self.num_dir_bins * 2 + else: + return 3 + self.num_dir_bins * 2 + self.num_sizes * 4 + + def _extract_input(self, feat_dict: dict) -> Tuple[Tensor]: + """Extract inputs from features dictionary. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + Tuple[Tensor]: + + - seed_points (Tensor): Coordinates of input points. + - seed_features (Tensor): Features of input points. + - seed_indices (Tensor): Indices of input points. + """ + + seed_points = feat_dict['fp_xyz'][-1] + seed_features = feat_dict['fp_features'][-1] + seed_indices = feat_dict['fp_indices'][-1] + + return seed_points, seed_features, seed_indices + + @property + def sample_mode(self): + """ + Returns: + str: Sample mode for initial candidates sampling. + """ + if self.training: + sample_mode = self.train_cfg.sample_mode + else: + sample_mode = self.test_cfg.sample_mode + assert sample_mode in ['fps', 'kps'] + return sample_mode + + def forward(self, feat_dict: dict) -> dict: + """Forward pass. + + Note: + The forward of GroupFree3DHead is divided into 2 steps: + + 1. 
Initial object candidates sampling. + 2. Iterative object box prediction by transformer decoder. + + Args: + feat_dict (dict): Feature dict from backbone. + + + Returns: + results (dict): Predictions of GroupFree3D head. + """ + sample_mode = self.sample_mode + + seed_xyz, seed_features, seed_indices = self._extract_input(feat_dict) + + results = dict( + seed_points=seed_xyz, + seed_features=seed_features, + seed_indices=seed_indices) + + # 1. Initial object candidates sampling. + if sample_mode == 'fps': + sample_inds = self.fps_module(seed_xyz, seed_features) + elif sample_mode == 'kps': + points_obj_cls_logits = self.points_obj_cls( + seed_features) # (batch_size, 1, num_seed) + points_obj_cls_scores = points_obj_cls_logits.sigmoid().squeeze(1) + sample_inds = torch.topk(points_obj_cls_scores, + self.num_proposal)[1].int() + results['seeds_obj_cls_logits'] = points_obj_cls_logits + else: + raise NotImplementedError( + f'Sample mode {sample_mode} is not supported!') + + candidate_xyz, candidate_features, sample_inds = self.gsample_module( + seed_xyz, seed_features, sample_inds) + + results['query_points_xyz'] = candidate_xyz # (B, M, 3) + results['query_points_feature'] = candidate_features # (B, C, M) + results['query_points_sample_inds'] = sample_inds.long() # (B, M) + + prefix = 'proposal.' + cls_predictions, reg_predictions = self.conv_pred(candidate_features) + decode_res = self.bbox_coder.split_pred(cls_predictions, + reg_predictions, candidate_xyz, + prefix) + + results.update(decode_res) + bbox3d = self.bbox_coder.decode(results, prefix) + + # 2. Iterative object box prediction by transformer decoder. + base_bbox3d = bbox3d[:, :, :6].detach().clone() + + query = self.decoder_query_proj(candidate_features).permute(2, 0, 1) + key = self.decoder_key_proj(seed_features).permute(2, 0, 1) + value = key + + # transformer decoder + results['num_decoder_layers'] = 0 + for i in range(self.num_decoder_layers): + prefix = f's{i}.' + + query_pos = self.decoder_self_posembeds[i](base_bbox3d).permute( + 2, 0, 1) + key_pos = self.decoder_cross_posembeds[i](seed_xyz).permute( + 2, 0, 1) + + query = self.decoder_layers[i]( + query, key, value, query_pos=query_pos, + key_pos=key_pos).permute(1, 2, 0) + + results[f'{prefix}query'] = query + + cls_predictions, reg_predictions = self.prediction_heads[i](query) + decode_res = self.bbox_coder.split_pred(cls_predictions, + reg_predictions, + candidate_xyz, prefix) + # TODO: should save bbox3d instead of decode_res? + results.update(decode_res) + + bbox3d = self.bbox_coder.decode(results, prefix) + results[f'{prefix}bbox3d'] = bbox3d + base_bbox3d = bbox3d[:, :, :6].detach().clone() + query = query.permute(2, 0, 1) + + results['num_decoder_layers'] += 1 + + return results + + def loss(self, points: List[torch.Tensor], feats_dict: Dict[str, + torch.Tensor], + batch_data_samples: SampleList, **kwargs) -> dict: + """ + Args: + points (list[tensor]): Points cloud of multiple samples. + feats_dict (dict): Predictions from backbone or FPN. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each sample and + corresponding annotations. + + Returns: + dict: A dictionary of loss components. 
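+
+            The returned dict contains ``sampling_objectness_loss`` plus, for
+            the proposal stage (``proposal.``) and every decoder stage
+            (``s0.``, ``s1.``, ...), per-stage ``objectness_loss``,
+            ``center_loss``, ``dir_class_loss``, ``dir_res_loss``, the size
+            loss(es) and ``semantic_loss``.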
+ """ + preds_dict = self.forward(feats_dict) + batch_gt_instance_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + batch_pts_semantic_mask = [] + batch_pts_instance_mask = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instance_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + batch_pts_semantic_mask.append( + data_sample.gt_pts_seg.get('pts_semantic_mask', None)) + batch_pts_instance_mask.append( + data_sample.gt_pts_seg.get('pts_instance_mask', None)) + + loss_inputs = (points, preds_dict, batch_gt_instance_3d) + losses = self.loss_by_feat( + *loss_inputs, + batch_pts_semantic_mask=batch_pts_semantic_mask, + batch_pts_instance_mask=batch_pts_instance_mask, + batch_input_metas=batch_input_metas, + batch_gt_instances_ignore=batch_gt_instances_ignore) + return losses + + def loss_by_feat( + self, + points: List[torch.Tensor], + feats_dict: dict, + batch_gt_instances_3d: List[InstanceData], + batch_pts_semantic_mask: Optional[List[torch.Tensor]] = None, + batch_pts_instance_mask: Optional[List[torch.Tensor]] = None, + ret_target: bool = False, + **kwargs) -> dict: + """Compute loss. + + Args: + points (list[torch.Tensor]): Input points. + feats_dict (dict): Predictions from previous component. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_pts_semantic_mask (list[tensor]): Semantic mask + of points cloud. Defaults to None. + batch_pts_semantic_mask (list[tensor]): Instance mask + of points cloud. Defaults to None. + ret_target (bool): Return targets or not. Defaults to False. + + Returns: + dict: Losses of `GroupFree3D`. + """ + targets = self.get_targets(points, feats_dict, batch_gt_instances_3d, + batch_pts_semantic_mask, + batch_pts_instance_mask) + (sampling_targets, sampling_weights, assigned_size_targets, + size_class_targets, size_res_targets, dir_class_targets, + dir_res_targets, center_targets, assigned_center_targets, + mask_targets, valid_gt_masks, objectness_targets, objectness_weights, + box_loss_weights, valid_gt_weights) = targets + + batch_size, proposal_num = size_class_targets.shape[:2] + + losses = dict() + + # calculate objectness classification loss + sampling_obj_score = feats_dict['seeds_obj_cls_logits'].reshape(-1, 1) + sampling_objectness_loss = self.loss_sampling_objectness( + sampling_obj_score, + 1 - sampling_targets.reshape(-1), + sampling_weights.reshape(-1), + avg_factor=batch_size) + losses['sampling_objectness_loss'] = sampling_objectness_loss + + prefixes = ['proposal.'] + [ + f's{i}.' 
for i in range(feats_dict['num_decoder_layers']) + ] + num_stages = len(prefixes) + for prefix in prefixes: + + # calculate objectness loss + obj_score = feats_dict[f'{prefix}obj_scores'].transpose(2, 1) + objectness_loss = self.loss_objectness( + obj_score.reshape(-1, 1), + 1 - objectness_targets.reshape(-1), + objectness_weights.reshape(-1), + avg_factor=batch_size) + losses[f'{prefix}objectness_loss'] = objectness_loss / num_stages + + # calculate center loss + box_loss_weights_expand = box_loss_weights.unsqueeze(-1).expand( + -1, -1, 3) + center_loss = self.loss_center( + feats_dict[f'{prefix}center'], + assigned_center_targets, + weight=box_loss_weights_expand) + losses[f'{prefix}center_loss'] = center_loss / num_stages + + # calculate direction class loss + dir_class_loss = self.loss_dir_class( + feats_dict[f'{prefix}dir_class'].transpose(2, 1), + dir_class_targets, + weight=box_loss_weights) + losses[f'{prefix}dir_class_loss'] = dir_class_loss / num_stages + + # calculate direction residual loss + heading_label_one_hot = size_class_targets.new_zeros( + (batch_size, proposal_num, self.num_dir_bins)) + heading_label_one_hot.scatter_(2, dir_class_targets.unsqueeze(-1), + 1) + dir_res_norm = torch.sum( + feats_dict[f'{prefix}dir_res_norm'] * heading_label_one_hot, + -1) + dir_res_loss = self.loss_dir_res( + dir_res_norm, dir_res_targets, weight=box_loss_weights) + losses[f'{prefix}dir_res_loss'] = dir_res_loss / num_stages + + if self.size_cls_agnostic: + # calculate class-agnostic size loss + size_reg_loss = self.loss_size_reg( + feats_dict[f'{prefix}size'], + assigned_size_targets, + weight=box_loss_weights_expand) + losses[f'{prefix}size_reg_loss'] = size_reg_loss / num_stages + + else: + # calculate size class loss + size_class_loss = self.loss_size_class( + feats_dict[f'{prefix}size_class'].transpose(2, 1), + size_class_targets, + weight=box_loss_weights) + losses[ + f'{prefix}size_class_loss'] = size_class_loss / num_stages + + # calculate size residual loss + one_hot_size_targets = size_class_targets.new_zeros( + (batch_size, proposal_num, self.num_sizes)) + one_hot_size_targets.scatter_(2, + size_class_targets.unsqueeze(-1), + 1) + one_hot_size_targets_expand = one_hot_size_targets.unsqueeze( + -1).expand(-1, -1, -1, 3).contiguous() + size_residual_norm = torch.sum( + feats_dict[f'{prefix}size_res_norm'] * + one_hot_size_targets_expand, 2) + box_loss_weights_expand = box_loss_weights.unsqueeze( + -1).expand(-1, -1, 3) + size_res_loss = self.loss_size_res( + size_residual_norm, + size_res_targets, + weight=box_loss_weights_expand) + losses[f'{prefix}size_res_loss'] = size_res_loss / num_stages + + # calculate semantic loss + semantic_loss = self.loss_semantic( + feats_dict[f'{prefix}sem_scores'].transpose(2, 1), + mask_targets, + weight=box_loss_weights) + losses[f'{prefix}semantic_loss'] = semantic_loss / num_stages + + if ret_target: + losses['targets'] = targets + + return losses + + def get_targets( + self, + points: List[Tensor], + feats_dict: dict = None, + batch_gt_instances_3d: List[InstanceData] = None, + batch_pts_semantic_mask: List[torch.Tensor] = None, + batch_pts_instance_mask: List[torch.Tensor] = None, + max_gt_num: int = 64, + ): + """Generate targets of GroupFree3D head. + + Args: + points (list[torch.Tensor]): Points of each batch. + feats_dict (torch.Tensor): Predictions of previous component. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. 
+ batch_pts_semantic_mask (list[tensor]): Semantic gt mask for + point clouds. Defaults to None. + batch_pts_instance_mask (list[tensor]): Instance gt mask for + point clouds. Defaults to None. + max_gt_num (int): Max number of GTs for single batch. Defaults + to 64. + + Returns: + tuple[torch.Tensor]: Targets of GroupFree3D head. + """ + # find empty example + valid_gt_masks = list() + gt_num = list() + batch_gt_labels_3d = [ + gt_instances_3d.labels_3d + for gt_instances_3d in batch_gt_instances_3d + ] + batch_gt_bboxes_3d = [ + gt_instances_3d.bboxes_3d + for gt_instances_3d in batch_gt_instances_3d + ] + + for index in range(len(batch_gt_labels_3d)): + if len(batch_gt_labels_3d[index]) == 0: + fake_box = batch_gt_bboxes_3d[index].tensor.new_zeros( + 1, batch_gt_bboxes_3d[index].tensor.shape[-1]) + batch_gt_bboxes_3d[index] = batch_gt_bboxes_3d[index].new_box( + fake_box) + batch_gt_labels_3d[index] = batch_gt_labels_3d[ + index].new_zeros(1) + valid_gt_masks.append(batch_gt_labels_3d[index].new_zeros(1)) + gt_num.append(1) + else: + valid_gt_masks.append(batch_gt_labels_3d[index].new_ones( + batch_gt_labels_3d[index].shape)) + gt_num.append(batch_gt_labels_3d[index].shape[0]) + + max_gt_nums = [max_gt_num for _ in range(len(batch_gt_labels_3d))] + + if batch_pts_semantic_mask is None: + batch_pts_semantic_mask = [ + None for i in range(len(batch_gt_labels_3d)) + ] + batch_pts_instance_mask = [ + None for i in range(len(batch_gt_labels_3d)) + ] + + seed_points = [ + feats_dict['seed_points'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + seed_indices = [ + feats_dict['seed_indices'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + candidate_indices = [ + feats_dict['query_points_sample_inds'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + (sampling_targets, assigned_size_targets, size_class_targets, + size_res_targets, dir_class_targets, dir_res_targets, center_targets, + assigned_center_targets, mask_targets, + objectness_targets, objectness_masks) = multi_apply( + self._get_targets_single, points, batch_gt_bboxes_3d, + batch_gt_labels_3d, batch_pts_semantic_mask, + batch_pts_instance_mask, max_gt_nums, seed_points, seed_indices, + candidate_indices) + + # pad targets as original code of GroupFree3D. 
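+        # After padding, the per-sample targets are stacked into batch
+        # tensors; sampling/objectness weights are normalized per sample and
+        # box_loss_weights by the total number of positive candidates, so
+        # each loss term averages over valid entries only.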
+ for index in range(len(batch_gt_labels_3d)): + pad_num = max_gt_num - batch_gt_labels_3d[index].shape[0] + valid_gt_masks[index] = F.pad(valid_gt_masks[index], (0, pad_num)) + + sampling_targets = torch.stack(sampling_targets) + sampling_weights = (sampling_targets >= 0).float() + sampling_normalizer = sampling_weights.sum(dim=1, keepdim=True).float() + sampling_weights /= sampling_normalizer.clamp(min=1.0) + + assigned_size_targets = torch.stack(assigned_size_targets) + center_targets = torch.stack(center_targets) + valid_gt_masks = torch.stack(valid_gt_masks) + + assigned_center_targets = torch.stack(assigned_center_targets) + objectness_targets = torch.stack(objectness_targets) + + objectness_weights = torch.stack(objectness_masks) + cls_normalizer = objectness_weights.sum(dim=1, keepdim=True).float() + objectness_weights /= cls_normalizer.clamp(min=1.0) + + box_loss_weights = objectness_targets.float() / ( + objectness_targets.sum().float() + EPS) + + valid_gt_weights = valid_gt_masks.float() / ( + valid_gt_masks.sum().float() + EPS) + + dir_class_targets = torch.stack(dir_class_targets) + dir_res_targets = torch.stack(dir_res_targets) + size_class_targets = torch.stack(size_class_targets) + size_res_targets = torch.stack(size_res_targets) + mask_targets = torch.stack(mask_targets) + + return (sampling_targets, sampling_weights, assigned_size_targets, + size_class_targets, size_res_targets, dir_class_targets, + dir_res_targets, center_targets, assigned_center_targets, + mask_targets, valid_gt_masks, objectness_targets, + objectness_weights, box_loss_weights, valid_gt_weights) + + def _get_targets_single(self, + points: Tensor, + gt_bboxes_3d: BaseInstance3DBoxes, + gt_labels_3d: Tensor, + pts_semantic_mask: Optional[Tensor] = None, + pts_instance_mask: Optional[Tensor] = None, + max_gt_nums: Optional[int] = None, + seed_points: Optional[Tensor] = None, + seed_indices: Optional[Tensor] = None, + candidate_indices: Optional[Tensor] = None, + seed_points_obj_topk: int = 4): + """Generate targets of GroupFree3D head for single batch. + + Args: + points (torch.Tensor): Points of each batch. + gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth + boxes of each batch. + gt_labels_3d (torch.Tensor): Labels of each batch. + pts_semantic_mask (torch.Tensor, optional): Point-wise semantic + label of each batch. Defaults to None. + pts_instance_mask (torch.Tensor, optional): Point-wise instance + label of each batch. Defaults to None. + max_gt_nums (int, optional): Max number of GTs for single batch. + Defaults to None. + seed_points (torch.Tensor,optional): Coordinates of seed points. + Defaults to None. + seed_indices (torch.Tensor,optional): Indices of seed points. + Defaults to None. + candidate_indices (torch.Tensor,optional): Indices of object + candidates. Defaults to None. + seed_points_obj_topk (int): k value of k-Closest Points Sampling. + Defaults to 4. + + Returns: + tuple[torch.Tensor]: Targets of GroupFree3D head. 
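+
+            - sampling_targets (Tensor): Sampling objectness label per seed.
+            - assigned_size_targets (Tensor): Assigned box size per candidate.
+            - size_class_targets (Tensor): Size class per candidate.
+            - size_res_targets (Tensor): Size residual per candidate.
+            - dir_class_targets (Tensor): Direction class per candidate.
+            - dir_res_targets (Tensor): Direction residual per candidate.
+            - center_targets (Tensor): Centers of the (padded) GT boxes.
+            - assigned_center_targets (Tensor): Assigned center per candidate.
+            - mask_targets (Tensor): Semantic class label per candidate.
+            - objectness_targets (Tensor): Objectness label per candidate.
+            - objectness_masks (Tensor): Objectness weight per candidate.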
+ """ + + assert self.bbox_coder.with_rot or pts_semantic_mask is not None + + gt_bboxes_3d = gt_bboxes_3d.to(points.device) + + # generate center, dir, size target + (center_targets, size_targets, size_class_targets, size_res_targets, + dir_class_targets, + dir_res_targets) = self.bbox_coder.encode(gt_bboxes_3d, gt_labels_3d) + + # pad targets as original code of GroupFree3D + pad_num = max_gt_nums - gt_labels_3d.shape[0] + box_label_mask = points.new_zeros([max_gt_nums]) + box_label_mask[:gt_labels_3d.shape[0]] = 1 + + gt_bboxes_pad = F.pad(gt_bboxes_3d.tensor, (0, 0, 0, pad_num)) + gt_bboxes_pad[gt_labels_3d.shape[0]:, 0:3] += 1000 + gt_bboxes_3d = gt_bboxes_3d.new_box(gt_bboxes_pad) + + gt_labels_3d = F.pad(gt_labels_3d, (0, pad_num)) + + center_targets = F.pad(center_targets, (0, 0, 0, pad_num), value=1000) + size_targets = F.pad(size_targets, (0, 0, 0, pad_num)) + size_class_targets = F.pad(size_class_targets, (0, pad_num)) + size_res_targets = F.pad(size_res_targets, (0, 0, 0, pad_num)) + dir_class_targets = F.pad(dir_class_targets, (0, pad_num)) + dir_res_targets = F.pad(dir_res_targets, (0, pad_num)) + + # 0. generate pts_instance_label and pts_obj_mask + num_points = points.shape[0] + pts_obj_mask = points.new_zeros([num_points], dtype=torch.long) + pts_instance_label = points.new_zeros([num_points], + dtype=torch.long) - 1 + + if self.bbox_coder.with_rot: + vote_targets = points.new_zeros([num_points, 4 * self.gt_per_seed]) + vote_target_idx = points.new_zeros([num_points], dtype=torch.long) + box_indices_all = gt_bboxes_3d.points_in_boxes_part(points) + for i in range(gt_labels_3d.shape[0]): + box_indices = box_indices_all[:, i] + indices = torch.nonzero( + box_indices, as_tuple=False).squeeze(-1) + selected_points = points[indices] + pts_obj_mask[indices] = 1 + vote_targets_tmp = vote_targets[indices] + votes = gt_bboxes_3d.gravity_center[i].unsqueeze( + 0) - selected_points[:, :3] + + for j in range(self.gt_per_seed): + column_indices = torch.nonzero( + vote_target_idx[indices] == j, + as_tuple=False).squeeze(-1) + vote_targets_tmp[column_indices, + int(j * 3):int(j * 3 + + 3)] = votes[column_indices] + vote_targets_tmp[column_indices, + j + 3 * self.gt_per_seed] = i + if j == 0: + vote_targets_tmp[ + column_indices, :3 * + self.gt_per_seed] = votes[column_indices].repeat( + 1, self.gt_per_seed) + vote_targets_tmp[column_indices, + 3 * self.gt_per_seed:] = i + + vote_targets[indices] = vote_targets_tmp + vote_target_idx[indices] = torch.clamp( + vote_target_idx[indices] + 1, max=2) + + dist = points.new_zeros([num_points, self.gt_per_seed]) + 1000 + for j in range(self.gt_per_seed): + dist[:, j] = (vote_targets[:, 3 * j:3 * j + 3]**2).sum(-1) + + instance_indices = torch.argmin( + dist, dim=-1).unsqueeze(-1) + 3 * self.gt_per_seed + instance_lable = torch.gather(vote_targets, 1, + instance_indices).squeeze(-1) + pts_instance_label = instance_lable.long() + pts_instance_label[pts_obj_mask == 0] = -1 + + elif pts_instance_mask is not None and pts_semantic_mask is not None: + for i in torch.unique(pts_instance_mask): + indices = torch.nonzero( + pts_instance_mask == i, as_tuple=False).squeeze(-1) + + if pts_semantic_mask[indices[0]] < self.num_classes: + selected_points = points[indices, :3] + center = 0.5 * ( + selected_points.min(0)[0] + selected_points.max(0)[0]) + + delta_xyz = center - center_targets + instance_lable = torch.argmin((delta_xyz**2).sum(-1)) + pts_instance_label[indices] = instance_lable + pts_obj_mask[indices] = 1 + + else: + raise NotImplementedError + + # 
1. generate objectness targets in sampling head + gt_num = gt_labels_3d.shape[0] + num_seed = seed_points.shape[0] + num_candidate = candidate_indices.shape[0] + + object_assignment = torch.gather(pts_instance_label, 0, seed_indices) + # set background points to the last gt bbox as original code + object_assignment[object_assignment < 0] = gt_num - 1 + object_assignment_one_hot = gt_bboxes_3d.tensor.new_zeros( + (num_seed, gt_num)) + object_assignment_one_hot.scatter_(1, object_assignment.unsqueeze(-1), + 1) # (num_seed, gt_num) + + delta_xyz = seed_points.unsqueeze( + 1) - gt_bboxes_3d.gravity_center.unsqueeze( + 0) # (num_seed, gt_num, 3) + delta_xyz = delta_xyz / (gt_bboxes_3d.dims.unsqueeze(0) + EPS) + + new_dist = torch.sum(delta_xyz**2, dim=-1) + euclidean_dist1 = torch.sqrt(new_dist + EPS) + euclidean_dist1 = euclidean_dist1 * object_assignment_one_hot + 100 * ( + 1 - object_assignment_one_hot) + # (gt_num, num_seed) + euclidean_dist1 = euclidean_dist1.permute(1, 0) + + # gt_num x topk + topk_inds = torch.topk( + euclidean_dist1, + seed_points_obj_topk, + largest=False)[1] * box_label_mask[:, None] + \ + (box_label_mask[:, None] - 1) + topk_inds = topk_inds.long() + topk_inds = topk_inds.view(-1).contiguous() + + sampling_targets = torch.zeros( + num_seed + 1, dtype=torch.long).to(points.device) + sampling_targets[topk_inds] = 1 + sampling_targets = sampling_targets[:num_seed] + # pts_instance_label + objectness_label_mask = torch.gather(pts_instance_label, 0, + seed_indices) # num_seed + sampling_targets[objectness_label_mask < 0] = 0 + + # 2. objectness target + seed_obj_gt = torch.gather(pts_obj_mask, 0, seed_indices) # num_seed + objectness_targets = torch.gather(seed_obj_gt, 0, + candidate_indices) # num_candidate + + # 3. box target + seed_instance_label = torch.gather(pts_instance_label, 0, + seed_indices) # num_seed + query_points_instance_label = torch.gather( + seed_instance_label, 0, candidate_indices) # num_candidate + + # Set assignment + # (num_candidate, ) with values in 0,1,...,gt_num-1 + assignment = query_points_instance_label + # set background points to the last gt bbox as original code + assignment[assignment < 0] = gt_num - 1 + assignment_expand = assignment.unsqueeze(1).expand(-1, 3) + + assigned_center_targets = center_targets[assignment] + assigned_size_targets = size_targets[assignment] + + dir_class_targets = dir_class_targets[assignment] + dir_res_targets = dir_res_targets[assignment] + dir_res_targets /= (np.pi / self.num_dir_bins) + + size_class_targets = size_class_targets[assignment] + size_res_targets = \ + torch.gather(size_res_targets, 0, assignment_expand) + one_hot_size_targets = gt_bboxes_3d.tensor.new_zeros( + (num_candidate, self.num_sizes)) + one_hot_size_targets.scatter_(1, size_class_targets.unsqueeze(-1), 1) + one_hot_size_targets = one_hot_size_targets.unsqueeze(-1).expand( + -1, -1, 3) # (num_candidate,num_size_cluster,3) + mean_sizes = size_res_targets.new_tensor( + self.bbox_coder.mean_sizes).unsqueeze(0) + pos_mean_sizes = torch.sum(one_hot_size_targets * mean_sizes, 1) + size_res_targets /= pos_mean_sizes + + mask_targets = gt_labels_3d[assignment].long() + + objectness_masks = points.new_ones((num_candidate)) + + return (sampling_targets, assigned_size_targets, size_class_targets, + size_res_targets, dir_class_targets, dir_res_targets, + center_targets, assigned_center_targets, mask_targets, + objectness_targets, objectness_masks) + + def predict(self, points: List[torch.Tensor], + feats_dict: Dict[str, torch.Tensor], + 
batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[InstanceData]: + """ + Args: + points (list[tensor]): Point clouds of multiple samples. + feats_dict (dict): Features from FPN or backbone. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes meta information of data. + + Returns: + list[:obj:`InstanceData`]: List of processed predictions. Each + InstanceData contains 3d Bounding boxes and corresponding + scores and labels. + """ + preds_dict = self(feats_dict) + batch_size = len(batch_data_samples) + batch_input_metas = [] + for batch_index in range(batch_size): + metainfo = batch_data_samples[batch_index].metainfo + batch_input_metas.append(metainfo) + + results_list = self.predict_by_feat(points, preds_dict, + batch_input_metas, **kwargs) + return results_list + + def predict_by_feat(self, + points: List[torch.Tensor], + bbox_preds_dict: dict, + batch_input_metas: List[dict], + use_nms: bool = True, + **kwargs) -> List[InstanceData]: + """Generate bboxes from vote head predictions. + + Args: + points (List[torch.Tensor]): Input points of multiple samples. + bbox_preds_dict (dict): Predictions from groupfree3d head. + batch_input_metas (list[dict]): Each item + contains the meta information of each sample. + use_nms (bool): Whether to apply NMS, skip nms postprocessing + while using vote head in rpn stage. + + Returns: + list[:obj:`InstanceData`]: List of processed predictions. Each + InstanceData cantains 3d Bounding boxes and corresponding + scores and labels. + """ + # support multi-stage predictions + assert self.test_cfg['prediction_stages'] in \ + ['last', 'all', 'last_three'] + + if self.test_cfg['prediction_stages'] == 'last': + prefixes = [f's{self.num_decoder_layers - 1}.'] + elif self.test_cfg['prediction_stages'] == 'all': + prefixes = ['proposal.'] + \ + [f's{i}.' for i in range(self.num_decoder_layers)] + elif self.test_cfg['prediction_stages'] == 'last_three': + prefixes = [ + f's{i}.' for i in range(self.num_decoder_layers - + 3, self.num_decoder_layers) + ] + else: + raise NotImplementedError + + obj_scores = list() + sem_scores = list() + bbox3d = list() + for prefix in prefixes: + # decode boxes + obj_score = bbox_preds_dict[f'{prefix}obj_scores'][..., + -1].sigmoid() + sem_score = bbox_preds_dict[f'{prefix}sem_scores'].softmax(-1) + bbox = self.bbox_coder.decode(bbox_preds_dict, prefix) + obj_scores.append(obj_score) + sem_scores.append(sem_score) + bbox3d.append(bbox) + + obj_scores = torch.cat(obj_scores, dim=1) + sem_scores = torch.cat(sem_scores, dim=1) + bbox3d = torch.cat(bbox3d, dim=1) + stack_points = torch.stack(points) + results_list = list() + if use_nms: + batch_size = bbox3d.shape[0] + temp_results = InstanceData() + for b in range(batch_size): + bbox_selected, score_selected, labels = \ + self.multiclass_nms_single(obj_scores[b], + sem_scores[b], + bbox3d[b], + stack_points[b, ..., :3], + batch_input_metas[b]) + bbox = batch_input_metas[b]['box_type_3d']( + bbox_selected, + box_dim=bbox_selected.shape[-1], + with_yaw=self.bbox_coder.with_rot) + temp_results.bboxes_3d = bbox + temp_results.scores_3d = score_selected + temp_results.labels_3d = labels + results_list.append(temp_results) + return results_list + else: + return bbox3d + + def multiclass_nms_single(self, obj_scores, sem_scores, bbox, points, + input_meta): + """Multi-class nms in single batch. + + Args: + obj_scores (torch.Tensor): Objectness score of bounding boxes. + sem_scores (torch.Tensor): semantic class score of bounding boxes. 
+ bbox (torch.Tensor): Predicted bounding boxes. + points (torch.Tensor): Input points. + input_meta (dict): Point cloud and image's meta info. + + Returns: + tuple[torch.Tensor]: Bounding boxes, scores and labels. + """ + bbox = input_meta['box_type_3d']( + bbox, + box_dim=bbox.shape[-1], + with_yaw=self.bbox_coder.with_rot, + origin=(0.5, 0.5, 0.5)) + box_indices = bbox.points_in_boxes_all(points) + + corner3d = bbox.corners + minmax_box3d = corner3d.new(torch.Size((corner3d.shape[0], 6))) + minmax_box3d[:, :3] = torch.min(corner3d, dim=1)[0] + minmax_box3d[:, 3:] = torch.max(corner3d, dim=1)[0] + + nonempty_box_mask = box_indices.T.sum(1) > 5 + + bbox_classes = torch.argmax(sem_scores, -1) + nms_selected = aligned_3d_nms(minmax_box3d[nonempty_box_mask], + obj_scores[nonempty_box_mask], + bbox_classes[nonempty_box_mask], + self.test_cfg.nms_thr) + + # filter empty boxes and boxes with low score + scores_mask = (obj_scores > self.test_cfg.score_thr) + nonempty_box_inds = torch.nonzero( + nonempty_box_mask, as_tuple=False).flatten() + nonempty_mask = torch.zeros_like(bbox_classes).scatter( + 0, nonempty_box_inds[nms_selected], 1) + selected = (nonempty_mask.bool() & scores_mask.bool()) + + if self.test_cfg.per_class_proposal: + bbox_selected, score_selected, labels = [], [], [] + for k in range(sem_scores.shape[-1]): + bbox_selected.append(bbox[selected].tensor) + score_selected.append(obj_scores[selected] * + sem_scores[selected][:, k]) + labels.append( + torch.zeros_like(bbox_classes[selected]).fill_(k)) + bbox_selected = torch.cat(bbox_selected, 0) + score_selected = torch.cat(score_selected, 0) + labels = torch.cat(labels, 0) + else: + bbox_selected = bbox[selected].tensor + score_selected = obj_scores[selected] + labels = bbox_classes[selected] + + return bbox_selected, score_selected, labels diff --git a/mmdet3d/models/dense_heads/imvoxel_head.py b/mmdet3d/models/dense_heads/imvoxel_head.py new file mode 100755 index 0000000..948cb8a --- /dev/null +++ b/mmdet3d/models/dense_heads/imvoxel_head.py @@ -0,0 +1,696 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple + +import torch +from mmcv.cnn import Scale +from mmcv.ops import nms3d, nms3d_normal +from mmdet.models.utils import multi_apply +from mmdet.utils import reduce_mean +from mmengine.config import ConfigDict +from mmengine.model import BaseModule, bias_init_with_prob, normal_init +from mmengine.structures import InstanceData +from torch import Tensor, nn + +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures.bbox_3d.utils import rotation_3d_in_axis +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils.typing_utils import (ConfigType, InstanceList, + OptConfigType, OptInstanceList) + + +@MODELS.register_module() +class ImVoxelHead(BaseModule): + r"""`ImVoxelNet`_ head for indoor + datasets. + + Args: + n_classes (int): Number of classes. + n_levels (int): Number of feature levels. + n_channels (int): Number of channels in input tensors. + n_reg_outs (int): Number of regression layer channels. + pts_assign_threshold (int): Min number of location per box to + be assigned with. + pts_center_threshold (int): Max number of locations per box to + be assigned with. + center_loss (dict, optional): Config of centerness loss. + Default: dict(type='CrossEntropyLoss', use_sigmoid=True). + bbox_loss (dict, optional): Config of bbox loss. + Default: dict(type='RotatedIoU3DLoss'). + cls_loss (dict, optional): Config of classification loss. 
+ Default: dict(type='FocalLoss'). + train_cfg (dict, optional): Config for train stage. Defaults to None. + test_cfg (dict, optional): Config for test stage. Defaults to None. + init_cfg (dict, optional): Config for weight initialization. + Defaults to None. + """ + + def __init__(self, + n_classes: int, + n_levels: int, + n_channels: int, + n_reg_outs: int, + pts_assign_threshold: int, + pts_center_threshold: int, + prior_generator: ConfigType, + center_loss: ConfigType = dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True), + bbox_loss: ConfigType = dict(type='RotatedIoU3DLoss'), + cls_loss: ConfigType = dict(type='mmdet.FocalLoss'), + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptConfigType = None): + super(ImVoxelHead, self).__init__(init_cfg) + self.pts_assign_threshold = pts_assign_threshold + self.pts_center_threshold = pts_center_threshold + self.prior_generator = TASK_UTILS.build(prior_generator) + self.center_loss = MODELS.build(center_loss) + self.bbox_loss = MODELS.build(bbox_loss) + self.cls_loss = MODELS.build(cls_loss) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self._init_layers(n_channels, n_reg_outs, n_classes, n_levels) + + def _init_layers(self, n_channels, n_reg_outs, n_classes, n_levels): + """Initialize neural network layers of the head.""" + self.conv_center = nn.Conv3d(n_channels, 1, 3, padding=1, bias=False) + self.conv_reg = nn.Conv3d( + n_channels, n_reg_outs, 3, padding=1, bias=False) + self.conv_cls = nn.Conv3d(n_channels, n_classes, 3, padding=1) + self.scales = nn.ModuleList([Scale(1.) for _ in range(n_levels)]) + + def init_weights(self): + """Initialize all layer weights.""" + normal_init(self.conv_center, std=.01) + normal_init(self.conv_reg, std=.01) + normal_init(self.conv_cls, std=.01, bias=bias_init_with_prob(.01)) + + def _forward_single(self, x: Tensor, scale: Scale): + """Forward pass per level. + + Args: + x (Tensor): Per level 3d neck output tensor. + scale (mmcv.cnn.Scale): Per level multiplication weight. + + Returns: + tuple[Tensor]: Centerness, bbox and classification predictions. + """ + reg_final = self.conv_reg(x) + reg_distance = torch.exp(scale(reg_final[:, :6])) + reg_angle = reg_final[:, 6:] + bbox_pred = torch.cat((reg_distance, reg_angle), dim=1) + return self.conv_center(x), bbox_pred, self.conv_cls(x) + + def forward(self, x: Tensor): + """Forward function. + + Args: + x (list[Tensor]): Features from 3d neck. + + Returns: + tuple[Tensor]: Centerness, bbox and classification predictions. + """ + return multi_apply(self._forward_single, x, self.scales) + + def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList, + **kwargs) -> dict: + """Perform forward propagation and loss calculation of the detection + head on the features of the upstream network. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + + Returns: + dict: A dictionary of loss components. 
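+
+            The dict contains ``center_loss``, ``bbox_loss`` and ``cls_loss``,
+            each averaged over the samples in the batch.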
+ """ + valid_pred = x[-1] + outs = self(x[:-1]) + + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + + loss_inputs = outs + (valid_pred, batch_gt_instances_3d, + batch_input_metas, batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + return losses + + def loss_and_predict(self, + x: Tuple[Tensor], + batch_data_samples: SampleList, + proposal_cfg: Optional[ConfigDict] = None, + **kwargs) -> Tuple[dict, InstanceList]: + """Perform forward propagation of the head, then calculate loss and + predictions from the features and data samples. + + Args: + x (tuple[Tensor]): Features from FPN. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each image and + corresponding annotations. + proposal_cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + + Returns: + tuple: the return value is a tuple contains: + + - losses: (dict[str, Tensor]): A dictionary of loss components. + - predictions (list[:obj:`InstanceData`]): Detection + results of each image after the post process. + """ + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + + valid_pred = x[-1] + outs = self(x[:-1]) + + loss_inputs = outs + (valid_pred, batch_gt_instances_3d, + batch_input_metas, batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + + predictions = self.predict_by_feat( + *outs, + valid_pred=valid_pred, + batch_input_metas=batch_input_metas, + cfg=proposal_cfg) + return losses, predictions + + def predict(self, + x: Tuple[Tensor], + batch_data_samples: SampleList, + rescale: bool = False) -> InstanceList: + """Perform forward propagation of the 3D detection head and predict + detection results on the features of the upstream network. + + Args: + x (tuple[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_pts_panoptic_seg` and + `gt_pts_sem_seg`. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + valid_pred = x[-1] + outs = self(x[:-1]) + predictions = self.predict_by_feat( + *outs, + valid_pred=valid_pred, + batch_input_metas=batch_input_metas, + rescale=rescale) + return predictions + + def _loss_by_feat_single(self, center_preds, bbox_preds, cls_preds, + valid_preds, input_meta, gt_bboxes, gt_labels): + """Per scene loss function. 
+ + Args: + center_preds (list[Tensor]): Centerness predictions for all levels. + bbox_preds (list[Tensor]): Bbox predictions for all levels. + cls_preds (list[Tensor]): Classification predictions for all + levels. + valid_preds (list[Tensor]): Valid mask predictions for all levels. + input_meta (dict): Scene meta info. + gt_bboxes (BaseInstance3DBoxes): Ground truth boxes. + gt_labels (Tensor): Ground truth labels. + + Returns: + tuple[Tensor]: Centerness, bbox, and classification loss values. + """ + points = self._get_points(center_preds) + center_targets, bbox_targets, cls_targets = self._get_targets( + points, gt_bboxes, gt_labels) + + center_preds = torch.cat( + [x.permute(1, 2, 3, 0).reshape(-1) for x in center_preds]) + bbox_preds = torch.cat([ + x.permute(1, 2, 3, 0).reshape(-1, x.shape[0]) for x in bbox_preds + ]) + cls_preds = torch.cat( + [x.permute(1, 2, 3, 0).reshape(-1, x.shape[0]) for x in cls_preds]) + valid_preds = torch.cat( + [x.permute(1, 2, 3, 0).reshape(-1) for x in valid_preds]) + points = torch.cat(points) + + # cls loss + pos_inds = torch.nonzero( + torch.logical_and(cls_targets >= 0, valid_preds)).squeeze(1) + n_pos = points.new_tensor(len(pos_inds)) + n_pos = max(reduce_mean(n_pos), 1.) + if torch.any(valid_preds): + cls_loss = self.cls_loss( + cls_preds[valid_preds], + cls_targets[valid_preds], + avg_factor=n_pos) + else: + cls_loss = cls_preds[valid_preds].sum() + + # bbox and centerness losses + pos_center_preds = center_preds[pos_inds] + pos_bbox_preds = bbox_preds[pos_inds] + if len(pos_inds) > 0: + pos_center_targets = center_targets[pos_inds] + pos_bbox_targets = bbox_targets[pos_inds] + pos_points = points[pos_inds] + center_loss = self.center_loss( + pos_center_preds, pos_center_targets, avg_factor=n_pos) + bbox_loss = self.bbox_loss( + self._bbox_pred_to_bbox(pos_points, pos_bbox_preds), + pos_bbox_targets, + weight=pos_center_targets, + avg_factor=pos_center_targets.sum()) + else: + center_loss = pos_center_preds.sum() + bbox_loss = pos_bbox_preds.sum() + return center_loss, bbox_loss, cls_loss + + def loss_by_feat(self, + center_preds: List[List[Tensor]], + bbox_preds: List[List[Tensor]], + cls_preds: List[List[Tensor]], + valid_pred: Tensor, + batch_gt_instances_3d: InstanceList, + batch_input_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None, + **kwargs) -> dict: + """Per scene loss function. + + Args: + center_preds (list[list[Tensor]]): Centerness predictions for + all scenes. The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + bbox_preds (list[list[Tensor]]): Bbox predictions for all scenes. + The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + cls_preds (list[list[Tensor]]): Classification predictions for all + scenes. The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + valid_pred (Tensor): Valid mask prediction for all scenes. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、` + `labels_3d``、``depths``、``centers_2d`` and attributes. + batch_input_metas (list[dict]): Meta information of each image, + e.g., image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. 
+ + Returns: + dict: Centerness, bbox, and classification loss values. + """ + valid_preds = self._upsample_valid_preds(valid_pred, center_preds) + center_losses, bbox_losses, cls_losses = [], [], [] + for i in range(len(batch_input_metas)): + center_loss, bbox_loss, cls_loss = self._loss_by_feat_single( + center_preds=[x[i] for x in center_preds], + bbox_preds=[x[i] for x in bbox_preds], + cls_preds=[x[i] for x in cls_preds], + valid_preds=[x[i] for x in valid_preds], + input_meta=batch_input_metas[i], + gt_bboxes=batch_gt_instances_3d[i].bboxes_3d, + gt_labels=batch_gt_instances_3d[i].labels_3d) + center_losses.append(center_loss) + bbox_losses.append(bbox_loss) + cls_losses.append(cls_loss) + return dict( + center_loss=torch.mean(torch.stack(center_losses)), + bbox_loss=torch.mean(torch.stack(bbox_losses)), + cls_loss=torch.mean(torch.stack(cls_losses))) + + def _predict_by_feat_single(self, center_preds: List[Tensor], + bbox_preds: List[Tensor], + cls_preds: List[Tensor], + valid_preds: List[Tensor], + input_meta: dict) -> InstanceData: + """Generate boxes for single sample. + + Args: + center_preds (list[Tensor]): Centerness predictions for all levels. + bbox_preds (list[Tensor]): Bbox predictions for all levels. + cls_preds (list[Tensor]): Classification predictions for all + levels. + valid_preds (tuple[Tensor]): Upsampled valid masks for all feature + levels. + input_meta (dict): Scene meta info. + + Returns: + tuple[Tensor]: Predicted bounding boxes, scores and labels. + """ + points = self._get_points(center_preds) + mlvl_bboxes, mlvl_scores = [], [] + for center_pred, bbox_pred, cls_pred, valid_pred, point in zip( + center_preds, bbox_preds, cls_preds, valid_preds, points): + center_pred = center_pred.permute(1, 2, 3, 0).reshape(-1, 1) + bbox_pred = bbox_pred.permute(1, 2, 3, + 0).reshape(-1, bbox_pred.shape[0]) + cls_pred = cls_pred.permute(1, 2, 3, + 0).reshape(-1, cls_pred.shape[0]) + valid_pred = valid_pred.permute(1, 2, 3, 0).reshape(-1, 1) + + scores = cls_pred.sigmoid() * center_pred.sigmoid() * valid_pred + max_scores, _ = scores.max(dim=1) + + if len(scores) > self.test_cfg.nms_pre > 0: + _, ids = max_scores.topk(self.test_cfg.nms_pre) + bbox_pred = bbox_pred[ids] + scores = scores[ids] + point = point[ids] + + bboxes = self._bbox_pred_to_bbox(point, bbox_pred) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + + bboxes = torch.cat(mlvl_bboxes) + scores = torch.cat(mlvl_scores) + bboxes, scores, labels = self._single_scene_multiclass_nms( + bboxes, scores, input_meta) + + bboxes = input_meta['box_type_3d']( + bboxes, + box_dim=bboxes.shape[1], + with_yaw=bboxes.shape[1] == 7, + origin=(.5, .5, .5)) + + results = InstanceData() + results.bboxes_3d = bboxes + results.scores_3d = scores + results.labels_3d = labels + return results + + def predict_by_feat(self, center_preds: List[List[Tensor]], + bbox_preds: List[List[Tensor]], + cls_preds: List[List[Tensor]], valid_pred: Tensor, + batch_input_metas: List[dict], + **kwargs) -> List[InstanceData]: + """Generate boxes for all scenes. + + Args: + center_preds (list[list[Tensor]]): Centerness predictions for + all scenes. + bbox_preds (list[list[Tensor]]): Bbox predictions for all scenes. + cls_preds (list[list[Tensor]]): Classification predictions for all + scenes. + valid_pred (Tensor): Valid mask prediction for all scenes. + batch_input_metas (list[dict]): Meta infos for all scenes. + + Returns: + list[tuple[Tensor]]: Predicted bboxes, scores, and labels for + all scenes. 
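+
+            Each per-scene result is packed into an :obj:`InstanceData` with
+            ``bboxes_3d``, ``scores_3d`` and ``labels_3d`` fields, see
+            ``_predict_by_feat_single``.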
+ """ + valid_preds = self._upsample_valid_preds(valid_pred, center_preds) + results = [] + for i in range(len(batch_input_metas)): + results.append( + self._predict_by_feat_single( + center_preds=[x[i] for x in center_preds], + bbox_preds=[x[i] for x in bbox_preds], + cls_preds=[x[i] for x in cls_preds], + valid_preds=[x[i] for x in valid_preds], + input_meta=batch_input_metas[i])) + return results + + @staticmethod + def _upsample_valid_preds(valid_pred, features): + """Upsample valid mask predictions. + + Args: + valid_pred (Tensor): Valid mask prediction. + features (Tensor): Feature tensor. + + Returns: + tuple[Tensor]: Upsampled valid masks for all feature levels. + """ + return [ + nn.Upsample(size=x.shape[-3:], + mode='trilinear')(valid_pred).round().bool() + for x in features + ] + + def _get_points(self, features): + """Generate final locations. + + Args: + features (list[Tensor]): Feature tensors for all feature levels. + + Returns: + list(Tensor): Final locations for all feature levels. + """ + points = [] + for x in features: + n_voxels = x.size()[-3:][::-1] + points.append( + self.prior_generator.grid_anchors( + [n_voxels], + device=x.device)[0][:, :3].reshape(n_voxels + + (3, )).permute( + 2, 1, 0, + 3).reshape(-1, 3)) + return points + + @staticmethod + def _bbox_pred_to_bbox(points, bbox_pred): + """Transform predicted bbox parameters to bbox. + + Args: + points (Tensor): Final locations of shape (N, 3). + bbox_pred (Tensor): Predicted bbox parameters of shape (N, 7). + + Returns: + Tensor: Transformed 3D box of shape (N, 7). + """ + if bbox_pred.shape[0] == 0: + return bbox_pred + + # dx_min, dx_max, dy_min, dy_max, dz_min, dz_max, alpha -> + # x_center, y_center, z_center, w, l, h, alpha + shift = torch.stack(((bbox_pred[:, 1] - bbox_pred[:, 0]) / 2, + (bbox_pred[:, 3] - bbox_pred[:, 2]) / 2, + (bbox_pred[:, 5] - bbox_pred[:, 4]) / 2), + dim=-1).view(-1, 1, 3) + shift = rotation_3d_in_axis(shift, bbox_pred[:, 6], axis=2)[:, 0, :] + center = points + shift + size = torch.stack( + (bbox_pred[:, 0] + bbox_pred[:, 1], bbox_pred[:, 2] + + bbox_pred[:, 3], bbox_pred[:, 4] + bbox_pred[:, 5]), + dim=-1) + return torch.cat((center, size, bbox_pred[:, 6:7]), dim=-1) + + # The function is directly copied from FCAF3DHead. + @staticmethod + def _get_face_distances(points, boxes): + """Calculate distances from point to box faces. + + Args: + points (Tensor): Final locations of shape (N_points, N_boxes, 3). + boxes (Tensor): 3D boxes of shape (N_points, N_boxes, 7) + + Returns: + Tensor: Face distances of shape (N_points, N_boxes, 6), + (dx_min, dx_max, dy_min, dy_max, dz_min, dz_max). + """ + shift = torch.stack( + (points[..., 0] - boxes[..., 0], points[..., 1] - boxes[..., 1], + points[..., 2] - boxes[..., 2]), + dim=-1).permute(1, 0, 2) + shift = rotation_3d_in_axis( + shift, -boxes[0, :, 6], axis=2).permute(1, 0, 2) + centers = boxes[..., :3] + shift + dx_min = centers[..., 0] - boxes[..., 0] + boxes[..., 3] / 2 + dx_max = boxes[..., 0] + boxes[..., 3] / 2 - centers[..., 0] + dy_min = centers[..., 1] - boxes[..., 1] + boxes[..., 4] / 2 + dy_max = boxes[..., 1] + boxes[..., 4] / 2 - centers[..., 1] + dz_min = centers[..., 2] - boxes[..., 2] + boxes[..., 5] / 2 + dz_max = boxes[..., 2] + boxes[..., 5] / 2 - centers[..., 2] + return torch.stack((dx_min, dx_max, dy_min, dy_max, dz_min, dz_max), + dim=-1) + + # The function is directly copied from FCAF3DHead. + @staticmethod + def _get_centerness(face_distances): + """Compute point centerness w.r.t containing box. 
+ + Args: + face_distances (Tensor): Face distances of shape (B, N, 6), + (dx_min, dx_max, dy_min, dy_max, dz_min, dz_max). + + Returns: + Tensor: Centerness of shape (B, N). + """ + x_dims = face_distances[..., [0, 1]] + y_dims = face_distances[..., [2, 3]] + z_dims = face_distances[..., [4, 5]] + centerness_targets = x_dims.min(dim=-1)[0] / x_dims.max(dim=-1)[0] * \ + y_dims.min(dim=-1)[0] / y_dims.max(dim=-1)[0] * \ + z_dims.min(dim=-1)[0] / z_dims.max(dim=-1)[0] + return torch.sqrt(centerness_targets) + + # The function is directly copied from FCAF3DHead. + @torch.no_grad() + def _get_targets(self, points, gt_bboxes, gt_labels): + """Compute targets for final locations for a single scene. + + Args: + points (list[Tensor]): Final locations for all levels. + gt_bboxes (BaseInstance3DBoxes): Ground truth boxes. + gt_labels (Tensor): Ground truth labels. + + Returns: + tuple[Tensor]: Centerness, bbox and classification + targets for all locations. + """ + float_max = points[0].new_tensor(1e8) + n_levels = len(points) + levels = torch.cat([ + points[i].new_tensor(i).expand(len(points[i])) + for i in range(len(points)) + ]) + points = torch.cat(points) + gt_bboxes = gt_bboxes.to(points.device) + n_points = len(points) + n_boxes = len(gt_bboxes) + volumes = gt_bboxes.volume.unsqueeze(0).expand(n_points, n_boxes) + + # condition 1: point inside box + boxes = torch.cat((gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), + dim=1) + boxes = boxes.expand(n_points, n_boxes, 7) + points = points.unsqueeze(1).expand(n_points, n_boxes, 3) + face_distances = self._get_face_distances(points, boxes) + inside_box_condition = face_distances.min(dim=-1).values > 0 + + # condition 2: positive points per level >= limit + # calculate positive points per scale + n_pos_points_per_level = [] + for i in range(n_levels): + n_pos_points_per_level.append( + torch.sum(inside_box_condition[levels == i], dim=0)) + # find best level + n_pos_points_per_level = torch.stack(n_pos_points_per_level, dim=0) + lower_limit_mask = n_pos_points_per_level < self.pts_assign_threshold + lower_index = torch.argmax(lower_limit_mask.int(), dim=0) - 1 + lower_index = torch.where(lower_index < 0, 0, lower_index) + all_upper_limit_mask = torch.all( + torch.logical_not(lower_limit_mask), dim=0) + best_level = torch.where(all_upper_limit_mask, n_levels - 1, + lower_index) + # keep only points with best level + best_level = best_level.expand(n_points, n_boxes) + levels = torch.unsqueeze(levels, 1).expand(n_points, n_boxes) + level_condition = best_level == levels + + # condition 3: limit topk points per box by centerness + centerness = self._get_centerness(face_distances) + centerness = torch.where(inside_box_condition, centerness, + torch.ones_like(centerness) * -1) + centerness = torch.where(level_condition, centerness, + torch.ones_like(centerness) * -1) + top_centerness = torch.topk( + centerness, + min(self.pts_center_threshold + 1, len(centerness)), + dim=0).values[-1] + topk_condition = centerness > top_centerness.unsqueeze(0) + + # condition 4: min volume box per point + volumes = torch.where(inside_box_condition, volumes, float_max) + volumes = torch.where(level_condition, volumes, float_max) + volumes = torch.where(topk_condition, volumes, float_max) + min_volumes, min_inds = volumes.min(dim=1) + + center_targets = centerness[torch.arange(n_points), min_inds] + bbox_targets = boxes[torch.arange(n_points), min_inds] + if not gt_bboxes.with_yaw: + bbox_targets = bbox_targets[:, :-1] + cls_targets = gt_labels[min_inds] + cls_targets = 
torch.where(min_volumes == float_max, -1, cls_targets) + return center_targets, bbox_targets, cls_targets + + # Originally ImVoxelNet utilizes 2d nms as mmdetection3d didn't + # support 3d nms. But since mmcv==1.5.2 we simply use nms3d here. + # The function is directly copied from FCAF3DHead. + def _single_scene_multiclass_nms(self, bboxes, scores, input_meta): + """Multi-class nms for a single scene. + + Args: + bboxes (Tensor): Predicted boxes of shape (N_boxes, 6) or + (N_boxes, 7). + scores (Tensor): Predicted scores of shape (N_boxes, N_classes). + input_meta (dict): Scene meta data. + + Returns: + tuple[Tensor]: Predicted bboxes, scores and labels. + """ + n_classes = scores.shape[1] + with_yaw = bboxes.shape[1] == 7 + nms_bboxes, nms_scores, nms_labels = [], [], [] + for i in range(n_classes): + ids = scores[:, i] > self.test_cfg.score_thr + if not ids.any(): + continue + + class_scores = scores[ids, i] + class_bboxes = bboxes[ids] + if with_yaw: + nms_function = nms3d + else: + class_bboxes = torch.cat( + (class_bboxes, torch.zeros_like(class_bboxes[:, :1])), + dim=1) + nms_function = nms3d_normal + + nms_ids = nms_function(class_bboxes, class_scores, + self.test_cfg.iou_thr) + nms_bboxes.append(class_bboxes[nms_ids]) + nms_scores.append(class_scores[nms_ids]) + nms_labels.append( + bboxes.new_full( + class_scores[nms_ids].shape, i, dtype=torch.long)) + + if len(nms_bboxes): + nms_bboxes = torch.cat(nms_bboxes, dim=0) + nms_scores = torch.cat(nms_scores, dim=0) + nms_labels = torch.cat(nms_labels, dim=0) + else: + nms_bboxes = bboxes.new_zeros((0, bboxes.shape[1])) + nms_scores = bboxes.new_zeros((0, )) + nms_labels = bboxes.new_zeros((0, )) + + if with_yaw: + box_dim = 7 + else: + box_dim = 6 + nms_bboxes = nms_bboxes[:, :box_dim] + + return nms_bboxes, nms_scores, nms_labels diff --git a/mmdet3d/models/dense_heads/monoflex_head.py b/mmdet3d/models/dense_heads/monoflex_head.py new file mode 100755 index 0000000..72f0257 --- /dev/null +++ b/mmdet3d/models/dense_heads/monoflex_head.py @@ -0,0 +1,804 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +from mmdet.models.utils import (gaussian_radius, gen_gaussian_target, + multi_apply) +from mmdet.models.utils.gaussian_target import (get_local_maximum, + get_topk_from_heatmap, + transpose_and_gather_feat) +from mmengine.config import ConfigDict +from mmengine.model import xavier_init +from mmengine.structures import InstanceData +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.layers import EdgeFusionModule +from mmdet3d.models.task_modules.builder import build_bbox_coder +from mmdet3d.models.utils import (filter_outside_objs, get_edge_indices, + get_ellip_gaussian_2D, get_keypoints, + handle_proj_objs) +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from .anchor_free_mono3d_head import AnchorFreeMono3DHead + + +@MODELS.register_module() +class MonoFlexHead(AnchorFreeMono3DHead): + r"""MonoFlex head used in `MonoFlex `_ + + .. 
code-block:: none + + / --> 3 x 3 conv --> 1 x 1 conv --> [edge fusion] --> cls + | + | --> 3 x 3 conv --> 1 x 1 conv --> 2d bbox + | + | --> 3 x 3 conv --> 1 x 1 conv --> [edge fusion] --> 2d offsets + | + | --> 3 x 3 conv --> 1 x 1 conv --> keypoints offsets + | + | --> 3 x 3 conv --> 1 x 1 conv --> keypoints uncertainty + feature + | --> 3 x 3 conv --> 1 x 1 conv --> keypoints uncertainty + | + | --> 3 x 3 conv --> 1 x 1 conv --> 3d dimensions + | + | |--- 1 x 1 conv --> ori cls + | --> 3 x 3 conv --| + | |--- 1 x 1 conv --> ori offsets + | + | --> 3 x 3 conv --> 1 x 1 conv --> depth + | + \ --> 3 x 3 conv --> 1 x 1 conv --> depth uncertainty + + Args: + use_edge_fusion (bool): Whether to use edge fusion module while + feature extraction. + edge_fusion_inds (list[tuple]): Indices of feature to use edge fusion. + edge_heatmap_ratio (float): Ratio of generating target heatmap. + filter_outside_objs (bool, optional): Whether to filter the + outside objects. Default: True. + loss_cls (dict, optional): Config of classification loss. + Default: loss_cls=dict(type='GaussionFocalLoss', loss_weight=1.0). + loss_bbox (dict, optional): Config of localization loss. + Default: loss_bbox=dict(type='IOULoss', loss_weight=10.0). + loss_dir (dict, optional): Config of direction classification loss. + Default: dict(type='MultibinLoss', loss_weight=0.1). + loss_keypoints (dict, optional): Config of keypoints loss. + Default: dict(type='L1Loss', loss_weight=0.1). + loss_dims: (dict, optional): Config of dimensions loss. + Default: dict(type='L1Loss', loss_weight=0.1). + loss_offsets_2d: (dict, optional): Config of offsets_2d loss. + Default: dict(type='L1Loss', loss_weight=0.1). + loss_direct_depth: (dict, optional): Config of directly regression depth loss. + Default: dict(type='L1Loss', loss_weight=0.1). + loss_keypoints_depth: (dict, optional): Config of keypoints decoded depth loss. + Default: dict(type='L1Loss', loss_weight=0.1). + loss_combined_depth: (dict, optional): Config of combined depth loss. + Default: dict(type='L1Loss', loss_weight=0.1). + loss_attr (dict, optional): Config of attribute classification loss. + In MonoFlex, Default: None. + bbox_coder (dict, optional): Bbox coder for encoding and decoding boxes. + Default: dict(type='MonoFlexCoder', code_size=7). + norm_cfg (dict, optional): Dictionary to construct and config norm layer. + Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). + init_cfg (dict): Initialization config dict. Default: None. 
+ """ # noqa: E501 + + def __init__(self, + num_classes: int, + in_channels: int, + use_edge_fusion: bool, + edge_fusion_inds: List[Tuple], + edge_heatmap_ratio: float, + filter_outside_objs: bool = True, + loss_cls: dict = dict( + type='mmdet.GaussianFocalLoss', loss_weight=1.0), + loss_bbox: dict = dict(type='mmdet.IoULoss', loss_weight=0.1), + loss_dir: dict = dict(type='MultiBinLoss', loss_weight=0.1), + loss_keypoints: dict = dict( + type='mmdet.L1Loss', loss_weight=0.1), + loss_dims: dict = dict(type='mmdet.L1Loss', loss_weight=0.1), + loss_offsets_2d: dict = dict( + type='mmdet.L1Loss', loss_weight=0.1), + loss_direct_depth: dict = dict( + type='mmdet.L1Loss', loss_weight=0.1), + loss_keypoints_depth: dict = dict( + type='mmdet.L1Loss', loss_weight=0.1), + loss_combined_depth: dict = dict( + type='mmdet.L1Loss', loss_weight=0.1), + loss_attr: Optional[dict] = None, + bbox_coder: dict = dict(type='MonoFlexCoder', code_size=7), + norm_cfg: Union[ConfigDict, dict] = dict(type='BN'), + init_cfg: Optional[Union[ConfigDict, dict]] = None, + init_bias: float = -2.19, + **kwargs) -> None: + self.use_edge_fusion = use_edge_fusion + self.edge_fusion_inds = edge_fusion_inds + super().__init__( + num_classes, + in_channels, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + loss_dir=loss_dir, + loss_attr=loss_attr, + norm_cfg=norm_cfg, + init_cfg=init_cfg, + **kwargs) + self.filter_outside_objs = filter_outside_objs + self.edge_heatmap_ratio = edge_heatmap_ratio + self.init_bias = init_bias + self.loss_dir = MODELS.build(loss_dir) + self.loss_keypoints = MODELS.build(loss_keypoints) + self.loss_dims = MODELS.build(loss_dims) + self.loss_offsets_2d = MODELS.build(loss_offsets_2d) + self.loss_direct_depth = MODELS.build(loss_direct_depth) + self.loss_keypoints_depth = MODELS.build(loss_keypoints_depth) + self.loss_combined_depth = MODELS.build(loss_combined_depth) + self.bbox_coder = build_bbox_coder(bbox_coder) + + def _init_edge_module(self): + """Initialize edge fusion module for feature extraction.""" + self.edge_fuse_cls = EdgeFusionModule(self.num_classes, 256) + for i in range(len(self.edge_fusion_inds)): + reg_inds, out_inds = self.edge_fusion_inds[i] + out_channels = self.group_reg_dims[reg_inds][out_inds] + fusion_layer = EdgeFusionModule(out_channels, 256) + layer_name = f'edge_fuse_reg_{reg_inds}_{out_inds}' + self.add_module(layer_name, fusion_layer) + + def init_weights(self): + """Initialize weights.""" + super().init_weights() + self.conv_cls.bias.data.fill_(self.init_bias) + xavier_init(self.conv_regs[4][0], gain=0.01) + xavier_init(self.conv_regs[7][0], gain=0.01) + for m in self.conv_regs.modules(): + if isinstance(m, nn.Conv2d): + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _init_predictor(self): + """Initialize predictor layers of the head.""" + self.conv_cls_prev = self._init_branch( + conv_channels=self.cls_branch, + conv_strides=(1, ) * len(self.cls_branch)) + self.conv_cls = nn.Conv2d(self.cls_branch[-1], self.cls_out_channels, + 1) + # init regression head + self.conv_reg_prevs = nn.ModuleList() + # init output head + self.conv_regs = nn.ModuleList() + # group_reg_dims: + # ((4, ), (2, ), (20, ), (3, ), (3, ), (8, 8), (1, ), (1, )) + for i in range(len(self.group_reg_dims)): + reg_dims = self.group_reg_dims[i] + reg_branch_channels = self.reg_branch[i] + out_channel = self.out_channels[i] + reg_list = nn.ModuleList() + if len(reg_branch_channels) > 0: + self.conv_reg_prevs.append( + self._init_branch( + conv_channels=reg_branch_channels, + conv_strides=(1, ) 
* len(reg_branch_channels)))
+                for reg_dim in reg_dims:
+                    reg_list.append(nn.Conv2d(out_channel, reg_dim, 1))
+                self.conv_regs.append(reg_list)
+            else:
+                self.conv_reg_prevs.append(None)
+                for reg_dim in reg_dims:
+                    reg_list.append(nn.Conv2d(self.feat_channels, reg_dim, 1))
+                self.conv_regs.append(reg_list)
+
+    def _init_layers(self):
+        """Initialize layers of the head."""
+        self._init_predictor()
+        if self.use_edge_fusion:
+            self._init_edge_module()
+
+    def loss(self, x: List[Tensor], batch_data_samples: List[Det3DDataSample],
+             **kwargs):
+        """Calculate losses from a batch of feature maps and data samples.
+
+        Args:
+            x (list[Tensor]): Features from FPN.
+            batch_data_samples (list[:obj:`Det3DDataSample`]): Each item
+                contains the meta information of each image and corresponding
+                annotations.
+
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+        """
+        batch_gt_instances_3d = []
+        batch_gt_instances = []
+        batch_gt_instances_ignore = []
+        batch_img_metas = []
+        for data_sample in batch_data_samples:
+            batch_img_metas.append(data_sample.metainfo)
+            batch_gt_instances_3d.append(data_sample.gt_instances_3d)
+            batch_gt_instances.append(data_sample.gt_instances)
+            batch_gt_instances_ignore.append(
+                data_sample.get('ignored_instances', None))
+
+        # monoflex head needs img_metas for feature extraction
+        outs = self(x, batch_img_metas)
+        loss_inputs = outs + (batch_gt_instances_3d, batch_img_metas,
+                              batch_gt_instances_ignore)
+        losses = self.loss_by_feat(*loss_inputs)
+
+        return losses
+
+    def forward(self, feats: List[Tensor], batch_img_metas: List[dict]):
+        """Forward features from the upstream network.
+
+        Args:
+            feats (list[Tensor]): Features from the upstream network, each is
+                a 4D-tensor.
+            batch_img_metas (list[dict]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+
+        Returns:
+            tuple:
+                cls_scores (list[Tensor]): Box scores for each scale level,
+                    each is a 4D-tensor, the channel number is
+                    num_points * num_classes.
+                bbox_preds (list[Tensor]): Box energies / deltas for each scale
+                    level, each is a 4D-tensor, the channel number is
+                    num_points * bbox_code_size.
+        """
+        mlvl_batch_img_metas = [batch_img_metas for i in range(len(feats))]
+        return multi_apply(self.forward_single, feats, mlvl_batch_img_metas)
+
+    def forward_single(self, x: Tensor, batch_img_metas: List[dict]):
+        """Forward features of a single scale level.
+
+        Args:
+            x (Tensor): Feature maps from a specific FPN feature level.
+            batch_img_metas (list[dict]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+ + Returns: + tuple: Scores for each class, bbox predictions. + """ + img_h, img_w = batch_img_metas[0]['pad_shape'][:2] + batch_size, _, feat_h, feat_w = x.shape + downsample_ratio = img_h / feat_h + + for conv_cls_prev_layer in self.conv_cls_prev: + cls_feat = conv_cls_prev_layer(x) + out_cls = self.conv_cls(cls_feat) + + if self.use_edge_fusion: + # calculate the edge indices for the batch data + edge_indices_list = get_edge_indices( + batch_img_metas, downsample_ratio, device=x.device) + edge_lens = [ + edge_indices.shape[0] for edge_indices in edge_indices_list + ] + max_edge_len = max(edge_lens) + edge_indices = x.new_zeros((batch_size, max_edge_len, 2), + dtype=torch.long) + for i in range(batch_size): + edge_indices[i, :edge_lens[i]] = edge_indices_list[i] + # cls feature map edge fusion + out_cls = self.edge_fuse_cls(cls_feat, out_cls, edge_indices, + edge_lens, feat_h, feat_w) + + bbox_pred = [] + + for i in range(len(self.group_reg_dims)): + reg_feat = x.clone() + # feature regression head + if len(self.reg_branch[i]) > 0: + for conv_reg_prev_layer in self.conv_reg_prevs[i]: + reg_feat = conv_reg_prev_layer(reg_feat) + + for j, conv_reg in enumerate(self.conv_regs[i]): + out_reg = conv_reg(reg_feat) + # Use Edge Fusion Module + if self.use_edge_fusion and (i, j) in self.edge_fusion_inds: + # reg feature map edge fusion + out_reg = getattr(self, 'edge_fuse_reg_{}_{}'.format( + i, j))(reg_feat, out_reg, edge_indices, edge_lens, + feat_h, feat_w) + bbox_pred.append(out_reg) + + bbox_pred = torch.cat(bbox_pred, dim=1) + cls_score = out_cls.sigmoid() # turn to 0-1 + cls_score = cls_score.clamp(min=1e-4, max=1 - 1e-4) + + return cls_score, bbox_pred + + def predict_by_feat(self, cls_scores: List[Tensor], + bbox_preds: List[Tensor], batch_img_metas: List[dict]): + """Generate bboxes from bbox head predictions. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level. + bbox_preds (list[Tensor]): Box regression for each scale. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Returns: + list[tuple[:obj:`CameraInstance3DBoxes`, Tensor, Tensor, None]]: + Each item in result_list is 4-tuple. + """ + assert len(cls_scores) == len(bbox_preds) == 1 + cam2imgs = torch.stack([ + cls_scores[0].new_tensor(input_meta['cam2img']) + for input_meta in batch_img_metas + ]) + batch_bboxes, batch_scores, batch_topk_labels = self._decode_heatmap( + cls_scores[0], + bbox_preds[0], + batch_img_metas, + cam2imgs=cam2imgs, + topk=100, + kernel=3) + + result_list = [] + for img_id in range(len(batch_img_metas)): + + bboxes = batch_bboxes[img_id] + scores = batch_scores[img_id] + labels = batch_topk_labels[img_id] + + keep_idx = scores > 0.25 + bboxes = bboxes[keep_idx] + scores = scores[keep_idx] + labels = labels[keep_idx] + + bboxes = batch_img_metas[img_id]['box_type_3d']( + bboxes, box_dim=self.bbox_code_size, origin=(0.5, 0.5, 0.5)) + attrs = None + + results = InstanceData() + results.bboxes_3d = bboxes + results.scores_3d = scores + results.labels_3d = labels + + if attrs is not None: + results.attr_labels = attrs + + result_list.append(results) + + return result_list + + def _decode_heatmap(self, + cls_score: Tensor, + reg_pred: Tensor, + batch_img_metas: List[dict], + cam2imgs: Tensor, + topk: int = 100, + kernel: int = 3): + """Transform outputs into detections raw bbox predictions. 
+ + Args: + class_score (Tensor): Center predict heatmap, + shape (B, num_classes, H, W). + reg_pred (Tensor): Box regression map. + shape (B, channel, H , W). + batch_img_metas (List[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cam2imgs (Tensor): Camera intrinsic matrix. + shape (N, 4, 4) + topk (int, optional): Get top k center keypoints from heatmap. + Default 100. + kernel (int, optional): Max pooling kernel for extract local + maximum pixels. Default 3. + + Returns: + tuple[torch.Tensor]: Decoded output of SMOKEHead, containing + the following Tensors: + - batch_bboxes (Tensor): Coords of each 3D box. + shape (B, k, 7) + - batch_scores (Tensor): Scores of each 3D box. + shape (B, k) + - batch_topk_labels (Tensor): Categories of each 3D box. + shape (B, k) + """ + img_h, img_w = batch_img_metas[0]['pad_shape'][:2] + batch_size, _, feat_h, feat_w = cls_score.shape + + downsample_ratio = img_h / feat_h + center_heatmap_pred = get_local_maximum(cls_score, kernel=kernel) + + *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap( + center_heatmap_pred, k=topk) + batch_scores, batch_index, batch_topk_labels = batch_dets + + regression = transpose_and_gather_feat(reg_pred, batch_index) + regression = regression.view(-1, 8) + + pred_base_centers_2d = torch.cat( + [topk_xs.view(-1, 1), + topk_ys.view(-1, 1).float()], dim=1) + preds = self.bbox_coder.decode(regression, batch_topk_labels, + downsample_ratio, cam2imgs) + pred_locations = self.bbox_coder.decode_location( + pred_base_centers_2d, preds['offsets_2d'], preds['combined_depth'], + cam2imgs, downsample_ratio) + pred_yaws = self.bbox_coder.decode_orientation( + preds['orientations']).unsqueeze(-1) + pred_dims = preds['dimensions'] + batch_bboxes = torch.cat((pred_locations, pred_dims, pred_yaws), dim=1) + batch_bboxes = batch_bboxes.view(batch_size, -1, self.bbox_code_size) + return batch_bboxes, batch_scores, batch_topk_labels + + def get_predictions(self, pred_reg, labels3d, centers_2d, reg_mask, + batch_indices, batch_img_metas, downsample_ratio): + """Prepare predictions for computing loss. + + Args: + pred_reg (Tensor): Box regression map. + shape (B, channel, H , W). + labels3d (Tensor): Labels of each 3D box. + shape (B * max_objs, ) + centers_2d (Tensor): Coords of each projected 3D box + center on image. shape (N, 2) + reg_mask (Tensor): Indexes of the existence of the 3D box. + shape (B * max_objs, ) + batch_indices (Tenosr): Batch indices of the 3D box. + shape (N, 3) + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + downsample_ratio (int): The stride of feature map. + + Returns: + dict: The predictions for computing loss. 
+ """ + batch, channel = pred_reg.shape[0], pred_reg.shape[1] + w = pred_reg.shape[3] + cam2imgs = torch.stack([ + centers_2d.new_tensor(img_meta['cam2img']) + for img_meta in batch_img_metas + ]) + # (batch_size, 4, 4) -> (N, 4, 4) + cam2imgs = cam2imgs[batch_indices, :, :] + centers_2d_inds = centers_2d[:, 1] * w + centers_2d[:, 0] + centers_2d_inds = centers_2d_inds.view(batch, -1) + pred_regression = transpose_and_gather_feat(pred_reg, centers_2d_inds) + pred_regression_pois = pred_regression.view(-1, channel)[reg_mask] + preds = self.bbox_coder.decode(pred_regression_pois, labels3d, + downsample_ratio, cam2imgs) + + return preds + + def get_targets(self, batch_gt_instances_3d: List[InstanceData], + batch_gt_instances: List[InstanceData], + feat_shape: Tuple[int], batch_img_metas: List[dict]): + """Get training targets for batch images. +`` + Args: + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、 + ``labels_3d``、``depths``、``centers_2d`` and attributes. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes``、``labels``. + feat_shape (tuple[int]): Feature map shape with value, + shape (B, _, H, W). + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + + + Returns: + tuple[Tensor, dict]: The Tensor value is the targets of + center heatmap, the dict has components below: + - base_centers_2d_target (Tensor): Coords of each projected + 3D box center on image. shape (B * max_objs, 2), + [dtype: int] + - labels3d (Tensor): Labels of each 3D box. + shape (N, ) + - reg_mask (Tensor): Mask of the existence of the 3D box. + shape (B * max_objs, ) + - batch_indices (Tensor): Batch id of the 3D box. + shape (N, ) + - depth_target (Tensor): Depth target of each 3D box. + shape (N, ) + - keypoints2d_target (Tensor): Keypoints of each projected 3D box + on image. shape (N, 10, 2) + - keypoints_mask (Tensor): Keypoints mask of each projected 3D + box on image. shape (N, 10) + - keypoints_depth_mask (Tensor): Depths decoded from keypoints + of each 3D box. shape (N, 3) + - orientations_target (Tensor): Orientation (encoded local yaw) + target of each 3D box. shape (N, ) + - offsets_2d_target (Tensor): Offsets target of each projected + 3D box. shape (N, 2) + - dimensions_target (Tensor): Dimensions target of each 3D box. + shape (N, 3) + - downsample_ratio (int): The stride of feature map. + """ + + gt_bboxes_list = [ + gt_instances.bboxes for gt_instances in batch_gt_instances + ] + gt_labels_list = [ + gt_instances.labels for gt_instances in batch_gt_instances + ] + gt_bboxes_3d_list = [ + gt_instances_3d.bboxes_3d + for gt_instances_3d in batch_gt_instances_3d + ] + gt_labels_3d_list = [ + gt_instances_3d.labels_3d + for gt_instances_3d in batch_gt_instances_3d + ] + centers_2d_list = [ + gt_instances_3d.centers_2d + for gt_instances_3d in batch_gt_instances_3d + ] + depths_list = [ + gt_instances_3d.depths for gt_instances_3d in batch_gt_instances_3d + ] + + img_h, img_w = batch_img_metas[0]['pad_shape'][:2] + batch_size, _, feat_h, feat_w = feat_shape + + width_ratio = float(feat_w / img_w) # 1/4 + height_ratio = float(feat_h / img_h) # 1/4 + + assert width_ratio == height_ratio + + # Whether to filter the objects which are not in FOV. 
+ if self.filter_outside_objs: + filter_outside_objs(gt_bboxes_list, gt_labels_list, + gt_bboxes_3d_list, gt_labels_3d_list, + centers_2d_list, batch_img_metas) + + # transform centers_2d to base centers_2d for regression and + # heatmap generation. + # centers_2d = int(base_centers_2d) + offsets_2d + base_centers_2d_list, offsets_2d_list, trunc_mask_list = \ + handle_proj_objs(centers_2d_list, gt_bboxes_list, batch_img_metas) + + keypoints2d_list, keypoints_mask_list, keypoints_depth_mask_list = \ + get_keypoints(gt_bboxes_3d_list, centers_2d_list, batch_img_metas) + + center_heatmap_target = gt_bboxes_list[-1].new_zeros( + [batch_size, self.num_classes, feat_h, feat_w]) + + for batch_id in range(batch_size): + # project gt_bboxes from input image to feat map + gt_bboxes = gt_bboxes_list[batch_id] * width_ratio + gt_labels = gt_labels_list[batch_id] + + # project base centers_2d from input image to feat map + gt_base_centers_2d = base_centers_2d_list[batch_id] * width_ratio + trunc_masks = trunc_mask_list[batch_id] + + for j, base_center2d in enumerate(gt_base_centers_2d): + if trunc_masks[j]: + # for outside objects, generate ellipse heatmap + base_center2d_x_int, base_center2d_y_int = \ + base_center2d.int() + scale_box_w = min(base_center2d_x_int - gt_bboxes[j][0], + gt_bboxes[j][2] - base_center2d_x_int) + scale_box_h = min(base_center2d_y_int - gt_bboxes[j][1], + gt_bboxes[j][3] - base_center2d_y_int) + radius_x = scale_box_w * self.edge_heatmap_ratio + radius_y = scale_box_h * self.edge_heatmap_ratio + radius_x, radius_y = max(0, int(radius_x)), max( + 0, int(radius_y)) + assert min(radius_x, radius_y) == 0 + ind = gt_labels[j] + get_ellip_gaussian_2D( + center_heatmap_target[batch_id, ind], + [base_center2d_x_int, base_center2d_y_int], radius_x, + radius_y) + else: + base_center2d_x_int, base_center2d_y_int = \ + base_center2d.int() + scale_box_h = (gt_bboxes[j][3] - gt_bboxes[j][1]) + scale_box_w = (gt_bboxes[j][2] - gt_bboxes[j][0]) + radius = gaussian_radius([scale_box_h, scale_box_w], + min_overlap=0.7) + radius = max(0, int(radius)) + ind = gt_labels[j] + gen_gaussian_target( + center_heatmap_target[batch_id, ind], + [base_center2d_x_int, base_center2d_y_int], radius) + + avg_factor = max(1, center_heatmap_target.eq(1).sum()) + num_ctrs = [centers_2d.shape[0] for centers_2d in centers_2d_list] + max_objs = max(num_ctrs) + batch_indices = [ + centers_2d_list[0].new_full((num_ctrs[i], ), i) + for i in range(batch_size) + ] + batch_indices = torch.cat(batch_indices, dim=0) + reg_mask = torch.zeros( + (batch_size, max_objs), + dtype=torch.bool).to(base_centers_2d_list[0].device) + gt_bboxes_3d = batch_img_metas[0]['box_type_3d'].cat(gt_bboxes_3d_list) + gt_bboxes_3d = gt_bboxes_3d.to(base_centers_2d_list[0].device) + + # encode original local yaw to multibin format + orienations_target = self.bbox_coder.encode(gt_bboxes_3d) + + batch_base_centers_2d = base_centers_2d_list[0].new_zeros( + (batch_size, max_objs, 2)) + + for i in range(batch_size): + reg_mask[i, :num_ctrs[i]] = 1 + batch_base_centers_2d[i, :num_ctrs[i]] = base_centers_2d_list[i] + + flatten_reg_mask = reg_mask.flatten() + + # transform base centers_2d from input scale to output scale + batch_base_centers_2d = batch_base_centers_2d.view(-1, 2) * width_ratio + + dimensions_target = gt_bboxes_3d.tensor[:, 3:6] + labels_3d = torch.cat(gt_labels_3d_list) + keypoints2d_target = torch.cat(keypoints2d_list) + keypoints_mask = torch.cat(keypoints_mask_list) + keypoints_depth_mask = torch.cat(keypoints_depth_mask_list) + 
offsets_2d_target = torch.cat(offsets_2d_list)
+        bboxes2d = torch.cat(gt_bboxes_list)
+
+        # transform FCOS style bbox into [x1, y1, x2, y2] format.
+        bboxes2d_target = torch.cat([bboxes2d[:, 0:2] * -1, bboxes2d[:, 2:]],
+                                    dim=-1)
+        depths = torch.cat(depths_list)
+
+        target_labels = dict(
+            base_centers_2d_target=batch_base_centers_2d.int(),
+            labels3d=labels_3d,
+            reg_mask=flatten_reg_mask,
+            batch_indices=batch_indices,
+            bboxes2d_target=bboxes2d_target,
+            depth_target=depths,
+            keypoints2d_target=keypoints2d_target,
+            keypoints_mask=keypoints_mask,
+            keypoints_depth_mask=keypoints_depth_mask,
+            orientations_target=orienations_target,
+            offsets_2d_target=offsets_2d_target,
+            dimensions_target=dimensions_target,
+            downsample_ratio=1 / width_ratio)
+
+        return center_heatmap_target, avg_factor, target_labels
+
+    def loss_by_feat(
+            self,
+            cls_scores: List[Tensor],
+            bbox_preds: List[Tensor],
+            batch_gt_instances_3d: List[InstanceData],
+            batch_gt_instances: List[InstanceData],
+            batch_img_metas: List[dict],
+            batch_gt_instances_ignore: Optional[List[InstanceData]] = None):
+        """Compute loss of the head.
+
+        Args:
+            cls_scores (list[Tensor]): Center heatmap scores for each scale
+                level, each is a 4D-tensor with shape
+                (B, num_classes, H, W).
+            bbox_preds (list[Tensor]): Box regression maps for each scale
+                level, each is a 4D-tensor whose channel number is the sum
+                of ``group_reg_dims``, i.e. shape (B, C, H, W).
+            batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of
+                gt_instance_3d. It usually includes ``bboxes_3d``、
+                ``labels_3d``、``depths``、``centers_2d`` and attributes.
+            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
+                gt_instance. It usually includes ``bboxes``、``labels``.
+            batch_img_metas (list[dict]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
+                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
+                data that is ignored during training and testing.
+                Defaults to None.
+
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+        """
+        assert len(cls_scores) == len(bbox_preds) == 1
+        assert batch_gt_instances_ignore is None
+        center2d_heatmap = cls_scores[0]
+        pred_reg = bbox_preds[0]
+
+        center2d_heatmap_target, avg_factor, target_labels = \
+            self.get_targets(batch_gt_instances_3d,
+                             batch_gt_instances,
+                             center2d_heatmap.shape,
+                             batch_img_metas)
+
+        preds = self.get_predictions(
+            pred_reg=pred_reg,
+            labels3d=target_labels['labels3d'],
+            centers_2d=target_labels['base_centers_2d_target'],
+            reg_mask=target_labels['reg_mask'],
+            batch_indices=target_labels['batch_indices'],
+            batch_img_metas=batch_img_metas,
+            downsample_ratio=target_labels['downsample_ratio'])
+
+        # heatmap loss
+        loss_cls = self.loss_cls(
+            center2d_heatmap, center2d_heatmap_target, avg_factor=avg_factor)
+
+        # bbox2d regression loss
+        loss_bbox = self.loss_bbox(preds['bboxes2d'],
+                                   target_labels['bboxes2d_target'])
+
+        # keypoints loss. The predicted and target keypoints are both in
+        # local coordinates. The mask dtype must be bool (not int or float)
+        # so that it is used as a boolean index.
+        keypoints2d_mask = target_labels['keypoints_mask']
+        loss_keypoints = self.loss_keypoints(
+            preds['keypoints2d'][keypoints2d_mask],
+            target_labels['keypoints2d_target'][keypoints2d_mask])
+
+        # orientations loss
+        loss_dir = self.loss_dir(preds['orientations'],
+                                 target_labels['orientations_target'])
+
+        # dimensions loss
+        loss_dims = self.loss_dims(preds['dimensions'],
+                                   target_labels['dimensions_target'])
+
+        # offsets for center heatmap
+        loss_offsets_2d = self.loss_offsets_2d(
+            preds['offsets_2d'], target_labels['offsets_2d_target'])
+
+        # directly regressed depth loss with direct depth uncertainty loss
+        direct_depth_weights = torch.exp(-preds['direct_depth_uncertainty'])
+        loss_weight_1 = self.loss_direct_depth.loss_weight
+        loss_direct_depth = self.loss_direct_depth(
+            preds['direct_depth'], target_labels['depth_target'],
+            direct_depth_weights)
+        loss_uncertainty_1 = \
+            preds['direct_depth_uncertainty'] * loss_weight_1
+        loss_direct_depth = loss_direct_depth + loss_uncertainty_1.mean()
+
+        # keypoints decoded depth loss with keypoints depth uncertainty loss
+        depth_mask = target_labels['keypoints_depth_mask']
+        depth_target = target_labels['depth_target'].unsqueeze(-1).repeat(1, 3)
+        valid_keypoints_depth_uncertainty = preds[
+            'keypoints_depth_uncertainty'][depth_mask]
+        valid_keypoints_depth_weights = torch.exp(
+            -valid_keypoints_depth_uncertainty)
+        loss_keypoints_depth = self.loss_keypoints_depth(
+            preds['keypoints_depth'][depth_mask], depth_target[depth_mask],
+            valid_keypoints_depth_weights)
+        loss_weight_2 = self.loss_keypoints_depth.loss_weight
+        loss_uncertainty_2 = \
+            valid_keypoints_depth_uncertainty * loss_weight_2
+        loss_keypoints_depth = loss_keypoints_depth + loss_uncertainty_2.mean()
+
+        # combined depth loss to optimize the depth uncertainty
+        loss_combined_depth = self.loss_combined_depth(
+            preds['combined_depth'], target_labels['depth_target'])
+
+        loss_dict = dict(
+            loss_cls=loss_cls,
+            loss_bbox=loss_bbox,
+            loss_keypoints=loss_keypoints,
+            loss_dir=loss_dir,
+            loss_dims=loss_dims,
+            loss_offsets_2d=loss_offsets_2d,
+            loss_direct_depth=loss_direct_depth,
+            loss_keypoints_depth=loss_keypoints_depth,
+            loss_combined_depth=loss_combined_depth)
+
+        return loss_dict
diff --git a/mmdet3d/models/dense_heads/parta2_rpn_head.py b/mmdet3d/models/dense_heads/parta2_rpn_head.py
new file mode 100755
index 0000000..c3a31ae
--- /dev/null
+++ b/mmdet3d/models/dense_heads/parta2_rpn_head.py
@@ -0,0 +1,398 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Dict, List, Tuple
+
+import numpy as np
+import torch
+from mmengine import ConfigDict
+from mmengine.structures import InstanceData
+from torch import Tensor
+
+from mmdet3d.models.layers import nms_bev, nms_normal_bev
+from mmdet3d.registry import MODELS
+from mmdet3d.structures import limit_period, xywhr2xyxyr
+from mmdet3d.utils.typing_utils import InstanceList
+from ...structures.det3d_data_sample import SampleList
+from .anchor3d_head import Anchor3DHead
+
+
+@MODELS.register_module()
+class PartA2RPNHead(Anchor3DHead):
+    """RPN head for PartA2.
+
+    Note:
+        The main difference between the PartA2 RPN head and the Anchor3DHead
+        lies in their output during inference. The PartA2 RPN head further
+        returns the original classification score for the second stage, since
+        the bbox head in the RoI head does not perform classification.
+ + Different from RPN heads in 2D detectors, this RPN head does + multi-class classification task and uses FocalLoss like the SECOND and + PointPillars do. But this head uses class agnostic nms rather than + multi-class nms. + + Args: + num_classes (int): Number of classes. + in_channels (int): Number of channels in the input feature map. + train_cfg (dict): Train configs. + test_cfg (dict): Test configs. + feat_channels (int): Number of channels of the feature map. + use_direction_classifier (bool): Whether to add a direction classifier. + anchor_generator(dict): Config dict of anchor generator. + assigner_per_size (bool): Whether to do assignment for each separate + anchor size. + assign_per_class (bool): Whether to do assignment for each class. + diff_rad_by_sin (bool): Whether to change the difference into sin + difference for box regression loss. + dir_offset (float | int): The offset of BEV rotation angles + (TODO: may be moved into box coder) + dir_limit_offset (float | int): The limited range of BEV + rotation angles. (TODO: may be moved into box coder) + bbox_coder (dict): Config dict of box coders. + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + loss_dir (dict): Config of direction classifier loss. + """ + + def __init__(self, + num_classes: int, + in_channels: int, + train_cfg: ConfigDict, + test_cfg: ConfigDict, + feat_channels: int = 256, + use_direction_classifier: bool = True, + anchor_generator: Dict = dict( + type='Anchor3DRangeGenerator', + range=[0, -39.68, -1.78, 69.12, 39.68, -1.78], + strides=[2], + sizes=[[3.9, 1.6, 1.56]], + rotations=[0, 1.57], + custom_values=[], + reshape_out=False), + assigner_per_size: bool = False, + assign_per_class: bool = False, + diff_rad_by_sin: bool = True, + dir_offset: float = -np.pi / 2, + dir_limit_offset: float = 0, + bbox_coder: Dict = dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls: Dict = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_bbox: Dict = dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + loss_weight=2.0), + loss_dir: Dict = dict( + type='mmdet.CrossEntropyLoss', loss_weight=0.2), + init_cfg: Dict = None) -> None: + super().__init__(num_classes, in_channels, feat_channels, + use_direction_classifier, anchor_generator, + assigner_per_size, assign_per_class, diff_rad_by_sin, + dir_offset, dir_limit_offset, bbox_coder, loss_cls, + loss_bbox, loss_dir, train_cfg, test_cfg, init_cfg) + + def _predict_by_feat_single(self, + cls_score_list: List[Tensor], + bbox_pred_list: List[Tensor], + dir_cls_pred_list: List[Tensor], + mlvl_priors: List[Tensor], + input_meta: List[dict], + cfg: ConfigDict, + rescale: List[Tensor] = False): + """Get bboxes of single branch. + + Args: + cls_score_list (torch.Tensor): Class score in single batch. + bbox_pred_list (torch.Tensor): Bbox prediction in single batch. + dir_cls_pred_list (torch.Tensor): Predictions of direction class + in single batch. + mlvl_priors (List[torch.Tensor]): Multi-level anchors + in single batch. + input_meta (list[dict]): Contain pcd and img's meta info. + cfg (:obj:`ConfigDict`): Training or testing config. + rescale (list[torch.Tensor]): whether th rescale bbox. + + Returns: + dict: Predictions of single batch containing the following keys: + + - boxes_3d (:obj:`BaseInstance3DBoxes`): Predicted 3d bboxes. + - scores_3d (torch.Tensor): Score of each bbox. + - labels_3d (torch.Tensor): Label of each bbox. + - cls_preds (torch.Tensor): Class score of each bbox. 
+ """ + assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_priors) + mlvl_bboxes = [] + mlvl_max_scores = [] + mlvl_label_pred = [] + mlvl_dir_scores = [] + mlvl_cls_score = [] + for cls_score, bbox_pred, dir_cls_pred, anchors in zip( + cls_score_list, bbox_pred_list, dir_cls_pred_list, + mlvl_priors): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + assert cls_score.size()[-2:] == dir_cls_pred.size()[-2:] + dir_cls_pred = dir_cls_pred.permute(1, 2, 0).reshape(-1, 2) + dir_cls_score = torch.max(dir_cls_pred, dim=-1)[1] + + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.num_classes) + + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(1, 2, + 0).reshape(-1, self.box_code_size) + + nms_pre = cfg.get('nms_pre', -1) + if self.use_sigmoid_cls: + max_scores, pred_labels = scores.max(dim=1) + else: + max_scores, pred_labels = scores[:, :-1].max(dim=1) + # get topk + if nms_pre > 0 and scores.shape[0] > nms_pre: + topk_scores, topk_inds = max_scores.topk(nms_pre) + anchors = anchors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + max_scores = topk_scores + cls_score = scores[topk_inds, :] + dir_cls_score = dir_cls_score[topk_inds] + pred_labels = pred_labels[topk_inds] + + bboxes = self.bbox_coder.decode(anchors, bbox_pred) + mlvl_bboxes.append(bboxes) + mlvl_max_scores.append(max_scores) + mlvl_cls_score.append(cls_score) + mlvl_label_pred.append(pred_labels) + mlvl_dir_scores.append(dir_cls_score) + + mlvl_bboxes = torch.cat(mlvl_bboxes) + mlvl_bboxes_for_nms = xywhr2xyxyr(input_meta['box_type_3d']( + mlvl_bboxes, box_dim=self.box_code_size).bev) + mlvl_max_scores = torch.cat(mlvl_max_scores) + mlvl_label_pred = torch.cat(mlvl_label_pred) + mlvl_dir_scores = torch.cat(mlvl_dir_scores) + # shape [k, num_class] before sigmoid + # PartA2 need to keep raw classification score + # because the bbox head in the second stage does not have + # classification branch, + # roi head need this score as classification score + mlvl_cls_score = torch.cat(mlvl_cls_score) + + score_thr = cfg.get('score_thr', 0) + result = self.class_agnostic_nms(mlvl_bboxes, mlvl_bboxes_for_nms, + mlvl_max_scores, mlvl_label_pred, + mlvl_cls_score, mlvl_dir_scores, + score_thr, cfg, input_meta) + return result + + def loss_and_predict(self, + feats_dict: Dict, + batch_data_samples: SampleList, + proposal_cfg: ConfigDict = None, + **kwargs) -> Tuple[dict, InstanceList]: + """Perform forward propagation of the head, then calculate loss and + predictions from the features and data samples. + + Args: + feats_dict (dict): Contains features from the first stage. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + proposal_cfg (ConfigDict, optional): Proposal config. + + Returns: + tuple: the return value is a tuple contains: + + - losses: (dict[str, Tensor]): A dictionary of loss components. + - predictions (list[:obj:`InstanceData`]): Detection + results of each sample after the post process. 
+ """ + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + + outs = self(feats_dict['neck_feats']) + + loss_inputs = outs + (batch_gt_instances_3d, batch_input_metas, + batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + + predictions = self.predict_by_feat( + *outs, batch_input_metas=batch_input_metas, cfg=proposal_cfg) + return losses, predictions + + def loss_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_input_metas: List[dict], + batch_gt_instances_ignore: InstanceList = None) -> Dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (list[torch.Tensor]): Multi-level class scores. + bbox_preds (list[torch.Tensor]): Multi-level bbox predictions. + dir_cls_preds (list[torch.Tensor]): Multi-level direction + class predictions. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_input_metas (list[dict]): Contain pcd and img's meta info. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, list[torch.Tensor]]: Classification, bbox, and + direction losses of each level. + + - loss_rpn_cls (list[torch.Tensor]): Classification losses. + - loss_rpn_bbox (list[torch.Tensor]): Box regression losses. + - loss_rpn_dir (list[torch.Tensor]): Direction classification + losses. + """ + loss_dict = super().loss_by_feat(cls_scores, bbox_preds, dir_cls_preds, + batch_gt_instances_3d, + batch_input_metas, + batch_gt_instances_ignore) + # change the loss key names to avoid conflict + return dict( + loss_rpn_cls=loss_dict['loss_cls'], + loss_rpn_bbox=loss_dict['loss_bbox'], + loss_rpn_dir=loss_dict['loss_dir']) + + def class_agnostic_nms(self, mlvl_bboxes: Tensor, + mlvl_bboxes_for_nms: Tensor, + mlvl_max_scores: Tensor, mlvl_label_pred: Tensor, + mlvl_cls_score: Tensor, mlvl_dir_scores: Tensor, + score_thr: int, cfg: ConfigDict, + input_meta: dict) -> Dict: + """Class agnostic nms for single batch. + + Args: + mlvl_bboxes (torch.Tensor): Bboxes from Multi-level. + mlvl_bboxes_for_nms (torch.Tensor): Bboxes for nms + (bev or minmax boxes) from Multi-level. + mlvl_max_scores (torch.Tensor): Max scores of Multi-level bbox. + mlvl_label_pred (torch.Tensor): Class predictions + of Multi-level bbox. + mlvl_cls_score (torch.Tensor): Class scores of + Multi-level bbox. + mlvl_dir_scores (torch.Tensor): Direction scores of + Multi-level bbox. + score_thr (int): Score threshold. + cfg (:obj:`ConfigDict`): Training or testing config. + input_meta (dict): Contain pcd and img's meta info. + + Returns: + dict: Predictions of single batch. Contain the keys: + + - boxes_3d (:obj:`BaseInstance3DBoxes`): Predicted 3d bboxes. + - scores_3d (torch.Tensor): Score of each bbox. + - labels_3d (torch.Tensor): Label of each bbox. + - cls_preds (torch.Tensor): Class score of each bbox. 
+ """ + bboxes = [] + scores = [] + labels = [] + dir_scores = [] + cls_scores = [] + score_thr_inds = mlvl_max_scores > score_thr + _scores = mlvl_max_scores[score_thr_inds] + _bboxes_for_nms = mlvl_bboxes_for_nms[score_thr_inds, :] + if cfg.use_rotate_nms: + nms_func = nms_bev + else: + nms_func = nms_normal_bev + selected = nms_func(_bboxes_for_nms, _scores, cfg.nms_thr) + + _mlvl_bboxes = mlvl_bboxes[score_thr_inds, :] + _mlvl_dir_scores = mlvl_dir_scores[score_thr_inds] + _mlvl_label_pred = mlvl_label_pred[score_thr_inds] + _mlvl_cls_score = mlvl_cls_score[score_thr_inds] + + if len(selected) > 0: + bboxes.append(_mlvl_bboxes[selected]) + scores.append(_scores[selected]) + labels.append(_mlvl_label_pred[selected]) + cls_scores.append(_mlvl_cls_score[selected]) + dir_scores.append(_mlvl_dir_scores[selected]) + dir_rot = limit_period(bboxes[-1][..., 6] - self.dir_offset, + self.dir_limit_offset, np.pi) + bboxes[-1][..., 6] = ( + dir_rot + self.dir_offset + + np.pi * dir_scores[-1].to(bboxes[-1].dtype)) + + if bboxes: + bboxes = torch.cat(bboxes, dim=0) + scores = torch.cat(scores, dim=0) + cls_scores = torch.cat(cls_scores, dim=0) + labels = torch.cat(labels, dim=0) + if bboxes.shape[0] > cfg.nms_post: + _, inds = scores.sort(descending=True) + inds = inds[:cfg.nms_post] + bboxes = bboxes[inds, :] + labels = labels[inds] + scores = scores[inds] + cls_scores = cls_scores[inds] + bboxes = input_meta['box_type_3d']( + bboxes, box_dim=self.box_code_size) + result = InstanceData() + result.bboxes_3d = bboxes + result.scores_3d = scores + result.labels_3d = labels + result.cls_preds = cls_scores + return result + else: + result = InstanceData() + result.bboxes_3d = input_meta['box_type_3d']( + mlvl_bboxes.new_zeros([0, self.box_code_size]), + box_dim=self.box_code_size) + result.scores_3d = mlvl_bboxes.new_zeros([0]) + result.labels_3d = mlvl_bboxes.new_zeros([0]) + result.cls_preds = mlvl_bboxes.new_zeros( + [0, mlvl_cls_score.shape[-1]]) + return result + + def predict(self, feats_dict: Dict, + batch_data_samples: SampleList) -> InstanceList: + """Perform forward propagation of the 3D detection head and predict + detection results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + rpn_outs = self(feats_dict['neck_feats']) + proposal_cfg = self.test_cfg + + proposal_list = self.predict_by_feat( + *rpn_outs, cfg=proposal_cfg, batch_input_metas=batch_input_metas) + return proposal_list diff --git a/mmdet3d/models/dense_heads/pgd_head.py b/mmdet3d/models/dense_heads/pgd_head.py new file mode 100755 index 0000000..c33ddb9 --- /dev/null +++ b/mmdet3d/models/dense_heads/pgd_head.py @@ -0,0 +1,1241 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Optional, Tuple + +import numpy as np +import torch +from mmcv.cnn import Scale +from mmdet.models.utils import multi_apply +from mmdet.structures.bbox import distance2bbox +from mmengine.model import bias_init_with_prob, normal_init +from mmengine.structures import InstanceData +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.models.layers import box3d_multiclass_nms +from mmdet3d.registry import MODELS +from mmdet3d.structures import points_cam2img, points_img2cam, xywhr2xyxyr +from mmdet3d.utils.typing_utils import (ConfigType, InstanceList, + OptConfigType, OptInstanceList) +from .fcos_mono3d_head import FCOSMono3DHead + + +@MODELS.register_module() +class PGDHead(FCOSMono3DHead): + r"""Anchor-free head used in `PGD `_. + + Args: + use_depth_classifer (bool, optional): Whether to use depth classifier. + Defaults to True. + use_only_reg_proj (bool, optional): Whether to use only direct + regressed depth in the re-projection (to make the network easier + to learn). Defaults to False. + weight_dim (int, optional): Dimension of the location-aware weight + map. Defaults to -1. + weight_branch (tuple[tuple[int]], optional): Feature map channels of + the convolutional branch for weight map. Defaults to ((256, ), ). + depth_branch (tuple[int], optional): Feature map channels of the + branch for probabilistic depth estimation. Defaults to (64, ), + depth_range (tuple[float], optional): Range of depth estimation. + Defaults to (0, 70), + depth_unit (int, optional): Unit of depth range division. Defaults to + 10. + division (str, optional): Depth division method. Options include + 'uniform', 'linear', 'log', 'loguniform'. Defaults to 'uniform'. + depth_bins (int, optional): Discrete bins of depth division. Defaults + to 8. + loss_depth (dict, optional): Depth loss. Defaults to dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0). + loss_bbox2d (dict, optional): Loss for 2D box estimation. Defaults to + dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0). + loss_consistency (dict, optional): Consistency loss. Defaults to + dict(type='GIoULoss', loss_weight=1.0), + pred_velo (bool, optional): Whether to predict velocity. Defaults to + False. + pred_bbox2d (bool, optional): Whether to predict 2D bounding boxes. + Defaults to True. + pred_keypoints (bool, optional): Whether to predict keypoints. + Defaults to False, + bbox_coder (dict, optional): Bounding box coder. Defaults to + dict(type='PGDBBoxCoder', base_depths=((28.01, 16.32), ), + base_dims=((0.8, 1.73, 0.6), (1.76, 1.73, 0.6), (3.9, 1.56, 1.6)), + code_size=7). 
+ """ + + def __init__(self, + use_depth_classifier: bool = True, + use_onlyreg_proj: bool = False, + weight_dim: int = -1, + weight_branch: Tuple[Tuple] = ((256, ), ), + depth_branch: Tuple = (64, ), + depth_range: Tuple = (0, 70), + depth_unit: int = 10, + division: str = 'uniform', + depth_bins: int = 8, + loss_depth: dict = dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + loss_weight=1.0), + loss_bbox2d: dict = dict( + type='mmdet.SmoothL1Loss', + beta=1.0 / 9.0, + loss_weight=1.0), + loss_consistency: dict = dict( + type='mmdet.GIoULoss', loss_weight=1.0), + pred_bbox2d: bool = True, + pred_keypoints: bool = False, + bbox_coder: dict = dict( + type='PGDBBoxCoder', + base_depths=((28.01, 16.32), ), + base_dims=((0.8, 1.73, 0.6), (1.76, 1.73, 0.6), + (3.9, 1.56, 1.6)), + code_size=7), + **kwargs) -> None: + self.use_depth_classifier = use_depth_classifier + self.use_onlyreg_proj = use_onlyreg_proj + self.depth_branch = depth_branch + self.pred_keypoints = pred_keypoints + self.weight_dim = weight_dim + self.weight_branch = weight_branch + self.weight_out_channels = [] + for weight_branch_channels in weight_branch: + if len(weight_branch_channels) > 0: + self.weight_out_channels.append(weight_branch_channels[-1]) + else: + self.weight_out_channels.append(-1) + self.depth_range = depth_range + self.depth_unit = depth_unit + self.division = division + if self.division == 'uniform': + self.num_depth_cls = int( + (depth_range[1] - depth_range[0]) / depth_unit) + 1 + if self.num_depth_cls != depth_bins: + print('Warning: The number of bins computed from ' + + 'depth_unit is different from given parameter! ' + + 'Depth_unit will be considered with priority in ' + + 'Uniform Division.') + else: + self.num_depth_cls = depth_bins + super().__init__( + pred_bbox2d=pred_bbox2d, bbox_coder=bbox_coder, **kwargs) + self.loss_depth = MODELS.build(loss_depth) + if self.pred_bbox2d: + self.loss_bbox2d = MODELS.build(loss_bbox2d) + self.loss_consistency = MODELS.build(loss_consistency) + if self.pred_keypoints: + self.kpts_start = 9 if self.pred_velo else 7 + + def _init_layers(self): + """Initialize layers of the head.""" + super()._init_layers() + if self.pred_bbox2d: + self.scale_dim += 1 + if self.pred_keypoints: + self.scale_dim += 1 + self.scales = nn.ModuleList([ + nn.ModuleList([Scale(1.0) for _ in range(self.scale_dim)]) + for _ in self.strides + ]) + + def _init_predictor(self): + """Initialize predictor layers of the head.""" + super()._init_predictor() + + if self.use_depth_classifier: + self.conv_depth_cls_prev = self._init_branch( + conv_channels=self.depth_branch, + conv_strides=(1, ) * len(self.depth_branch)) + self.conv_depth_cls = nn.Conv2d(self.depth_branch[-1], + self.num_depth_cls, 1) + # Data-agnostic single param lambda for local depth fusion + self.fuse_lambda = nn.Parameter(torch.tensor(10e-5)) + + if self.weight_dim != -1: + self.conv_weight_prevs = nn.ModuleList() + self.conv_weights = nn.ModuleList() + for i in range(self.weight_dim): + weight_branch_channels = self.weight_branch[i] + weight_out_channel = self.weight_out_channels[i] + if len(weight_branch_channels) > 0: + self.conv_weight_prevs.append( + self._init_branch( + conv_channels=weight_branch_channels, + conv_strides=(1, ) * len(weight_branch_channels))) + self.conv_weights.append( + nn.Conv2d(weight_out_channel, 1, 1)) + else: + self.conv_weight_prevs.append(None) + self.conv_weights.append( + nn.Conv2d(self.feat_channels, 1, 1)) + + def init_weights(self): + """Initialize weights of the head. 
+ + We currently still use the customized defined init_weights because the + default init of DCN triggered by the init_cfg will init + conv_offset.weight, which mistakenly affects the training stability. + """ + super().init_weights() + + bias_cls = bias_init_with_prob(0.01) + if self.use_depth_classifier: + for m in self.conv_depth_cls_prev: + if isinstance(m.conv, nn.Conv2d): + normal_init(m.conv, std=0.01) + normal_init(self.conv_depth_cls, std=0.01, bias=bias_cls) + + if self.weight_dim != -1: + for conv_weight_prev in self.conv_weight_prevs: + if conv_weight_prev is None: + continue + for m in conv_weight_prev: + if isinstance(m.conv, nn.Conv2d): + normal_init(m.conv, std=0.01) + for conv_weight in self.conv_weights: + normal_init(conv_weight, std=0.01) + + def forward(self, x: Tuple[Tensor]) -> Tuple[Tensor, ...]: + """Forward features from the upstream network. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * 2. (bin = 2). + weight (list[Tensor]): Location-aware weight maps on each + scale level, each is a 4D-tensor, the channel number is + num_points * 1. + depth_cls_preds (list[Tensor]): Box scores for depth class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * self.num_depth_cls. + attr_preds (list[Tensor]): Attribute scores for each scale + level, each is a 4D-tensor, the channel number is + num_points * num_attrs. + centernesses (list[Tensor]): Centerness for each scale level, + each is a 4D-tensor, the channel number is num_points * 1. + """ + return multi_apply(self.forward_single, x, self.scales, self.strides) + + def forward_single(self, x: Tensor, scale: Scale, + stride: int) -> Tuple[Tensor, ...]: + """Forward features of a single scale level. + + Args: + x (Tensor): FPN feature maps of the specified stride. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + stride (int): The corresponding stride for feature maps, only + used to normalize the bbox prediction when self.norm_on_bbox + is True. + + Returns: + tuple: scores for each class, bbox and direction class + predictions, depth class predictions, location-aware weights, + attribute and centerness predictions of input feature maps. 
+ """ + cls_score, bbox_pred, dir_cls_pred, attr_pred, centerness, cls_feat, \ + reg_feat = super().forward_single(x, scale, stride) + + max_regress_range = stride * self.regress_ranges[0][1] / \ + self.strides[0] + bbox_pred = self.bbox_coder.decode_2d(bbox_pred, scale, stride, + max_regress_range, self.training, + self.pred_keypoints, + self.pred_bbox2d) + + depth_cls_pred = None + if self.use_depth_classifier: + clone_reg_feat = reg_feat.clone() + for conv_depth_cls_prev_layer in self.conv_depth_cls_prev: + clone_reg_feat = conv_depth_cls_prev_layer(clone_reg_feat) + depth_cls_pred = self.conv_depth_cls(clone_reg_feat) + + weight = None + if self.weight_dim != -1: + weight = [] + for i in range(self.weight_dim): + clone_reg_feat = reg_feat.clone() + if len(self.weight_branch[i]) > 0: + for conv_weight_prev_layer in self.conv_weight_prevs[i]: + clone_reg_feat = conv_weight_prev_layer(clone_reg_feat) + weight.append(self.conv_weights[i](clone_reg_feat)) + weight = torch.cat(weight, dim=1) + + return cls_score, bbox_pred, dir_cls_pred, depth_cls_pred, weight, \ + attr_pred, centerness + + def get_proj_bbox2d(self, + bbox_preds: List[Tensor], + pos_dir_cls_preds: List[Tensor], + labels_3d: List[Tensor], + bbox_targets_3d: List[Tensor], + pos_points: Tensor, + pos_inds: Tensor, + batch_img_metas: List[dict], + pos_depth_cls_preds: Optional[Tensor] = None, + pos_weights: Optional[Tensor] = None, + pos_cls_scores: Optional[Tensor] = None, + with_kpts: bool = False) -> Tuple[Tensor]: + """Decode box predictions and get projected 2D attributes. + + Args: + bbox_preds (list[Tensor]): Box predictions for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. + pos_dir_cls_preds (Tensor): Box scores for direction class + predictions of positive boxes on all the scale levels in shape + (num_pos_points, 2). + labels_3d (list[Tensor]): 3D box category labels for each scale + level, each is a 4D-tensor. + bbox_targets_3d (list[Tensor]): 3D box targets for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. + pos_points (Tensor): Foreground points. + pos_inds (Tensor): Index of foreground points from flattened + tensors. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + pos_depth_cls_preds (Tensor, optional): Probabilistic depth map of + positive boxes on all the scale levels in shape + (num_pos_points, self.num_depth_cls). Defaults to None. + pos_weights (Tensor, optional): Location-aware weights of positive + boxes in shape (num_pos_points, self.weight_dim). Defaults to + None. + pos_cls_scores (Tensor, optional): Classification scores of + positive boxes in shape (num_pos_points, self.num_classes). + Defaults to None. + with_kpts (bool, optional): Whether to output keypoints targets. + Defaults to False. + + Returns: + tuple[Tensor]: Exterior 2D boxes from projected 3D boxes, + predicted 2D boxes and keypoint targets (if necessary). 
+ """ + views = [np.array(img_meta['cam2img']) for img_meta in batch_img_metas] + num_imgs = len(batch_img_metas) + img_idx = [] + for label in labels_3d: + for idx in range(num_imgs): + img_idx.append( + labels_3d[0].new_ones(int(len(label) / num_imgs)) * idx) + img_idx = torch.cat(img_idx) + pos_img_idx = img_idx[pos_inds] + + flatten_strided_bbox_preds = [] + flatten_strided_bbox2d_preds = [] + flatten_bbox_targets_3d = [] + flatten_strides = [] + + for stride_idx, bbox_pred in enumerate(bbox_preds): + flatten_bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape( + -1, sum(self.group_reg_dims)) + flatten_bbox_pred[:, :2] *= self.strides[stride_idx] + flatten_bbox_pred[:, -4:] *= self.strides[stride_idx] + flatten_strided_bbox_preds.append( + flatten_bbox_pred[:, :self.bbox_coder.bbox_code_size]) + flatten_strided_bbox2d_preds.append(flatten_bbox_pred[:, -4:]) + + bbox_target_3d = bbox_targets_3d[stride_idx].clone() + bbox_target_3d[:, :2] *= self.strides[stride_idx] + bbox_target_3d[:, -4:] *= self.strides[stride_idx] + flatten_bbox_targets_3d.append(bbox_target_3d) + + flatten_stride = flatten_bbox_pred.new_ones( + *flatten_bbox_pred.shape[:-1], 1) * self.strides[stride_idx] + flatten_strides.append(flatten_stride) + + flatten_strided_bbox_preds = torch.cat(flatten_strided_bbox_preds) + flatten_strided_bbox2d_preds = torch.cat(flatten_strided_bbox2d_preds) + flatten_bbox_targets_3d = torch.cat(flatten_bbox_targets_3d) + flatten_strides = torch.cat(flatten_strides) + pos_strided_bbox_preds = flatten_strided_bbox_preds[pos_inds] + pos_strided_bbox2d_preds = flatten_strided_bbox2d_preds[pos_inds] + pos_bbox_targets_3d = flatten_bbox_targets_3d[pos_inds] + pos_strides = flatten_strides[pos_inds] + + pos_decoded_bbox2d_preds = distance2bbox(pos_points, + pos_strided_bbox2d_preds) + + pos_strided_bbox_preds[:, :2] = \ + pos_points - pos_strided_bbox_preds[:, :2] + pos_bbox_targets_3d[:, :2] = \ + pos_points - pos_bbox_targets_3d[:, :2] + + if self.use_depth_classifier and (not self.use_onlyreg_proj): + pos_prob_depth_preds = self.bbox_coder.decode_prob_depth( + pos_depth_cls_preds, self.depth_range, self.depth_unit, + self.division, self.num_depth_cls) + sig_alpha = torch.sigmoid(self.fuse_lambda) + pos_strided_bbox_preds[:, 2] = \ + sig_alpha * pos_strided_bbox_preds.clone()[:, 2] + \ + (1 - sig_alpha) * pos_prob_depth_preds + + box_corners_in_image = pos_strided_bbox_preds.new_zeros( + (*pos_strided_bbox_preds.shape[:-1], 8, 2)) + box_corners_in_image_gt = pos_strided_bbox_preds.new_zeros( + (*pos_strided_bbox_preds.shape[:-1], 8, 2)) + + for idx in range(num_imgs): + mask = (pos_img_idx == idx) + if pos_strided_bbox_preds[mask].shape[0] == 0: + continue + cam2img = torch.eye( + 4, + dtype=pos_strided_bbox_preds.dtype, + device=pos_strided_bbox_preds.device) + view_shape = views[idx].shape + cam2img[:view_shape[0], :view_shape[1]] = \ + pos_strided_bbox_preds.new_tensor(views[idx]) + + centers2d_preds = pos_strided_bbox_preds.clone()[mask, :2] + centers2d_targets = pos_bbox_targets_3d.clone()[mask, :2] + centers3d_targets = points_img2cam(pos_bbox_targets_3d[mask, :3], + views[idx]) + + # use predicted depth to re-project the 2.5D centers + pos_strided_bbox_preds[mask, :3] = points_img2cam( + pos_strided_bbox_preds[mask, :3], views[idx]) + pos_bbox_targets_3d[mask, :3] = centers3d_targets + + # depth fixed when computing re-project 3D bboxes + pos_strided_bbox_preds[mask, 2] = \ + pos_bbox_targets_3d.clone()[mask, 2] + + # decode yaws + if self.use_direction_classifier: + pos_dir_cls_scores = 
torch.max( + pos_dir_cls_preds[mask], dim=-1)[1] + pos_strided_bbox_preds[mask] = self.bbox_coder.decode_yaw( + pos_strided_bbox_preds[mask], centers2d_preds, + pos_dir_cls_scores, self.dir_offset, cam2img) + pos_bbox_targets_3d[mask, 6] = torch.atan2( + centers2d_targets[:, 0] - cam2img[0, 2], + cam2img[0, 0]) + pos_bbox_targets_3d[mask, 6] + + corners = batch_img_metas[0]['box_type_3d']( + pos_strided_bbox_preds[mask], + box_dim=self.bbox_coder.bbox_code_size, + origin=(0.5, 0.5, 0.5)).corners + box_corners_in_image[mask] = points_cam2img(corners, cam2img) + + corners_gt = batch_img_metas[0]['box_type_3d']( + pos_bbox_targets_3d[mask, :self.bbox_code_size], + box_dim=self.bbox_coder.bbox_code_size, + origin=(0.5, 0.5, 0.5)).corners + box_corners_in_image_gt[mask] = points_cam2img(corners_gt, cam2img) + + minxy = torch.min(box_corners_in_image, dim=1)[0] + maxxy = torch.max(box_corners_in_image, dim=1)[0] + proj_bbox2d_preds = torch.cat([minxy, maxxy], dim=1) + + outputs = (proj_bbox2d_preds, pos_decoded_bbox2d_preds) + + if with_kpts: + norm_strides = pos_strides * self.regress_ranges[0][1] / \ + self.strides[0] + kpts_targets = box_corners_in_image_gt - pos_points[..., None, :] + kpts_targets = kpts_targets.view( + (*pos_strided_bbox_preds.shape[:-1], 16)) + kpts_targets /= norm_strides + + outputs += (kpts_targets, ) + + return outputs + + def get_pos_predictions(self, bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + depth_cls_preds: List[Tensor], + weights: List[Tensor], attr_preds: List[Tensor], + centernesses: List[Tensor], pos_inds: Tensor, + batch_img_metas: List[dict]) -> Tuple[Tensor]: + """Flatten predictions and get positive ones. + + Args: + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * 2. (bin = 2) + depth_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * self.num_depth_cls. + attr_preds (list[Tensor]): Attribute scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_attrs. + centernesses (list[Tensor]): Centerness for each scale level, each + is a 4D-tensor, the channel number is num_points * 1. + pos_inds (Tensor): Index of foreground points from flattened + tensors. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + + Returns: + tuple[Tensor]: Box predictions, direction classes, probabilistic + depth maps, location-aware weight maps, attributes and + centerness predictions. 
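+
+ Note:
+ A shape sketch with illustrative sizes: each per-level prediction of
+ shape ``(B, C, H, W)`` is permuted and flattened to ``(B * H * W, C)``,
+ all levels are concatenated, and the result is indexed with
+ ``pos_inds``, so every returned tensor has ``len(pos_inds)`` rows.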
+ """ + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(-1, sum(self.group_reg_dims)) + for bbox_pred in bbox_preds + ] + flatten_dir_cls_preds = [ + dir_cls_pred.permute(0, 2, 3, 1).reshape(-1, 2) + for dir_cls_pred in dir_cls_preds + ] + flatten_centerness = [ + centerness.permute(0, 2, 3, 1).reshape(-1) + for centerness in centernesses + ] + flatten_bbox_preds = torch.cat(flatten_bbox_preds) + flatten_dir_cls_preds = torch.cat(flatten_dir_cls_preds) + flatten_centerness = torch.cat(flatten_centerness) + pos_bbox_preds = flatten_bbox_preds[pos_inds] + pos_dir_cls_preds = flatten_dir_cls_preds[pos_inds] + pos_centerness = flatten_centerness[pos_inds] + + pos_depth_cls_preds = None + if self.use_depth_classifier: + flatten_depth_cls_preds = [ + depth_cls_pred.permute(0, 2, 3, + 1).reshape(-1, self.num_depth_cls) + for depth_cls_pred in depth_cls_preds + ] + flatten_depth_cls_preds = torch.cat(flatten_depth_cls_preds) + pos_depth_cls_preds = flatten_depth_cls_preds[pos_inds] + + pos_weights = None + if self.weight_dim != -1: + flatten_weights = [ + weight.permute(0, 2, 3, 1).reshape(-1, self.weight_dim) + for weight in weights + ] + flatten_weights = torch.cat(flatten_weights) + pos_weights = flatten_weights[pos_inds] + + pos_attr_preds = None + if self.pred_attrs: + flatten_attr_preds = [ + attr_pred.permute(0, 2, 3, 1).reshape(-1, self.num_attrs) + for attr_pred in attr_preds + ] + flatten_attr_preds = torch.cat(flatten_attr_preds) + pos_attr_preds = flatten_attr_preds[pos_inds] + + return pos_bbox_preds, pos_dir_cls_preds, pos_depth_cls_preds, \ + pos_weights, pos_attr_preds, pos_centerness + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + depth_cls_preds: List[Tensor], + weights: List[Tensor], + attr_preds: List[Tensor], + centernesses: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * 2. (bin = 2) + depth_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * self.num_depth_cls. + weights (list[Tensor]): Location-aware weights for each scale + level, each is a 4D-tensor, the channel number is + num_points * self.weight_dim. + attr_preds (list[Tensor]): Attribute scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_attrs. + centernesses (list[Tensor]): Centerness for each scale level, each + is a 4D-tensor, the channel number is num_points * 1. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes``、``labels`` + 、``bboxes_3d``、``labels_3d``、``depths``、``centers_2d`` and + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): + Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert len(cls_scores) == len(bbox_preds) == len(dir_cls_preds) == \ + len(depth_cls_preds) == len(weights) == len(centernesses) == \ + len(attr_preds), 'The length of cls_scores, bbox_preds, ' \ + 'dir_cls_preds, depth_cls_preds, weights, centernesses, and' \ + f'attr_preds: {len(cls_scores)}, {len(bbox_preds)}, ' \ + f'{len(dir_cls_preds)}, {len(depth_cls_preds)}, {len(weights)}' \ + f'{len(centernesses)}, {len(attr_preds)} are inconsistent.' + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, + bbox_preds[0].device) + labels_3d, bbox_targets_3d, centerness_targets, attr_targets = \ + self.get_targets( + all_level_points, batch_gt_instances_3d, batch_gt_instances) + + num_imgs = cls_scores[0].size(0) + # flatten cls_scores and targets + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) + for cls_score in cls_scores + ] + flatten_cls_scores = torch.cat(flatten_cls_scores) + flatten_labels_3d = torch.cat(labels_3d) + flatten_bbox_targets_3d = torch.cat(bbox_targets_3d) + flatten_centerness_targets = torch.cat(centerness_targets) + flatten_points = torch.cat( + [points.repeat(num_imgs, 1) for points in all_level_points]) + if self.pred_attrs: + flatten_attr_targets = torch.cat(attr_targets) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((flatten_labels_3d >= 0) + & (flatten_labels_3d < bg_class_ind)).nonzero().reshape(-1) + num_pos = len(pos_inds) + + loss_dict = dict() + + loss_dict['loss_cls'] = self.loss_cls( + flatten_cls_scores, + flatten_labels_3d, + avg_factor=num_pos + num_imgs) # avoid num_pos is 0 + + pos_bbox_preds, pos_dir_cls_preds, pos_depth_cls_preds, pos_weights, \ + pos_attr_preds, pos_centerness = self.get_pos_predictions( + bbox_preds, dir_cls_preds, depth_cls_preds, weights, + attr_preds, centernesses, pos_inds, batch_img_metas) + + if num_pos > 0: + pos_bbox_targets_3d = flatten_bbox_targets_3d[pos_inds] + pos_centerness_targets = flatten_centerness_targets[pos_inds] + pos_points = flatten_points[pos_inds] + if self.pred_attrs: + pos_attr_targets = flatten_attr_targets[pos_inds] + if self.use_direction_classifier: + pos_dir_cls_targets = self.get_direction_target( + pos_bbox_targets_3d, self.dir_offset, one_hot=False) + + bbox_weights = pos_centerness_targets.new_ones( + len(pos_centerness_targets), sum(self.group_reg_dims)) + equal_weights = pos_centerness_targets.new_ones( + pos_centerness_targets.shape) + code_weight = self.train_cfg.get('code_weight', None) + if code_weight: + assert len(code_weight) == sum(self.group_reg_dims) + bbox_weights = bbox_weights * bbox_weights.new_tensor( + code_weight) + + if self.diff_rad_by_sin: + pos_bbox_preds, pos_bbox_targets_3d = self.add_sin_difference( + pos_bbox_preds, pos_bbox_targets_3d) + + loss_dict['loss_offset'] = self.loss_bbox( + pos_bbox_preds[:, :2], + pos_bbox_targets_3d[:, :2], + weight=bbox_weights[:, :2], + avg_factor=equal_weights.sum()) + loss_dict['loss_size'] = self.loss_bbox( + pos_bbox_preds[:, 3:6], + pos_bbox_targets_3d[:, 3:6], + weight=bbox_weights[:, 3:6], + avg_factor=equal_weights.sum()) + loss_dict['loss_rotsin'] = self.loss_bbox( + pos_bbox_preds[:, 6], + pos_bbox_targets_3d[:, 6], + weight=bbox_weights[:, 6], + 
avg_factor=equal_weights.sum()) + if self.pred_velo: + loss_dict['loss_velo'] = self.loss_bbox( + pos_bbox_preds[:, 7:9], + pos_bbox_targets_3d[:, 7:9], + weight=bbox_weights[:, 7:9], + avg_factor=equal_weights.sum()) + + proj_bbox2d_inputs = (bbox_preds, pos_dir_cls_preds, labels_3d, + bbox_targets_3d, pos_points, pos_inds, + batch_img_metas) + + # direction classification loss + # TODO: add more check for use_direction_classifier + if self.use_direction_classifier: + loss_dict['loss_dir'] = self.loss_dir( + pos_dir_cls_preds, + pos_dir_cls_targets, + equal_weights, + avg_factor=equal_weights.sum()) + + # init depth loss with the one computed from direct regression + loss_dict['loss_depth'] = self.loss_bbox( + pos_bbox_preds[:, 2], + pos_bbox_targets_3d[:, 2], + weight=bbox_weights[:, 2], + avg_factor=equal_weights.sum()) + # depth classification loss + if self.use_depth_classifier: + pos_prob_depth_preds = self.bbox_coder.decode_prob_depth( + pos_depth_cls_preds, self.depth_range, self.depth_unit, + self.division, self.num_depth_cls) + sig_alpha = torch.sigmoid(self.fuse_lambda) + if self.weight_dim != -1: + loss_fuse_depth = self.loss_depth( + sig_alpha * pos_bbox_preds[:, 2] + + (1 - sig_alpha) * pos_prob_depth_preds, + pos_bbox_targets_3d[:, 2], + sigma=pos_weights[:, 0], + weight=bbox_weights[:, 2], + avg_factor=equal_weights.sum()) + else: + loss_fuse_depth = self.loss_depth( + sig_alpha * pos_bbox_preds[:, 2] + + (1 - sig_alpha) * pos_prob_depth_preds, + pos_bbox_targets_3d[:, 2], + weight=bbox_weights[:, 2], + avg_factor=equal_weights.sum()) + loss_dict['loss_depth'] = loss_fuse_depth + + proj_bbox2d_inputs += (pos_depth_cls_preds, ) + + if self.pred_keypoints: + # use smoothL1 to compute consistency loss for keypoints + # normalize the offsets with strides + proj_bbox2d_preds, pos_decoded_bbox2d_preds, kpts_targets = \ + self.get_proj_bbox2d(*proj_bbox2d_inputs, with_kpts=True) + loss_dict['loss_kpts'] = self.loss_bbox( + pos_bbox_preds[:, self.kpts_start:self.kpts_start + 16], + kpts_targets, + weight=bbox_weights[:, + self.kpts_start:self.kpts_start + 16], + avg_factor=equal_weights.sum()) + + if self.pred_bbox2d: + loss_dict['loss_bbox2d'] = self.loss_bbox2d( + pos_bbox_preds[:, -4:], + pos_bbox_targets_3d[:, -4:], + weight=bbox_weights[:, -4:], + avg_factor=equal_weights.sum()) + if not self.pred_keypoints: + proj_bbox2d_preds, pos_decoded_bbox2d_preds = \ + self.get_proj_bbox2d(*proj_bbox2d_inputs) + loss_dict['loss_consistency'] = self.loss_consistency( + proj_bbox2d_preds, + pos_decoded_bbox2d_preds, + weight=bbox_weights[:, -4:], + avg_factor=equal_weights.sum()) + + loss_dict['loss_centerness'] = self.loss_centerness( + pos_centerness, pos_centerness_targets) + + # attribute classification loss + if self.pred_attrs: + loss_dict['loss_attr'] = self.loss_attr( + pos_attr_preds, + pos_attr_targets, + pos_centerness_targets, + avg_factor=pos_centerness_targets.sum()) + + else: + # need absolute due to possible negative delta x/y + loss_dict['loss_offset'] = pos_bbox_preds[:, :2].sum() + loss_dict['loss_size'] = pos_bbox_preds[:, 3:6].sum() + loss_dict['loss_rotsin'] = pos_bbox_preds[:, 6].sum() + loss_dict['loss_depth'] = pos_bbox_preds[:, 2].sum() + if self.pred_velo: + loss_dict['loss_velo'] = pos_bbox_preds[:, 7:9].sum() + if self.pred_keypoints: + loss_dict['loss_kpts'] = pos_bbox_preds[:, + self.kpts_start:self. 
+ kpts_start + 16].sum() + if self.pred_bbox2d: + loss_dict['loss_bbox2d'] = pos_bbox_preds[:, -4:].sum() + loss_dict['loss_consistency'] = pos_bbox_preds[:, -4:].sum() + loss_dict['loss_centerness'] = pos_centerness.sum() + if self.use_direction_classifier: + loss_dict['loss_dir'] = pos_dir_cls_preds.sum() + if self.use_depth_classifier: + sig_alpha = torch.sigmoid(self.fuse_lambda) + loss_fuse_depth = \ + sig_alpha * pos_bbox_preds[:, 2].sum() + \ + (1 - sig_alpha) * pos_depth_cls_preds.sum() + if self.weight_dim != -1: + loss_fuse_depth *= torch.exp(-pos_weights[:, 0].sum()) + loss_dict['loss_depth'] = loss_fuse_depth + if self.pred_attrs: + loss_dict['loss_attr'] = pos_attr_preds.sum() + + return loss_dict + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + depth_cls_preds: List[Tensor], + weights: List[Tensor], + attr_preds: List[Tensor], + centernesses: List[Tensor], + batch_img_metas: Optional[List[dict]] = None, + cfg: OptConfigType = None, + rescale: bool = False) -> InstanceList: + """Transform network output for a batch into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_points * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_points * 4, H, W) + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * 2. (bin = 2) + depth_cls_preds (list[Tensor]): Box scores for direction class + predictions on each scale level, each is a 4D-tensor, + the channel number is num_points * self.num_depth_cls. + weights (list[Tensor]): Location-aware weights for each scale + level, each is a 4D-tensor, the channel number is + num_points * self.weight_dim. + attr_preds (list[Tensor]): Attribute scores for each scale level + Has shape (N, num_points * num_attrs, H, W) + centernesses (list[Tensor]): Centerness for each scale level with + shape (N, num_points * 1, H, W) + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cfg (mmengine.Config, optional): Test / postprocessing config, + if None, test_cfg would be used. Defaults to None. + rescale (bool, optional): If True, return boxes in original image + space. Defaults to False. + + Returns: + list[tuple[Tensor]]: Each item in result_list is a tuple, which + consists of predicted 3D boxes, scores, labels, attributes and + 2D boxes (if necessary). + """ + assert len(cls_scores) == len(bbox_preds) == len(dir_cls_preds) == \ + len(depth_cls_preds) == len(weights) == len(centernesses) == \ + len(attr_preds), 'The length of cls_scores, bbox_preds, ' \ + 'dir_cls_preds, depth_cls_preds, weights, centernesses, and' \ + f'attr_preds: {len(cls_scores)}, {len(bbox_preds)}, ' \ + f'{len(dir_cls_preds)}, {len(depth_cls_preds)}, {len(weights)}' \ + f'{len(centernesses)}, {len(attr_preds)} are inconsistent.' 
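+ # The loop below gathers the detached per-level predictions of each
+ # image. Branches that are disabled (direction classifier, depth
+ # classifier, location-aware weights, attributes) are replaced by
+ # zero-filled placeholders (background-filled for attributes) with
+ # matching spatial shapes, so that `_predict_by_feat_single` always
+ # receives a complete set of inputs.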
+ num_levels = len(cls_scores) + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, + bbox_preds[0].device) + result_list = [] + result_list_2d = [] + + for img_id in range(len(batch_img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + if self.use_direction_classifier: + dir_cls_pred_list = [ + dir_cls_preds[i][img_id].detach() + for i in range(num_levels) + ] + else: + dir_cls_pred_list = [ + cls_scores[i][img_id].new_full( + [2, *cls_scores[i][img_id].shape[1:]], 0).detach() + for i in range(num_levels) + ] + if self.use_depth_classifier: + depth_cls_pred_list = [ + depth_cls_preds[i][img_id].detach() + for i in range(num_levels) + ] + else: + depth_cls_pred_list = [ + cls_scores[i][img_id].new_full( + [self.num_depth_cls, *cls_scores[i][img_id].shape[1:]], + 0).detach() for i in range(num_levels) + ] + if self.weight_dim != -1: + weight_list = [ + weights[i][img_id].detach() for i in range(num_levels) + ] + else: + weight_list = [ + cls_scores[i][img_id].new_full( + [1, *cls_scores[i][img_id].shape[1:]], 0).detach() + for i in range(num_levels) + ] + if self.pred_attrs: + attr_pred_list = [ + attr_preds[i][img_id].detach() for i in range(num_levels) + ] + else: + attr_pred_list = [ + cls_scores[i][img_id].new_full( + [self.num_attrs, *cls_scores[i][img_id].shape[1:]], + self.attr_background_label).detach() + for i in range(num_levels) + ] + centerness_pred_list = [ + centernesses[i][img_id].detach() for i in range(num_levels) + ] + img_meta = batch_img_metas[img_id] + results, results_2d = self._predict_by_feat_single( + cls_score_list=cls_score_list, + bbox_pred_list=bbox_pred_list, + dir_cls_pred_list=dir_cls_pred_list, + depth_cls_pred_list=depth_cls_pred_list, + weight_list=weight_list, + attr_pred_list=attr_pred_list, + centerness_pred_list=centerness_pred_list, + mlvl_points=mlvl_points, + img_meta=img_meta, + cfg=cfg, + rescale=rescale) + result_list.append(results) + result_list_2d.append(results_2d) + return result_list, result_list_2d + + def _predict_by_feat_single(self, + cls_score_list: List[Tensor], + bbox_pred_list: List[Tensor], + dir_cls_pred_list: List[Tensor], + depth_cls_pred_list: List[Tensor], + weight_list: List[Tensor], + attr_pred_list: List[Tensor], + centerness_pred_list: List[Tensor], + mlvl_points: Tensor, + img_meta: dict, + cfg: ConfigType, + rescale: bool = False) -> InstanceData: + """Transform outputs for a single batch item into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores for a single scale level + Has shape (num_points * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for a single scale + level with shape (num_points * bbox_code_size, H, W). + dir_cls_preds (list[Tensor]): Box scores for direction class + predictions on a single scale level with shape + (num_points * 2, H, W) + depth_cls_preds (list[Tensor]): Box scores for probabilistic depth + predictions on a single scale level with shape + (num_points * self.num_depth_cls, H, W) + weights (list[Tensor]): Location-aware weight maps on a single + scale level with shape (num_points * self.weight_dim, H, W). + attr_preds (list[Tensor]): Attribute scores for each scale level + Has shape (N, num_points * num_attrs, H, W) + centernesses (list[Tensor]): Centerness for a single scale level + with shape (num_points, H, W). 
+ mlvl_points (list[Tensor]): Box reference for a single scale level + with shape (num_total_points, 2). + img_meta (dict): Metadata of input image. + cfg (mmengine.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool, optional): If True, return boxes in original image + space. Defaults to False. + + Returns: + tuples[Tensor]: Predicted 3D boxes, scores, labels, attributes and + 2D boxes (if necessary). + """ + view = np.array(img_meta['cam2img']) + scale_factor = img_meta['scale_factor'] + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_points) + mlvl_centers2d = [] + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_dir_scores = [] + mlvl_attr_scores = [] + mlvl_centerness = [] + mlvl_depth_cls_scores = [] + mlvl_depth_uncertainty = [] + mlvl_bboxes2d = None + if self.pred_bbox2d: + mlvl_bboxes2d = [] + + for cls_score, bbox_pred, dir_cls_pred, depth_cls_pred, weight, \ + attr_pred, centerness, points in zip( + cls_score_list, bbox_pred_list, dir_cls_pred_list, + depth_cls_pred_list, weight_list, attr_pred_list, + centerness_pred_list, mlvl_points): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + scores = cls_score.permute(1, 2, 0).reshape( + -1, self.cls_out_channels).sigmoid() + dir_cls_pred = dir_cls_pred.permute(1, 2, 0).reshape(-1, 2) + dir_cls_score = torch.max(dir_cls_pred, dim=-1)[1] + depth_cls_pred = depth_cls_pred.permute(1, 2, 0).reshape( + -1, self.num_depth_cls) + depth_cls_score = F.softmax( + depth_cls_pred, dim=-1).topk( + k=2, dim=-1)[0].mean(dim=-1) + if self.weight_dim != -1: + weight = weight.permute(1, 2, 0).reshape(-1, self.weight_dim) + else: + weight = weight.permute(1, 2, 0).reshape(-1, 1) + depth_uncertainty = torch.exp(-weight[:, -1]) + attr_pred = attr_pred.permute(1, 2, 0).reshape(-1, self.num_attrs) + attr_score = torch.max(attr_pred, dim=-1)[1] + centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid() + + bbox_pred = bbox_pred.permute(1, 2, + 0).reshape(-1, + sum(self.group_reg_dims)) + bbox_pred3d = bbox_pred[:, :self.bbox_coder.bbox_code_size] + if self.pred_bbox2d: + bbox_pred2d = bbox_pred[:, -4:] + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + merged_scores = scores * centerness[:, None] + if self.use_depth_classifier: + merged_scores *= depth_cls_score[:, None] + if self.weight_dim != -1: + merged_scores *= depth_uncertainty[:, None] + max_scores, _ = merged_scores.max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + points = points[topk_inds, :] + bbox_pred3d = bbox_pred3d[topk_inds, :] + scores = scores[topk_inds, :] + dir_cls_pred = dir_cls_pred[topk_inds, :] + depth_cls_pred = depth_cls_pred[topk_inds, :] + centerness = centerness[topk_inds] + dir_cls_score = dir_cls_score[topk_inds] + depth_cls_score = depth_cls_score[topk_inds] + depth_uncertainty = depth_uncertainty[topk_inds] + attr_score = attr_score[topk_inds] + if self.pred_bbox2d: + bbox_pred2d = bbox_pred2d[topk_inds, :] + # change the offset to actual center predictions + bbox_pred3d[:, :2] = points - bbox_pred3d[:, :2] + if rescale: + bbox_pred3d[:, :2] /= bbox_pred3d[:, :2].new_tensor( + scale_factor[0]) + if self.pred_bbox2d: + bbox_pred2d /= bbox_pred2d.new_tensor(scale_factor[0]) + if self.use_depth_classifier: + prob_depth_pred = self.bbox_coder.decode_prob_depth( + depth_cls_pred, self.depth_range, self.depth_unit, + self.division, self.num_depth_cls) + sig_alpha = torch.sigmoid(self.fuse_lambda) + bbox_pred3d[:, 2] = sig_alpha * 
bbox_pred3d[:, 2] + \ + (1 - sig_alpha) * prob_depth_pred + pred_center2d = bbox_pred3d[:, :3].clone() + bbox_pred3d[:, :3] = points_img2cam(bbox_pred3d[:, :3], view) + mlvl_centers2d.append(pred_center2d) + mlvl_bboxes.append(bbox_pred3d) + mlvl_scores.append(scores) + mlvl_dir_scores.append(dir_cls_score) + mlvl_depth_cls_scores.append(depth_cls_score) + mlvl_attr_scores.append(attr_score) + mlvl_centerness.append(centerness) + mlvl_depth_uncertainty.append(depth_uncertainty) + if self.pred_bbox2d: + bbox_pred2d = distance2bbox( + points, bbox_pred2d, max_shape=img_meta['img_shape']) + mlvl_bboxes2d.append(bbox_pred2d) + + mlvl_centers2d = torch.cat(mlvl_centers2d) + mlvl_bboxes = torch.cat(mlvl_bboxes) + mlvl_dir_scores = torch.cat(mlvl_dir_scores) + if self.pred_bbox2d: + mlvl_bboxes2d = torch.cat(mlvl_bboxes2d) + + # change local yaw to global yaw for 3D nms + cam2img = torch.eye( + 4, dtype=mlvl_centers2d.dtype, device=mlvl_centers2d.device) + cam2img[:view.shape[0], :view.shape[1]] = \ + mlvl_centers2d.new_tensor(view) + mlvl_bboxes = self.bbox_coder.decode_yaw(mlvl_bboxes, mlvl_centers2d, + mlvl_dir_scores, + self.dir_offset, cam2img) + + mlvl_bboxes_for_nms = xywhr2xyxyr(img_meta['box_type_3d']( + mlvl_bboxes, + box_dim=self.bbox_coder.bbox_code_size, + origin=(0.5, 0.5, 0.5)).bev) + + mlvl_scores = torch.cat(mlvl_scores) + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 + # BG cat_id: num_class + mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) + mlvl_attr_scores = torch.cat(mlvl_attr_scores) + mlvl_centerness = torch.cat(mlvl_centerness) + # no scale_factors in box3d_multiclass_nms + # Then we multiply it from outside + mlvl_nms_scores = mlvl_scores * mlvl_centerness[:, None] + if self.use_depth_classifier: # multiply the depth confidence + mlvl_depth_cls_scores = torch.cat(mlvl_depth_cls_scores) + mlvl_nms_scores *= mlvl_depth_cls_scores[:, None] + if self.weight_dim != -1: + mlvl_depth_uncertainty = torch.cat(mlvl_depth_uncertainty) + mlvl_nms_scores *= mlvl_depth_uncertainty[:, None] + nms_results = box3d_multiclass_nms(mlvl_bboxes, mlvl_bboxes_for_nms, + mlvl_nms_scores, cfg.score_thr, + cfg.max_per_img, cfg, + mlvl_dir_scores, mlvl_attr_scores, + mlvl_bboxes2d) + bboxes, scores, labels, dir_scores, attrs = nms_results[0:5] + attrs = attrs.to(labels.dtype) # change data type to int + bboxes = img_meta['box_type_3d']( + bboxes, + box_dim=self.bbox_coder.bbox_code_size, + origin=(0.5, 0.5, 0.5)) + # Note that the predictions use origin (0.5, 0.5, 0.5) + # Due to the ground truth centers2d are the gravity center of objects + # v0.10.0 fix inplace operation to the input tensor of cam_box3d + # So here we also need to add origin=(0.5, 0.5, 0.5) + if not self.pred_attrs: + attrs = None + + results = InstanceData() + results.bboxes_3d = bboxes + results.scores_3d = scores + results.labels_3d = labels + + if attrs is not None: + results.attr_labels = attrs + + results_2d = InstanceData() + + if self.pred_bbox2d: + bboxes2d = nms_results[-1] + results_2d.bboxes = bboxes2d + results_2d.scores = scores + results_2d.labels = labels + + return results, results_2d + + def get_targets( + self, + points: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_gt_instances: InstanceList, + ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]: + """Compute regression, classification and centerss targets for points + in multiple images. 
+ + Args: + points (list[Tensor]): Points of each fpn level, each has shape + (num_points, 2). + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、 + ``labels_3d``、``depths``、``centers_2d`` and attributes. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes``、``labels``. + + Returns: + tuple: + concat_lvl_labels (list[Tensor]): Labels of each level. \ + concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ + level. + """ + assert len(points) == len(self.regress_ranges) + num_levels = len(points) + # expand regress ranges to align with points + expanded_regress_ranges = [ + points[i].new_tensor(self.regress_ranges[i])[None].expand_as( + points[i]) for i in range(num_levels) + ] + # concat all levels points and regress ranges + concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) + concat_points = torch.cat(points, dim=0) + + # the number of points per img, per lvl + num_points = [center.size(0) for center in points] + + if 'attr_labels' not in batch_gt_instances_3d[0]: + for gt_instances_3d in batch_gt_instances_3d: + gt_instances_3d.attr_labels = \ + gt_instances_3d.labels_3d.new_full( + gt_instances_3d.labels_3d.shape, + self.attr_background_label) + + # get labels and bbox_targets of each image + _, bbox_targets_list, labels_3d_list, bbox_targets_3d_list, \ + centerness_targets_list, attr_targets_list = multi_apply( + self._get_target_single, + batch_gt_instances_3d, + batch_gt_instances, + points=concat_points, + regress_ranges=concat_regress_ranges, + num_points_per_lvl=num_points) + + # split to per img, per level + bbox_targets_list = [ + bbox_targets.split(num_points, 0) + for bbox_targets in bbox_targets_list + ] + labels_3d_list = [ + labels_3d.split(num_points, 0) for labels_3d in labels_3d_list + ] + bbox_targets_3d_list = [ + bbox_targets_3d.split(num_points, 0) + for bbox_targets_3d in bbox_targets_3d_list + ] + centerness_targets_list = [ + centerness_targets.split(num_points, 0) + for centerness_targets in centerness_targets_list + ] + attr_targets_list = [ + attr_targets.split(num_points, 0) + for attr_targets in attr_targets_list + ] + + # concat per level image + concat_lvl_labels_3d = [] + concat_lvl_bbox_targets_3d = [] + concat_lvl_centerness_targets = [] + concat_lvl_attr_targets = [] + for i in range(num_levels): + concat_lvl_labels_3d.append( + torch.cat([labels[i] for labels in labels_3d_list])) + concat_lvl_centerness_targets.append( + torch.cat([ + centerness_targets[i] + for centerness_targets in centerness_targets_list + ])) + bbox_targets_3d = torch.cat([ + bbox_targets_3d[i] for bbox_targets_3d in bbox_targets_3d_list + ]) + if self.pred_bbox2d: + bbox_targets = torch.cat( + [bbox_targets[i] for bbox_targets in bbox_targets_list]) + bbox_targets_3d = torch.cat([bbox_targets_3d, bbox_targets], + dim=1) + concat_lvl_attr_targets.append( + torch.cat( + [attr_targets[i] for attr_targets in attr_targets_list])) + if self.norm_on_bbox: + bbox_targets_3d[:, :2] = \ + bbox_targets_3d[:, :2] / self.strides[i] + if self.pred_bbox2d: + bbox_targets_3d[:, -4:] = \ + bbox_targets_3d[:, -4:] / self.strides[i] + concat_lvl_bbox_targets_3d.append(bbox_targets_3d) + return concat_lvl_labels_3d, concat_lvl_bbox_targets_3d, \ + concat_lvl_centerness_targets, concat_lvl_attr_targets diff --git a/mmdet3d/models/dense_heads/point_rpn_head.py b/mmdet3d/models/dense_heads/point_rpn_head.py new file mode 100755 index 0000000..a575162 --- /dev/null +++ 
b/mmdet3d/models/dense_heads/point_rpn_head.py @@ -0,0 +1,511 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import torch +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.layers import nms_bev, nms_normal_bev +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures import xywhr2xyxyr +from mmdet3d.structures.bbox_3d import (BaseInstance3DBoxes, + DepthInstance3DBoxes, + LiDARInstance3DBoxes) +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils.typing_utils import InstanceList + + +@MODELS.register_module() +class PointRPNHead(BaseModule): + """RPN module for PointRCNN. + + Args: + num_classes (int): Number of classes. + train_cfg (dict): Train configs. + test_cfg (dict): Test configs. + pred_layer_cfg (dict, optional): Config of classification and + regression prediction layers. Defaults to None. + enlarge_width (float, optional): Enlarge bbox for each side to ignore + close points. Defaults to 0.1. + cls_loss (dict, optional): Config of direction classification loss. + Defaults to None. + bbox_loss (dict, optional): Config of localization loss. + Defaults to None. + bbox_coder (dict, optional): Config dict of box coders. + Defaults to None. + init_cfg (dict, optional): Config of initialization. Defaults to None. + """ + + def __init__(self, + num_classes: int, + train_cfg: dict, + test_cfg: dict, + pred_layer_cfg: Optional[dict] = None, + enlarge_width: float = 0.1, + cls_loss: Optional[dict] = None, + bbox_loss: Optional[dict] = None, + bbox_coder: Optional[dict] = None, + init_cfg: Optional[dict] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.enlarge_width = enlarge_width + + # build loss function + self.bbox_loss = MODELS.build(bbox_loss) + self.cls_loss = MODELS.build(cls_loss) + + # build box coder + self.bbox_coder = TASK_UTILS.build(bbox_coder) + + # build pred conv + self.cls_layers = self._make_fc_layers( + fc_cfg=pred_layer_cfg.cls_linear_channels, + input_channels=pred_layer_cfg.in_channels, + output_channels=self._get_cls_out_channels()) + + self.reg_layers = self._make_fc_layers( + fc_cfg=pred_layer_cfg.reg_linear_channels, + input_channels=pred_layer_cfg.in_channels, + output_channels=self._get_reg_out_channels()) + + def _make_fc_layers(self, fc_cfg: dict, input_channels: int, + output_channels: int) -> nn.Sequential: + """Make fully connect layers. + + Args: + fc_cfg (dict): Config of fully connect. + input_channels (int): Input channels for fc_layers. + output_channels (int): Input channels for fc_layers. + + Returns: + nn.Sequential: Fully connect layers. 
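+
+ Example:
+ A sketch of the produced stack, assuming the hypothetical values
+ ``fc_cfg=(128, 128)``, ``input_channels=256`` and
+ ``output_channels=1``::
+
+ Linear(256, 128, bias=False) -> BatchNorm1d(128) -> ReLU() ->
+ Linear(128, 128, bias=False) -> BatchNorm1d(128) -> ReLU() ->
+ Linear(128, 1, bias=True)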
+ """ + fc_layers = [] + c_in = input_channels + for k in range(0, fc_cfg.__len__()): + fc_layers.extend([ + nn.Linear(c_in, fc_cfg[k], bias=False), + nn.BatchNorm1d(fc_cfg[k]), + nn.ReLU(), + ]) + c_in = fc_cfg[k] + fc_layers.append(nn.Linear(c_in, output_channels, bias=True)) + return nn.Sequential(*fc_layers) + + def _get_cls_out_channels(self): + """Return the channel number of classification outputs.""" + # Class numbers (k) + objectness (1) + return self.num_classes + + def _get_reg_out_channels(self): + """Return the channel number of regression outputs.""" + # Bbox classification and regression + # (center residual (3), size regression (3) + # torch.cos(yaw) (1), torch.sin(yaw) (1) + return self.bbox_coder.code_size + + def forward(self, feat_dict: dict) -> Tuple[List[Tensor]]: + """Forward pass. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + tuple[list[torch.Tensor]]: Predicted boxes and classification + scores. + """ + point_features = feat_dict['fp_features'] + point_features = point_features.permute(0, 2, 1).contiguous() + batch_size = point_features.shape[0] + feat_cls = point_features.view(-1, point_features.shape[-1]) + feat_reg = point_features.view(-1, point_features.shape[-1]) + + point_cls_preds = self.cls_layers(feat_cls).reshape( + batch_size, -1, self._get_cls_out_channels()) + point_box_preds = self.reg_layers(feat_reg).reshape( + batch_size, -1, self._get_reg_out_channels()) + return point_box_preds, point_cls_preds + + def loss_by_feat( + self, + bbox_preds: List[Tensor], + cls_preds: List[Tensor], + points: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_input_metas: Optional[List[dict]] = None, + batch_gt_instances_ignore: Optional[InstanceList] = None) -> Dict: + """Compute loss. + + Args: + bbox_preds (list[torch.Tensor]): Predictions from forward of + PointRCNN RPN_Head. + cls_preds (list[torch.Tensor]): Classification from forward of + PointRCNN RPN_Head. + points (list[torch.Tensor]): Input points. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances_3d. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_input_metas (list[dict]): Contain pcd and img's meta info. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict: Losses of PointRCNN RPN module. + """ + targets = self.get_targets(points, batch_gt_instances_3d) + (bbox_targets, mask_targets, positive_mask, negative_mask, + box_loss_weights, point_targets) = targets + + # bbox loss + bbox_loss = self.bbox_loss(bbox_preds, bbox_targets, + box_loss_weights.unsqueeze(-1)) + # calculate semantic loss + semantic_points = cls_preds.reshape(-1, self.num_classes) + semantic_targets = mask_targets + semantic_targets[negative_mask] = self.num_classes + semantic_points_label = semantic_targets + # for ignore, but now we do not have ignored label + semantic_loss_weight = negative_mask.float() + positive_mask.float() + semantic_loss = self.cls_loss(semantic_points, + semantic_points_label.reshape(-1), + semantic_loss_weight.reshape(-1)) + semantic_loss /= positive_mask.float().sum() + losses = dict(bbox_loss=bbox_loss, semantic_loss=semantic_loss) + + return losses + + def get_targets(self, points: List[Tensor], + batch_gt_instances_3d: InstanceList) -> Tuple[Tensor]: + """Generate targets of PointRCNN RPN head. 
+ + Args: + points (list[torch.Tensor]): Points in one batch. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances_3d. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + + Returns: + tuple[torch.Tensor]: Targets of PointRCNN RPN head. + """ + gt_labels_3d = [ + instances.labels_3d for instances in batch_gt_instances_3d + ] + gt_bboxes_3d = [ + instances.bboxes_3d for instances in batch_gt_instances_3d + ] + + (bbox_targets, mask_targets, positive_mask, negative_mask, + point_targets) = multi_apply(self.get_targets_single, points, + gt_bboxes_3d, gt_labels_3d) + + bbox_targets = torch.stack(bbox_targets) + mask_targets = torch.stack(mask_targets) + positive_mask = torch.stack(positive_mask) + negative_mask = torch.stack(negative_mask) + box_loss_weights = positive_mask / (positive_mask.sum() + 1e-6) + + return (bbox_targets, mask_targets, positive_mask, negative_mask, + box_loss_weights, point_targets) + + def get_targets_single(self, points: Tensor, + gt_bboxes_3d: BaseInstance3DBoxes, + gt_labels_3d: Tensor) -> Tuple[Tensor]: + """Generate targets of PointRCNN RPN head for single batch. + + Args: + points (torch.Tensor): Points of each batch. + gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth + boxes of each batch. + gt_labels_3d (torch.Tensor): Labels of each batch. + + Returns: + tuple[torch.Tensor]: Targets of ssd3d head. + """ + gt_bboxes_3d = gt_bboxes_3d.to(points.device) + + valid_gt = gt_labels_3d != -1 + gt_bboxes_3d = gt_bboxes_3d[valid_gt] + gt_labels_3d = gt_labels_3d[valid_gt] + + # transform the bbox coordinate to the point cloud coordinate + gt_bboxes_3d_tensor = gt_bboxes_3d.tensor.clone() + gt_bboxes_3d_tensor[..., 2] += gt_bboxes_3d_tensor[..., 5] / 2 + + points_mask, assignment = self._assign_targets_by_points_inside( + gt_bboxes_3d, points) + gt_bboxes_3d_tensor = gt_bboxes_3d_tensor[assignment] + mask_targets = gt_labels_3d[assignment] + + bbox_targets = self.bbox_coder.encode(gt_bboxes_3d_tensor, + points[..., 0:3], mask_targets) + + positive_mask = (points_mask.max(1)[0] > 0) + # add ignore_mask + extend_gt_bboxes_3d = gt_bboxes_3d.enlarged_box(self.enlarge_width) + points_mask, _ = self._assign_targets_by_points_inside( + extend_gt_bboxes_3d, points) + negative_mask = (points_mask.max(1)[0] == 0) + + point_targets = points[..., 0:3] + return (bbox_targets, mask_targets, positive_mask, negative_mask, + point_targets) + + def predict_by_feat(self, points: Tensor, bbox_preds: List[Tensor], + cls_preds: List[Tensor], batch_input_metas: List[dict], + cfg: Optional[dict]) -> InstanceList: + """Generate bboxes from RPN head predictions. + + Args: + points (torch.Tensor): Input points. + bbox_preds (list[tensor]): Regression predictions from PointRCNN + head. + cls_preds (list[tensor]): Class scores predictions from PointRCNN + head. + batch_input_metas (list[dict]): Batch inputs meta info. + cfg (ConfigDict, optional): Test / postprocessing + configuration. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + - cls_preds (torch.Tensor): Class score of each bbox. 
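+
+ Example:
+ A call sketch; the tensor shapes are assumptions for illustration
+ (``B`` samples, ``N`` points per sample)::
+
+ >>> # points: (B, N, 3+C), bbox_preds: (B, N, code_size),
+ >>> # cls_preds: (B, N, num_classes)
+ >>> results = self.predict_by_feat(
+ ...     points, bbox_preds, cls_preds,
+ ...     batch_input_metas, cfg=self.test_cfg)
+ >>> results[0].bboxes_3d  # proposals kept after class-agnostic NMS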
+ """ + sem_scores = cls_preds.sigmoid() + obj_scores = sem_scores.max(-1)[0] + object_class = sem_scores.argmax(dim=-1) + + batch_size = sem_scores.shape[0] + results = list() + for b in range(batch_size): + bbox3d = self.bbox_coder.decode(bbox_preds[b], points[b, ..., :3], + object_class[b]) + mask = ~bbox3d.sum(dim=1).isinf() + bbox_selected, score_selected, labels, cls_preds_selected = \ + self.class_agnostic_nms(obj_scores[b][mask], + sem_scores[b][mask, :], + bbox3d[mask, :], + points[b, ..., :3][mask, :], + batch_input_metas[b], + cfg.nms_cfg) + bbox_selected = batch_input_metas[b]['box_type_3d']( + bbox_selected, box_dim=bbox_selected.shape[-1]) + result = InstanceData() + result.bboxes_3d = bbox_selected + result.scores_3d = score_selected + result.labels_3d = labels + result.cls_preds = cls_preds_selected + results.append(result) + return results + + def class_agnostic_nms(self, obj_scores: Tensor, sem_scores: Tensor, + bbox: Tensor, points: Tensor, input_meta: Dict, + nms_cfg: Dict) -> Tuple[Tensor]: + """Class agnostic nms. + + Args: + obj_scores (torch.Tensor): Objectness score of bounding boxes. + sem_scores (torch.Tensor): Semantic class score of bounding boxes. + bbox (torch.Tensor): Predicted bounding boxes. + points (torch.Tensor): Input points. + input_meta (dict): Contain pcd and img's meta info. + nms_cfg (dict): NMS config dict. + + Returns: + tuple[torch.Tensor]: Bounding boxes, scores and labels. + """ + if nms_cfg.use_rotate_nms: + nms_func = nms_bev + else: + nms_func = nms_normal_bev + + num_bbox = bbox.shape[0] + bbox = input_meta['box_type_3d']( + bbox.clone(), + box_dim=bbox.shape[-1], + with_yaw=True, + origin=(0.5, 0.5, 0.5)) + + if isinstance(bbox, LiDARInstance3DBoxes): + box_idx = bbox.points_in_boxes(points) + box_indices = box_idx.new_zeros([num_bbox + 1]) + box_idx[box_idx == -1] = num_bbox + box_indices.scatter_add_(0, box_idx.long(), + box_idx.new_ones(box_idx.shape)) + box_indices = box_indices[:-1] + nonempty_box_mask = box_indices >= 0 + elif isinstance(bbox, DepthInstance3DBoxes): + box_indices = bbox.points_in_boxes(points) + nonempty_box_mask = box_indices.T.sum(1) >= 0 + else: + raise NotImplementedError('Unsupported bbox type!') + + bbox = bbox[nonempty_box_mask] + + if nms_cfg.score_thr is not None: + score_thr = nms_cfg.score_thr + keep = (obj_scores >= score_thr) + obj_scores = obj_scores[keep] + sem_scores = sem_scores[keep] + bbox = bbox.tensor[keep] + + if bbox.tensor.shape[0] > 0: + topk = min(nms_cfg.nms_pre, obj_scores.shape[0]) + obj_scores_nms, indices = torch.topk(obj_scores, k=topk) + bbox_for_nms = xywhr2xyxyr(bbox[indices].bev) + sem_scores_nms = sem_scores[indices] + + keep = nms_func(bbox_for_nms, obj_scores_nms, nms_cfg.iou_thr) + keep = keep[:nms_cfg.nms_post] + + bbox_selected = bbox.tensor[indices][keep] + score_selected = obj_scores_nms[keep] + cls_preds = sem_scores_nms[keep] + labels = torch.argmax(cls_preds, -1) + if bbox_selected.shape[0] > nms_cfg.nms_post: + _, inds = score_selected.sort(descending=True) + inds = inds[:score_selected.nms_post] + bbox_selected = bbox_selected[inds, :] + labels = labels[inds] + score_selected = score_selected[inds] + cls_preds = cls_preds[inds, :] + else: + bbox_selected = bbox.tensor + score_selected = obj_scores.new_zeros([0]) + labels = obj_scores.new_zeros([0]) + cls_preds = obj_scores.new_zeros([0, sem_scores.shape[-1]]) + return bbox_selected, score_selected, labels, cls_preds + + def _assign_targets_by_points_inside(self, bboxes_3d: BaseInstance3DBoxes, + points: Tensor) -> 
Tuple[Tensor]: + """Compute assignment by checking whether point is inside bbox. + + Args: + bboxes_3d (:obj:`BaseInstance3DBoxes`): Instance of bounding boxes. + points (torch.Tensor): Points of a batch. + + Returns: + tuple[torch.Tensor]: Flags indicating whether each point is + inside bbox and the index of box where each point are in. + """ + # TODO: align points_in_boxes function in each box_structures + num_bbox = bboxes_3d.tensor.shape[0] + if isinstance(bboxes_3d, LiDARInstance3DBoxes): + assignment = bboxes_3d.points_in_boxes(points[:, 0:3]).long() + points_mask = assignment.new_zeros( + [assignment.shape[0], num_bbox + 1]) + assignment[assignment == -1] = num_bbox + points_mask.scatter_(1, assignment.unsqueeze(1), 1) + points_mask = points_mask[:, :-1] + assignment[assignment == num_bbox] = num_bbox - 1 + elif isinstance(bboxes_3d, DepthInstance3DBoxes): + points_mask = bboxes_3d.points_in_boxes(points) + assignment = points_mask.argmax(dim=-1) + else: + raise NotImplementedError('Unsupported bbox type!') + + return points_mask, assignment + + def predict(self, feats_dict: Dict, + batch_data_samples: SampleList) -> InstanceList: + """Perform forward propagation of the 3D detection head and predict + detection results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + raw_points = feats_dict.pop('raw_points') + bbox_preds, cls_preds = self(feats_dict) + proposal_cfg = self.test_cfg + + proposal_list = self.predict_by_feat( + raw_points, + bbox_preds, + cls_preds, + cfg=proposal_cfg, + batch_input_metas=batch_input_metas) + feats_dict['points_cls_preds'] = cls_preds + return proposal_list + + def loss_and_predict(self, + feats_dict: Dict, + batch_data_samples: SampleList, + proposal_cfg: Optional[dict] = None, + **kwargs) -> Tuple[dict, InstanceList]: + """Perform forward propagation of the head, then calculate loss and + predictions from the features and data samples. + + Args: + feats_dict (dict): Contains features from the first stage. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + proposal_cfg (ConfigDict, optional): Proposal config. + + Returns: + tuple: the return value is a tuple contains: + + - losses: (dict[str, Tensor]): A dictionary of loss components. + - predictions (list[:obj:`InstanceData`]): Detection + results of each sample after the post process. 
+ """ + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + raw_points = feats_dict.pop('raw_points') + bbox_preds, cls_preds = self(feats_dict) + + loss_inputs = (bbox_preds, cls_preds, + raw_points) + (batch_gt_instances_3d, batch_input_metas, + batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + + predictions = self.predict_by_feat( + raw_points, + bbox_preds, + cls_preds, + batch_input_metas=batch_input_metas, + cfg=proposal_cfg) + feats_dict['points_cls_preds'] = cls_preds + if predictions[0].bboxes_3d.tensor.isinf().any(): + print(predictions) + return losses, predictions diff --git a/mmdet3d/models/dense_heads/shape_aware_head.py b/mmdet3d/models/dense_heads/shape_aware_head.py new file mode 100755 index 0000000..4c192c6 --- /dev/null +++ b/mmdet3d/models/dense_heads/shape_aware_head.py @@ -0,0 +1,537 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +from mmcv.cnn import ConvModule +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.layers import box3d_multiclass_nms +from mmdet3d.registry import MODELS +from mmdet3d.structures import limit_period, xywhr2xyxyr +from mmdet3d.utils import InstanceList, OptInstanceList +from .anchor3d_head import Anchor3DHead + + +@MODELS.register_module() +class BaseShapeHead(BaseModule): + """Base Shape-aware Head in Shape Signature Network. + + Note: + This base shape-aware grouping head uses default settings for small + objects. For large and huge objects, it is recommended to use + heavier heads, like (64, 64, 64) and (128, 128, 64, 64, 64) in + shared conv channels, (2, 1, 1) and (2, 1, 2, 1, 1) in shared + conv strides. For tiny objects, we can use smaller heads, like + (32, 32) channels and (1, 1) strides. + + Args: + num_cls (int): Number of classes. + num_base_anchors (int): Number of anchors per location. + box_code_size (int): The dimension of boxes to be encoded. + in_channels (int): Input channels for convolutional layers. + shared_conv_channels (tuple, optional): Channels for shared + convolutional layers. Default: (64, 64). + shared_conv_strides (tuple): Strides for shared + convolutional layers. Default: (1, 1). + use_direction_classifier (bool): Whether to use direction + classifier. Default: True. + conv_cfg (dict): Config of conv layer. + Default: dict(type='Conv2d') + norm_cfg (dict): Config of norm layer. + Default: dict(type='BN2d'). + bias (bool | str): Type of bias. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + num_cls: int, + num_base_anchors: int, + box_code_size: int, + in_channels: int, + shared_conv_channels: Tuple = (64, 64), + shared_conv_strides: Tuple = (1, 1), + use_direction_classifier: bool = True, + conv_cfg: Dict = dict(type='Conv2d'), + norm_cfg: Dict = dict(type='BN2d'), + bias: bool = False, + init_cfg: Optional[dict] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.num_cls = num_cls + self.num_base_anchors = num_base_anchors + self.use_direction_classifier = use_direction_classifier + self.box_code_size = box_code_size + + assert len(shared_conv_channels) == len(shared_conv_strides), \ + 'Lengths of channels and strides list should be equal.' + + self.shared_conv_channels = [in_channels] + list(shared_conv_channels) + self.shared_conv_strides = list(shared_conv_strides) + + shared_conv = [] + for i in range(len(self.shared_conv_strides)): + shared_conv.append( + ConvModule( + self.shared_conv_channels[i], + self.shared_conv_channels[i + 1], + kernel_size=3, + stride=self.shared_conv_strides[i], + padding=1, + conv_cfg=conv_cfg, + bias=bias, + norm_cfg=norm_cfg)) + + self.shared_conv = nn.Sequential(*shared_conv) + + out_channels = self.shared_conv_channels[-1] + self.conv_cls = nn.Conv2d(out_channels, num_base_anchors * num_cls, 1) + self.conv_reg = nn.Conv2d(out_channels, + num_base_anchors * box_code_size, 1) + + if use_direction_classifier: + self.conv_dir_cls = nn.Conv2d(out_channels, num_base_anchors * 2, + 1) + if init_cfg is None: + if use_direction_classifier: + self.init_cfg = dict( + type='Kaiming', + layer='Conv2d', + override=[ + dict(type='Normal', name='conv_reg', std=0.01), + dict( + type='Normal', + name='conv_cls', + std=0.01, + bias_prob=0.01), + dict( + type='Normal', + name='conv_dir_cls', + std=0.01, + bias_prob=0.01) + ]) + else: + self.init_cfg = dict( + type='Kaiming', + layer='Conv2d', + override=[ + dict(type='Normal', name='conv_reg', std=0.01), + dict( + type='Normal', + name='conv_cls', + std=0.01, + bias_prob=0.01) + ]) + + def forward(self, x: Tensor) -> Dict: + """Forward function for SmallHead. + + Args: + x (torch.Tensor): Input feature map with the shape of + [B, C, H, W]. + + Returns: + dict[torch.Tensor]: Contain score of each class, bbox + regression and direction classification predictions. + Note that all the returned tensors are reshaped as + [bs*num_base_anchors*H*W, num_cls/box_code_size/dir_bins]. + It is more convenient to concat anchors for different + classes even though they have different feature map sizes. + """ + x = self.shared_conv(x) + cls_score = self.conv_cls(x) + bbox_pred = self.conv_reg(x) + featmap_size = bbox_pred.shape[-2:] + H, W = featmap_size + B = bbox_pred.shape[0] + cls_score = cls_score.view(-1, self.num_base_anchors, self.num_cls, H, + W).permute(0, 1, 3, 4, + 2).reshape(B, -1, self.num_cls) + bbox_pred = bbox_pred.view(-1, self.num_base_anchors, + self.box_code_size, H, W).permute( + 0, 1, 3, 4, + 2).reshape(B, -1, self.box_code_size) + + dir_cls_preds = None + if self.use_direction_classifier: + dir_cls_preds = self.conv_dir_cls(x) + dir_cls_preds = dir_cls_preds.view(-1, self.num_base_anchors, 2, H, + W).permute(0, 1, 3, 4, + 2).reshape(B, -1, 2) + ret = dict( + cls_score=cls_score, + bbox_pred=bbox_pred, + dir_cls_preds=dir_cls_preds, + featmap_size=featmap_size) + return ret + + +@MODELS.register_module() +class ShapeAwareHead(Anchor3DHead): + """Shape-aware grouping head for SSN. + + Args: + tasks (dict): Shape-aware groups of multi-class objects. 
+ assign_per_class (bool): Whether to do assignment for each + class. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + tasks: Dict, + assign_per_class: bool = True, + init_cfg: Optional[dict] = None, + **kwargs) -> Dict: + self.tasks = tasks + self.featmap_sizes = [] + super().__init__( + assign_per_class=assign_per_class, init_cfg=init_cfg, **kwargs) + + def init_weights(self): + if not self._is_init: + for m in self.heads: + if hasattr(m, 'init_weights'): + m.init_weights() + self._is_init = True + else: + warnings.warn(f'init_weights of {self.__class__.__name__} has ' + f'been called more than once.') + + def _init_layers(self): + """Initialize neural network layers of the head.""" + self.heads = nn.ModuleList() + cls_ptr = 0 + for task in self.tasks: + sizes = self.prior_generator.sizes[cls_ptr:cls_ptr + + task['num_class']] + num_size = torch.tensor(sizes).reshape(-1, 3).size(0) + num_rot = len(self.prior_generator.rotations) + num_base_anchors = num_rot * num_size + branch = dict( + type='BaseShapeHead', + num_cls=self.num_classes, + num_base_anchors=num_base_anchors, + box_code_size=self.box_code_size, + in_channels=self.in_channels, + shared_conv_channels=task['shared_conv_channels'], + shared_conv_strides=task['shared_conv_strides']) + self.heads.append(MODELS.build(branch)) + cls_ptr += task['num_class'] + + def forward_single(self, x: Tensor) -> Tuple[Tensor]: + """Forward function on a single-scale feature map. + + Args: + x (torch.Tensor): Input features. + Returns: + tuple[torch.Tensor]: Contain score of each class, bbox + regression and direction classification predictions. + """ + results = [] + + for head in self.heads: + results.append(head(x)) + + cls_score = torch.cat([result['cls_score'] for result in results], + dim=1) + bbox_pred = torch.cat([result['bbox_pred'] for result in results], + dim=1) + dir_cls_preds = None + if self.use_direction_classifier: + dir_cls_preds = torch.cat( + [result['dir_cls_preds'] for result in results], dim=1) + + self.featmap_sizes = [] + for i, task in enumerate(self.tasks): + for _ in range(task['num_class']): + self.featmap_sizes.append(results[i]['featmap_size']) + assert len(self.featmap_sizes) == len(self.prior_generator.ranges), \ + 'Length of feature map sizes must be equal to length of ' + \ + 'different ranges of anchor generator.' + + return cls_score, bbox_pred, dir_cls_preds + + def loss_single(self, cls_score: Tensor, bbox_pred: Tensor, + dir_cls_preds: Tensor, labels: Tensor, + label_weights: Tensor, bbox_targets: Tensor, + bbox_weights: Tensor, dir_targets: Tensor, + dir_weights: Tensor, + num_total_samples: int) -> Tuple[Tensor]: + """Calculate loss of Single-level results. + + Args: + cls_score (torch.Tensor): Class score in single-level. + bbox_pred (torch.Tensor): Bbox prediction in single-level. + dir_cls_preds (torch.Tensor): Predictions of direction class + in single-level. + labels (torch.Tensor): Labels of class. + label_weights (torch.Tensor): Weights of class loss. + bbox_targets (torch.Tensor): Targets of bbox predictions. + bbox_weights (torch.Tensor): Weights of bbox loss. + dir_targets (torch.Tensor): Targets of direction predictions. + dir_weights (torch.Tensor): Weights of direction loss. + num_total_samples (int): The number of valid samples. + + Returns: + tuple[torch.Tensor]: Losses of class, bbox + and direction, respectively. 
+ """ + # classification loss + if num_total_samples is None: + num_total_samples = int(cls_score.shape[0]) + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = cls_score.reshape(-1, self.num_classes) + loss_cls = self.loss_cls( + cls_score, labels, label_weights, avg_factor=num_total_samples) + + # regression loss + bbox_targets = bbox_targets.reshape(-1, self.box_code_size) + bbox_weights = bbox_weights.reshape(-1, self.box_code_size) + code_weight = self.train_cfg.get('code_weight', None) + + if code_weight: + bbox_weights = bbox_weights * bbox_weights.new_tensor(code_weight) + bbox_pred = bbox_pred.reshape(-1, self.box_code_size) + if self.diff_rad_by_sin: + bbox_pred, bbox_targets = self.add_sin_difference( + bbox_pred, bbox_targets) + loss_bbox = self.loss_bbox( + bbox_pred, + bbox_targets, + bbox_weights, + avg_factor=num_total_samples) + + # direction classification loss + loss_dir = None + if self.use_direction_classifier: + dir_cls_preds = dir_cls_preds.reshape(-1, 2) + dir_targets = dir_targets.reshape(-1) + dir_weights = dir_weights.reshape(-1) + loss_dir = self.loss_dir( + dir_cls_preds, + dir_targets, + dir_weights, + avg_factor=num_total_samples) + + return loss_cls, loss_bbox, loss_dir + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_input_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> Dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (list[torch.Tensor]): Multi-level class scores. + bbox_preds (list[torch.Tensor]): Multi-level bbox predictions. + dir_cls_preds (list[torch.Tensor]): Multi-level direction + class predictions. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_input_metas (list[dict]): Contain pcd and sample's meta info. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, list[torch.Tensor]]: Classification, bbox, and + direction losses of each level. + + - loss_cls (list[torch.Tensor]): Classification losses. + - loss_bbox (list[torch.Tensor]): Box regression losses. + - loss_dir (list[torch.Tensor]): Direction classification + losses. 
+ """ + device = cls_scores[0].device + anchor_list = self.get_anchors( + self.featmap_sizes, batch_input_metas, device=device) + cls_reg_targets = self.anchor_target_3d( + anchor_list, + batch_gt_instances_3d, + batch_input_metas, + batch_gt_instances_ignore=batch_gt_instances_ignore, + num_classes=self.num_classes, + sampling=self.sampling) + + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + dir_targets_list, dir_weights_list, num_total_pos, + num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + + # num_total_samples = None + losses_cls, losses_bbox, losses_dir = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + dir_cls_preds, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + dir_targets_list, + dir_weights_list, + num_total_samples=num_total_samples) + return dict( + loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dir=losses_dir) + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + dir_cls_preds: List[Tensor], + batch_input_metas: List[dict], + cfg: Optional[dict] = None, + rescale: List[Tensor] = False) -> List[tuple]: + """Transform a batch of output features extracted from the head into + bbox results. + + Args: + cls_scores (list[torch.Tensor]): Multi-level class scores. + bbox_preds (list[torch.Tensor]): Multi-level bbox predictions. + dir_cls_preds (list[torch.Tensor]): Multi-level direction + class predictions. + batch_input_metas (list[dict]): Contain pcd and img's meta info. + cfg (:obj:`ConfigDict`, optional): Training or testing config. + Default: None. + rescale (list[torch.Tensor], optional): Whether to rescale bbox. + Default: False. + + Returns: + list[tuple]: Prediction resultes of batches. + """ + assert len(cls_scores) == len(bbox_preds) + assert len(cls_scores) == len(dir_cls_preds) + num_levels = len(cls_scores) + assert num_levels == 1, 'Only support single level inference.' + device = cls_scores[0].device + mlvl_anchors = self.prior_generator.grid_anchors( + self.featmap_sizes, device=device) + # `anchor` is a list of anchors for different classes + mlvl_anchors = [torch.cat(anchor, dim=0) for anchor in mlvl_anchors] + + result_list = [] + for img_id in range(len(batch_input_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + dir_cls_pred_list = [ + dir_cls_preds[i][img_id].detach() for i in range(num_levels) + ] + + input_meta = batch_input_metas[img_id] + proposals = self._predict_by_feat_single(cls_score_list, + bbox_pred_list, + dir_cls_pred_list, + mlvl_anchors, input_meta, + cfg, rescale) + result_list.append(proposals) + return result_list + + def _predict_by_feat_single(self, + cls_scores: Tensor, + bbox_preds: Tensor, + dir_cls_preds: Tensor, + mlvl_anchors: List[Tensor], + input_meta: List[dict], + cfg: Dict = None, + rescale: List[Tensor] = False): + """Transform a single point's features extracted from the head into + bbox results. + + Args: + cls_scores (torch.Tensor): Class score in single batch. + bbox_preds (torch.Tensor): Bbox prediction in single batch. + dir_cls_preds (torch.Tensor): Predictions of direction class + in single batch. + mlvl_anchors (List[torch.Tensor]): Multi-level anchors + in single batch. + input_meta (list[dict]): Contain pcd and img's meta info. 
+ cfg (:obj:`ConfigDict`): Training or testing config. + rescale (list[torch.Tensor]): whether to rescale bbox. + Default: False. + + Returns: + tuple: Contain predictions of single batch. + + - bboxes (:obj:`BaseInstance3DBoxes`): Predicted 3d bboxes. + - scores (torch.Tensor): Class score of each bbox. + - labels (torch.Tensor): Label of each bbox. + """ + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_dir_scores = [] + for cls_score, bbox_pred, dir_cls_pred, anchors in zip( + cls_scores, bbox_preds, dir_cls_preds, mlvl_anchors): + assert cls_score.size()[-2] == bbox_pred.size()[-2] + assert cls_score.size()[-2] == dir_cls_pred.size()[-2] + dir_cls_score = torch.max(dir_cls_pred, dim=-1)[1] + + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + if self.use_sigmoid_cls: + max_scores, _ = scores.max(dim=1) + else: + max_scores, _ = scores[:, :-1].max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + anchors = anchors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + dir_cls_score = dir_cls_score[topk_inds] + + bboxes = self.bbox_coder.decode(anchors, bbox_pred) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_dir_scores.append(dir_cls_score) + + mlvl_bboxes = torch.cat(mlvl_bboxes) + mlvl_bboxes_for_nms = xywhr2xyxyr(input_meta['box_type_3d']( + mlvl_bboxes, box_dim=self.box_code_size).bev) + mlvl_scores = torch.cat(mlvl_scores) + mlvl_dir_scores = torch.cat(mlvl_dir_scores) + + if self.use_sigmoid_cls: + # Add a dummy background class to the front when using sigmoid + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) + + score_thr = cfg.get('score_thr', 0) + results = box3d_multiclass_nms(mlvl_bboxes, mlvl_bboxes_for_nms, + mlvl_scores, score_thr, cfg.max_num, + cfg, mlvl_dir_scores) + bboxes, scores, labels, dir_scores = results + if bboxes.shape[0] > 0: + dir_rot = limit_period(bboxes[..., 6] - self.dir_offset, + self.dir_limit_offset, np.pi) + bboxes[..., 6] = ( + dir_rot + self.dir_offset + + np.pi * dir_scores.to(bboxes.dtype)) + bboxes = input_meta['box_type_3d'](bboxes, box_dim=self.box_code_size) + results = InstanceData() + results.bboxes_3d = bboxes + results.scores_3d = scores + results.labels_3d = labels + return results diff --git a/mmdet3d/models/dense_heads/smoke_mono3d_head.py b/mmdet3d/models/dense_heads/smoke_mono3d_head.py new file mode 100755 index 0000000..23f3ad2 --- /dev/null +++ b/mmdet3d/models/dense_heads/smoke_mono3d_head.py @@ -0,0 +1,554 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple + +import torch +from mmdet.models.utils import (gaussian_radius, gen_gaussian_target, + multi_apply) +from mmdet.models.utils.gaussian_target import (get_local_maximum, + get_topk_from_heatmap, + transpose_and_gather_feat) +from mmengine.structures import InstanceData +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.utils import (ConfigType, InstanceList, OptConfigType, + OptInstanceList, OptMultiConfig) +from .anchor_free_mono3d_head import AnchorFreeMono3DHead + + +@MODELS.register_module() +class SMOKEMono3DHead(AnchorFreeMono3DHead): + r"""Anchor-free head used in `SMOKE `_ + + .. 
code-block:: none + + /-----> 3*3 conv -----> 1*1 conv -----> cls + feature + \-----> 3*3 conv -----> 1*1 conv -----> reg + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + dim_channel (list[int]): indices of dimension offset preds in + regression heatmap channels. + ori_channel (list[int]): indices of orientation offset pred in + regression heatmap channels. + bbox_coder (:obj:`ConfigDict` or dict): Bbox coder for encoding + and decoding boxes. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + Default: loss_cls=dict(type='GaussionFocalLoss', loss_weight=1.0). + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + Default: loss_bbox=dict(type='L1Loss', loss_weight=10.0). + loss_dir (:obj:`ConfigDict` or dict, Optional): Config of direction + classification loss. In SMOKE, Default: None. + loss_attr (:obj:`ConfigDict` or dict, Optional): Config of attribute + classification loss. In SMOKE, Default: None. + norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and config norm layer. + Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). + init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ + dict]): Initialization config dict. Defaults to None. + """ # noqa: E501 + + def __init__(self, + num_classes: int, + in_channels: int, + dim_channel: List[int], + ori_channel: List[int], + bbox_coder: ConfigType, + loss_cls: ConfigType = dict( + type='mmdet.GaussionFocalLoss', loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='mmdet.L1Loss', loss_weight=0.1), + loss_dir: OptConfigType = None, + loss_attr: OptConfigType = None, + norm_cfg: OptConfigType = dict( + type='GN', num_groups=32, requires_grad=True), + init_cfg: OptMultiConfig = None, + **kwargs) -> None: + super().__init__( + num_classes, + in_channels, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + loss_dir=loss_dir, + loss_attr=loss_attr, + norm_cfg=norm_cfg, + init_cfg=init_cfg, + **kwargs) + self.dim_channel = dim_channel + self.ori_channel = ori_channel + self.bbox_coder = TASK_UTILS.build(bbox_coder) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: + """Forward features from the upstream network. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * bbox_code_size. + """ + return multi_apply(self.forward_single, x) + + def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: + """Forward features of a single scale level. + + Args: + x (Tensor): Input feature map. + + Returns: + tuple: Scores for each class, bbox of input feature maps. + """ + cls_score, bbox_pred, dir_cls_pred, attr_pred, cls_feat, reg_feat = \ + super().forward_single(x) + cls_score = cls_score.sigmoid() # turn to 0-1 + cls_score = cls_score.clamp(min=1e-4, max=1 - 1e-4) + # (N, C, H, W) + offset_dims = bbox_pred[:, self.dim_channel, ...] + bbox_pred[:, self.dim_channel, ...] = offset_dims.sigmoid() - 0.5 + # (N, C, H, W) + vector_ori = bbox_pred[:, self.ori_channel, ...] + bbox_pred[:, self.ori_channel, ...] 
= F.normalize(vector_ori) + return cls_score, bbox_pred + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + batch_img_metas: Optional[List[dict]] = None, + rescale: bool = None) -> InstanceList: + """Generate bboxes from bbox head predictions. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level. + bbox_preds (list[Tensor]): Box regression for each scale. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + + Returns: + list[:obj:`InstanceData`]: 3D Detection results of each image + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, 7). + """ + assert len(cls_scores) == len(bbox_preds) == 1 + cam2imgs = torch.stack([ + cls_scores[0].new_tensor(img_meta['cam2img']) + for img_meta in batch_img_metas + ]) + trans_mats = torch.stack([ + cls_scores[0].new_tensor(img_meta['trans_mat']) + for img_meta in batch_img_metas + ]) + batch_bboxes, batch_scores, batch_topk_labels = self._decode_heatmap( + cls_scores[0], + bbox_preds[0], + batch_img_metas, + cam2imgs=cam2imgs, + trans_mats=trans_mats, + topk=100, + kernel=3) + + result_list = [] + for img_id in range(len(batch_img_metas)): + + bboxes = batch_bboxes[img_id] + scores = batch_scores[img_id] + labels = batch_topk_labels[img_id] + + keep_idx = scores > 0.25 + bboxes = bboxes[keep_idx] + scores = scores[keep_idx] + labels = labels[keep_idx] + + bboxes = batch_img_metas[img_id]['box_type_3d']( + bboxes, box_dim=self.bbox_code_size, origin=(0.5, 0.5, 0.5)) + attrs = None + + results = InstanceData() + results.bboxes_3d = bboxes + results.labels_3d = labels + results.scores_3d = scores + + if attrs is not None: + results.attr_labels = attrs + + result_list.append(results) + + return result_list + + def _decode_heatmap(self, + cls_score: Tensor, + reg_pred: Tensor, + batch_img_metas: List[dict], + cam2imgs: Tensor, + trans_mats: Tensor, + topk: int = 100, + kernel: int = 3) -> Tuple[Tensor, Tensor, Tensor]: + """Transform outputs into detections raw bbox predictions. + + Args: + class_score (Tensor): Center predict heatmap, + shape (B, num_classes, H, W). + reg_pred (Tensor): Box regression map. + shape (B, channel, H , W). + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cam2imgs (Tensor): Camera intrinsic matrixs. + shape (B, 4, 4) + trans_mats (Tensor): Transformation matrix from original image + to feature map. + shape: (batch, 3, 3) + topk (int): Get top k center keypoints from heatmap. Default 100. + kernel (int): Max pooling kernel for extract local maximum pixels. + Default 3. + + Returns: + tuple[torch.Tensor]: Decoded output of SMOKEHead, containing + the following Tensors: + + - batch_bboxes (Tensor): Coords of each 3D box. + shape (B, k, 7) + - batch_scores (Tensor): Scores of each 3D box. + shape (B, k) + - batch_topk_labels (Tensor): Categories of each 3D box. 
+ shape (B, k) + """ + img_h, img_w = batch_img_metas[0]['pad_shape'][:2] + bs, _, feat_h, feat_w = cls_score.shape + + center_heatmap_pred = get_local_maximum(cls_score, kernel=kernel) + + *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap( + center_heatmap_pred, k=topk) + batch_scores, batch_index, batch_topk_labels = batch_dets + + regression = transpose_and_gather_feat(reg_pred, batch_index) + regression = regression.view(-1, 8) + + points = torch.cat([topk_xs.view(-1, 1), + topk_ys.view(-1, 1).float()], + dim=1) + locations, dimensions, orientations = self.bbox_coder.decode( + regression, points, batch_topk_labels, cam2imgs, trans_mats) + + batch_bboxes = torch.cat((locations, dimensions, orientations), dim=1) + batch_bboxes = batch_bboxes.view(bs, -1, self.bbox_code_size) + return batch_bboxes, batch_scores, batch_topk_labels + + def get_predictions(self, labels_3d: Tensor, centers_2d: Tensor, + gt_locations: Tensor, gt_dimensions: Tensor, + gt_orientations: Tensor, indices: Tensor, + batch_img_metas: List[dict], pred_reg: Tensor) -> dict: + """Prepare predictions for computing loss. + + Args: + labels_3d (Tensor): Labels of each 3D box. + shape (B, max_objs, ) + centers_2d (Tensor): Coords of each projected 3D box + center on image. shape (B * max_objs, 2) + gt_locations (Tensor): Coords of each 3D box's location. + shape (B * max_objs, 3) + gt_dimensions (Tensor): Dimensions of each 3D box. + shape (N, 3) + gt_orientations (Tensor): Orientation(yaw) of each 3D box. + shape (N, 1) + indices (Tensor): Indices of the existence of the 3D box. + shape (B * max_objs, ) + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + pre_reg (Tensor): Box regression map. + shape (B, channel, H , W). + + Returns: + dict: the dict has components below: + + - bbox3d_yaws (:obj:`CameraInstance3DBoxes`): + bbox calculated using pred orientations. + - bbox3d_dims (:obj:`CameraInstance3DBoxes`): + bbox calculated using pred dimensions. + - bbox3d_locs (:obj:`CameraInstance3DBoxes`): + bbox calculated using pred locations. 
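+
+ Note:
+ Each returned box set swaps exactly one predicted component
+ (orientation, dimension or location) into the ground-truth box,
+ so that :meth:`loss_by_feat` can supervise each component with a
+ separate L1 loss on the resulting box corners.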
+ """ + batch, channel = pred_reg.shape[0], pred_reg.shape[1] + w = pred_reg.shape[3] + cam2imgs = torch.stack([ + gt_locations.new_tensor(img_meta['cam2img']) + for img_meta in batch_img_metas + ]) + trans_mats = torch.stack([ + gt_locations.new_tensor(img_meta['trans_mat']) + for img_meta in batch_img_metas + ]) + centers_2d_inds = centers_2d[:, 1] * w + centers_2d[:, 0] + centers_2d_inds = centers_2d_inds.view(batch, -1) + pred_regression = transpose_and_gather_feat(pred_reg, centers_2d_inds) + pred_regression_pois = pred_regression.view(-1, channel) + locations, dimensions, orientations = self.bbox_coder.decode( + pred_regression_pois, centers_2d, labels_3d, cam2imgs, trans_mats, + gt_locations) + + locations, dimensions, orientations = locations[indices], dimensions[ + indices], orientations[indices] + + locations[:, 1] += dimensions[:, 1] / 2 + + gt_locations = gt_locations[indices] + + assert len(locations) == len(gt_locations) + assert len(dimensions) == len(gt_dimensions) + assert len(orientations) == len(gt_orientations) + bbox3d_yaws = self.bbox_coder.encode(gt_locations, gt_dimensions, + orientations, batch_img_metas) + bbox3d_dims = self.bbox_coder.encode(gt_locations, dimensions, + gt_orientations, batch_img_metas) + bbox3d_locs = self.bbox_coder.encode(locations, gt_dimensions, + gt_orientations, batch_img_metas) + + pred_bboxes = dict(ori=bbox3d_yaws, dim=bbox3d_dims, loc=bbox3d_locs) + + return pred_bboxes + + def get_targets(self, batch_gt_instances_3d: InstanceList, + batch_gt_instances: InstanceList, feat_shape: Tuple[int], + batch_img_metas: List[dict]) -> Tuple[Tensor, int, dict]: + """Get training targets for batch images. + + Args: + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、 + ``labels_3d``、``depths``、``centers_2d`` and attributes. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes``、``labels``. + feat_shape (tuple[int]): Feature map shape with value, + shape (B, _, H, W). + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + + Returns: + tuple[Tensor, int, dict]: The Tensor value is the targets of + center heatmap, the dict has components below: + + - gt_centers_2d (Tensor): Coords of each projected 3D box + center on image. shape (B * max_objs, 2) + - gt_labels_3d (Tensor): Labels of each 3D box. + shape (B, max_objs, ) + - indices (Tensor): Indices of the existence of the 3D box. + shape (B * max_objs, ) + - affine_indices (Tensor): Indices of the affine of the 3D box. + shape (N, ) + - gt_locs (Tensor): Coords of each 3D box's location. + shape (N, 3) + - gt_dims (Tensor): Dimensions of each 3D box. + shape (N, 3) + - gt_yaws (Tensor): Orientation(yaw) of each 3D box. + shape (N, 1) + - gt_cors (Tensor): Coords of the corners of each 3D box. 
+ shape (N, 8, 3) + """ + + gt_bboxes = [ + gt_instances.bboxes for gt_instances in batch_gt_instances + ] + gt_labels = [ + gt_instances.labels for gt_instances in batch_gt_instances + ] + gt_bboxes_3d = [ + gt_instances_3d.bboxes_3d + for gt_instances_3d in batch_gt_instances_3d + ] + gt_labels_3d = [ + gt_instances_3d.labels_3d + for gt_instances_3d in batch_gt_instances_3d + ] + centers_2d = [ + gt_instances_3d.centers_2d + for gt_instances_3d in batch_gt_instances_3d + ] + img_shape = batch_img_metas[0]['pad_shape'] + + reg_mask = torch.stack([ + gt_bboxes[0].new_tensor( + not img_meta['affine_aug'], dtype=torch.bool) + for img_meta in batch_img_metas + ]) + + img_h, img_w = img_shape[:2] + bs, _, feat_h, feat_w = feat_shape + + width_ratio = float(feat_w / img_w) # 1/4 + height_ratio = float(feat_h / img_h) # 1/4 + + assert width_ratio == height_ratio + + center_heatmap_target = gt_bboxes[-1].new_zeros( + [bs, self.num_classes, feat_h, feat_w]) + + gt_centers_2d = centers_2d.copy() + + for batch_id in range(bs): + gt_bbox = gt_bboxes[batch_id] + gt_label = gt_labels[batch_id] + # project centers_2d from input image to feat map + gt_center_2d = gt_centers_2d[batch_id] * width_ratio + + for j, center in enumerate(gt_center_2d): + center_x_int, center_y_int = center.int() + scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio + scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio + radius = gaussian_radius([scale_box_h, scale_box_w], + min_overlap=0.7) + radius = max(0, int(radius)) + ind = gt_label[j] + gen_gaussian_target(center_heatmap_target[batch_id, ind], + [center_x_int, center_y_int], radius) + + avg_factor = max(1, center_heatmap_target.eq(1).sum()) + num_ctrs = [center_2d.shape[0] for center_2d in centers_2d] + max_objs = max(num_ctrs) + + reg_inds = torch.cat( + [reg_mask[i].repeat(num_ctrs[i]) for i in range(bs)]) + + inds = torch.zeros((bs, max_objs), + dtype=torch.bool).to(centers_2d[0].device) + + # put gt 3d bboxes to gpu + gt_bboxes_3d = [ + gt_bbox_3d.to(centers_2d[0].device) for gt_bbox_3d in gt_bboxes_3d + ] + + batch_centers_2d = centers_2d[0].new_zeros((bs, max_objs, 2)) + batch_labels_3d = gt_labels_3d[0].new_zeros((bs, max_objs)) + batch_gt_locations = \ + gt_bboxes_3d[0].tensor.new_zeros((bs, max_objs, 3)) + for i in range(bs): + inds[i, :num_ctrs[i]] = 1 + batch_centers_2d[i, :num_ctrs[i]] = centers_2d[i] + batch_labels_3d[i, :num_ctrs[i]] = gt_labels_3d[i] + batch_gt_locations[i, :num_ctrs[i]] = \ + gt_bboxes_3d[i].tensor[:, :3] + + inds = inds.flatten() + batch_centers_2d = batch_centers_2d.view(-1, 2) * width_ratio + batch_gt_locations = batch_gt_locations.view(-1, 3) + + # filter the empty image, without gt_bboxes_3d + gt_bboxes_3d = [ + gt_bbox_3d for gt_bbox_3d in gt_bboxes_3d + if gt_bbox_3d.tensor.shape[0] > 0 + ] + + gt_dimensions = torch.cat( + [gt_bbox_3d.tensor[:, 3:6] for gt_bbox_3d in gt_bboxes_3d]) + gt_orientations = torch.cat([ + gt_bbox_3d.tensor[:, 6].unsqueeze(-1) + for gt_bbox_3d in gt_bboxes_3d + ]) + gt_corners = torch.cat( + [gt_bbox_3d.corners for gt_bbox_3d in gt_bboxes_3d]) + + target_labels = dict( + gt_centers_2d=batch_centers_2d.long(), + gt_labels_3d=batch_labels_3d, + indices=inds, + reg_indices=reg_inds, + gt_locs=batch_gt_locations, + gt_dims=gt_dimensions, + gt_yaws=gt_orientations, + gt_cors=gt_corners) + + return center_heatmap_target, avg_factor, target_labels + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + batch_gt_instances_3d: InstanceList, + batch_gt_instances: 
InstanceList, + batch_img_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level. + shape (num_gt, 4). + bbox_preds (list[Tensor]): Box dims is a 4D-tensor, the channel + number is bbox_code_size. + shape (B, 7, H, W). + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、 + ``labels_3d``、``depths``、``centers_2d`` and attributes. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes``、``labels``. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, Tensor]: A dictionary of loss components, which has + components below: + + - loss_cls (Tensor): loss of cls heatmap. + - loss_bbox (Tensor): loss of bbox heatmap. + """ + assert len(cls_scores) == len(bbox_preds) == 1 + center_2d_heatmap = cls_scores[0] + pred_reg = bbox_preds[0] + + center_2d_heatmap_target, avg_factor, target_labels = \ + self.get_targets(batch_gt_instances_3d, + batch_gt_instances, + center_2d_heatmap.shape, + batch_img_metas) + + pred_bboxes = self.get_predictions( + labels_3d=target_labels['gt_labels_3d'], + centers_2d=target_labels['gt_centers_2d'], + gt_locations=target_labels['gt_locs'], + gt_dimensions=target_labels['gt_dims'], + gt_orientations=target_labels['gt_yaws'], + indices=target_labels['indices'], + batch_img_metas=batch_img_metas, + pred_reg=pred_reg) + + loss_cls = self.loss_cls( + center_2d_heatmap, center_2d_heatmap_target, avg_factor=avg_factor) + + reg_inds = target_labels['reg_indices'] + + loss_bbox_oris = self.loss_bbox( + pred_bboxes['ori'].corners[reg_inds, ...], + target_labels['gt_cors'][reg_inds, ...]) + + loss_bbox_dims = self.loss_bbox( + pred_bboxes['dim'].corners[reg_inds, ...], + target_labels['gt_cors'][reg_inds, ...]) + + loss_bbox_locs = self.loss_bbox( + pred_bboxes['loc'].corners[reg_inds, ...], + target_labels['gt_cors'][reg_inds, ...]) + + loss_bbox = loss_bbox_dims + loss_bbox_locs + loss_bbox_oris + + loss_dict = dict(loss_cls=loss_cls, loss_bbox=loss_bbox) + + return loss_dict diff --git a/mmdet3d/models/dense_heads/ssd_3d_head.py b/mmdet3d/models/dense_heads/ssd_3d_head.py new file mode 100755 index 0000000..b8f42f8 --- /dev/null +++ b/mmdet3d/models/dense_heads/ssd_3d_head.py @@ -0,0 +1,583 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +from mmcv.ops.nms import batched_nms +from mmdet.models.utils import multi_apply +from mmengine import ConfigDict +from mmengine.structures import InstanceData +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.structures import BaseInstance3DBoxes +from mmdet3d.structures.bbox_3d import (DepthInstance3DBoxes, + LiDARInstance3DBoxes, + rotation_3d_in_axis) +from .vote_head import VoteHead + + +@MODELS.register_module() +class SSD3DHead(VoteHead): + r"""Bbox head of `3DSSD `_. + + Args: + num_classes (int): The number of class. + bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for encoding and + decoding boxes. + train_cfg (dict): Config for training. + test_cfg (dict): Config for testing. 
+ vote_module_cfg (dict): Config of VoteModule for point-wise votes. + vote_aggregation_cfg (dict): Config of vote aggregation layer. + pred_layer_cfg (dict): Config of classfication and regression + prediction layers. + conv_cfg (dict): Config of convolution in prediction layer. + norm_cfg (dict): Config of BN in prediction layer. + act_cfg (dict): Config of activation in prediction layer. + objectness_loss (dict): Config of objectness loss. + center_loss (dict): Config of center loss. + dir_class_loss (dict): Config of direction classification loss. + dir_res_loss (dict): Config of direction residual regression loss. + size_res_loss (dict): Config of size residual regression loss. + corner_loss (dict): Config of bbox corners regression loss. + vote_loss (dict): Config of candidate points regression loss. + """ + + def __init__(self, + num_classes: int, + bbox_coder: Union[ConfigDict, dict], + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + vote_module_cfg: Optional[dict] = None, + vote_aggregation_cfg: Optional[dict] = None, + pred_layer_cfg: Optional[dict] = None, + objectness_loss: Optional[dict] = None, + center_loss: Optional[dict] = None, + dir_class_loss: Optional[dict] = None, + dir_res_loss: Optional[dict] = None, + size_res_loss: Optional[dict] = None, + corner_loss: Optional[dict] = None, + vote_loss: Optional[dict] = None, + init_cfg: Optional[dict] = None) -> None: + super(SSD3DHead, self).__init__( + num_classes, + bbox_coder, + train_cfg=train_cfg, + test_cfg=test_cfg, + vote_module_cfg=vote_module_cfg, + vote_aggregation_cfg=vote_aggregation_cfg, + pred_layer_cfg=pred_layer_cfg, + objectness_loss=objectness_loss, + center_loss=center_loss, + dir_class_loss=dir_class_loss, + dir_res_loss=dir_res_loss, + size_class_loss=None, + size_res_loss=size_res_loss, + semantic_loss=None, + init_cfg=init_cfg) + self.corner_loss = MODELS.build(corner_loss) + self.vote_loss = MODELS.build(vote_loss) + self.num_candidates = vote_module_cfg['num_points'] + + def _get_cls_out_channels(self) -> int: + """Return the channel number of classification outputs.""" + # Class numbers (k) + objectness (1) + return self.num_classes + + def _get_reg_out_channels(self) -> int: + """Return the channel number of regression outputs.""" + # Bbox classification and regression + # (center residual (3), size regression (3) + # heading class+residual (num_dir_bins*2)), + return 3 + 3 + self.num_dir_bins * 2 + + def _extract_input(self, feat_dict: dict) -> Tuple: + """Extract inputs from features dictionary. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + torch.Tensor: Coordinates of input points. + torch.Tensor: Features of input points. + torch.Tensor: Indices of input points. + """ + seed_points = feat_dict['sa_xyz'][-1] + seed_features = feat_dict['sa_features'][-1] + seed_indices = feat_dict['sa_indices'][-1] + + return seed_points, seed_features, seed_indices + + def loss_by_feat( + self, + points: List[torch.Tensor], + bbox_preds_dict: dict, + batch_gt_instances_3d: List[InstanceData], + batch_pts_semantic_mask: Optional[List[torch.Tensor]] = None, + batch_pts_instance_mask: Optional[List[torch.Tensor]] = None, + batch_input_metas: List[dict] = None, + ret_target: bool = False, + **kwargs) -> dict: + """Compute loss. + + Args: + points (list[torch.Tensor]): Input points. + bbox_preds_dict (dict): Predictions from forward of vote head. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. 
It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_pts_semantic_mask (list[tensor]): Semantic mask + of points cloud. Defaults to None. Defaults to None. + batch_pts_semantic_mask (list[tensor]): Instance mask + of points cloud. Defaults to None. Defaults to None. + batch_input_metas (list[dict]): Contain pcd and img's meta info. + ret_target (bool): Return targets or not. Defaults to False. + + Returns: + dict: Losses of 3DSSD. + """ + + targets = self.get_targets(points, bbox_preds_dict, + batch_gt_instances_3d, + batch_pts_semantic_mask, + batch_pts_instance_mask) + (vote_targets, center_targets, size_res_targets, dir_class_targets, + dir_res_targets, mask_targets, centerness_targets, corner3d_targets, + vote_mask, positive_mask, negative_mask, centerness_weights, + box_loss_weights, heading_res_loss_weight) = targets + + # calculate centerness loss + centerness_loss = self.loss_objectness( + bbox_preds_dict['obj_scores'].transpose(2, 1), + centerness_targets, + weight=centerness_weights) + + # calculate center loss + center_loss = self.loss_center( + bbox_preds_dict['center_offset'], + center_targets, + weight=box_loss_weights.unsqueeze(-1)) + + # calculate direction class loss + dir_class_loss = self.loss_dir_class( + bbox_preds_dict['dir_class'].transpose(1, 2), + dir_class_targets, + weight=box_loss_weights) + + # calculate direction residual loss + dir_res_loss = self.loss_dir_res( + bbox_preds_dict['dir_res_norm'], + dir_res_targets.unsqueeze(-1).repeat(1, 1, self.num_dir_bins), + weight=heading_res_loss_weight) + + # calculate size residual loss + size_loss = self.loss_size_res( + bbox_preds_dict['size'], + size_res_targets, + weight=box_loss_weights.unsqueeze(-1)) + + # calculate corner loss + one_hot_dir_class_targets = dir_class_targets.new_zeros( + bbox_preds_dict['dir_class'].shape) + one_hot_dir_class_targets.scatter_(2, dir_class_targets.unsqueeze(-1), + 1) + pred_bbox3d = self.bbox_coder.decode( + dict( + center=bbox_preds_dict['center'], + dir_res=bbox_preds_dict['dir_res'], + dir_class=one_hot_dir_class_targets, + size=bbox_preds_dict['size'])) + pred_bbox3d = pred_bbox3d.reshape(-1, pred_bbox3d.shape[-1]) + pred_bbox3d = batch_input_metas[0]['box_type_3d']( + pred_bbox3d.clone(), + box_dim=pred_bbox3d.shape[-1], + with_yaw=self.bbox_coder.with_rot, + origin=(0.5, 0.5, 0.5)) + pred_corners3d = pred_bbox3d.corners.reshape(-1, 8, 3) + corner_loss = self.corner_loss( + pred_corners3d, + corner3d_targets.reshape(-1, 8, 3), + weight=box_loss_weights.view(-1, 1, 1)) + + # calculate vote loss + vote_loss = self.vote_loss( + bbox_preds_dict['vote_offset'].transpose(1, 2), + vote_targets, + weight=vote_mask.unsqueeze(-1)) + + losses = dict( + centerness_loss=centerness_loss, + center_loss=center_loss, + dir_class_loss=dir_class_loss, + dir_res_loss=dir_res_loss, + size_res_loss=size_loss, + corner_loss=corner_loss, + vote_loss=vote_loss) + + return losses + + def get_targets( + self, + points: List[Tensor], + bbox_preds_dict: dict = None, + batch_gt_instances_3d: List[InstanceData] = None, + batch_pts_semantic_mask: List[torch.Tensor] = None, + batch_pts_instance_mask: List[torch.Tensor] = None, + ) -> Tuple[Tensor]: + """Generate targets of 3DSSD head. + + Args: + points (list[torch.Tensor]): Points of each batch. + bbox_preds_dict (dict): Bounding box predictions of + vote head. Defaults to None. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes`` and ``labels`` + attributes. Defaults to None. 
+ batch_pts_semantic_mask (list[tensor]): Semantic gt mask for + point clouds. Defaults to None. + batch_pts_instance_mask (list[tensor]): Instance gt mask for + point clouds. Defaults to None. + + Returns: + tuple[torch.Tensor]: Targets of 3DSSD head. + """ + batch_gt_labels_3d = [ + gt_instances_3d.labels_3d + for gt_instances_3d in batch_gt_instances_3d + ] + batch_gt_bboxes_3d = [ + gt_instances_3d.bboxes_3d + for gt_instances_3d in batch_gt_instances_3d + ] + + # find empty example + for index in range(len(batch_gt_labels_3d)): + if len(batch_gt_labels_3d[index]) == 0: + fake_box = batch_gt_bboxes_3d[index].tensor.new_zeros( + 1, batch_gt_bboxes_3d[index].tensor.shape[-1]) + batch_gt_bboxes_3d[index] = batch_gt_bboxes_3d[index].new_box( + fake_box) + batch_gt_labels_3d[index] = batch_gt_labels_3d[ + index].new_zeros(1) + + if batch_pts_semantic_mask is None: + batch_pts_semantic_mask = [ + None for _ in range(len(batch_gt_labels_3d)) + ] + batch_pts_instance_mask = [ + None for _ in range(len(batch_gt_labels_3d)) + ] + + aggregated_points = [ + bbox_preds_dict['aggregated_points'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + seed_points = [ + bbox_preds_dict['seed_points'][i, :self.num_candidates].detach() + for i in range(len(batch_gt_labels_3d)) + ] + + (vote_targets, center_targets, size_res_targets, dir_class_targets, + dir_res_targets, mask_targets, centerness_targets, corner3d_targets, + vote_mask, positive_mask, negative_mask) = multi_apply( + self.get_targets_single, points, batch_gt_bboxes_3d, + batch_gt_labels_3d, batch_pts_semantic_mask, + batch_pts_instance_mask, aggregated_points, seed_points) + + center_targets = torch.stack(center_targets) + positive_mask = torch.stack(positive_mask) + negative_mask = torch.stack(negative_mask) + dir_class_targets = torch.stack(dir_class_targets) + dir_res_targets = torch.stack(dir_res_targets) + size_res_targets = torch.stack(size_res_targets) + mask_targets = torch.stack(mask_targets) + centerness_targets = torch.stack(centerness_targets).detach() + corner3d_targets = torch.stack(corner3d_targets) + vote_targets = torch.stack(vote_targets) + vote_mask = torch.stack(vote_mask) + + center_targets -= bbox_preds_dict['aggregated_points'] + + centerness_weights = (positive_mask + + negative_mask).unsqueeze(-1).repeat( + 1, 1, self.num_classes).float() + centerness_weights = centerness_weights / \ + (centerness_weights.sum() + 1e-6) + vote_mask = vote_mask / (vote_mask.sum() + 1e-6) + + box_loss_weights = positive_mask / (positive_mask.sum() + 1e-6) + + batch_size, proposal_num = dir_class_targets.shape[:2] + heading_label_one_hot = dir_class_targets.new_zeros( + (batch_size, proposal_num, self.num_dir_bins)) + heading_label_one_hot.scatter_(2, dir_class_targets.unsqueeze(-1), 1) + heading_res_loss_weight = heading_label_one_hot * \ + box_loss_weights.unsqueeze(-1) + + return (vote_targets, center_targets, size_res_targets, + dir_class_targets, dir_res_targets, mask_targets, + centerness_targets, corner3d_targets, vote_mask, positive_mask, + negative_mask, centerness_weights, box_loss_weights, + heading_res_loss_weight) + + def get_targets_single(self, + points: Tensor, + gt_bboxes_3d: BaseInstance3DBoxes, + gt_labels_3d: Tensor, + pts_semantic_mask: Optional[Tensor] = None, + pts_instance_mask: Optional[Tensor] = None, + aggregated_points: Optional[Tensor] = None, + seed_points: Optional[Tensor] = None, + **kwargs): + """Generate targets of ssd3d head for single batch. + + Args: + points (torch.Tensor): Points of each batch. 
+ gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth + boxes of each batch. + gt_labels_3d (torch.Tensor): Labels of each batch. + pts_semantic_mask (torch.Tensor): Point-wise semantic + label of each batch. + pts_instance_mask (torch.Tensor): Point-wise instance + label of each batch. + aggregated_points (torch.Tensor): Aggregated points from + candidate points layer. + seed_points (torch.Tensor): Seed points of candidate points. + + Returns: + tuple[torch.Tensor]: Targets of ssd3d head. + """ + assert self.bbox_coder.with_rot or pts_semantic_mask is not None + gt_bboxes_3d = gt_bboxes_3d.to(points.device) + valid_gt = gt_labels_3d != -1 + gt_bboxes_3d = gt_bboxes_3d[valid_gt] + gt_labels_3d = gt_labels_3d[valid_gt] + + # Generate fake GT for empty scene + if valid_gt.sum() == 0: + vote_targets = points.new_zeros(self.num_candidates, 3) + center_targets = points.new_zeros(self.num_candidates, 3) + size_res_targets = points.new_zeros(self.num_candidates, 3) + dir_class_targets = points.new_zeros( + self.num_candidates, dtype=torch.int64) + dir_res_targets = points.new_zeros(self.num_candidates) + mask_targets = points.new_zeros( + self.num_candidates, dtype=torch.int64) + centerness_targets = points.new_zeros(self.num_candidates, + self.num_classes) + corner3d_targets = points.new_zeros(self.num_candidates, 8, 3) + vote_mask = points.new_zeros(self.num_candidates, dtype=torch.bool) + positive_mask = points.new_zeros( + self.num_candidates, dtype=torch.bool) + negative_mask = points.new_ones( + self.num_candidates, dtype=torch.bool) + return (vote_targets, center_targets, size_res_targets, + dir_class_targets, dir_res_targets, mask_targets, + centerness_targets, corner3d_targets, vote_mask, + positive_mask, negative_mask) + + gt_corner3d = gt_bboxes_3d.corners + + (center_targets, size_targets, dir_class_targets, + dir_res_targets) = self.bbox_coder.encode(gt_bboxes_3d, gt_labels_3d) + + points_mask, assignment = self._assign_targets_by_points_inside( + gt_bboxes_3d, aggregated_points) + + center_targets = center_targets[assignment] + size_res_targets = size_targets[assignment] + mask_targets = gt_labels_3d[assignment] + dir_class_targets = dir_class_targets[assignment] + dir_res_targets = dir_res_targets[assignment] + corner3d_targets = gt_corner3d[assignment] + + top_center_targets = center_targets.clone() + top_center_targets[:, 2] += size_res_targets[:, 2] + dist = torch.norm(aggregated_points - top_center_targets, dim=1) + dist_mask = dist < self.train_cfg.pos_distance_thr + positive_mask = (points_mask.max(1)[0] > 0) * dist_mask + negative_mask = (points_mask.max(1)[0] == 0) + + # Centerness loss targets + canonical_xyz = aggregated_points - center_targets + if self.bbox_coder.with_rot: + # TODO: Align points rotation implementation of + # LiDARInstance3DBoxes and DepthInstance3DBoxes + canonical_xyz = rotation_3d_in_axis( + canonical_xyz.unsqueeze(0).transpose(0, 1), + -gt_bboxes_3d.yaw[assignment], + axis=2).squeeze(1) + distance_front = torch.clamp( + size_res_targets[:, 0] - canonical_xyz[:, 0], min=0) + distance_back = torch.clamp( + size_res_targets[:, 0] + canonical_xyz[:, 0], min=0) + distance_left = torch.clamp( + size_res_targets[:, 1] - canonical_xyz[:, 1], min=0) + distance_right = torch.clamp( + size_res_targets[:, 1] + canonical_xyz[:, 1], min=0) + distance_top = torch.clamp( + size_res_targets[:, 2] - canonical_xyz[:, 2], min=0) + distance_bottom = torch.clamp( + size_res_targets[:, 2] + canonical_xyz[:, 2], min=0) + + centerness_l = torch.min(distance_front, 
distance_back) / torch.max( + distance_front, distance_back) + centerness_w = torch.min(distance_left, distance_right) / torch.max( + distance_left, distance_right) + centerness_h = torch.min(distance_bottom, distance_top) / torch.max( + distance_bottom, distance_top) + centerness_targets = torch.clamp( + centerness_l * centerness_w * centerness_h, min=0) + centerness_targets = centerness_targets.pow(1 / 3.0) + centerness_targets = torch.clamp(centerness_targets, min=0, max=1) + + proposal_num = centerness_targets.shape[0] + one_hot_centerness_targets = centerness_targets.new_zeros( + (proposal_num, self.num_classes)) + one_hot_centerness_targets.scatter_(1, mask_targets.unsqueeze(-1), 1) + centerness_targets = centerness_targets.unsqueeze( + 1) * one_hot_centerness_targets + + # Vote loss targets + enlarged_gt_bboxes_3d = gt_bboxes_3d.enlarged_box( + self.train_cfg.expand_dims_length) + enlarged_gt_bboxes_3d.tensor[:, 2] -= self.train_cfg.expand_dims_length + vote_mask, vote_assignment = self._assign_targets_by_points_inside( + enlarged_gt_bboxes_3d, seed_points) + + vote_targets = gt_bboxes_3d.gravity_center + vote_targets = vote_targets[vote_assignment] - seed_points + vote_mask = vote_mask.max(1)[0] > 0 + + return (vote_targets, center_targets, size_res_targets, + dir_class_targets, dir_res_targets, mask_targets, + centerness_targets, corner3d_targets, vote_mask, positive_mask, + negative_mask) + + def predict_by_feat(self, points: List[torch.Tensor], + bbox_preds_dict: dict, batch_input_metas: List[dict], + **kwargs) -> List[InstanceData]: + """Generate bboxes from vote head predictions. + + Args: + points (List[torch.Tensor]): Input points of multiple samples. + bbox_preds_dict (dict): Predictions from vote head. + batch_input_metas (list[dict]): Each item + contains the meta information of each sample. + + Returns: + list[:obj:`InstanceData`]: List of processed predictions. Each + InstanceData cantains 3d Bounding boxes and corresponding + scores and labels. + """ + # decode boxes + sem_scores = F.sigmoid(bbox_preds_dict['obj_scores']).transpose(1, 2) + obj_scores = sem_scores.max(-1)[0] + bbox3d = self.bbox_coder.decode(bbox_preds_dict) + batch_size = bbox3d.shape[0] + points = torch.stack(points) + results_list = [] + for b in range(batch_size): + temp_results = InstanceData() + bbox_selected, score_selected, labels = self.multiclass_nms_single( + obj_scores[b], sem_scores[b], bbox3d[b], points[b, ..., :3], + batch_input_metas[b]) + + bbox = batch_input_metas[b]['box_type_3d']( + bbox_selected.clone(), + box_dim=bbox_selected.shape[-1], + with_yaw=self.bbox_coder.with_rot) + + temp_results.bboxes_3d = bbox + temp_results.scores_3d = score_selected + temp_results.labels_3d = labels + results_list.append(temp_results) + + return results_list + + def multiclass_nms_single(self, obj_scores: Tensor, sem_scores: Tensor, + bbox: Tensor, points: Tensor, + input_meta: dict) -> Tuple[Tensor]: + """Multi-class nms in single batch. + + Args: + obj_scores (torch.Tensor): Objectness score of bounding boxes. + sem_scores (torch.Tensor): Semantic class score of bounding boxes. + bbox (torch.Tensor): Predicted bounding boxes. + points (torch.Tensor): Input points. + input_meta (dict): Point cloud and image's meta info. + + Returns: + tuple[torch.Tensor]: Bounding boxes, scores and labels. 
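+
+ Note:
+ ``self.test_cfg`` is expected to provide ``nms_cfg`` (forwarded
+ to :func:`mmcv.ops.batched_nms`), ``score_thr``,
+ ``per_class_proposal`` and ``max_output_num``, e.g. (illustrative
+ values only)::
+
+ test_cfg = dict(
+ nms_cfg=dict(type='nms', iou_thr=0.1),
+ score_thr=0.0,
+ per_class_proposal=True,
+ max_output_num=100)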
+ """ + bbox = input_meta['box_type_3d']( + bbox.clone(), + box_dim=bbox.shape[-1], + with_yaw=self.bbox_coder.with_rot, + origin=(0.5, 0.5, 0.5)) + + if isinstance(bbox, (LiDARInstance3DBoxes, DepthInstance3DBoxes)): + box_indices = bbox.points_in_boxes_all(points) + nonempty_box_mask = box_indices.T.sum(1) >= 0 + else: + raise NotImplementedError('Unsupported bbox type!') + + corner3d = bbox.corners + minmax_box3d = corner3d.new(torch.Size((corner3d.shape[0], 6))) + minmax_box3d[:, :3] = torch.min(corner3d, dim=1)[0] + minmax_box3d[:, 3:] = torch.max(corner3d, dim=1)[0] + + bbox_classes = torch.argmax(sem_scores, -1) + nms_keep = batched_nms( + minmax_box3d[nonempty_box_mask][:, [0, 1, 3, 4]], + obj_scores[nonempty_box_mask], bbox_classes[nonempty_box_mask], + self.test_cfg.nms_cfg)[1] + + if nms_keep.shape[0] > self.test_cfg.max_output_num: + nms_keep = nms_keep[:self.test_cfg.max_output_num] + + # filter empty boxes and boxes with low score + scores_mask = (obj_scores >= self.test_cfg.score_thr) + nonempty_box_inds = torch.nonzero( + nonempty_box_mask, as_tuple=False).flatten() + nonempty_mask = torch.zeros_like(bbox_classes).scatter( + 0, nonempty_box_inds[nms_keep], 1) + selected = (nonempty_mask.bool() & scores_mask.bool()) + + if self.test_cfg.per_class_proposal: + bbox_selected, score_selected, labels = [], [], [] + for k in range(sem_scores.shape[-1]): + bbox_selected.append(bbox[selected].tensor) + score_selected.append(obj_scores[selected]) + labels.append( + torch.zeros_like(bbox_classes[selected]).fill_(k)) + bbox_selected = torch.cat(bbox_selected, 0) + score_selected = torch.cat(score_selected, 0) + labels = torch.cat(labels, 0) + else: + bbox_selected = bbox[selected].tensor + score_selected = obj_scores[selected] + labels = bbox_classes[selected] + + return bbox_selected, score_selected, labels + + def _assign_targets_by_points_inside(self, bboxes_3d: BaseInstance3DBoxes, + points: Tensor) -> Tuple: + """Compute assignment by checking whether point is inside bbox. + + Args: + bboxes_3d (BaseInstance3DBoxes): Instance of bounding boxes. + points (torch.Tensor): Points of a batch. + + Returns: + tuple[torch.Tensor]: Flags indicating whether each point is + inside bbox and the index of box where each point are in. + """ + if isinstance(bboxes_3d, (LiDARInstance3DBoxes, DepthInstance3DBoxes)): + points_mask = bboxes_3d.points_in_boxes_all(points) + assignment = points_mask.argmax(dim=-1) + else: + raise NotImplementedError('Unsupported bbox type!') + + return points_mask, assignment diff --git a/mmdet3d/models/dense_heads/train_mixins.py b/mmdet3d/models/dense_heads/train_mixins.py new file mode 100755 index 0000000..01c7eb7 --- /dev/null +++ b/mmdet3d/models/dense_heads/train_mixins.py @@ -0,0 +1,353 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmdet.models.utils import images_to_levels, multi_apply +from mmengine.structures import InstanceData + +from mmdet3d.structures import limit_period + + +class AnchorTrainMixin(object): + """Mixin class for target assigning of dense heads.""" + + def anchor_target_3d(self, + anchor_list, + batch_gt_instances_3d, + batch_input_metas, + batch_gt_instances_ignore=None, + label_channels=1, + num_classes=1, + sampling=True): + """Compute regression and classification targets for anchors. + + Args: + anchor_list (list[list]): Multi level anchors of each image. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Ground truth + bboxes of each image. 
+ batch_input_metas (list[dict]): Meta info of each image. + batch_gt_instances_ignore (list): Ignore list of gt bboxes. + label_channels (int): The channel of labels. + num_classes (int): The number of classes. + sampling (bool): Whether to sample anchors. + + Returns: + tuple (list, list, list, list, list, list, int, int): + Anchor targets, including labels, label weights, + bbox targets, bbox weights, direction targets, + direction weights, number of positive anchors and + number of negative anchors. + """ + num_inputs = len(batch_input_metas) + assert len(anchor_list) == num_inputs + + if isinstance(anchor_list[0][0], list): + # sizes of anchors are different + # anchor number of a single level + num_level_anchors = [ + sum([anchor.size(0) for anchor in anchors]) + for anchors in anchor_list[0] + ] + for i in range(num_inputs): + anchor_list[i] = anchor_list[i][0] + else: + # anchor number of multi levels + num_level_anchors = [ + anchors.view(-1, self.box_code_size).size(0) + for anchors in anchor_list[0] + ] + # concat all level anchors and flags to a single tensor + for i in range(num_inputs): + anchor_list[i] = torch.cat(anchor_list[i]) + + # compute targets for each image + if batch_gt_instances_ignore is None: + batch_gt_instances_ignore = [None for _ in range(num_inputs)] + + (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, + all_dir_targets, all_dir_weights, pos_inds_list, + neg_inds_list) = multi_apply( + self.anchor_target_3d_single, + anchor_list, + batch_gt_instances_3d, + batch_gt_instances_ignore, + batch_input_metas, + label_channels=label_channels, + num_classes=num_classes, + sampling=sampling) + + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors) + dir_targets_list = images_to_levels(all_dir_targets, num_level_anchors) + dir_weights_list = images_to_levels(all_dir_weights, num_level_anchors) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, dir_targets_list, dir_weights_list, + num_total_pos, num_total_neg) + + def anchor_target_3d_single(self, + anchors, + gt_instance_3d, + gt_instance_ignore, + input_meta, + label_channels=1, + num_classes=1, + sampling=True): + """Compute targets of anchors in single batch. + + Args: + anchors (torch.Tensor): Concatenated multi-level anchor. + gt_instance_3d (:obj:`InstanceData`): Gt bboxes. + gt_instance_ignore (:obj:`InstanceData`): Ignored gt bboxes. + input_meta (dict): Meta info of each image. + label_channels (int): The channel of labels. + num_classes (int): The number of classes. + sampling (bool): Whether to sample anchors. + + Returns: + tuple[torch.Tensor]: Anchor targets. 
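+
+ Note:
+ Three cases are handled below: a list of assigners with one
+ stacked anchor tensor (per-class rotated anchors sharing a
+ feature-map size), a list of assigners with a list of anchors
+ (class-aware anchors with different feature-map sizes), and a
+ single assigner applied to all anchors at once.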
+ """ + if isinstance(self.bbox_assigner, + list) and (not isinstance(anchors, list)): + feat_size = anchors.size(0) * anchors.size(1) * anchors.size(2) + rot_angles = anchors.size(-2) + assert len(self.bbox_assigner) == anchors.size(-3) + (total_labels, total_label_weights, total_bbox_targets, + total_bbox_weights, total_dir_targets, total_dir_weights, + total_pos_inds, total_neg_inds) = [], [], [], [], [], [], [], [] + current_anchor_num = 0 + for i, assigner in enumerate(self.bbox_assigner): + current_anchors = anchors[..., i, :, :].reshape( + -1, self.box_code_size) + current_anchor_num += current_anchors.size(0) + if self.assign_per_class: + gt_per_cls = (gt_instance_3d.labels_3d == i) + gt_per_cls_instance = InstanceData() + gt_per_cls_instance.labels_3d = gt_instance_3d.labels_3d[ + gt_per_cls] + gt_per_cls_instance.bboxes_3d = gt_instance_3d.bboxes_3d[ + gt_per_cls, :] + anchor_targets = self.anchor_target_single_assigner( + assigner, current_anchors, gt_per_cls_instance, + gt_instance_ignore, input_meta, num_classes, sampling) + else: + anchor_targets = self.anchor_target_single_assigner( + assigner, current_anchors, gt_instance_3d, + gt_instance_ignore, input_meta, num_classes, sampling) + + (labels, label_weights, bbox_targets, bbox_weights, + dir_targets, dir_weights, pos_inds, neg_inds) = anchor_targets + total_labels.append(labels.reshape(feat_size, 1, rot_angles)) + total_label_weights.append( + label_weights.reshape(feat_size, 1, rot_angles)) + total_bbox_targets.append( + bbox_targets.reshape(feat_size, 1, rot_angles, + anchors.size(-1))) + total_bbox_weights.append( + bbox_weights.reshape(feat_size, 1, rot_angles, + anchors.size(-1))) + total_dir_targets.append( + dir_targets.reshape(feat_size, 1, rot_angles)) + total_dir_weights.append( + dir_weights.reshape(feat_size, 1, rot_angles)) + total_pos_inds.append(pos_inds) + total_neg_inds.append(neg_inds) + + total_labels = torch.cat(total_labels, dim=-2).reshape(-1) + total_label_weights = torch.cat( + total_label_weights, dim=-2).reshape(-1) + total_bbox_targets = torch.cat( + total_bbox_targets, dim=-3).reshape(-1, anchors.size(-1)) + total_bbox_weights = torch.cat( + total_bbox_weights, dim=-3).reshape(-1, anchors.size(-1)) + total_dir_targets = torch.cat( + total_dir_targets, dim=-2).reshape(-1) + total_dir_weights = torch.cat( + total_dir_weights, dim=-2).reshape(-1) + total_pos_inds = torch.cat(total_pos_inds, dim=0).reshape(-1) + total_neg_inds = torch.cat(total_neg_inds, dim=0).reshape(-1) + return (total_labels, total_label_weights, total_bbox_targets, + total_bbox_weights, total_dir_targets, total_dir_weights, + total_pos_inds, total_neg_inds) + elif isinstance(self.bbox_assigner, list) and isinstance( + anchors, list): + # class-aware anchors with different feature map sizes + assert len(self.bbox_assigner) == len(anchors), \ + 'The number of bbox assigners and anchors should be the same.' 
+ (total_labels, total_label_weights, total_bbox_targets, + total_bbox_weights, total_dir_targets, total_dir_weights, + total_pos_inds, total_neg_inds) = [], [], [], [], [], [], [], [] + current_anchor_num = 0 + for i, assigner in enumerate(self.bbox_assigner): + current_anchors = anchors[i] + current_anchor_num += current_anchors.size(0) + if self.assign_per_class: + gt_per_cls = (gt_instance_3d.labels_3d == i) + gt_per_cls_instance = InstanceData() + gt_per_cls_instance.labels_3d = gt_instance_3d.labels_3d[ + gt_per_cls] + gt_per_cls_instance.bboxes_3d = gt_instance_3d.bboxes_3d[ + gt_per_cls, :] + anchor_targets = self.anchor_target_single_assigner( + assigner, current_anchors, gt_per_cls_instance, + gt_instance_ignore, input_meta, num_classes, sampling) + else: + anchor_targets = self.anchor_target_single_assigner( + assigner, current_anchors, gt_instance_3d, + gt_instance_ignore, input_meta, num_classes, sampling) + + (labels, label_weights, bbox_targets, bbox_weights, + dir_targets, dir_weights, pos_inds, neg_inds) = anchor_targets + total_labels.append(labels) + total_label_weights.append(label_weights) + total_bbox_targets.append( + bbox_targets.reshape(-1, anchors[i].size(-1))) + total_bbox_weights.append( + bbox_weights.reshape(-1, anchors[i].size(-1))) + total_dir_targets.append(dir_targets) + total_dir_weights.append(dir_weights) + total_pos_inds.append(pos_inds) + total_neg_inds.append(neg_inds) + + total_labels = torch.cat(total_labels, dim=0) + total_label_weights = torch.cat(total_label_weights, dim=0) + total_bbox_targets = torch.cat(total_bbox_targets, dim=0) + total_bbox_weights = torch.cat(total_bbox_weights, dim=0) + total_dir_targets = torch.cat(total_dir_targets, dim=0) + total_dir_weights = torch.cat(total_dir_weights, dim=0) + total_pos_inds = torch.cat(total_pos_inds, dim=0) + total_neg_inds = torch.cat(total_neg_inds, dim=0) + return (total_labels, total_label_weights, total_bbox_targets, + total_bbox_weights, total_dir_targets, total_dir_weights, + total_pos_inds, total_neg_inds) + else: + return self.anchor_target_single_assigner(self.bbox_assigner, + anchors, gt_instance_3d, + gt_instance_ignore, + input_meta, num_classes, + sampling) + + def anchor_target_single_assigner(self, + bbox_assigner, + anchors, + gt_instance_3d, + gt_instance_ignore, + input_meta, + num_classes=1, + sampling=True): + """Assign anchors and encode positive anchors. + + Args: + bbox_assigner (BaseAssigner): assign positive and negative boxes. + anchors (torch.Tensor): Concatenated multi-level anchor. + gt_instance_3d (:obj:`InstanceData`): Gt bboxes. + gt_instance_ignore (torch.Tensor): Ignored gt bboxes. + input_meta (dict): Meta info of each image. + num_classes (int): The number of classes. + sampling (bool): Whether to sample anchors. + + Returns: + tuple[torch.Tensor]: Anchor targets. 
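+
+ Note:
+ When ``gt_instance_3d.labels_3d`` is provided, all anchors start
+ with the background label ``num_classes`` and positive anchors
+ are overwritten with their assigned ground-truth class. If the
+ sample contains no ground-truth boxes, every anchor is treated as
+ negative.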
+ """ + anchors = anchors.reshape(-1, anchors.size(-1)) + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + dir_targets = anchors.new_zeros((anchors.shape[0]), dtype=torch.long) + dir_weights = anchors.new_zeros((anchors.shape[0]), dtype=torch.float) + labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + if len(gt_instance_3d.bboxes_3d) > 0: + if not isinstance(gt_instance_3d.bboxes_3d, torch.Tensor): + gt_instance_3d.bboxes_3d = gt_instance_3d.bboxes_3d.tensor.to( + anchors.device) + pred_instance_3d = InstanceData(priors=anchors) + assign_result = bbox_assigner.assign(pred_instance_3d, + gt_instance_3d, + gt_instance_ignore) + sampling_result = self.bbox_sampler.sample(assign_result, + pred_instance_3d, + gt_instance_3d) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + else: + pos_inds = torch.nonzero( + anchors.new_zeros((anchors.shape[0], ), dtype=torch.bool) > 0, + as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero( + anchors.new_zeros((anchors.shape[0], ), dtype=torch.bool) == 0, + as_tuple=False).squeeze(-1).unique() + + if gt_instance_3d.labels_3d is not None: + labels += num_classes + if len(pos_inds) > 0: + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + pos_dir_targets = get_direction_target( + sampling_result.pos_bboxes, + pos_bbox_targets, + self.dir_offset, + self.dir_limit_offset, + one_hot=False) + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + dir_targets[pos_inds] = pos_dir_targets + dir_weights[pos_inds] = 1.0 + + if gt_instance_3d.labels_3d is None: + labels[pos_inds] = 1 + else: + labels[pos_inds] = gt_instance_3d.labels_3d[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + return (labels, label_weights, bbox_targets, bbox_weights, dir_targets, + dir_weights, pos_inds, neg_inds) + + +def get_direction_target(anchors, + reg_targets, + dir_offset=0, + dir_limit_offset=0, + num_bins=2, + one_hot=True): + """Encode direction to 0 ~ num_bins-1. + + Args: + anchors (torch.Tensor): Concatenated multi-level anchor. + reg_targets (torch.Tensor): Bbox regression targets. + dir_offset (int): Direction offset. + num_bins (int): Number of bins to divide 2*PI. + one_hot (bool): Whether to encode as one hot. + + Returns: + torch.Tensor: Encoded direction targets. + """ + rot_gt = reg_targets[..., 6] + anchors[..., 6] + offset_rot = limit_period(rot_gt - dir_offset, dir_limit_offset, 2 * np.pi) + dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long() + dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1) + if one_hot: + dir_targets = torch.zeros( + *list(dir_cls_targets.shape), + num_bins, + dtype=anchors.dtype, + device=dir_cls_targets.device) + dir_targets.scatter_(dir_cls_targets.unsqueeze(dim=-1).long(), 1.0) + dir_cls_targets = dir_targets + return dir_cls_targets diff --git a/mmdet3d/models/dense_heads/vote_head.py b/mmdet3d/models/dense_heads/vote_head.py new file mode 100755 index 0000000..4ecceab --- /dev/null +++ b/mmdet3d/models/dense_heads/vote_head.py @@ -0,0 +1,838 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from mmcv.ops import furthest_point_sample +from mmdet.models.utils import multi_apply +from mmengine import ConfigDict +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.models.layers import VoteModule, aligned_3d_nms, build_sa_module +from mmdet3d.models.losses import chamfer_distance +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures import Det3DDataSample +from .base_conv_bbox_head import BaseConvBboxHead + + +@MODELS.register_module() +class VoteHead(BaseModule): + r"""Bbox head of `Votenet `_. + + Args: + num_classes (int): The number of class. + bbox_coder (ConfigDict, dict): Bbox coder for encoding and + decoding boxes. Defaults to None. + train_cfg (dict, optional): Config for training. Defaults to None. + test_cfg (dict, optional): Config for testing. Defaults to None. + vote_module_cfg (dict, optional): Config of VoteModule for + point-wise votes. Defaults to None. + vote_aggregation_cfg (dict, optional): Config of vote + aggregation layer. Defaults to None. + pred_layer_cfg (dict, optional): Config of classification + and regression prediction layers. Defaults to None. + objectness_loss (dict, optional): Config of objectness loss. + Defaults to None. + center_loss (dict, optional): Config of center loss. + Defaults to None. + dir_class_loss (dict, optional): Config of direction + classification loss. Defaults to None. + dir_res_loss (dict, optional): Config of direction + residual regression loss. Defaults to None. + size_class_loss (dict, optional): Config of size + classification loss. Defaults to None. + size_res_loss (dict, optional): Config of size + residual regression loss. Defaults to None. + semantic_loss (dict, optional): Config of point-wise + semantic segmentation loss. Defaults to None. + iou_loss (dict, optional): Config of IOU loss for + regression. Defaults to None. + init_cfg (dict, optional): Config of model weight + initialization. Defaults to None. 
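+
+    Example:
+        A rough sketch of the output-channel bookkeeping, using the numbers
+        of a typical SUN RGB-D VoteNet setup purely for illustration:
+
+        >>> num_classes, num_dir_bins, num_sizes = 10, 12, 10
+        >>> num_classes + 2  # _get_cls_out_channels(): classes + objectness
+        12
+        >>> 3 + num_dir_bins * 2 + num_sizes * 4  # _get_reg_out_channels()
+        67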
+ """ + + def __init__(self, + num_classes: int, + bbox_coder: Union[ConfigDict, dict], + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + vote_module_cfg: Optional[dict] = None, + vote_aggregation_cfg: Optional[dict] = None, + pred_layer_cfg: Optional[dict] = None, + objectness_loss: Optional[dict] = None, + center_loss: Optional[dict] = None, + dir_class_loss: Optional[dict] = None, + dir_res_loss: Optional[dict] = None, + size_class_loss: Optional[dict] = None, + size_res_loss: Optional[dict] = None, + semantic_loss: Optional[dict] = None, + iou_loss: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super(VoteHead, self).__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.gt_per_seed = vote_module_cfg['gt_per_seed'] + self.num_proposal = vote_aggregation_cfg['num_point'] + + self.loss_objectness = MODELS.build(objectness_loss) + self.loss_center = MODELS.build(center_loss) + self.loss_dir_res = MODELS.build(dir_res_loss) + self.loss_dir_class = MODELS.build(dir_class_loss) + self.loss_size_res = MODELS.build(size_res_loss) + if size_class_loss is not None: + self.size_class_loss = MODELS.build(size_class_loss) + if semantic_loss is not None: + self.semantic_loss = MODELS.build(semantic_loss) + if iou_loss is not None: + self.iou_loss = MODELS.build(iou_loss) + else: + self.iou_loss = None + + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.num_sizes = self.bbox_coder.num_sizes + self.num_dir_bins = self.bbox_coder.num_dir_bins + + self.vote_module = VoteModule(**vote_module_cfg) + self.vote_aggregation = build_sa_module(vote_aggregation_cfg) + self.fp16_enabled = False + + # Bbox classification and regression + self.conv_pred = BaseConvBboxHead( + **pred_layer_cfg, + num_cls_out_channels=self._get_cls_out_channels(), + num_reg_out_channels=self._get_reg_out_channels()) + + @property + def sample_mode(self): + if self.training: + sample_mode = self.train_cfg.sample_mode + else: + sample_mode = self.test_cfg.sample_mode + assert sample_mode in ['vote', 'seed', 'random', 'spec'] + return sample_mode + + def _get_cls_out_channels(self): + """Return the channel number of classification outputs.""" + # Class numbers (k) + objectness (2) + return self.num_classes + 2 + + def _get_reg_out_channels(self): + """Return the channel number of regression outputs.""" + # Objectness scores (2), center residual (3), + # heading class+residual (num_dir_bins*2), + # size class+residual(num_sizes*4) + return 3 + self.num_dir_bins * 2 + self.num_sizes * 4 + + def _extract_input(self, feat_dict: dict) -> tuple: + """Extract inputs from features dictionary. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + tuple[Tensor]: Arrage as following three tensor. + + - Coordinates of input points. + - Features of input points. + - Indices of input points. 
+ """ + + # for imvotenet + if 'seed_points' in feat_dict and \ + 'seed_features' in feat_dict and \ + 'seed_indices' in feat_dict: + seed_points = feat_dict['seed_points'] + seed_features = feat_dict['seed_features'] + seed_indices = feat_dict['seed_indices'] + # for votenet + else: + seed_points = feat_dict['fp_xyz'][-1] + seed_features = feat_dict['fp_features'][-1] + seed_indices = feat_dict['fp_indices'][-1] + + return seed_points, seed_features, seed_indices + + def predict(self, + points: List[torch.Tensor], + feats_dict: Dict[str, torch.Tensor], + batch_data_samples: List[Det3DDataSample], + use_nms: bool = True, + **kwargs) -> List[InstanceData]: + """ + Args: + points (list[tensor]): Point clouds of multiple samples. + feats_dict (dict): Features from FPN or backbone.. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes meta information of data. + use_nms (bool): Whether do the nms for predictions. + Defaults to True. + + Returns: + list[:obj:`InstanceData`]: List of processed predictions. Each + InstanceData contains 3d Bounding boxes and corresponding + scores and labels. + """ + preds_dict = self(feats_dict) + # `preds_dict` can be used in H3DNET + feats_dict.update(preds_dict) + + batch_size = len(batch_data_samples) + batch_input_metas = [] + for batch_index in range(batch_size): + metainfo = batch_data_samples[batch_index].metainfo + batch_input_metas.append(metainfo) + + results_list = self.predict_by_feat( + points, preds_dict, batch_input_metas, use_nms=use_nms, **kwargs) + return results_list + + def loss_and_predict(self, + points: List[torch.Tensor], + feats_dict: Dict[str, torch.Tensor], + batch_data_samples: List[Det3DDataSample], + ret_target: bool = False, + proposal_cfg: dict = None, + **kwargs) -> Tuple: + """ + Args: + points (list[tensor]): Points cloud of multiple samples. + feats_dict (dict): Predictions from backbone or FPN. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each sample and + corresponding annotations. + ret_target (bool): Whether return the assigned target. + Defaults to False. + proposal_cfg (dict): Configure for proposal process. + Defaults to True. + + Returns: + tuple: Contains loss and predictions after post-process. 
+ """ + preds_dict = self.forward(feats_dict) + feats_dict.update(preds_dict) + batch_gt_instance_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + batch_pts_semantic_mask = [] + batch_pts_instance_mask = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instance_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + batch_pts_semantic_mask.append( + data_sample.gt_pts_seg.get('pts_semantic_mask', None)) + batch_pts_instance_mask.append( + data_sample.gt_pts_seg.get('pts_instance_mask', None)) + + loss_inputs = (points, preds_dict, batch_gt_instance_3d) + losses = self.loss_by_feat( + *loss_inputs, + batch_pts_semantic_mask=batch_pts_semantic_mask, + batch_pts_instance_mask=batch_pts_instance_mask, + batch_input_metas=batch_input_metas, + batch_gt_instances_ignore=batch_gt_instances_ignore, + ret_target=ret_target, + **kwargs) + + results_list = self.predict_by_feat( + points, + preds_dict, + batch_input_metas, + use_nms=proposal_cfg.use_nms, + **kwargs) + + return losses, results_list + + def loss(self, + points: List[torch.Tensor], + feats_dict: Dict[str, torch.Tensor], + batch_data_samples: List[Det3DDataSample], + ret_target: bool = False, + **kwargs) -> dict: + """ + Args: + points (list[tensor]): Points cloud of multiple samples. + feats_dict (dict): Predictions from backbone or FPN. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each sample and + corresponding annotations. + ret_target (bool): Whether return the assigned target. + Defaults to False. + + Returns: + dict: A dictionary of loss components. + """ + preds_dict = self.forward(feats_dict) + batch_gt_instance_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + batch_pts_semantic_mask = [] + batch_pts_instance_mask = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instance_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + batch_pts_semantic_mask.append( + data_sample.gt_pts_seg.get('pts_semantic_mask', None)) + batch_pts_instance_mask.append( + data_sample.gt_pts_seg.get('pts_instance_mask', None)) + + loss_inputs = (points, preds_dict, batch_gt_instance_3d) + losses = self.loss_by_feat( + *loss_inputs, + batch_pts_semantic_mask=batch_pts_semantic_mask, + batch_pts_instance_mask=batch_pts_instance_mask, + batch_input_metas=batch_input_metas, + batch_gt_instances_ignore=batch_gt_instances_ignore, + ret_target=ret_target, + **kwargs) + return losses + + def forward(self, feat_dict: dict) -> dict: + """Forward pass. + + Note: + The forward of VoteHead is divided into 4 steps: + + 1. Generate vote_points from seed_points. + 2. Aggregate vote_points. + 3. Predict bbox and score. + 4. Decode predictions. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + dict: Predictions of vote head. + """ + + seed_points, seed_features, seed_indices = self._extract_input( + feat_dict) + + # 1. generate vote_points from seed_points + vote_points, vote_features, vote_offset = self.vote_module( + seed_points, seed_features) + results = dict( + seed_points=seed_points, + seed_indices=seed_indices, + vote_points=vote_points, + vote_features=vote_features, + vote_offset=vote_offset) + + # 2. 
aggregate vote_points
+        if self.sample_mode == 'vote':
+            # use fps in vote_aggregation
+            aggregation_inputs = dict(
+                points_xyz=vote_points, features=vote_features)
+        elif self.sample_mode == 'seed':
+            # FPS on seed and choose the votes corresponding to the seeds
+            sample_indices = furthest_point_sample(seed_points,
+                                                   self.num_proposal)
+            aggregation_inputs = dict(
+                points_xyz=vote_points,
+                features=vote_features,
+                indices=sample_indices)
+        elif self.sample_mode == 'random':
+            # Random sampling from the votes
+            batch_size, num_seed = seed_points.shape[:2]
+            sample_indices = seed_points.new_tensor(
+                torch.randint(0, num_seed, (batch_size, self.num_proposal)),
+                dtype=torch.int32)
+            aggregation_inputs = dict(
+                points_xyz=vote_points,
+                features=vote_features,
+                indices=sample_indices)
+        elif self.sample_mode == 'spec':
+            # Specify the new center in vote_aggregation
+            aggregation_inputs = dict(
+                points_xyz=seed_points,
+                features=seed_features,
+                target_xyz=vote_points)
+        else:
+            raise NotImplementedError(
+                f'Sample mode {self.sample_mode} is not supported!')
+
+        vote_aggregation_ret = self.vote_aggregation(**aggregation_inputs)
+        aggregated_points, features, aggregated_indices = vote_aggregation_ret
+
+        results['aggregated_points'] = aggregated_points
+        results['aggregated_features'] = features
+        results['aggregated_indices'] = aggregated_indices
+
+        # 3. predict bbox and score
+        cls_predictions, reg_predictions = self.conv_pred(features)
+
+        # 4. decode predictions
+        decode_res = self.bbox_coder.split_pred(cls_predictions,
+                                                reg_predictions,
+                                                aggregated_points)
+        results.update(decode_res)
+        return results
+
+    def loss_by_feat(
+            self,
+            points: List[torch.Tensor],
+            bbox_preds_dict: dict,
+            batch_gt_instances_3d: List[InstanceData],
+            batch_pts_semantic_mask: Optional[List[torch.Tensor]] = None,
+            batch_pts_instance_mask: Optional[List[torch.Tensor]] = None,
+            ret_target: bool = False,
+            **kwargs) -> dict:
+        """Compute loss.
+
+        Args:
+            points (list[torch.Tensor]): Input points.
+            bbox_preds_dict (dict): Predictions from forward of vote head.
+            batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of
+                gt_instances. It usually includes ``bboxes`` and ``labels``
+                attributes.
+            batch_pts_semantic_mask (list[tensor]): Semantic mask
+                of the point clouds. Defaults to None.
+            batch_pts_instance_mask (list[tensor]): Instance mask
+                of the point clouds. Defaults to None.
+            batch_input_metas (list[dict]): Contains pcd and img's meta info.
+            ret_target (bool): Return targets or not. Defaults to False.
+
+        Returns:
+            dict: Losses of VoteNet.
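+
+        Example:
+            A sketch of a call and of the returned keys (``head``,
+            ``points``, ``preds_dict`` and ``gt_instances_3d`` are assumed
+            to exist already):
+
+            >>> losses = head.loss_by_feat(
+            ...     points, preds_dict, gt_instances_3d,
+            ...     batch_pts_semantic_mask=[None] * len(points),
+            ...     batch_pts_instance_mask=[None] * len(points))
+            >>> # losses holds 'vote_loss', 'objectness_loss', 'center_loss',
+            >>> # 'dir_class_loss', 'dir_res_loss', 'size_class_loss',
+            >>> # 'size_res_loss' and 'semantic_loss'
+            >>> # (plus 'iou_loss' when an IoU loss is configured)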
+ """ + + targets = self.get_targets(points, bbox_preds_dict, + batch_gt_instances_3d, + batch_pts_semantic_mask, + batch_pts_instance_mask) + (vote_targets, vote_target_masks, size_class_targets, size_res_targets, + dir_class_targets, dir_res_targets, center_targets, + assigned_center_targets, mask_targets, valid_gt_masks, + objectness_targets, objectness_weights, box_loss_weights, + valid_gt_weights) = targets + + # calculate vote loss + vote_loss = self.vote_module.get_loss(bbox_preds_dict['seed_points'], + bbox_preds_dict['vote_points'], + bbox_preds_dict['seed_indices'], + vote_target_masks, vote_targets) + + # calculate objectness loss + objectness_loss = self.loss_objectness( + bbox_preds_dict['obj_scores'].transpose(2, 1), + objectness_targets, + weight=objectness_weights) + + # calculate center loss + source2target_loss, target2source_loss = self.loss_center( + bbox_preds_dict['center'], + center_targets, + src_weight=box_loss_weights, + dst_weight=valid_gt_weights) + center_loss = source2target_loss + target2source_loss + + # calculate direction class loss + dir_class_loss = self.loss_dir_class( + bbox_preds_dict['dir_class'].transpose(2, 1), + dir_class_targets, + weight=box_loss_weights) + + # calculate direction residual loss + batch_size, proposal_num = size_class_targets.shape[:2] + heading_label_one_hot = vote_targets.new_zeros( + (batch_size, proposal_num, self.num_dir_bins)) + heading_label_one_hot.scatter_(2, dir_class_targets.unsqueeze(-1), 1) + dir_res_norm = torch.sum( + bbox_preds_dict['dir_res_norm'] * heading_label_one_hot, -1) + dir_res_loss = self.loss_dir_res( + dir_res_norm, dir_res_targets, weight=box_loss_weights) + + # calculate size class loss + size_class_loss = self.size_class_loss( + bbox_preds_dict['size_class'].transpose(2, 1), + size_class_targets, + weight=box_loss_weights) + + # calculate size residual loss + one_hot_size_targets = vote_targets.new_zeros( + (batch_size, proposal_num, self.num_sizes)) + one_hot_size_targets.scatter_(2, size_class_targets.unsqueeze(-1), 1) + one_hot_size_targets_expand = one_hot_size_targets.unsqueeze( + -1).repeat(1, 1, 1, 3).contiguous() + size_residual_norm = torch.sum( + bbox_preds_dict['size_res_norm'] * one_hot_size_targets_expand, 2) + box_loss_weights_expand = box_loss_weights.unsqueeze(-1).repeat( + 1, 1, 3) + size_res_loss = self.loss_size_res( + size_residual_norm, + size_res_targets, + weight=box_loss_weights_expand) + + # calculate semantic loss + semantic_loss = self.semantic_loss( + bbox_preds_dict['sem_scores'].transpose(2, 1), + mask_targets, + weight=box_loss_weights) + + losses = dict( + vote_loss=vote_loss, + objectness_loss=objectness_loss, + semantic_loss=semantic_loss, + center_loss=center_loss, + dir_class_loss=dir_class_loss, + dir_res_loss=dir_res_loss, + size_class_loss=size_class_loss, + size_res_loss=size_res_loss) + + if self.iou_loss: + corners_pred = self.bbox_coder.decode_corners( + bbox_preds_dict['center'], size_residual_norm, + one_hot_size_targets_expand) + corners_target = self.bbox_coder.decode_corners( + assigned_center_targets, size_res_targets, + one_hot_size_targets_expand) + iou_loss = self.iou_loss( + corners_pred, corners_target, weight=box_loss_weights) + losses['iou_loss'] = iou_loss + + if ret_target: + losses['targets'] = targets + + return losses + + def get_targets( + self, + points, + bbox_preds: dict = None, + batch_gt_instances_3d: List[InstanceData] = None, + batch_pts_semantic_mask: List[torch.Tensor] = None, + batch_pts_instance_mask: List[torch.Tensor] = None, 
+ ): + """Generate targets of vote head. + + Args: + points (list[torch.Tensor]): Points of each batch. + bbox_preds (torch.Tensor): Bounding box predictions of vote head. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_pts_semantic_mask (list[tensor]): Semantic gt mask for + point clouds. Defaults to None. + batch_pts_instance_mask (list[tensor]): Instance gt mask for + point clouds. Defaults to None. + + Returns: + tuple[torch.Tensor]: Targets of vote head. + """ + # find empty example + valid_gt_masks = list() + gt_num = list() + batch_gt_labels_3d = [ + gt_instances_3d.labels_3d + for gt_instances_3d in batch_gt_instances_3d + ] + batch_gt_bboxes_3d = [ + gt_instances_3d.bboxes_3d + for gt_instances_3d in batch_gt_instances_3d + ] + for index in range(len(batch_gt_labels_3d)): + if len(batch_gt_labels_3d[index]) == 0: + fake_box = batch_gt_bboxes_3d[index].tensor.new_zeros( + 1, batch_gt_bboxes_3d[index].tensor.shape[-1]) + batch_gt_bboxes_3d[index] = batch_gt_bboxes_3d[index].new_box( + fake_box) + batch_gt_labels_3d[index] = batch_gt_labels_3d[ + index].new_zeros(1) + valid_gt_masks.append(batch_gt_labels_3d[index].new_zeros(1)) + gt_num.append(1) + else: + valid_gt_masks.append(batch_gt_labels_3d[index].new_ones( + batch_gt_labels_3d[index].shape)) + gt_num.append(batch_gt_labels_3d[index].shape[0]) + max_gt_num = max(gt_num) + + aggregated_points = [ + bbox_preds['aggregated_points'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + (vote_targets, vote_target_masks, size_class_targets, size_res_targets, + dir_class_targets, dir_res_targets, center_targets, + assigned_center_targets, mask_targets, + objectness_targets, objectness_masks) = multi_apply( + self._get_targets_single, points, batch_gt_bboxes_3d, + batch_gt_labels_3d, batch_pts_semantic_mask, + batch_pts_instance_mask, aggregated_points) + + # pad targets as original code of votenet. 
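+        # Ground truths are padded to the largest per-sample count
+        # (max_gt_num) so that center_targets and valid_gt_masks of
+        # different samples can be stacked into batch tensors below; the
+        # padded entries get zeros in valid_gt_masks and thus zero weight.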
+ for index in range(len(batch_gt_labels_3d)): + pad_num = max_gt_num - batch_gt_labels_3d[index].shape[0] + center_targets[index] = F.pad(center_targets[index], + (0, 0, 0, pad_num)) + valid_gt_masks[index] = F.pad(valid_gt_masks[index], (0, pad_num)) + + vote_targets = torch.stack(vote_targets) + vote_target_masks = torch.stack(vote_target_masks) + center_targets = torch.stack(center_targets) + valid_gt_masks = torch.stack(valid_gt_masks) + + assigned_center_targets = torch.stack(assigned_center_targets) + objectness_targets = torch.stack(objectness_targets) + objectness_weights = torch.stack(objectness_masks) + objectness_weights /= (torch.sum(objectness_weights) + 1e-6) + box_loss_weights = objectness_targets.float() / ( + torch.sum(objectness_targets).float() + 1e-6) + valid_gt_weights = valid_gt_masks.float() / ( + torch.sum(valid_gt_masks.float()) + 1e-6) + dir_class_targets = torch.stack(dir_class_targets) + dir_res_targets = torch.stack(dir_res_targets) + size_class_targets = torch.stack(size_class_targets) + size_res_targets = torch.stack(size_res_targets) + mask_targets = torch.stack(mask_targets) + + return (vote_targets, vote_target_masks, size_class_targets, + size_res_targets, dir_class_targets, dir_res_targets, + center_targets, assigned_center_targets, mask_targets, + valid_gt_masks, objectness_targets, objectness_weights, + box_loss_weights, valid_gt_weights) + + def _get_targets_single(self, + points, + gt_bboxes_3d, + gt_labels_3d, + pts_semantic_mask=None, + pts_instance_mask=None, + aggregated_points=None): + """Generate targets of vote head for single batch. + + Args: + points (torch.Tensor): Points of each batch. + gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth + boxes of each batch. + gt_labels_3d (torch.Tensor): Labels of each batch. + pts_semantic_mask (torch.Tensor): Point-wise semantic + label of each batch. + pts_instance_mask (torch.Tensor): Point-wise instance + label of each batch. + aggregated_points (torch.Tensor): Aggregated points from + vote aggregation layer. + + Returns: + tuple[torch.Tensor]: Targets of vote head. 
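+
+        Note:
+            Proposals are matched to ground-truth centers by Chamfer
+            distance. With hypothetical thresholds
+            ``pos_distance_thr=0.3`` and ``neg_distance_thr=0.6``, a
+            proposal 0.2 m from its nearest center gets objectness target 1
+            and weight 1, one 0.7 m away gets target 0 and weight 1, and
+            one 0.4 m away gets weight 0, i.e. it is ignored by the
+            objectness loss.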
+ """ + assert self.bbox_coder.with_rot or pts_semantic_mask is not None + + gt_bboxes_3d = gt_bboxes_3d.to(points.device) + + # generate votes target + num_points = points.shape[0] + if self.bbox_coder.with_rot: + vote_targets = points.new_zeros([num_points, 3 * self.gt_per_seed]) + vote_target_masks = points.new_zeros([num_points], + dtype=torch.long) + vote_target_idx = points.new_zeros([num_points], dtype=torch.long) + box_indices_all = gt_bboxes_3d.points_in_boxes_all(points) + for i in range(gt_labels_3d.shape[0]): + box_indices = box_indices_all[:, i] + indices = torch.nonzero( + box_indices, as_tuple=False).squeeze(-1) + selected_points = points[indices] + vote_target_masks[indices] = 1 + vote_targets_tmp = vote_targets[indices] + votes = gt_bboxes_3d.gravity_center[i].unsqueeze( + 0) - selected_points[:, :3] + + for j in range(self.gt_per_seed): + column_indices = torch.nonzero( + vote_target_idx[indices] == j, + as_tuple=False).squeeze(-1) + vote_targets_tmp[column_indices, + int(j * 3):int(j * 3 + + 3)] = votes[column_indices] + if j == 0: + vote_targets_tmp[column_indices] = votes[ + column_indices].repeat(1, self.gt_per_seed) + + vote_targets[indices] = vote_targets_tmp + vote_target_idx[indices] = torch.clamp( + vote_target_idx[indices] + 1, max=2) + elif pts_semantic_mask is not None: + vote_targets = points.new_zeros([num_points, 3]) + vote_target_masks = points.new_zeros([num_points], + dtype=torch.long) + for i in torch.unique(pts_instance_mask): + indices = torch.nonzero( + pts_instance_mask == i, as_tuple=False).squeeze(-1) + if pts_semantic_mask[indices[0]] < self.num_classes: + selected_points = points[indices, :3] + center = 0.5 * ( + selected_points.min(0)[0] + selected_points.max(0)[0]) + vote_targets[indices, :] = center - selected_points + vote_target_masks[indices] = 1 + vote_targets = vote_targets.repeat((1, self.gt_per_seed)) + else: + raise NotImplementedError + + (center_targets, size_class_targets, size_res_targets, + dir_class_targets, + dir_res_targets) = self.bbox_coder.encode(gt_bboxes_3d, gt_labels_3d) + + proposal_num = aggregated_points.shape[0] + distance1, _, assignment, _ = chamfer_distance( + aggregated_points.unsqueeze(0), + center_targets.unsqueeze(0), + reduction='none') + assignment = assignment.squeeze(0) + euclidean_distance1 = torch.sqrt(distance1.squeeze(0) + 1e-6) + + objectness_targets = points.new_zeros((proposal_num), dtype=torch.long) + objectness_targets[ + euclidean_distance1 < self.train_cfg['pos_distance_thr']] = 1 + + objectness_masks = points.new_zeros((proposal_num)) + objectness_masks[ + euclidean_distance1 < self.train_cfg['pos_distance_thr']] = 1.0 + objectness_masks[ + euclidean_distance1 > self.train_cfg['neg_distance_thr']] = 1.0 + + dir_class_targets = dir_class_targets[assignment] + dir_res_targets = dir_res_targets[assignment] + dir_res_targets /= (np.pi / self.num_dir_bins) + size_class_targets = size_class_targets[assignment] + size_res_targets = size_res_targets[assignment] + + one_hot_size_targets = gt_bboxes_3d.tensor.new_zeros( + (proposal_num, self.num_sizes)) + one_hot_size_targets.scatter_(1, size_class_targets.unsqueeze(-1), 1) + one_hot_size_targets = one_hot_size_targets.unsqueeze(-1).repeat( + 1, 1, 3) + mean_sizes = size_res_targets.new_tensor( + self.bbox_coder.mean_sizes).unsqueeze(0) + pos_mean_sizes = torch.sum(one_hot_size_targets * mean_sizes, 1) + size_res_targets /= pos_mean_sizes + + mask_targets = gt_labels_3d[assignment] + assigned_center_targets = center_targets[assignment] + + return 
(vote_targets, vote_target_masks, size_class_targets, + size_res_targets, dir_class_targets, + dir_res_targets, center_targets, assigned_center_targets, + mask_targets.long(), objectness_targets, objectness_masks) + + def predict_by_feat(self, + points: List[torch.Tensor], + bbox_preds_dict: dict, + batch_input_metas: List[dict], + use_nms: bool = True, + **kwargs) -> List[InstanceData]: + """Generate bboxes from vote head predictions. + + Args: + points (List[torch.Tensor]): Input points of multiple samples. + bbox_preds_dict (dict): Predictions from vote head. + batch_input_metas (list[dict]): Each item + contains the meta information of each sample. + use_nms (bool): Whether to apply NMS, skip nms postprocessing + while using vote head in rpn stage. + + Returns: + list[:obj:`InstanceData`] or Tensor: Return list of processed + predictions when `use_nms` is True. Each InstanceData cantains + 3d Bounding boxes and corresponding scores and labels. + Return raw bboxes when `use_nms` is False. + """ + # decode boxes + stack_points = torch.stack(points) + obj_scores = F.softmax(bbox_preds_dict['obj_scores'], dim=-1)[..., -1] + sem_scores = F.softmax(bbox_preds_dict['sem_scores'], dim=-1) + bbox3d = self.bbox_coder.decode(bbox_preds_dict) + + batch_size = bbox3d.shape[0] + results_list = list() + if use_nms: + for batch_index in range(batch_size): + temp_results = InstanceData() + bbox_selected, score_selected, labels = \ + self.multiclass_nms_single( + obj_scores[batch_index], + sem_scores[batch_index], + bbox3d[batch_index], + stack_points[batch_index, ..., :3], + batch_input_metas[batch_index]) + bbox = batch_input_metas[batch_index]['box_type_3d']( + bbox_selected, + box_dim=bbox_selected.shape[-1], + with_yaw=self.bbox_coder.with_rot) + temp_results.bboxes_3d = bbox + temp_results.scores_3d = score_selected + temp_results.labels_3d = labels + results_list.append(temp_results) + + return results_list + else: + # TODO unify it when refactor the Augtest + return bbox3d + + def multiclass_nms_single(self, obj_scores: Tensor, sem_scores: Tensor, + bbox: Tensor, points: Tensor, + input_meta: dict) -> Tuple: + """Multi-class nms in single batch. + + Args: + obj_scores (torch.Tensor): Objectness score of bounding boxes. + sem_scores (torch.Tensor): semantic class score of bounding boxes. + bbox (torch.Tensor): Predicted bounding boxes. + points (torch.Tensor): Input points. + input_meta (dict): Point cloud and image's meta info. + + Returns: + tuple[torch.Tensor]: Bounding boxes, scores and labels. 
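+
+        Note:
+            With ``per_class_proposal=True`` in ``test_cfg``, every kept box
+            is duplicated once per class ``k`` and rescored as
+            ``obj_score * sem_score[:, k]``; otherwise each box keeps its
+            objectness score and its argmax semantic label.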
+ """ + bbox = input_meta['box_type_3d']( + bbox, + box_dim=bbox.shape[-1], + with_yaw=self.bbox_coder.with_rot, + origin=(0.5, 0.5, 0.5)) + box_indices = bbox.points_in_boxes_all(points) + + corner3d = bbox.corners + minmax_box3d = corner3d.new(torch.Size((corner3d.shape[0], 6))) + minmax_box3d[:, :3] = torch.min(corner3d, dim=1)[0] + minmax_box3d[:, 3:] = torch.max(corner3d, dim=1)[0] + + nonempty_box_mask = box_indices.T.sum(1) > 5 + + bbox_classes = torch.argmax(sem_scores, -1) + nms_selected = aligned_3d_nms(minmax_box3d[nonempty_box_mask], + obj_scores[nonempty_box_mask], + bbox_classes[nonempty_box_mask], + self.test_cfg.nms_thr) + + # filter empty boxes and boxes with low score + scores_mask = (obj_scores > self.test_cfg.score_thr) + nonempty_box_inds = torch.nonzero( + nonempty_box_mask, as_tuple=False).flatten() + nonempty_mask = torch.zeros_like(bbox_classes).scatter( + 0, nonempty_box_inds[nms_selected], 1) + selected = (nonempty_mask.bool() & scores_mask.bool()) + + if self.test_cfg.per_class_proposal: + bbox_selected, score_selected, labels = [], [], [] + for k in range(sem_scores.shape[-1]): + bbox_selected.append(bbox[selected].tensor) + score_selected.append(obj_scores[selected] * + sem_scores[selected][:, k]) + labels.append( + torch.zeros_like(bbox_classes[selected]).fill_(k)) + bbox_selected = torch.cat(bbox_selected, 0) + score_selected = torch.cat(score_selected, 0) + labels = torch.cat(labels, 0) + else: + bbox_selected = bbox[selected].tensor + score_selected = obj_scores[selected] + labels = bbox_classes[selected] + + return bbox_selected, score_selected, labels diff --git a/mmdet3d/models/detectors/__init__.py b/mmdet3d/models/detectors/__init__.py new file mode 100755 index 0000000..c95e00c --- /dev/null +++ b/mmdet3d/models/detectors/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import Base3DDetector +from .centerpoint import CenterPoint +from .dfm import DfM +from .dynamic_voxelnet import DynamicVoxelNet +from .fcos_mono3d import FCOSMono3D +from .groupfree3dnet import GroupFree3DNet +from .h3dnet import H3DNet +from .imvotenet import ImVoteNet +from .imvoxelnet import ImVoxelNet +from .mink_single_stage import MinkSingleStage3DDetector +from .multiview_dfm import MultiViewDfM +from .mvx_faster_rcnn import DynamicMVXFasterRCNN, MVXFasterRCNN +from .mvx_two_stage import MVXTwoStageDetector +from .parta2 import PartA2 +from .point_rcnn import PointRCNN +from .pv_rcnn import PointVoxelRCNN +from .sassd import SASSD +from .single_stage_mono3d import SingleStageMono3DDetector +from .smoke_mono3d import SMOKEMono3D +from .ssd3dnet import SSD3DNet +from .votenet import VoteNet +from .voxelnet import VoxelNet + +__all__ = [ + 'Base3DDetector', 'VoxelNet', 'DynamicVoxelNet', 'MVXTwoStageDetector', + 'DynamicMVXFasterRCNN', 'MVXFasterRCNN', 'PartA2', 'VoteNet', 'H3DNet', + 'CenterPoint', 'SSD3DNet', 'ImVoteNet', 'SingleStageMono3DDetector', + 'FCOSMono3D', 'ImVoxelNet', 'GroupFree3DNet', 'PointRCNN', 'SMOKEMono3D', + 'SASSD', 'MinkSingleStage3DDetector', 'MultiViewDfM', 'DfM', + 'PointVoxelRCNN' +] diff --git a/mmdet3d/models/detectors/base.py b/mmdet3d/models/detectors/base.py new file mode 100755 index 0000000..9f7bb5d --- /dev/null +++ b/mmdet3d/models/detectors/base.py @@ -0,0 +1,152 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Union + +from mmdet.models import BaseDetector +from mmengine.structures import InstanceData + +from mmdet3d.registry import MODELS +from mmdet3d.structures.det3d_data_sample import (ForwardResults, + OptSampleList, SampleList) +from mmdet3d.utils.typing_utils import (OptConfigType, OptInstanceList, + OptMultiConfig) + + +@MODELS.register_module() +class Base3DDetector(BaseDetector): + """Base class for 3D detectors. + + Args: + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`BaseDataPreprocessor`. it usually includes, + ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``. + init_cfg (dict or ConfigDict, optional): the config to control the + initialization. Defaults to None. + """ + + def __init__(self, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + def forward(self, + inputs: Union[dict, List[dict]], + data_samples: OptSampleList = None, + mode: str = 'tensor', + **kwargs) -> ForwardResults: + """The unified entry for a forward process in both training and test. + + The method should accept three modes: "tensor", "predict" and "loss": + + - "tensor": Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`Det3DDataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + inputs (dict | list[dict]): When it is a list[dict], the + outer list indicate the test time augmentation. Each + dict contains batch inputs + which include 'points' and 'imgs' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor): Image tensor has shape (B, C, H, W). + data_samples (list[:obj:`Det3DDataSample`], + list[list[:obj:`Det3DDataSample`]], optional): The + annotation data of every samples. When it is a list[list], the + outer list indicate the test time augmentation, and the + inter list indicate the batch. Otherwise, the list simply + indicate the batch. Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="predict"``, return a list of :obj:`Det3DDataSample`. + - If ``mode="loss"``, return a dict of tensor. + """ + if mode == 'loss': + return self.loss(inputs, data_samples, **kwargs) + elif mode == 'predict': + if isinstance(data_samples[0], list): + # aug test + assert len(data_samples[0]) == 1, 'Only support ' \ + 'batch_size 1 ' \ + 'in mmdet3d when ' \ + 'do the test' \ + 'time augmentation.' + return self.aug_test(inputs, data_samples, **kwargs) + else: + return self.predict(inputs, data_samples, **kwargs) + elif mode == 'tensor': + return self._forward(inputs, data_samples, **kwargs) + else: + raise RuntimeError(f'Invalid mode "{mode}". ' + 'Only supports loss, predict and tensor mode') + + def add_pred_to_datasample( + self, + data_samples: SampleList, + data_instances_3d: OptInstanceList = None, + data_instances_2d: OptInstanceList = None, + ) -> SampleList: + """Convert results list to `Det3DDataSample`. 
+ + Subclasses could override it to be compatible for some multi-modality + 3D detectors. + + Args: + data_samples (list[:obj:`Det3DDataSample`]): The input data. + data_instances_3d (list[:obj:`InstanceData`], optional): 3D + Detection results of each sample. + data_instances_2d (list[:obj:`InstanceData`], optional): 2D + Detection results of each sample. + + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input. Each Det3DDataSample usually contains + 'pred_instances_3d'. And the ``pred_instances_3d`` normally + contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of 3D bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C) where C >=7. + + When there are image prediction in some models, it should + contains `pred_instances`, And the ``pred_instances`` normally + contains following keys. + + - scores (Tensor): Classification scores of image, has a shape + (num_instance, ) + - labels (Tensor): Predict Labels of 2D bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Contains a tensor with shape + (num_instances, 4). + """ + + assert (data_instances_2d is not None) or \ + (data_instances_3d is not None),\ + 'please pass at least one type of data_samples' + + if data_instances_2d is None: + data_instances_2d = [ + InstanceData() for _ in range(len(data_instances_3d)) + ] + if data_instances_3d is None: + data_instances_3d = [ + InstanceData() for _ in range(len(data_instances_2d)) + ] + + for i, data_sample in enumerate(data_samples): + data_sample.pred_instances_3d = data_instances_3d[i] + data_sample.pred_instances = data_instances_2d[i] + return data_samples diff --git a/mmdet3d/models/detectors/centerpoint.py b/mmdet3d/models/detectors/centerpoint.py new file mode 100755 index 0000000..e628f01 --- /dev/null +++ b/mmdet3d/models/detectors/centerpoint.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +from mmdet3d.registry import MODELS +from .mvx_two_stage import MVXTwoStageDetector + + +@MODELS.register_module() +class CenterPoint(MVXTwoStageDetector): + """Base class of Multi-modality VoxelNet. + + Args: + pts_voxel_encoder (dict, optional): Point voxelization + encoder layer. Defaults to None. + pts_middle_encoder (dict, optional): Middle encoder layer + of points cloud modality. Defaults to None. + pts_fusion_layer (dict, optional): Fusion layer. + Defaults to None. + img_backbone (dict, optional): Backbone of extracting + images feature. Defaults to None. + pts_backbone (dict, optional): Backbone of extracting + points features. Defaults to None. + img_neck (dict, optional): Neck of extracting + image features. Defaults to None. + pts_neck (dict, optional): Neck of extracting + points features. Defaults to None. + pts_bbox_head (dict, optional): Bboxes head of + point cloud modality. Defaults to None. + img_roi_head (dict, optional): RoI head of image + modality. Defaults to None. + img_rpn_head (dict, optional): RPN head of image + modality. Defaults to None. + train_cfg (dict, optional): Train config of model. + Defaults to None. + test_cfg (dict, optional): Train config of model. + Defaults to None. + init_cfg (dict, optional): Initialize config of + model. Defaults to None. + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`Det3DDataPreprocessor`. Defaults to None. 
+ """ + + def __init__(self, + pts_voxel_encoder: Optional[dict] = None, + pts_middle_encoder: Optional[dict] = None, + pts_fusion_layer: Optional[dict] = None, + img_backbone: Optional[dict] = None, + pts_backbone: Optional[dict] = None, + img_neck: Optional[dict] = None, + pts_neck: Optional[dict] = None, + pts_bbox_head: Optional[dict] = None, + img_roi_head: Optional[dict] = None, + img_rpn_head: Optional[dict] = None, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None, + data_preprocessor: Optional[dict] = None, + **kwargs): + + super(CenterPoint, + self).__init__(pts_voxel_encoder, pts_middle_encoder, + pts_fusion_layer, img_backbone, pts_backbone, + img_neck, pts_neck, pts_bbox_head, img_roi_head, + img_rpn_head, train_cfg, test_cfg, init_cfg, + data_preprocessor, **kwargs) diff --git a/mmdet3d/models/detectors/dfm.py b/mmdet3d/models/detectors/dfm.py new file mode 100755 index 0000000..736e43e --- /dev/null +++ b/mmdet3d/models/detectors/dfm.py @@ -0,0 +1,234 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmdet.models.detectors import BaseDetector + +from mmdet3d.registry import MODELS +from mmdet3d.structures.ops import bbox3d2result +from mmdet3d.utils import ConfigType + + +@MODELS.register_module() +class DfM(BaseDetector): + r"""`Monocular 3D Object Detection with Depth from Motion. + `_. + + Args: + backbone (:obj:`ConfigDict` or dict): The backbone config. + neck (:obj:`ConfigDict` or dict): The neck config. + backbone_stereo (:obj:`ConfigDict` or dict): The stereo backbone + config. + backbone_3d (:obj:`ConfigDict` or dict): The 3d backbone config. + neck_3d (:obj:`ConfigDict` or dict): The 3D neck config. + bbox_head_3d (:obj:`ConfigDict` or dict): The 3d bbox head config. + neck_2d (:obj:`ConfigDict` or dict, optional): The 2D neck config + for 2D object detection. Defaults to None. + bbox_head_2d (:obj:`ConfigDict` or dict, optional): The 2D bbox + head config for 2D object detection. Defaults to None. + depth_head_2d (:obj:`ConfigDict` or dict, optional): The 2D depth + head config for depth estimation in fov space. Defaults to None. + depth_head (:obj:`ConfigDict` or dict, optional): The depth head + config for depth estimation in 3D voxel projected to fov space . + train_cfg (:obj:`ConfigDict` or dict, optional): Config dict of + training hyper-parameters. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Config dict of test + hyper-parameters. Defaults to None. + pretrained (:obj: `ConfigDict` or dict optional): The pretrained + config. + init_cfg (:obj:`ConfigDict` or dict, optional): The initialization + config. Defaults to None. 
+ """ + + def __init__(self, + backbone: ConfigType, + neck: ConfigType, + backbone_stereo: ConfigType, + backbone_3d: ConfigType, + neck_3d: ConfigType, + bbox_head_3d: ConfigType, + neck_2d=None, + bbox_head_2d=None, + depth_head_2d=None, + depth_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.backbone = MODELS.build(backbone) + self.neck = MODELS.build(neck) + if backbone_stereo is not None: + backbone_stereo.update(cat_img_feature=self.neck.cat_img_feature) + backbone_stereo.update(in_sem_channels=self.neck.sem_channels[-1]) + self.backbone_stereo = MODELS.build(backbone_stereo) + assert self.neck.cat_img_feature == \ + self.backbone_stereo.cat_img_feature + assert self.neck.sem_channels[ + -1] == self.backbone_stereo.in_sem_channels + if backbone_3d is not None: + self.backbone_3d = MODELS.build(backbone_3d) + if neck_3d is not None: + self.neck_3d = MODELS.build(neck_3d) + if neck_2d is not None: + self.neck_2d = MODELS.build(neck_2d) + if bbox_head_2d is not None: + self.bbox_head_2d = MODELS.build(bbox_head_2d) + if depth_head_2d is not None: + self.depth_head_2d = MODELS.build(depth_head_2d) + if depth_head is not None: + self.depth_head = MODELS.build(depth_head) + self.depth_samples = self.depth_head.depth_samples + self.train_cfg = train_cfg + self.test_cfg = test_cfg + bbox_head_3d.update(train_cfg=train_cfg) + bbox_head_3d.update(test_cfg=test_cfg) + self.bbox_head_3d = MODELS.build(bbox_head_3d) + + @property + def with_backbone_3d(self): + """Whether the detector has a 3D backbone.""" + return hasattr(self, 'backbone_3d') and self.backbone_3d is not None + + @property + def with_neck_3d(self): + """Whether the detector has a 3D neck.""" + return hasattr(self, 'neck_3d') and self.neck_3d is not None + + @property + def with_neck_2d(self): + """Whether the detector has a 2D neck.""" + return hasattr(self, 'neck_2d') and self.neck_2d is not None + + @property + def with_bbox_head_2d(self): + """Whether the detector has a 2D detection head.""" + return hasattr(self, 'bbox_head_2d') and self.bbox_head_2d is not None + + @property + def with_depth_head_2d(self): + """Whether the detector has a image-based depth head.""" + return hasattr(self, + 'depth_head_2d') and self.depth_head_2d is not None + + @property + def with_depth_head(self): + """Whether the detector has a frustum-based depth head.""" + return hasattr(self, 'depth_head') and self.depth_head is not None + + def extract_feat(self, img, img_metas): + """Feature extraction for perspective-view images. + + Args: + img (torch.Tensor): Images of shape [B, N, C_in, H, W]. + img_metas (list): Image meta information. Each element corresponds + to a group of images. len(img_metas) == B. + + Returns: + torch.Tensor: bev feature with shape [B, C_out, N_y, N_x]. 
+ """ + # split input img into current and previous ones + batch_size, N, C_in, H, W = img.shape + cur_imgs = img[:, 0] + prev_imgs = img[:, 1] # TODO: to support multiple prev imgs + # 2D backbone for feature extraction + cur_feats = self.backbone(cur_imgs) + cur_feats = [cur_imgs] + list(cur_feats) + prev_feats = self.backbone(prev_imgs) + prev_feats = [prev_imgs] + list(prev_feats) + # SPP module as the feature neck + cur_stereo_feat, cur_sem_feat = self.neck(cur_feats) + prev_stereo_feat, prev_sem_feat = self.neck(prev_feats) + # derive cur2prevs + cur_pose = torch.tensor( + [img_meta['cam2global'] for img_meta in img_metas], + device=img.device)[:, None, :, :] # (B, 1, 4, 4) + prev_poses = [] + for img_meta in img_metas: + sweep_img_metas = img_meta['sweep_img_metas'] + prev_poses.append([ + sweep_img_meta['cam2global'] + for sweep_img_meta in sweep_img_metas + ]) + prev_poses = torch.tensor(prev_poses, device=img.device) + pad_prev_cam2global = torch.eye(4)[None, None].expand( + batch_size, N - 1, 4, 4).to(img.device) + pad_prev_cam2global[:, :, :prev_poses.shape[-2], :prev_poses. + shape[-1]] = prev_poses + pad_cur_cam2global = torch.eye(4)[None, + None].expand(batch_size, 1, 4, + 4).to(img.device) + pad_cur_cam2global[:, :, :cur_pose.shape[-2], :cur_pose. + shape[-1]] = cur_pose + # (B, N-1, 4, 4) * (B, 1, 4, 4) -> (B, N-1, 4, 4) + # torch.linalg.solve is faster and more numerically stable + # than torch.matmul(torch.linalg.inv(A), B) + # empirical results show that torch.linalg.solve can derive + # almost the same result with np.linalg.inv + # while torch.linalg.inv can not + cur2prevs = torch.linalg.solve(pad_prev_cam2global, pad_cur_cam2global) + for meta_idx, img_meta in enumerate(img_metas): + img_meta['cur2prevs'] = cur2prevs[meta_idx] + # stereo backbone for depth estimation + # volume_feat: (batch_size, Cv, Nz, Ny, Nx) + volume_feat = self.backbone_stereo(cur_stereo_feat, prev_stereo_feat, + img_metas, cur_sem_feat) + # height compression + _, Cv, Nz, Ny, Nx = volume_feat.shape + bev_feat = volume_feat.view(batch_size, Cv * Nz, Ny, Nx) + bev_feat_prehg, bev_feat = self.neck_3d(bev_feat) + return bev_feat + + def forward_train(self, + img, + img_metas, + gt_bboxes_3d, + gt_labels_3d, + depth_img=None, + **kwargs): + """Forward function for training.""" + bev_feat = self.extract_feat(img, img_metas) + outs = self.bbox_head_3d([bev_feat]) + losses = self.bbox_head_3d.loss(*outs, gt_bboxes_3d, gt_labels_3d, + img_metas) + # TODO: loss_dense_depth, loss_2d, loss_imitation + return losses + + def forward_test(self, img, img_metas, **kwargs): + """Forward of testing. + + Args: + img (torch.Tensor): Input images of shape (N, C_in, H, W). + img_metas (list): Image metas. + + Returns: + list[dict]: Predicted 3d boxes. 
+ """ + # not supporting aug_test for now + return self.simple_test(img, img_metas) + + def simple_test(self, img, img_metas): + """Simple inference forward without test time augmentation.""" + bev_feat = self.extract_feat(img, img_metas) + # bbox_head takes a list of feature from different levels as input + # so need [bev_feat] + outs = self.bbox_head_3d([bev_feat]) + bbox_list = self.bbox_head_3d.get_bboxes(*outs, img_metas) + bbox_results = [ + bbox3d2result(det_bboxes, det_scores, det_labels) + for det_bboxes, det_scores, det_labels in bbox_list + ] + # add pseudo-lidar label to each pred_dict for post-processing + for bbox_result in bbox_results: + bbox_result['pseudo_lidar'] = True + return bbox_results + + def aug_test(self, imgs, img_metas, **kwargs): + """Test with augmentations. + + Args: + imgs (list[torch.Tensor]): Input images of shape (N, C_in, H, W). + img_metas (list): Image metas. + + Returns: + list[dict]: Predicted 3d boxes. + """ + raise NotImplementedError diff --git a/mmdet3d/models/detectors/dynamic_voxelnet.py b/mmdet3d/models/detectors/dynamic_voxelnet.py new file mode 100755 index 0000000..ab4e27b --- /dev/null +++ b/mmdet3d/models/detectors/dynamic_voxelnet.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig +from .voxelnet import VoxelNet + + +@MODELS.register_module() +class DynamicVoxelNet(VoxelNet): + r"""VoxelNet using `dynamic voxelization + `_. + """ + + def __init__(self, + voxel_encoder: ConfigType, + middle_encoder: ConfigType, + backbone: ConfigType, + neck: OptConfigType = None, + bbox_head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super().__init__( + voxel_encoder=voxel_encoder, + middle_encoder=middle_encoder, + backbone=backbone, + neck=neck, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + def extract_feat(self, batch_inputs_dict: dict) -> Tuple[Tensor]: + """Extract features from points.""" + voxel_dict = batch_inputs_dict['voxels'] + voxel_features, feature_coors = self.voxel_encoder( + voxel_dict['voxels'], voxel_dict['coors']) + batch_size = voxel_dict['coors'][-1, 0].item() + 1 + x = self.middle_encoder(voxel_features, feature_coors, batch_size) + x = self.backbone(x) + if self.with_neck: + x = self.neck(x) + return x diff --git a/mmdet3d/models/detectors/fcos_mono3d.py b/mmdet3d/models/detectors/fcos_mono3d.py new file mode 100755 index 0000000..c425ae8 --- /dev/null +++ b/mmdet3d/models/detectors/fcos_mono3d.py @@ -0,0 +1,101 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict + +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig +from ...structures.det3d_data_sample import SampleList +from .single_stage_mono3d import SingleStageMono3DDetector + + +@MODELS.register_module() +class FCOSMono3D(SingleStageMono3DDetector): + r"""`FCOS3D `_ for monocular 3D object detection. + + Currently please refer to our entry on the + `leaderboard `_. + + Args: + backbone (:obj:`ConfigDict` or dict): The backbone config. + neck (:obj:`ConfigDict` or dict): The neck config. + bbox_head (:obj:`ConfigDict` or dict): The bbox head config. 
+ train_cfg (:obj:`ConfigDict` or dict, optional): The training config + of FCOS. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): The testing config + of FCOS. Defaults to None. + data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of + :class:`DetDataPreprocessor` to process the input data. + Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ # noqa: E501 + + def __init__(self, + backbone: ConfigType, + neck: ConfigType, + bbox_head: ConfigType, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + def predict(self, + batch_inputs_dict: Dict[str, Tensor], + batch_data_samples: SampleList, + rescale: bool = True) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'imgs' keys + + - imgs (torch.Tensor: Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`. + rescale (bool): Whether to rescale the results. + Defaults to True. + + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input. Each Det3DDataSample usually contains + 'pred_instances_3d'. And the ``pred_instances_3d`` normally + contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of 3D bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C) where C >=7. + + When there are 2D prediction in models, it should + contains `pred_instances`, And the ``pred_instances`` normally + contains following keys. + + - scores (Tensor): Classification scores of image, has a shape + (num_instance, ) + - labels (Tensor): Predict Labels of 2D bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Contains a tensor with shape + (num_instances, 4). + """ + x = self.extract_feat(batch_inputs_dict) + results_list, results_list_2d = self.bbox_head.predict( + x, batch_data_samples, rescale=rescale) + predictions = self.add_pred_to_datasample(batch_data_samples, + results_list, + results_list_2d) + return predictions diff --git a/mmdet3d/models/detectors/groupfree3dnet.py b/mmdet3d/models/detectors/groupfree3dnet.py new file mode 100755 index 0000000..935f3ce --- /dev/null +++ b/mmdet3d/models/detectors/groupfree3dnet.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+ +from mmdet3d.registry import MODELS +from ...structures.det3d_data_sample import SampleList +from .single_stage import SingleStage3DDetector + + +@MODELS.register_module() +class GroupFree3DNet(SingleStage3DDetector): + """`Group-Free 3D `_.""" + + def __init__(self, + backbone, + bbox_head=None, + train_cfg=None, + test_cfg=None, + init_cfg=None, + **kwargs): + super(GroupFree3DNet, self).__init__( + backbone=backbone, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg, + **kwargs) + + def loss(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> dict: + """Calculate losses from a batch of inputs dict and data samples. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'imgs' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_pts_seg`. + + Returns: + dict: A dictionary of loss components. + """ + x = self.extract_feat(batch_inputs_dict) + points = batch_inputs_dict['points'] + losses = self.bbox_head.loss(points, x, batch_data_samples, **kwargs) + return losses + + def predict(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'imgs' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_pts_seg`. + rescale (bool): Whether to rescale the results. + Defaults to True. + + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input images. Each Det3DDataSample usually contain + 'pred_instances_3d'. And the ``pred_instances_3d`` usually + contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C) where C >=7. + """ + x = self.extract_feat(batch_inputs_dict) + points = batch_inputs_dict['points'] + results_list = self.bbox_head.predict(points, x, batch_data_samples, + **kwargs) + predictions = self.add_pred_to_datasample(batch_data_samples, + results_list) + return predictions diff --git a/mmdet3d/models/detectors/h3dnet.py b/mmdet3d/models/detectors/h3dnet.py new file mode 100755 index 0000000..3ce6e92 --- /dev/null +++ b/mmdet3d/models/detectors/h3dnet.py @@ -0,0 +1,157 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Union + +import torch +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from .two_stage import TwoStage3DDetector + + +@MODELS.register_module() +class H3DNet(TwoStage3DDetector): + r"""H3DNet model. + + Please refer to the `paper `_ + + Args: + backbone (dict): Config dict of detector's backbone. + neck (dict, optional): Config dict of neck. Defaults to None. + rpn_head (dict, optional): Config dict of rpn head. Defaults to None. + roi_head (dict, optional): Config dict of roi head. Defaults to None. 
+ train_cfg (dict, optional): Config dict of training hyper-parameters. + Defaults to None. + test_cfg (dict, optional): Config dict of test hyper-parameters. + Defaults to None. + init_cfg (dict, optional): the config to control the + initialization. Default to None. + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`BaseDataPreprocessor`. it usually includes, + ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``. + """ + + def __init__(self, + backbone: dict, + neck: Optional[dict] = None, + rpn_head: Optional[dict] = None, + roi_head: Optional[dict] = None, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None, + data_preprocessor: Optional[dict] = None, + **kwargs) -> None: + super(H3DNet, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg, + data_preprocessor=data_preprocessor, + **kwargs) + + def extract_feat(self, batch_inputs_dict: dict) -> None: + """Directly extract features from the backbone+neck. + + Args: + + batch_inputs_dict (dict): The model input dict which include + 'points'. + + - points (list[torch.Tensor]): Point cloud of each sample. + + Returns: + dict: Dict of feature. + """ + stack_points = torch.stack(batch_inputs_dict['points']) + x = self.backbone(stack_points) + if self.with_neck: + x = self.neck(x) + return x + + def loss(self, batch_inputs_dict: Dict[str, Union[List, Tensor]], + batch_data_samples: List[Det3DDataSample], **kwargs) -> dict: + """ + Args: + batch_inputs_dict (dict): The model input dict which include + 'points' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + feats_dict = self.extract_feat(batch_inputs_dict) + + feats_dict['fp_xyz'] = [feats_dict['fp_xyz_net0'][-1]] + feats_dict['fp_features'] = [feats_dict['hd_feature']] + feats_dict['fp_indices'] = [feats_dict['fp_indices_net0'][-1]] + + losses = dict() + if self.with_rpn: + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + # note, the feats_dict would be added new key & value in rpn_head + rpn_losses, rpn_proposals = self.rpn_head.loss_and_predict( + batch_inputs_dict['points'], + feats_dict, + batch_data_samples, + ret_target=True, + proposal_cfg=proposal_cfg) + feats_dict['targets'] = rpn_losses.pop('targets') + losses.update(rpn_losses) + feats_dict['rpn_proposals'] = rpn_proposals + else: + raise NotImplementedError + + roi_losses = self.roi_head.loss(batch_inputs_dict['points'], + feats_dict, batch_data_samples, + **kwargs) + losses.update(roi_losses) + + return losses + + def predict( + self, batch_input_dict: Dict, + batch_data_samples: List[Det3DDataSample] + ) -> List[Det3DDataSample]: + """Get model predictions. + + Args: + points (list[torch.Tensor]): Points of each sample. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each sample and + corresponding annotations. + + Returns: + list: Predicted 3d boxes. 
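+
+            Note that ``batch_input_dict`` follows the same format as in
+            :meth:`loss`, i.e. a dict whose 'points' entry is the list of
+            per-sample point cloud tensors.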
+ """ + + feats_dict = self.extract_feat(batch_input_dict) + feats_dict['fp_xyz'] = [feats_dict['fp_xyz_net0'][-1]] + feats_dict['fp_features'] = [feats_dict['hd_feature']] + feats_dict['fp_indices'] = [feats_dict['fp_indices_net0'][-1]] + + if self.with_rpn: + proposal_cfg = self.test_cfg.rpn + rpn_proposals = self.rpn_head.predict( + batch_input_dict['points'], + feats_dict, + batch_data_samples, + use_nms=proposal_cfg.use_nms) + feats_dict['rpn_proposals'] = rpn_proposals + else: + raise NotImplementedError + + results_list = self.roi_head.predict( + batch_input_dict['points'], + feats_dict, + batch_data_samples, + suffix='_optimized') + return self.add_pred_to_datasample(batch_data_samples, results_list) diff --git a/mmdet3d/models/detectors/imvotenet.py b/mmdet3d/models/detectors/imvotenet.py new file mode 100755 index 0000000..0f02a59 --- /dev/null +++ b/mmdet3d/models/detectors/imvotenet.py @@ -0,0 +1,537 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from ..layers import MLP +from .base import Base3DDetector + + +def sample_valid_seeds(mask: Tensor, num_sampled_seed: int = 1024) -> Tensor: + r"""Randomly sample seeds from all imvotes. + + Modified from ``_ + + Args: + mask (torch.Tensor): Bool tensor in shape ( + seed_num*max_imvote_per_pixel), indicates + whether this imvote corresponds to a 2D bbox. + num_sampled_seed (int): How many to sample from all imvotes. + + Returns: + torch.Tensor: Indices with shape (num_sampled_seed). + """ # noqa: E501 + device = mask.device + batch_size = mask.shape[0] + sample_inds = mask.new_zeros((batch_size, num_sampled_seed), + dtype=torch.int64) + for bidx in range(batch_size): + # return index of non zero elements + valid_inds = torch.nonzero(mask[bidx, :]).squeeze(-1) + if len(valid_inds) < num_sampled_seed: + # compute set t1 - t2 + t1 = torch.arange(num_sampled_seed, device=device) + t2 = valid_inds % num_sampled_seed + combined = torch.cat((t1, t2)) + uniques, counts = combined.unique(return_counts=True) + difference = uniques[counts == 1] + + rand_inds = torch.randperm( + len(difference), + device=device)[:num_sampled_seed - len(valid_inds)] + cur_sample_inds = difference[rand_inds] + cur_sample_inds = torch.cat((valid_inds, cur_sample_inds)) + else: + rand_inds = torch.randperm( + len(valid_inds), device=device)[:num_sampled_seed] + cur_sample_inds = valid_inds[rand_inds] + sample_inds[bidx, :] = cur_sample_inds + return sample_inds + + +@MODELS.register_module() +class ImVoteNet(Base3DDetector): + r"""`ImVoteNet `_ for 3D detection. + + ImVoteNet is based on fusing 2D votes in images and 3D votes in point + clouds, which explicitly extract both geometric and semantic features + from the 2D images. It leverage camera parameters to lift these + features to 3D. A multi-tower training scheme also improve the synergy + of 2D-3D feature fusion. 
+ + """ + + def __init__(self, + pts_backbone: Optional[dict] = None, + pts_bbox_heads: Optional[dict] = None, + pts_neck: Optional[dict] = None, + img_backbone: Optional[dict] = None, + img_neck: Optional[dict] = None, + img_roi_head: Optional[dict] = None, + img_rpn_head: Optional[dict] = None, + img_mlp: Optional[dict] = None, + freeze_img_branch: bool = False, + fusion_layer: Optional[dict] = None, + num_sampled_seed: Optional[dict] = None, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None, + **kwargs) -> None: + + super(ImVoteNet, self).__init__(init_cfg=init_cfg, **kwargs) + + # point branch + if pts_backbone is not None: + self.pts_backbone = MODELS.build(pts_backbone) + if pts_neck is not None: + self.pts_neck = MODELS.build(pts_neck) + if pts_bbox_heads is not None: + pts_bbox_head_common = pts_bbox_heads.common + pts_bbox_head_common.update( + train_cfg=train_cfg.pts if train_cfg is not None else None) + pts_bbox_head_common.update(test_cfg=test_cfg.pts) + pts_bbox_head_joint = pts_bbox_head_common.copy() + pts_bbox_head_joint.update(pts_bbox_heads.joint) + pts_bbox_head_pts = pts_bbox_head_common.copy() + pts_bbox_head_pts.update(pts_bbox_heads.pts) + pts_bbox_head_img = pts_bbox_head_common.copy() + pts_bbox_head_img.update(pts_bbox_heads.img) + + self.pts_bbox_head_joint = MODELS.build(pts_bbox_head_joint) + self.pts_bbox_head_pts = MODELS.build(pts_bbox_head_pts) + self.pts_bbox_head_img = MODELS.build(pts_bbox_head_img) + self.pts_bbox_heads = [ + self.pts_bbox_head_joint, self.pts_bbox_head_pts, + self.pts_bbox_head_img + ] + self.loss_weights = pts_bbox_heads.loss_weights + + # image branch + if img_backbone: + self.img_backbone = MODELS.build(img_backbone) + if img_neck is not None: + self.img_neck = MODELS.build(img_neck) + if img_rpn_head is not None: + rpn_train_cfg = train_cfg.img_rpn if train_cfg \ + is not None else None + img_rpn_head_ = img_rpn_head.copy() + img_rpn_head_.update( + train_cfg=rpn_train_cfg, test_cfg=test_cfg.img_rpn) + self.img_rpn_head = MODELS.build(img_rpn_head_) + if img_roi_head is not None: + rcnn_train_cfg = train_cfg.img_rcnn if train_cfg \ + is not None else None + img_roi_head.update( + train_cfg=rcnn_train_cfg, test_cfg=test_cfg.img_rcnn) + self.img_roi_head = MODELS.build(img_roi_head) + + # fusion + if fusion_layer is not None: + self.fusion_layer = MODELS.build(fusion_layer) + self.max_imvote_per_pixel = fusion_layer.max_imvote_per_pixel + + self.freeze_img_branch = freeze_img_branch + if freeze_img_branch: + self.freeze_img_branch_params() + + if img_mlp is not None: + self.img_mlp = MLP(**img_mlp) + + self.num_sampled_seed = num_sampled_seed + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def _forward(self): + raise NotImplementedError + + def freeze_img_branch_params(self): + """Freeze all image branch parameters.""" + if self.with_img_bbox_head: + for param in self.img_bbox_head.parameters(): + param.requires_grad = False + if self.with_img_backbone: + for param in self.img_backbone.parameters(): + param.requires_grad = False + if self.with_img_neck: + for param in self.img_neck.parameters(): + param.requires_grad = False + if self.with_img_rpn: + for param in self.img_rpn_head.parameters(): + param.requires_grad = False + if self.with_img_roi_head: + for param in self.img_roi_head.parameters(): + param.requires_grad = False + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """Overload 
in order to load img network ckpts into img branch.""" + module_names = ['backbone', 'neck', 'roi_head', 'rpn_head'] + for key in list(state_dict): + for module_name in module_names: + if key.startswith(module_name) and ('img_' + + key) not in state_dict: + state_dict['img_' + key] = state_dict.pop(key) + + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, + error_msgs) + + def train(self, mode=True): + """Overload in order to keep image branch modules in eval mode.""" + super(ImVoteNet, self).train(mode) + if self.freeze_img_branch: + if self.with_img_bbox_head: + self.img_bbox_head.eval() + if self.with_img_backbone: + self.img_backbone.eval() + if self.with_img_neck: + self.img_neck.eval() + if self.with_img_rpn: + self.img_rpn_head.eval() + if self.with_img_roi_head: + self.img_roi_head.eval() + + @property + def with_img_bbox(self): + """bool: Whether the detector has a 2D image box head.""" + return ((hasattr(self, 'img_roi_head') and self.img_roi_head.with_bbox) + or (hasattr(self, 'img_bbox_head') + and self.img_bbox_head is not None)) + + @property + def with_img_bbox_head(self): + """bool: Whether the detector has a 2D image box head (not roi).""" + return hasattr(self, + 'img_bbox_head') and self.img_bbox_head is not None + + @property + def with_img_backbone(self): + """bool: Whether the detector has a 2D image backbone.""" + return hasattr(self, 'img_backbone') and self.img_backbone is not None + + @property + def with_img_neck(self): + """bool: Whether the detector has a neck in image branch.""" + return hasattr(self, 'img_neck') and self.img_neck is not None + + @property + def with_img_rpn(self): + """bool: Whether the detector has a 2D RPN in image detector branch.""" + return hasattr(self, 'img_rpn_head') and self.img_rpn_head is not None + + @property + def with_img_roi_head(self): + """bool: Whether the detector has a RoI Head in image branch.""" + return hasattr(self, 'img_roi_head') and self.img_roi_head is not None + + @property + def with_pts_bbox(self): + """bool: Whether the detector has a 3D box head.""" + return hasattr(self, + 'pts_bbox_head') and self.pts_bbox_head is not None + + @property + def with_pts_backbone(self): + """bool: Whether the detector has a 3D backbone.""" + return hasattr(self, 'pts_backbone') and self.pts_backbone is not None + + @property + def with_pts_neck(self): + """bool: Whether the detector has a neck in 3D detector branch.""" + return hasattr(self, 'pts_neck') and self.pts_neck is not None + + def extract_feat(self, imgs): + """Just to inherit from abstract method.""" + pass + + def extract_img_feat(self, img: Tensor) -> Sequence[Tensor]: + """Directly extract features from the img backbone+neck.""" + x = self.img_backbone(img) + if self.with_img_neck: + x = self.img_neck(x) + return x + + def extract_pts_feat(self, pts: Tensor) -> Tuple[Tensor]: + """Extract features of points.""" + x = self.pts_backbone(pts) + if self.with_pts_neck: + x = self.pts_neck(x) + + seed_points = x['fp_xyz'][-1] + seed_features = x['fp_features'][-1] + seed_indices = x['fp_indices'][-1] + + return (seed_points, seed_features, seed_indices) + + def loss(self, batch_inputs_dict: Dict[str, Union[List, Tensor]], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + """ + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'imgs` keys. + + - points (list[torch.Tensor]): Point cloud of each sample. 
+ - imgs (list[torch.Tensor]): Image tensor with shape + (N, C, H ,W). + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + imgs = batch_inputs_dict.get('imgs', None) + points = batch_inputs_dict.get('points', None) + if points is None: + x = self.extract_img_feat(imgs) + losses = dict() + # RPN forward and loss + if self.with_img_rpn: + proposal_cfg = self.train_cfg.get('img_rpn_proposal', + self.test_cfg.img_rpn) + rpn_data_samples = copy.deepcopy(batch_data_samples) + # set cat_id of gt_labels to 0 in RPN + for data_sample in rpn_data_samples: + data_sample.gt_instances.labels = \ + torch.zeros_like(data_sample.gt_instances.labels) + + rpn_losses, rpn_results_list = \ + self.img_rpn_head.loss_and_predict( + x, rpn_data_samples, + proposal_cfg=proposal_cfg, **kwargs) + # avoid get same name with roi_head loss + keys = rpn_losses.keys() + for key in keys: + if 'loss' in key and 'rpn' not in key: + rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key) + losses.update(rpn_losses) + else: + assert batch_data_samples[0].get('proposals', None) is not None + # use pre-defined proposals in InstanceData for + # the second stage + # to extract ROI features. + rpn_results_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + + roi_losses = self.img_roi_head.loss(x, rpn_results_list, + batch_data_samples, **kwargs) + losses.update(roi_losses) + return losses + else: + with torch.no_grad(): + results_2d = self.predict_img_only( + batch_inputs_dict['imgs'], + batch_data_samples, + rescale=False) + # tensor with shape (n, 6), the 6 arrange + # as [x1, x2, y1, y2, score, label] + pred_bboxes_with_label_list = [] + for single_results in results_2d: + cat_preds = torch.cat( + (single_results.bboxes, single_results.scores[:, None], + single_results.labels[:, None]), + dim=-1) + cat_preds = cat_preds[torch.argsort( + cat_preds[:, 4], descending=True)] + # drop half bboxes during training for better generalization + if self.training: + rand_drop = torch.randperm( + len(cat_preds))[:(len(cat_preds) + 1) // 2] + rand_drop = torch.sort(rand_drop)[0] + cat_preds = cat_preds[rand_drop] + + pred_bboxes_with_label_list.append(cat_preds) + + stack_points = torch.stack(points) + seeds_3d, seed_3d_features, seed_indices = \ + self.extract_pts_feat(stack_points) + img_metas = [item.metainfo for item in batch_data_samples] + img_features, masks = self.fusion_layer( + imgs, pred_bboxes_with_label_list, seeds_3d, img_metas) + + inds = sample_valid_seeds(masks, self.num_sampled_seed) + batch_size, img_feat_size = img_features.shape[:2] + pts_feat_size = seed_3d_features.shape[1] + inds_img = inds.view(batch_size, 1, + -1).expand(-1, img_feat_size, -1) + img_features = img_features.gather(-1, inds_img) + inds = inds % inds.shape[1] + inds_seed_xyz = inds.view(batch_size, -1, 1).expand(-1, -1, 3) + seeds_3d = seeds_3d.gather(1, inds_seed_xyz) + inds_seed_feats = inds.view(batch_size, 1, + -1).expand(-1, pts_feat_size, -1) + seed_3d_features = seed_3d_features.gather(-1, inds_seed_feats) + seed_indices = seed_indices.gather(1, inds) + + img_features = self.img_mlp(img_features) + fused_features = torch.cat([seed_3d_features, img_features], dim=1) + + feat_dict_joint = dict( + seed_points=seeds_3d, + seed_features=fused_features, + seed_indices=seed_indices) + feat_dict_pts = dict( + seed_points=seeds_3d, + seed_features=seed_3d_features, + 
seed_indices=seed_indices) + feat_dict_img = dict( + seed_points=seeds_3d, + seed_features=img_features, + seed_indices=seed_indices) + + losses_towers = [] + losses_joint = self.pts_bbox_head_joint.loss( + points, feat_dict_joint, batch_data_samples) + losses_pts = self.pts_bbox_head_pts.loss(points, feat_dict_pts, + batch_data_samples) + losses_img = self.pts_bbox_head_img.loss(points, feat_dict_img, + batch_data_samples) + losses_towers.append(losses_joint) + losses_towers.append(losses_pts) + losses_towers.append(losses_img) + combined_losses = dict() + for loss_term in losses_joint: + if 'loss' in loss_term: + combined_losses[loss_term] = 0 + for i in range(len(losses_towers)): + combined_losses[loss_term] += \ + losses_towers[i][loss_term] * \ + self.loss_weights[i] + else: + # only save the metric of the joint head + # if it is not a loss + combined_losses[loss_term] = \ + losses_towers[0][loss_term] + + return combined_losses + + def predict(self, batch_inputs_dict: Dict[str, Optional[Tensor]], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + """Forward of testing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points' and 'imgs keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (list[torch.Tensor]): Tensor of Images. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`. + """ + points = batch_inputs_dict.get('points', None) + imgs = batch_inputs_dict.get('imgs', None) + if points is None: + assert imgs is not None + results_2d = self.predict_img_only(imgs, batch_data_samples) + return self.add_pred_to_datasample( + batch_data_samples, data_instances_2d=results_2d) + + else: + results_2d = self.predict_img_only( + batch_inputs_dict['imgs'], batch_data_samples, rescale=False) + # tensor with shape (n, 6), the 6 arrange + # as [x1, x2, y1, y2, score, label] + pred_bboxes_with_label_list = [] + for single_results in results_2d: + cat_preds = torch.cat( + (single_results.bboxes, single_results.scores[:, None], + single_results.labels[:, None]), + dim=-1) + cat_preds = cat_preds[torch.argsort( + cat_preds[:, 4], descending=True)] + pred_bboxes_with_label_list.append(cat_preds) + + stack_points = torch.stack(points) + seeds_3d, seed_3d_features, seed_indices = \ + self.extract_pts_feat(stack_points) + + img_features, masks = self.fusion_layer( + imgs, pred_bboxes_with_label_list, seeds_3d, + [item.metainfo for item in batch_data_samples]) + + inds = sample_valid_seeds(masks, self.num_sampled_seed) + batch_size, img_feat_size = img_features.shape[:2] + pts_feat_size = seed_3d_features.shape[1] + inds_img = inds.view(batch_size, 1, + -1).expand(-1, img_feat_size, -1) + img_features = img_features.gather(-1, inds_img) + inds = inds % inds.shape[1] + inds_seed_xyz = inds.view(batch_size, -1, 1).expand(-1, -1, 3) + seeds_3d = seeds_3d.gather(1, inds_seed_xyz) + inds_seed_feats = inds.view(batch_size, 1, + -1).expand(-1, pts_feat_size, -1) + seed_3d_features = seed_3d_features.gather(-1, inds_seed_feats) + seed_indices = seed_indices.gather(1, inds) + + img_features = self.img_mlp(img_features) + + fused_features = torch.cat([seed_3d_features, img_features], dim=1) + + feat_dict = dict( + seed_points=seeds_3d, + seed_features=fused_features, + seed_indices=seed_indices) + + results_3d = self.pts_bbox_head_joint.predict( + batch_inputs_dict['points'], + feat_dict, + batch_data_samples, + rescale=True) + + return 
self.add_pred_to_datasample(batch_data_samples, results_3d) + + def predict_img_only(self, + imgs: Tensor, + batch_data_samples: List[Det3DDataSample], + rescale: bool = True) -> List[InstanceData]: + """Predict results from a batch of imgs with post- processing. + + Args: + imgs (Tensor): Inputs images with shape (N, C, H, W). + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + rescale (bool): Whether to rescale the results. + Defaults to True. + + Returns: + list[:obj:`InstanceData`]: Return the list of detection + results of the input images, usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). + """ + + assert self.with_img_bbox, 'Img bbox head must be implemented.' + assert self.with_img_backbone, 'Img backbone must be implemented.' + assert self.with_img_rpn, 'Img rpn must be implemented.' + assert self.with_img_roi_head, 'Img roi head must be implemented.' + x = self.extract_img_feat(imgs) + + # If there are no pre-defined proposals, use RPN to get proposals + if batch_data_samples[0].get('proposals', None) is None: + rpn_results_list = self.img_rpn_head.predict( + x, batch_data_samples, rescale=False) + else: + rpn_results_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + + results_list = self.img_roi_head.predict( + x, rpn_results_list, batch_data_samples, rescale=rescale) + + return results_list diff --git a/mmdet3d/models/detectors/imvoxelnet.py b/mmdet3d/models/detectors/imvoxelnet.py new file mode 100755 index 0000000..e97c328 --- /dev/null +++ b/mmdet3d/models/detectors/imvoxelnet.py @@ -0,0 +1,275 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple, Union + +import torch +from mmengine.structures import InstanceData + +from mmdet3d.models.detectors import Base3DDetector +from mmdet3d.models.layers.fusion_layers.point_fusion import point_sample +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures.bbox_3d import get_proj_mat_by_coord_type +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils import ConfigType, OptConfigType, OptInstanceList + + +@MODELS.register_module() +class ImVoxelNet(Base3DDetector): + r"""`ImVoxelNet `_. + + Args: + backbone (:obj:`ConfigDict` or dict): The backbone config. + neck (:obj:`ConfigDict` or dict): The neck config. + neck_3d (:obj:`ConfigDict` or dict): The 3D neck config. + bbox_head (:obj:`ConfigDict` or dict): The bbox head config. + prior_generator (:obj:`ConfigDict` or dict): The prior points + generator config. + n_voxels (list): Number of voxels along x, y, z axis. + coord_type (str): The type of coordinates of points cloud: + 'DEPTH', 'LIDAR', or 'CAMERA'. + train_cfg (:obj:`ConfigDict` or dict, optional): Config dict of + training hyper-parameters. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Config dict of test + hyper-parameters. Defaults to None. + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`BaseDataPreprocessor`. it usually includes, + ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``. + init_cfg (:obj:`ConfigDict` or dict, optional): The initialization + config. Defaults to None. 
+ """ + + def __init__(self, + backbone: ConfigType, + neck: ConfigType, + neck_3d: ConfigType, + bbox_head: ConfigType, + prior_generator: ConfigType, + n_voxels: List, + coord_type: str, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptConfigType = None): + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.backbone = MODELS.build(backbone) + self.neck = MODELS.build(neck) + self.neck_3d = MODELS.build(neck_3d) + bbox_head.update(train_cfg=train_cfg) + bbox_head.update(test_cfg=test_cfg) + self.bbox_head = MODELS.build(bbox_head) + self.prior_generator = TASK_UTILS.build(prior_generator) + self.n_voxels = n_voxels + self.coord_type = coord_type + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def extract_feat(self, batch_inputs_dict: dict, + batch_data_samples: SampleList): + """Extract 3d features from the backbone -> fpn -> 3d projection. + + -> 3d neck -> bbox_head. + + Args: + batch_inputs_dict (dict): The model input dict which include + the 'imgs' key. + + - imgs (torch.Tensor, optional): Image of each sample. + batch_data_samples (list[:obj:`DetDataSample`]): The batch + data samples. It usually includes information such + as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. + + Returns: + Tuple: + - torch.Tensor: Features of shape (N, C_out, N_x, N_y, N_z). + - torch.Tensor: Valid mask of shape (N, 1, N_x, N_y, N_z). + """ + img = batch_inputs_dict['imgs'] + batch_img_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + x = self.backbone(img) + x = self.neck(x)[0] + points = self.prior_generator.grid_anchors([self.n_voxels[::-1]], + device=img.device)[0][:, :3] + volumes, valid_preds = [], [] + for feature, img_meta in zip(x, batch_img_metas): + img_scale_factor = ( + points.new_tensor(img_meta['scale_factor'][:2]) + if 'scale_factor' in img_meta.keys() else 1) + img_flip = img_meta['flip'] if 'flip' in img_meta.keys() else False + img_crop_offset = ( + points.new_tensor(img_meta['img_crop_offset']) + if 'img_crop_offset' in img_meta.keys() else 0) + proj_mat = points.new_tensor( + get_proj_mat_by_coord_type(img_meta, self.coord_type)) + volume = point_sample( + img_meta, + img_features=feature[None, ...], + points=points, + proj_mat=points.new_tensor(proj_mat), + coord_type=self.coord_type, + img_scale_factor=img_scale_factor, + img_crop_offset=img_crop_offset, + img_flip=img_flip, + img_pad_shape=img.shape[-2:], + img_shape=img_meta['img_shape'][:2], + aligned=False) + volumes.append( + volume.reshape(self.n_voxels[::-1] + [-1]).permute(3, 2, 1, 0)) + valid_preds.append( + ~torch.all(volumes[-1] == 0, dim=0, keepdim=True)) + x = torch.stack(volumes) + x = self.neck_3d(x) + return x, torch.stack(valid_preds).float() + + def loss(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> Union[dict, list]: + """Calculate losses from a batch of inputs and data samples. + + Args: + batch_inputs_dict (dict): The model input dict which include + the 'imgs' key. + + - imgs (torch.Tensor, optional): Image of each sample. + batch_data_samples (list[:obj:`DetDataSample`]): The batch + data samples. It usually includes information such + as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. + + Returns: + dict: A dictionary of loss components. + """ + x, valid_preds = self.extract_feat(batch_inputs_dict, + batch_data_samples) + # For indoor datasets ImVoxelNet uses ImVoxelHead that handles + # mask of visible voxels. 
+ if self.coord_type == 'DEPTH': + x += (valid_preds, ) + losses = self.bbox_head.loss(x, batch_data_samples, **kwargs) + return losses + + def predict(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + batch_inputs_dict (dict): The model input dict which include + the 'imgs' key. + + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input images. Each Det3DDataSample usually contain + 'pred_instances_3d'. And the ``pred_instances_3d`` usually + contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C) where C >=7. + """ + x, valid_preds = self.extract_feat(batch_inputs_dict, + batch_data_samples) + # For indoor datasets ImVoxelNet uses ImVoxelHead that handles + # mask of visible voxels. + if self.coord_type == 'DEPTH': + x += (valid_preds, ) + results_list = \ + self.bbox_head.predict(x, batch_data_samples, **kwargs) + predictions = self.add_pred_to_datasample(batch_data_samples, + results_list) + return predictions + + def _forward(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + *args, **kwargs) -> Tuple[List[torch.Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + batch_inputs_dict (dict): The model input dict which include + the 'imgs' key. + + - imgs (torch.Tensor, optional): Image of each sample. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + tuple[list]: A tuple of features from ``bbox_head`` forward. + """ + x, valid_preds = self.extract_feat(batch_inputs_dict, + batch_data_samples) + # For indoor datasets ImVoxelNet uses ImVoxelHead that handles + # mask of visible voxels. + if self.coord_type == 'DEPTH': + x += (valid_preds, ) + results = self.bbox_head.forward(x) + return results + + def convert_to_datasample( + self, + data_samples: SampleList, + data_instances_3d: OptInstanceList = None, + data_instances_2d: OptInstanceList = None, + ) -> SampleList: + """Convert results list to `Det3DDataSample`. + + Subclasses could override it to be compatible for some multi-modality + 3D detectors. + + Args: + data_samples (list[:obj:`Det3DDataSample`]): The input data. + data_instances_3d (list[:obj:`InstanceData`], optional): 3D + Detection results of each sample. + data_instances_2d (list[:obj:`InstanceData`], optional): 2D + Detection results of each sample. + + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input. Each Det3DDataSample usually contains + 'pred_instances_3d'. And the ``pred_instances_3d`` normally + contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of 3D bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C) where C >=7. 
+ + When there are image prediction in some models, it should + contains `pred_instances`, And the ``pred_instances`` normally + contains following keys. + + - scores (Tensor): Classification scores of image, has a shape + (num_instance, ) + - labels (Tensor): Predict Labels of 2D bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Contains a tensor with shape + (num_instances, 4). + """ + + assert (data_instances_2d is not None) or \ + (data_instances_3d is not None),\ + 'please pass at least one type of data_samples' + + if data_instances_2d is None: + data_instances_2d = [ + InstanceData() for _ in range(len(data_instances_3d)) + ] + if data_instances_3d is None: + data_instances_3d = [ + InstanceData() for _ in range(len(data_instances_2d)) + ] + + for i, data_sample in enumerate(data_samples): + data_sample.pred_instances_3d = data_instances_3d[i] + data_sample.pred_instances = data_instances_2d[i] + return data_samples diff --git a/mmdet3d/models/detectors/mink_single_stage.py b/mmdet3d/models/detectors/mink_single_stage.py new file mode 100755 index 0000000..a3c9f57 --- /dev/null +++ b/mmdet3d/models/detectors/mink_single_stage.py @@ -0,0 +1,136 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/detectors/single_stage_sparse.py # noqa +from typing import Dict, List, OrderedDict, Tuple, Union + +import torch +from torch import Tensor + +try: + import MinkowskiEngine as ME +except ImportError: + # Please follow get_started.md to install MinkowskiEngine. + ME = None + pass + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig +from .single_stage import SingleStage3DDetector + + +@MODELS.register_module() +class MinkSingleStage3DDetector(SingleStage3DDetector): + r"""MinkSingleStage3DDetector. + + This class serves as a base class for single-stage 3D detectors based on + MinkowskiEngine `GSDN `_. + + + Args: + backbone (dict): Config dict of detector's backbone. + neck (dict, optional): Config dict of neck. Defaults to None. + bbox_head (dict, optional): Config dict of box head. Defaults to None. + train_cfg (dict, optional): Config dict of training hyper-parameters. + Defaults to None. + test_cfg (dict, optional): Config dict of test hyper-parameters. + Defaults to None. + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`BaseDataPreprocessor`. it usually includes, + ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``. + init_cfg (dict or ConfigDict, optional): the config to control the + initialization. Defaults to None. + """ + _version = 2 + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + bbox_head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + if ME is None: + raise ImportError( + 'Please follow `get_started.md` to install MinkowskiEngine.`') + self.voxel_size = bbox_head['voxel_size'] + + def extract_feat( + self, batch_inputs_dict: Dict[str, Tensor] + ) -> Union[Tuple[torch.Tensor], Dict[str, Tensor]]: + """Directly extract features from the backbone+neck. + + Args: + batch_inputs_dict (dict): The model input dict which includes + 'points' keys. 
+ + - points (list[torch.Tensor]): Point cloud of each sample. + + Returns: + tuple[Tensor] | dict: For outside 3D object detection, we + typically obtain a tuple of features from the backbone + neck, + and for inside 3D object detection, usually a dict containing + features will be obtained. + """ + points = batch_inputs_dict['points'] + + coordinates, features = ME.utils.batch_sparse_collate( + [(p[:, :3] / self.voxel_size, p[:, 3:]) for p in points], + device=points[0].device) + x = ME.SparseTensor(coordinates=coordinates, features=features) + + x = self.backbone(x) + if self.with_neck: + x = self.neck(x) + return x + + def _load_from_state_dict(self, state_dict: OrderedDict, prefix: str, + local_metadata: Dict, strict: bool, + missing_keys: List[str], + unexpected_keys: List[str], + error_msgs: List[str]) -> None: + """Load checkpoint. + + Args: + state_dict (dict): a dict containing parameters and + persistent buffers. + prefix (str): the prefix for parameters and buffers used in this + module + local_metadata (dict): a dict containing the metadata for this + module. + strict (bool): whether to strictly enforce that the keys in + :attr:`state_dict` with :attr:`prefix` match the names of + parameters and buffers in this module + missing_keys (list of str): if ``strict=True``, add missing keys to + this list + unexpected_keys (list of str): if ``strict=True``, add unexpected + keys to this list + error_msgs (list of str): error messages should be added to this + list, and will be reported together in + :meth:`~torch.nn.Module.load_state_dict` + """ + # The names of some parameters in FCAF3D has been changed + # since 2022.10. + version = local_metadata.get('version', None) + if (version is None or + version < 2) and self.__class__ is MinkSingleStage3DDetector: + convert_dict = {'head.': 'bbox_head.'} + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + for ori_key, convert_key in convert_dict.items(): + if ori_key in k: + convert_key = k.replace(ori_key, convert_key) + state_dict[convert_key] = state_dict[k] + del state_dict[k] + + super(MinkSingleStage3DDetector, + self)._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, + unexpected_keys, error_msgs) diff --git a/mmdet3d/models/detectors/multiview_dfm.py b/mmdet3d/models/detectors/multiview_dfm.py new file mode 100755 index 0000000..fce4c92 --- /dev/null +++ b/mmdet3d/models/detectors/multiview_dfm.py @@ -0,0 +1,384 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmdet.models.detectors import BaseDetector + +from mmdet3d.models.layers.fusion_layers.point_fusion import (point_sample, + voxel_sample) +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures.bbox_3d.utils import get_lidar2img +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils import ConfigType, OptConfigType +from .dfm import DfM +from .imvoxelnet import ImVoxelNet + + +@MODELS.register_module() +class MultiViewDfM(ImVoxelNet, DfM): + r"""Waymo challenge solution of `MV-FCOS3D++ + `_. + + Args: + backbone (:obj:`ConfigDict` or dict): The backbone config. + neck (:obj:`ConfigDict` or dict): The neck config. + backbone_stereo (:obj:`ConfigDict` or dict): The stereo backbone + config. + backbone_3d (:obj:`ConfigDict` or dict): The 3d backbone config. + neck_3d (:obj:`ConfigDict` or dict): The 3D neck config. + bbox_head (:obj:`ConfigDict` or dict): The bbox head config. + voxel_size (:obj:`ConfigDict` or dict): The voxel size. 
+ anchor_generator (:obj:`ConfigDict` or dict): The anchor generator + config. + neck_2d (:obj:`ConfigDict` or dict, optional): The 2D neck config + for 2D object detection. Defaults to None. + bbox_head_2d (:obj:`ConfigDict` or dict, optional): The 2D bbox + head config for 2D object detection. Defaults to None. + depth_head_2d (:obj:`ConfigDict` or dict, optional): The 2D depth + head config for depth estimation in fov space. Defaults to None. + depth_head (:obj:`ConfigDict` or dict, optional): The depth head + config for depth estimation in 3D voxel projected to fov space . + train_cfg (:obj:`ConfigDict` or dict, optional): Config dict of + training hyper-parameters. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Config dict of test + hyper-parameters. Defaults to None. + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`BaseDataPreprocessor`. it usually includes, + ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``. + valid_sample (bool): Whether to filter invalid points in view + transformation. Defaults to True. + temporal_aggregate (str): Key to determine the aggregation way in + temporal fusion. Defaults to 'concat'. + transform_depth (bool): Key to determine the transformation of depth. + Defaults to True. + init_cfg (:obj:`ConfigDict` or dict, optional): The initialization + config. Defaults to None. + """ + + def __init__(self, + backbone: ConfigType, + neck: ConfigType, + backbone_stereo: ConfigType, + backbone_3d: ConfigType, + neck_3d: ConfigType, + bbox_head: ConfigType, + voxel_size: ConfigType, + anchor_generator: ConfigType, + neck_2d: ConfigType = None, + bbox_head_2d: ConfigType = None, + depth_head_2d: ConfigType = None, + depth_head: ConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + valid_sample: bool = True, + temporal_aggregate: str = 'concat', + transform_depth: bool = True, + init_cfg: OptConfigType = None): + # TODO merge with DFM + BaseDetector.__init__( + self, data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + self.backbone = MODELS.build(backbone) + self.neck = MODELS.build(neck) + if backbone_stereo is not None: + backbone_stereo.update(cat_img_feature=self.neck.cat_img_feature) + backbone_stereo.update(in_sem_channels=self.neck.sem_channels[-1]) + self.backbone_stereo = MODELS.build(backbone_stereo) + assert self.neck.cat_img_feature == \ + self.backbone_stereo.cat_img_feature + assert self.neck.sem_channels[ + -1] == self.backbone_stereo.in_sem_channels + if backbone_3d is not None: + self.backbone_3d = MODELS.build(backbone_3d) + if neck_3d is not None: + self.neck_3d = MODELS.build(neck_3d) + if neck_2d is not None: + self.neck_2d = MODELS.build(neck_2d) + if bbox_head_2d is not None: + self.bbox_head_2d = MODELS.build(bbox_head_2d) + if depth_head_2d is not None: + self.depth_head_2d = MODELS.build(depth_head_2d) + if depth_head is not None: + self.depth_head = MODELS.build(depth_head) + self.depth_samples = self.depth_head.depth_samples + self.train_cfg = train_cfg + self.test_cfg = test_cfg + bbox_head.update(train_cfg=train_cfg) + bbox_head.update(test_cfg=test_cfg) + self.bbox_head = MODELS.build(bbox_head) + self.voxel_size = voxel_size + self.voxel_range = anchor_generator['ranges'][0] + self.n_voxels = [ + round((self.voxel_range[3] - self.voxel_range[0]) / + self.voxel_size[0]), + round((self.voxel_range[4] - self.voxel_range[1]) / + self.voxel_size[1]), + round((self.voxel_range[5] - 
self.voxel_range[2]) / + self.voxel_size[2]) + ] + self.anchor_generator = TASK_UTILS.build(anchor_generator) + self.valid_sample = valid_sample + self.temporal_aggregate = temporal_aggregate + self.transform_depth = transform_depth + + def extract_feat(self, batch_inputs_dict: dict, + batch_data_samples: SampleList): + """Extract 3d features from the backbone -> fpn -> 3d projection. + + Args: + batch_inputs_dict (dict): The model input dict which include + the 'imgs' key. + + - imgs (torch.Tensor, optional): Image of each sample. + batch_data_samples (list[:obj:`DetDataSample`]): The batch + data samples. It usually includes information such + as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. + + Returns: + torch.Tensor: of shape (N, C_out, N_x, N_y, N_z) + """ + # TODO: Nt means the number of frames temporally + # num_views means the number of views of a frame + img = batch_inputs_dict['imgs'] + batch_img_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + batch_size, _, C_in, H, W = img.shape + num_views = batch_img_metas[0]['num_views'] + num_ref_frames = batch_img_metas[0]['num_ref_frames'] + if num_ref_frames > 0: + num_frames = num_ref_frames + 1 + else: + num_frames = 1 + input_shape = img.shape[-2:] + # NOTE: input_shape is the largest pad_shape of the batch of images + for img_meta in batch_img_metas: + img_meta.update(input_shape=input_shape) + if num_ref_frames > 0: + cur_imgs = img[:, :num_views].reshape(-1, C_in, H, W) + prev_imgs = img[:, num_views:].reshape(-1, C_in, H, W) + cur_feats = self.backbone(cur_imgs) + cur_feats = self.neck(cur_feats)[0] + with torch.no_grad(): + prev_feats = self.backbone(prev_imgs) + prev_feats = self.neck(prev_feats)[0] + _, C_feat, H_feat, W_feat = cur_feats.shape + cur_feats = cur_feats.view(batch_size, -1, C_feat, H_feat, W_feat) + prev_feats = prev_feats.view(batch_size, -1, C_feat, H_feat, + W_feat) + batch_feats = torch.cat([cur_feats, prev_feats], dim=1) + else: + batch_imgs = img.view(-1, C_in, H, W) + batch_feats = self.backbone(batch_imgs) + # TODO: support SPP module neck + batch_feats = self.neck(batch_feats)[0] + _, C_feat, H_feat, W_feat = batch_feats.shape + batch_feats = batch_feats.view(batch_size, -1, C_feat, H_feat, + W_feat) + # transform the feature to voxel & stereo space + transform_feats = self.feature_transformation(batch_feats, + batch_img_metas, + num_views, num_frames) + if self.with_depth_head_2d: + transform_feats += (batch_feats[:, :num_views], ) + return transform_feats + + def feature_transformation(self, batch_feats, batch_img_metas, num_views, + num_frames): + """Feature transformation from perspective view to BEV. + + Args: + batch_feats (torch.Tensor): Perspective view features of shape + (batch_size, num_views, C, H, W). + batch_img_metas (list[dict]): Image meta information. Each element + corresponds to a group of images. len(img_metas) == B. + num_views (int): Number of views. + num_frames (int): Number of consecutive frames. + + Returns: + tuple[torch.Tensor]: Volume features and (optionally) stereo \ + features. 
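+                The volume features have shape (B, C, N_x, N_y, N_z), where
+                (N_x, N_y, N_z) corresponds to ``self.n_voxels``.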
+ """ + # TODO: support more complicated 2D feature sampling + points = self.anchor_generator.grid_anchors( + [self.n_voxels[::-1]], device=batch_feats.device)[0][:, :3] + volumes = [] + img_scale_factors = [] + img_flips = [] + img_crop_offsets = [] + for feature, img_meta in zip(batch_feats, batch_img_metas): + + # TODO: remove feature sampling from back + # TODO: support different scale_factors/flip/crop_offset for + # different views + frame_volume = [] + frame_valid_nums = [] + for frame_idx in range(num_frames): + volume = [] + valid_flags = [] + if isinstance(img_meta['img_shape'], list): + img_shape = img_meta['img_shape'][frame_idx][:2] + else: + img_shape = img_meta['img_shape'][:2] + + for view_idx in range(num_views): + + sample_idx = frame_idx * num_views + view_idx + + if 'scale_factor' in img_meta: + img_scale_factor = img_meta['scale_factor'][sample_idx] + if isinstance(img_scale_factor, np.ndarray) and \ + len(img_meta['scale_factor']) >= 2: + img_scale_factor = ( + points.new_tensor(img_scale_factor[:2])) + else: + img_scale_factor = ( + points.new_tensor(img_scale_factor)) + else: + img_scale_factor = (1) + img_flip = img_meta['flip'][sample_idx] \ + if 'flip' in img_meta.keys() else False + img_crop_offset = ( + points.new_tensor( + img_meta['img_crop_offset'][sample_idx]) + if 'img_crop_offset' in img_meta.keys() else 0) + lidar2cam = points.new_tensor( + img_meta['lidar2cam'][sample_idx]) + cam2img = points.new_tensor( + img_meta['ori_cam2img'][sample_idx]) + # align the precision, the tensor is converted to float32 + lidar2img = get_lidar2img(cam2img.double(), + lidar2cam.double()) + lidar2img = lidar2img.float() + + sample_results = point_sample( + img_meta, + img_features=feature[sample_idx][None, ...], + points=points, + proj_mat=lidar2img, + coord_type='LIDAR', + img_scale_factor=img_scale_factor, + img_crop_offset=img_crop_offset, + img_flip=img_flip, + img_pad_shape=img_meta['input_shape'], + img_shape=img_shape, + aligned=False, + valid_flag=self.valid_sample) + if self.valid_sample: + volume.append(sample_results[0]) + valid_flags.append(sample_results[1]) + else: + volume.append(sample_results) + # TODO: save valid flags, more reasonable feat fusion + if self.valid_sample: + valid_nums = torch.stack( + valid_flags, dim=0).sum(0) # (N, ) + volume = torch.stack(volume, dim=0).sum(0) + valid_mask = valid_nums > 0 + volume[~valid_mask] = 0 + frame_valid_nums.append(valid_nums) + else: + volume = torch.stack(volume, dim=0).mean(0) + frame_volume.append(volume) + + img_scale_factors.append(img_scale_factor) + img_flips.append(img_flip) + img_crop_offsets.append(img_crop_offset) + + if self.valid_sample: + if self.temporal_aggregate == 'mean': + frame_volume = torch.stack(frame_volume, dim=0).sum(0) + frame_valid_nums = torch.stack( + frame_valid_nums, dim=0).sum(0) + frame_valid_mask = frame_valid_nums > 0 + frame_volume[~frame_valid_mask] = 0 + frame_volume = frame_volume / torch.clamp( + frame_valid_nums[:, None], min=1) + elif self.temporal_aggregate == 'concat': + frame_valid_nums = torch.stack(frame_valid_nums, dim=1) + frame_volume = torch.stack(frame_volume, dim=1) + frame_valid_mask = frame_valid_nums > 0 + frame_volume[~frame_valid_mask] = 0 + frame_volume = (frame_volume / torch.clamp( + frame_valid_nums[:, :, None], min=1)).flatten( + start_dim=1, end_dim=2) + else: + frame_volume = torch.stack(frame_volume, dim=0).mean(0) + volumes.append( + frame_volume.reshape(self.n_voxels[::-1] + [-1]).permute( + 3, 2, 1, 0)) + volume_feat = torch.stack(volumes) # 
(B, C, N_x, N_y, N_z) + if self.with_backbone_3d: + outputs = self.backbone_3d(volume_feat) + volume_feat = outputs[0] + if self.backbone_3d.output_bev: + # use outputs[0] if len(outputs) == 1 + # use outputs[1] if len(outputs) == 2 + # TODO: unify the output formats + bev_feat = outputs[-1] + # grid_sample stereo features from the volume feature + # TODO: also support temporal modeling for depth head + if self.with_depth_head: + batch_stereo_feats = [] + for batch_idx in range(volume_feat.shape[0]): + stereo_feat = [] + for view_idx in range(num_views): + img_scale_factor = img_scale_factors[batch_idx] \ + if self.transform_depth else points.new_tensor( + [1., 1.]) + img_crop_offset = img_crop_offsets[batch_idx] \ + if self.transform_depth else points.new_tensor( + [0., 0.]) + img_flip = img_flips[batch_idx] if self.transform_depth \ + else False + img_pad_shape = img_meta['input_shape'] \ + if self.transform_depth else img_meta['ori_shape'][:2] + lidar2cam = points.new_tensor( + batch_img_metas[batch_idx]['lidar2cam'][view_idx]) + cam2img = points.new_tensor( + img_meta[batch_idx]['lidar2cam'][view_idx]) + proj_mat = torch.matmul(cam2img, lidar2cam) + stereo_feat.append( + voxel_sample( + volume_feat[batch_idx][None], + voxel_range=self.voxel_range, + voxel_size=self.voxel_size, + depth_samples=volume_feat.new_tensor( + self.depth_samples), + proj_mat=proj_mat, + downsample_factor=self.depth_head. + downsample_factor, + img_scale_factor=img_scale_factor, + img_crop_offset=img_crop_offset, + img_flip=img_flip, + img_pad_shape=img_pad_shape, + img_shape=batch_img_metas[batch_idx]['img_shape'] + [view_idx][:2], + aligned=True)) # TODO: study the aligned setting + batch_stereo_feats.append(torch.cat(stereo_feat)) + # cat (N, C, D, H, W) -> (B*N, C, D, H, W) + batch_stereo_feats = torch.cat(batch_stereo_feats) + if self.with_neck_3d: + if self.with_backbone_3d and self.backbone_3d.output_bev: + spatial_features = self.neck_3d(bev_feat) + # TODO: unify the outputs of neck_3d + volume_feat = spatial_features[1] + else: + volume_feat = self.neck_3d(volume_feat)[0] + # TODO: unify the output format of neck_3d + transform_feats = (volume_feat, ) + if self.with_depth_head: + transform_feats += (batch_stereo_feats, ) + return transform_feats + + def aug_test(self, imgs, img_metas, **kwargs): + """Test with augmentations. + + Args: + imgs (list[torch.Tensor]): Input images of shape (N, C_in, H, W). + img_metas (list): Image metas. + + Returns: + list[dict]: Predicted 3d boxes. + """ + raise NotImplementedError diff --git a/mmdet3d/models/detectors/mvx_faster_rcnn.py b/mmdet3d/models/detectors/mvx_faster_rcnn.py new file mode 100755 index 0000000..de858d0 --- /dev/null +++ b/mmdet3d/models/detectors/mvx_faster_rcnn.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
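+# Detectors that build a 3D feature volume (e.g. ``MultiViewDfM`` above)
+# derive their grid resolution from the anchor range and the voxel size:
+# n_voxels[i] = round((range_max[i] - range_min[i]) / voxel_size[i]).
+# A standalone sketch with illustrative numbers (the range and voxel size
+# below are assumptions, not the shipped defaults):
+#
+#     voxel_range = [-50.0, -50.0, -5.0, 50.0, 50.0, 3.0]
+#     voxel_size = [0.5, 0.5, 0.5]
+#     n_voxels = [
+#         round((voxel_range[i + 3] - voxel_range[i]) / voxel_size[i])
+#         for i in range(3)
+#     ]  # -> [200, 200, 16]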
+from typing import Dict, List, Optional, Sequence + +from torch import Tensor + +from mmdet3d.registry import MODELS +from .mvx_two_stage import MVXTwoStageDetector + + +@MODELS.register_module() +class MVXFasterRCNN(MVXTwoStageDetector): + """Multi-modality VoxelNet using Faster R-CNN.""" + + def __init__(self, **kwargs): + super(MVXFasterRCNN, self).__init__(**kwargs) + + +@MODELS.register_module() +class DynamicMVXFasterRCNN(MVXTwoStageDetector): + """Multi-modality VoxelNet using Faster R-CNN and dynamic voxelization.""" + + def __init__(self, **kwargs): + super(DynamicMVXFasterRCNN, self).__init__(**kwargs) + + def extract_pts_feat( + self, + voxel_dict: Dict[str, Tensor], + points: Optional[List[Tensor]] = None, + img_feats: Optional[Sequence[Tensor]] = None, + batch_input_metas: Optional[List[dict]] = None + ) -> Sequence[Tensor]: + """Extract features of points. + + Args: + voxel_dict(Dict[str, Tensor]): Dict of voxelization infos. + points (List[tensor], optional): Point cloud of multiple inputs. + img_feats (list[Tensor], tuple[tensor], optional): Features from + image backbone. + batch_input_metas (list[dict], optional): The meta information + of multiple samples. Defaults to True. + + Returns: + Sequence[tensor]: points features of multiple inputs + from backbone or neck. + """ + if not self.with_pts_bbox: + return None + voxel_features, feature_coors = self.pts_voxel_encoder( + voxel_dict['voxels'], voxel_dict['coors'], points, img_feats, + batch_input_metas) + batch_size = voxel_dict['coors'][-1, 0] + 1 + x = self.pts_middle_encoder(voxel_features, feature_coors, batch_size) + x = self.pts_backbone(x) + if self.with_pts_neck: + x = self.pts_neck(x) + return x diff --git a/mmdet3d/models/detectors/mvx_two_stage.py b/mmdet3d/models/detectors/mvx_two_stage.py new file mode 100755 index 0000000..537d821 --- /dev/null +++ b/mmdet3d/models/detectors/mvx_two_stage.py @@ -0,0 +1,407 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Dict, List, Optional, Sequence + +import torch +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from .base import Base3DDetector + + +@MODELS.register_module() +class MVXTwoStageDetector(Base3DDetector): + """Base class of Multi-modality VoxelNet. + + Args: + pts_voxel_encoder (dict, optional): Point voxelization + encoder layer. Defaults to None. + pts_middle_encoder (dict, optional): Middle encoder layer + of points cloud modality. Defaults to None. + pts_fusion_layer (dict, optional): Fusion layer. + Defaults to None. + img_backbone (dict, optional): Backbone of extracting + images feature. Defaults to None. + pts_backbone (dict, optional): Backbone of extracting + points features. Defaults to None. + img_neck (dict, optional): Neck of extracting + image features. Defaults to None. + pts_neck (dict, optional): Neck of extracting + points features. Defaults to None. + pts_bbox_head (dict, optional): Bboxes head of + point cloud modality. Defaults to None. + img_roi_head (dict, optional): RoI head of image + modality. Defaults to None. + img_rpn_head (dict, optional): RPN head of image + modality. Defaults to None. + train_cfg (dict, optional): Train config of model. + Defaults to None. + test_cfg (dict, optional): Train config of model. + Defaults to None. + init_cfg (dict, optional): Initialize config of + model. Defaults to None. 
+ data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`Det3DDataPreprocessor`. Defaults to None. + """ + + def __init__(self, + pts_voxel_encoder: Optional[dict] = None, + pts_middle_encoder: Optional[dict] = None, + pts_fusion_layer: Optional[dict] = None, + img_backbone: Optional[dict] = None, + pts_backbone: Optional[dict] = None, + img_neck: Optional[dict] = None, + pts_neck: Optional[dict] = None, + pts_bbox_head: Optional[dict] = None, + img_roi_head: Optional[dict] = None, + img_rpn_head: Optional[dict] = None, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None, + data_preprocessor: Optional[dict] = None, + **kwargs): + super(MVXTwoStageDetector, self).__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor, **kwargs) + + if pts_voxel_encoder: + self.pts_voxel_encoder = MODELS.build(pts_voxel_encoder) + if pts_middle_encoder: + self.pts_middle_encoder = MODELS.build(pts_middle_encoder) + if pts_backbone: + self.pts_backbone = MODELS.build(pts_backbone) + if pts_fusion_layer: + self.pts_fusion_layer = MODELS.build(pts_fusion_layer) + if pts_neck is not None: + self.pts_neck = MODELS.build(pts_neck) + if pts_bbox_head: + pts_train_cfg = train_cfg.pts if train_cfg else None + pts_bbox_head.update(train_cfg=pts_train_cfg) + pts_test_cfg = test_cfg.pts if test_cfg else None + pts_bbox_head.update(test_cfg=pts_test_cfg) + self.pts_bbox_head = MODELS.build(pts_bbox_head) + + if img_backbone: + self.img_backbone = MODELS.build(img_backbone) + if img_neck is not None: + self.img_neck = MODELS.build(img_neck) + if img_rpn_head is not None: + self.img_rpn_head = MODELS.build(img_rpn_head) + if img_roi_head is not None: + self.img_roi_head = MODELS.build(img_roi_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + @property + def with_img_shared_head(self): + """bool: Whether the detector has a shared head in image branch.""" + return hasattr(self, + 'img_shared_head') and self.img_shared_head is not None + + @property + def with_pts_bbox(self): + """bool: Whether the detector has a 3D box head.""" + return hasattr(self, + 'pts_bbox_head') and self.pts_bbox_head is not None + + @property + def with_img_bbox(self): + """bool: Whether the detector has a 2D image box head.""" + return hasattr(self, + 'img_bbox_head') and self.img_bbox_head is not None + + @property + def with_img_backbone(self): + """bool: Whether the detector has a 2D image backbone.""" + return hasattr(self, 'img_backbone') and self.img_backbone is not None + + @property + def with_pts_backbone(self): + """bool: Whether the detector has a 3D backbone.""" + return hasattr(self, 'pts_backbone') and self.pts_backbone is not None + + @property + def with_fusion(self): + """bool: Whether the detector has a fusion layer.""" + return hasattr(self, + 'pts_fusion_layer') and self.fusion_layer is not None + + @property + def with_img_neck(self): + """bool: Whether the detector has a neck in image branch.""" + return hasattr(self, 'img_neck') and self.img_neck is not None + + @property + def with_pts_neck(self): + """bool: Whether the detector has a neck in 3D detector branch.""" + return hasattr(self, 'pts_neck') and self.pts_neck is not None + + @property + def with_img_rpn(self): + """bool: Whether the detector has a 2D RPN in image detector branch.""" + return hasattr(self, 'img_rpn_head') and self.img_rpn_head is not None + + @property + def with_img_roi_head(self): + """bool: Whether the detector has a RoI Head in image 
branch.""" + return hasattr(self, 'img_roi_head') and self.img_roi_head is not None + + @property + def with_voxel_encoder(self): + """bool: Whether the detector has a voxel encoder.""" + return hasattr(self, + 'voxel_encoder') and self.voxel_encoder is not None + + @property + def with_middle_encoder(self): + """bool: Whether the detector has a middle encoder.""" + return hasattr(self, + 'middle_encoder') and self.middle_encoder is not None + + def _forward(self): + pass + + def extract_img_feat(self, img: Tensor, input_metas: List[dict]) -> dict: + """Extract features of images.""" + if self.with_img_backbone and img is not None: + input_shape = img.shape[-2:] + # update real input shape of each single img + for img_meta in input_metas: + img_meta.update(input_shape=input_shape) + + if img.dim() == 5 and img.size(0) == 1: + img.squeeze_() + elif img.dim() == 5 and img.size(0) > 1: + B, N, C, H, W = img.size() + img = img.view(B * N, C, H, W) + img_feats = self.img_backbone(img) + else: + return None + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + return img_feats + + def extract_pts_feat( + self, + voxel_dict: Dict[str, Tensor], + points: Optional[List[Tensor]] = None, + img_feats: Optional[Sequence[Tensor]] = None, + batch_input_metas: Optional[List[dict]] = None + ) -> Sequence[Tensor]: + """Extract features of points. + + Args: + voxel_dict(Dict[str, Tensor]): Dict of voxelization infos. + points (List[tensor], optional): Point cloud of multiple inputs. + img_feats (list[Tensor], tuple[tensor], optional): Features from + image backbone. + batch_input_metas (list[dict], optional): The meta information + of multiple samples. Defaults to True. + + Returns: + Sequence[tensor]: points features of multiple inputs + from backbone or neck. + """ + if not self.with_pts_bbox: + return None + voxel_features = self.pts_voxel_encoder(voxel_dict['voxels'], + voxel_dict['num_points'], + voxel_dict['coors'], img_feats, + batch_input_metas) + batch_size = voxel_dict['coors'][-1, 0] + 1 + x = self.pts_middle_encoder(voxel_features, voxel_dict['coors'], + batch_size) + x = self.pts_backbone(x) + if self.with_pts_neck: + x = self.pts_neck(x) + return x + + def extract_feat(self, batch_inputs_dict: dict, + batch_input_metas: List[dict]) -> tuple: + """Extract features from images and points. + + Args: + batch_inputs_dict (dict): Dict of batch inputs. It + contains + + - points (List[tensor]): Point cloud of multiple inputs. + - imgs (tensor): Image tensor with shape (B, C, H, W). + batch_input_metas (list[dict]): Meta information of multiple inputs + in a batch. + + Returns: + tuple: Two elements in tuple arrange as + image features and point cloud features. + """ + voxel_dict = batch_inputs_dict.get('voxels', None) + imgs = batch_inputs_dict.get('imgs', None) + points = batch_inputs_dict.get('points', None) + img_feats = self.extract_img_feat(imgs, batch_input_metas) + pts_feats = self.extract_pts_feat( + voxel_dict, + points=points, + img_feats=img_feats, + batch_input_metas=batch_input_metas) + return (img_feats, pts_feats) + + def loss(self, batch_inputs_dict: Dict[List, torch.Tensor], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + """ + Args: + batch_inputs_dict (dict): The model input dict which include + 'points' and `imgs` keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor): Tensor of batch images, has shape + (B, C, H ,W) + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. 
It usually includes information such as + `gt_instance_3d`, . + + Returns: + dict[str, Tensor]: A dictionary of loss components. + + """ + + batch_input_metas = [item.metainfo for item in batch_data_samples] + img_feats, pts_feats = self.extract_feat(batch_inputs_dict, + batch_input_metas) + losses = dict() + if pts_feats: + losses_pts = self.pts_bbox_head.loss(pts_feats, batch_data_samples, + **kwargs) + losses.update(losses_pts) + if img_feats: + losses_img = self.loss_imgs(img_feats, batch_data_samples) + losses.update(losses_img) + return losses + + def loss_imgs(self, x: List[Tensor], + batch_data_samples: List[Det3DDataSample], **kwargs): + """Forward function for image branch. + + This function works similar to the forward function of Faster R-CNN. + + Args: + x (list[torch.Tensor]): Image features of shape (B, C, H, W) + of multiple levels. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, . + + Returns: + dict: Losses of each branch. + """ + losses = dict() + # RPN forward and loss + if self.with_img_rpn: + proposal_cfg = self.test_cfg.rpn + rpn_data_samples = copy.deepcopy(batch_data_samples) + # set cat_id of gt_labels to 0 in RPN + for data_sample in rpn_data_samples: + data_sample.gt_instances.labels = \ + torch.zeros_like(data_sample.gt_instances.labels) + rpn_losses, rpn_results_list = self.img_rpn_head.loss_and_predict( + x, rpn_data_samples, proposal_cfg=proposal_cfg, **kwargs) + # avoid get same name with roi_head loss + keys = rpn_losses.keys() + for key in keys: + if 'loss' in key and 'rpn' not in key: + rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key) + losses.update(rpn_losses) + + else: + if 'proposals' in batch_data_samples[0]: + # use pre-defined proposals in InstanceData + # for the second stage + # to extract ROI features. + rpn_results_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + else: + rpn_results_list = None + # bbox head forward and loss + if self.with_img_bbox: + roi_losses = self.img_roi_head.loss(x, rpn_results_list, + batch_data_samples, **kwargs) + losses.update(roi_losses) + return losses + + def predict_imgs(self, + x: List[Tensor], + batch_data_samples: List[Det3DDataSample], + rescale: bool = True, + **kwargs) -> InstanceData: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + x (List[Tensor]): Image features from FPN. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + rescale (bool): Whether to rescale the results. + Defaults to True. + """ + + if batch_data_samples[0].get('proposals', None) is None: + rpn_results_list = self.img_rpn_head.predict( + x, batch_data_samples, rescale=False) + else: + rpn_results_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + results_list = self.img_roi_head.predict( + x, rpn_results_list, batch_data_samples, rescale=rescale, **kwargs) + return results_list + + def predict(self, batch_inputs_dict: Dict[str, Optional[Tensor]], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + """Forward of testing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`. 
+ + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input sample. Each Det3DDataSample usually contain + 'pred_instances_3d'. And the ``pred_instances_3d`` usually + contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bbox_3d (:obj:`BaseInstance3DBoxes`): Prediction of bboxes, + contains a tensor with shape (num_instances, 7). + """ + batch_input_metas = [item.metainfo for item in batch_data_samples] + img_feats, pts_feats = self.extract_feat(batch_inputs_dict, + batch_input_metas) + if pts_feats and self.with_pts_bbox: + results_list_3d = self.pts_bbox_head.predict( + pts_feats, batch_data_samples, **kwargs) + else: + results_list_3d = None + + if img_feats and self.with_img_bbox: + # TODO check this for camera modality + results_list_2d = self.predict_imgs(img_feats, batch_data_samples, + **kwargs) + else: + results_list_2d = None + + detsamples = self.add_pred_to_datasample(batch_data_samples, + results_list_3d, + results_list_2d) + return detsamples diff --git a/mmdet3d/models/detectors/parta2.py b/mmdet3d/models/detectors/parta2.py new file mode 100755 index 0000000..9011abd --- /dev/null +++ b/mmdet3d/models/detectors/parta2.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Optional + +from mmdet3d.registry import MODELS +from .two_stage import TwoStage3DDetector + + +@MODELS.register_module() +class PartA2(TwoStage3DDetector): + r"""Part-A2 detector. + + Please refer to the `paper `_ + """ + + def __init__(self, + voxel_encoder: dict, + middle_encoder: dict, + backbone: dict, + neck: dict = None, + rpn_head: dict = None, + roi_head: dict = None, + train_cfg: dict = None, + test_cfg: dict = None, + init_cfg: dict = None, + data_preprocessor: Optional[dict] = None): + super(PartA2, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg, + data_preprocessor=data_preprocessor) + self.voxel_encoder = MODELS.build(voxel_encoder) + self.middle_encoder = MODELS.build(middle_encoder) + + def extract_feat(self, batch_inputs_dict: Dict) -> Dict: + """Directly extract features from the backbone+neck. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'imgs' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + Returns: + tuple[Tensor] | dict: For outside 3D object detection, we + typically obtain a tuple of features from the backbone + neck, + and for inside 3D object detection, usually a dict containing + features will be obtained. + """ + voxel_dict = batch_inputs_dict['voxels'] + voxel_features = self.voxel_encoder(voxel_dict['voxels'], + voxel_dict['num_points'], + voxel_dict['coors']) + batch_size = voxel_dict['coors'][-1, 0].item() + 1 + feats_dict = self.middle_encoder(voxel_features, voxel_dict['coors'], + batch_size) + x = self.backbone(feats_dict['spatial_features']) + if self.with_neck: + neck_feats = self.neck(x) + feats_dict.update({'neck_feats': neck_feats}) + feats_dict['voxels_dict'] = voxel_dict + return feats_dict diff --git a/mmdet3d/models/detectors/point_rcnn.py b/mmdet3d/models/detectors/point_rcnn.py new file mode 100755 index 0000000..acf6ab5 --- /dev/null +++ b/mmdet3d/models/detectors/point_rcnn.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
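+"""PointRCNN detector.
+
+A minimal usage sketch (illustrative only; ``cfg.model`` is assumed to be a
+PointRCNN config and ``batch_inputs_dict`` to come from the data
+preprocessor):
+
+.. code-block:: python
+
+    from mmdet3d.registry import MODELS
+
+    model = MODELS.build(cfg.model)
+    feats = model.extract_feat(batch_inputs_dict)
+    # feats holds 'fp_features', 'fp_points' and 'raw_points'
+"""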
+from typing import Dict, Optional + +import torch + +from mmdet3d.registry import MODELS +from .two_stage import TwoStage3DDetector + + +@MODELS.register_module() +class PointRCNN(TwoStage3DDetector): + r"""PointRCNN detector. + + Please refer to the `PointRCNN `_ + + Args: + backbone (dict): Config dict of detector's backbone. + neck (dict, optional): Config dict of neck. Defaults to None. + rpn_head (dict, optional): Config of RPN head. Defaults to None. + roi_head (dict, optional): Config of ROI head. Defaults to None. + train_cfg (dict, optional): Train configs. Defaults to None. + test_cfg (dict, optional): Test configs. Defaults to None. + pretrained (str, optional): Model pretrained path. Defaults to None. + init_cfg (dict, optional): Config of initialization. Defaults to None. + """ + + def __init__(self, + backbone: dict, + neck: Optional[dict] = None, + rpn_head: Optional[dict] = None, + roi_head: Optional[dict] = None, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None, + data_preprocessor: Optional[dict] = None) -> Optional: + super(PointRCNN, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg, + data_preprocessor=data_preprocessor) + + def extract_feat(self, batch_inputs_dict: Dict) -> Dict: + """Directly extract features from the backbone+neck. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'imgs' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + Returns: + dict: Features from the backbone+neck and raw points. + """ + points = torch.stack(batch_inputs_dict['points']) + x = self.backbone(points) + + if self.with_neck: + x = self.neck(x) + return dict( + fp_features=x['fp_features'].clone(), + fp_points=x['fp_xyz'].clone(), + raw_points=points) diff --git a/mmdet3d/models/detectors/pv_rcnn.py b/mmdet3d/models/detectors/pv_rcnn.py new file mode 100755 index 0000000..ac03a61 --- /dev/null +++ b/mmdet3d/models/detectors/pv_rcnn.py @@ -0,0 +1,232 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Optional + +from mmdet3d.registry import MODELS +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils import InstanceList +from .two_stage import TwoStage3DDetector + + +@MODELS.register_module() +class PointVoxelRCNN(TwoStage3DDetector): + r"""PointVoxelRCNN detector. + + Please refer to the `PointVoxelRCNN `_. + + Args: + voxel_encoder (dict): Point voxelization encoder layer. + middle_encoder (dict): Middle encoder layer + of points cloud modality. + backbone (dict): Backbone of extracting points features. + neck (dict, optional): Neck of extracting points features. + Defaults to None. + rpn_head (dict, optional): Config of RPN head. Defaults to None. + points_encoder (dict, optional): Points encoder to extract point-wise + features. Defaults to None. + roi_head (dict, optional): Config of ROI head. Defaults to None. + train_cfg (dict, optional): Train config of model. + Defaults to None. + test_cfg (dict, optional): Train config of model. + Defaults to None. + init_cfg (dict, optional): Initialize config of + model. Defaults to None. + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`Det3DDataPreprocessor`. Defaults to None. 
+    """
+
+    def __init__(self,
+                 voxel_encoder: dict,
+                 middle_encoder: dict,
+                 backbone: dict,
+                 neck: Optional[dict] = None,
+                 rpn_head: Optional[dict] = None,
+                 points_encoder: Optional[dict] = None,
+                 roi_head: Optional[dict] = None,
+                 train_cfg: Optional[dict] = None,
+                 test_cfg: Optional[dict] = None,
+                 init_cfg: Optional[dict] = None,
+                 data_preprocessor: Optional[dict] = None) -> None:
+        super().__init__(
+            backbone=backbone,
+            neck=neck,
+            rpn_head=rpn_head,
+            roi_head=roi_head,
+            train_cfg=train_cfg,
+            test_cfg=test_cfg,
+            init_cfg=init_cfg,
+            data_preprocessor=data_preprocessor)
+        self.voxel_encoder = MODELS.build(voxel_encoder)
+        self.middle_encoder = MODELS.build(middle_encoder)
+        self.points_encoder = MODELS.build(points_encoder)
+
+    def predict(self, batch_inputs_dict: dict, batch_data_samples: SampleList,
+                **kwargs) -> SampleList:
+        """Predict results from a batch of inputs and data samples with post-
+        processing.
+
+        Args:
+            batch_inputs_dict (dict): The model input dict which include
+                'points', 'voxels' keys.
+
+                - points (list[torch.Tensor]): Point cloud of each sample.
+                - voxels (dict[torch.Tensor]): Voxels of the batch sample.
+
+            batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
+                samples. It usually includes information such as
+                `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`.
+
+        Returns:
+            list[:obj:`Det3DDataSample`]: Detection results of the
+            input samples. Each Det3DDataSample usually contains
+            'pred_instances_3d'. And the ``pred_instances_3d`` usually
+            contains following keys.
+
+            - scores_3d (Tensor): Classification scores, has a shape
+                (num_instance, )
+            - labels_3d (Tensor): Labels of bboxes, has a shape
+                (num_instances, ).
+            - bboxes_3d (Tensor): Contains a tensor with shape
+                (num_instances, C) where C >=7.
+        """
+        feats_dict = self.extract_feat(batch_inputs_dict)
+        if self.with_rpn:
+            rpn_results_list = self.rpn_head.predict(feats_dict,
+                                                     batch_data_samples)
+        else:
+            rpn_results_list = [
+                data_sample.proposals for data_sample in batch_data_samples
+            ]
+
+        # extract point-wise features with the points encoder
+        points_feats_dict = self.extract_points_feat(batch_inputs_dict,
+                                                     feats_dict,
+                                                     rpn_results_list)
+
+        results_list_3d = self.roi_head.predict(points_feats_dict,
+                                                rpn_results_list,
+                                                batch_data_samples)
+
+        # convert to Det3DDataSample
+        results_list = self.add_pred_to_datasample(batch_data_samples,
+                                                   results_list_3d)
+
+        return results_list
+
+    def extract_feat(self, batch_inputs_dict: dict) -> dict:
+        """Extract features from the input voxels.
+
+        Args:
+            batch_inputs_dict (dict): The model input dict which include
+                'points', 'voxels' keys.
+
+                - points (list[torch.Tensor]): Point cloud of each sample.
+                - voxels (dict[torch.Tensor]): Voxels of the batch sample.
+
+        Returns:
+            dict: We typically obtain a dict of features from the
+            backbone + neck. It includes:
+
+                - spatial_feats (torch.Tensor): Spatial feats from middle
+                  encoder.
+                - multi_scale_3d_feats (list[torch.Tensor]): Multi-scale
+                  middle feats from middle encoder.
+                - neck_feats (torch.Tensor): Neck feats from neck.
+ """ + feats_dict = dict() + voxel_dict = batch_inputs_dict['voxels'] + voxel_features = self.voxel_encoder(voxel_dict['voxels'], + voxel_dict['num_points'], + voxel_dict['coors']) + batch_size = voxel_dict['coors'][-1, 0].item() + 1 + feats_dict['spatial_feats'], feats_dict[ + 'multi_scale_3d_feats'] = self.middle_encoder( + voxel_features, voxel_dict['coors'], batch_size) + x = self.backbone(feats_dict['spatial_feats']) + if self.with_neck: + neck_feats = self.neck(x) + feats_dict['neck_feats'] = neck_feats + return feats_dict + + def extract_points_feat(self, batch_inputs_dict: dict, feats_dict: dict, + rpn_results_list: InstanceList) -> dict: + """Extract point-wise features from the raw points and voxel features. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'voxels' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - voxels (dict[torch.Tensor]): Voxels of the batch sample. + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + + Returns: + dict: Contain Point-wise features, include: + - keypoints (torch.Tensor): Sampled key points. + - keypoint_features (torch.Tensor): Gather key points features + from multi input. + - fusion_keypoint_features (torch.Tensor): Fusion + keypoint_features by point_feature_fusion_layer. + """ + return self.points_encoder(batch_inputs_dict, feats_dict, + rpn_results_list) + + def loss(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs): + """Calculate losses from a batch of inputs and data samples. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'voxels' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - voxels (dict[torch.Tensor]): Voxels of the batch sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + dict: A dictionary of loss components. + """ + feats_dict = self.extract_feat(batch_inputs_dict) + + losses = dict() + + # RPN forward and loss + if self.with_rpn: + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + rpn_data_samples = copy.deepcopy(batch_data_samples) + + rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict( + feats_dict, + rpn_data_samples, + proposal_cfg=proposal_cfg, + **kwargs) + # avoid get same name with roi_head loss + keys = rpn_losses.keys() + for key in keys: + if 'loss' in key and 'rpn' not in key: + rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key) + losses.update(rpn_losses) + else: + # TODO: Not support currently, should have a check at Fast R-CNN + assert batch_data_samples[0].get('proposals', None) is not None + # use pre-defined proposals in InstanceData for the second stage + # to extract ROI features. + rpn_results_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + + points_feats_dict = self.extract_points_feat(batch_inputs_dict, + feats_dict, + rpn_results_list) + + roi_losses = self.roi_head.loss(points_feats_dict, rpn_results_list, + batch_data_samples) + losses.update(roi_losses) + + return losses diff --git a/mmdet3d/models/detectors/sassd.py b/mmdet3d/models/detectors/sassd.py new file mode 100755 index 0000000..76f130c --- /dev/null +++ b/mmdet3d/models/detectors/sassd.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
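+"""SA-SSD detector.
+
+Rough sketch of the feature flow implemented below (illustrative; ``model``
+and ``batch_inputs_dict`` are assumed to be built elsewhere):
+
+.. code-block:: python
+
+    # training: also returns point-level targets for the auxiliary loss
+    x, point_misc = model.extract_feat(batch_inputs_dict, test_mode=False)
+    # testing: only the multi-level features are returned
+    x = model.extract_feat(batch_inputs_dict, test_mode=True)
+"""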
+from typing import Tuple, Union
+
+from torch import Tensor
+
+from mmdet3d.registry import MODELS
+from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig
+from ...structures.det3d_data_sample import SampleList
+from .single_stage import SingleStage3DDetector
+
+
+@MODELS.register_module()
+class SASSD(SingleStage3DDetector):
+    r"""`SASSD `_ for 3D detection."""
+
+    def __init__(self,
+                 voxel_encoder: ConfigType,
+                 middle_encoder: ConfigType,
+                 backbone: ConfigType,
+                 neck: OptConfigType = None,
+                 bbox_head: OptConfigType = None,
+                 train_cfg: OptConfigType = None,
+                 test_cfg: OptConfigType = None,
+                 data_preprocessor: OptConfigType = None,
+                 init_cfg: OptMultiConfig = None):
+        super(SASSD, self).__init__(
+            backbone=backbone,
+            neck=neck,
+            bbox_head=bbox_head,
+            train_cfg=train_cfg,
+            test_cfg=test_cfg,
+            data_preprocessor=data_preprocessor,
+            init_cfg=init_cfg)
+
+        self.voxel_encoder = MODELS.build(voxel_encoder)
+        self.middle_encoder = MODELS.build(middle_encoder)
+
+    def extract_feat(
+        self,
+        batch_inputs_dict: dict,
+        test_mode: bool = True
+    ) -> Union[Tuple[Tuple[Tensor], Tuple], Tuple[Tensor]]:
+        """Extract features from points.
+
+        Args:
+            batch_inputs_dict (dict): The batch inputs.
+            test_mode (bool, optional): Whether in test mode.
+                Defaults to True.
+
+        Returns:
+            Union[Tuple[Tuple[Tensor], Tuple], Tuple[Tensor]]: In test mode, it
+            returns the features of points from multiple levels. In training
+            mode, it returns the features of points from multiple levels and a
+            tuple containing the mean features of points and the targets of
+            classification and regression.
+        """
+        voxel_dict = batch_inputs_dict['voxels']
+        voxel_features = self.voxel_encoder(voxel_dict['voxels'],
+                                            voxel_dict['num_points'],
+                                            voxel_dict['coors'])
+        batch_size = voxel_dict['coors'][-1, 0].item() + 1
+        # `point_misc` is a tuple containing the mean features of points and
+        # the targets of classification and regression. It's only used for
+        # calculating auxiliary loss in training mode.
+        x, point_misc = self.middle_encoder(voxel_features,
+                                            voxel_dict['coors'], batch_size,
+                                            test_mode)
+        x = self.backbone(x)
+        if self.with_neck:
+            x = self.neck(x)
+
+        return (x, point_misc) if not test_mode else x
+
+    def loss(self, batch_inputs_dict: dict, batch_data_samples: SampleList,
+             **kwargs) -> dict:
+        """Calculate losses from a batch of inputs dict and data samples.
+
+        Args:
+            batch_inputs_dict (dict): The model input dict which include
+                'points' keys.
+                - points (list[torch.Tensor]): Point cloud of each sample.
+
+            batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
+                Samples. It usually includes information such as
+                `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`.
+
+        Returns:
+            dict: A dictionary of loss components.
+        """
+        x, point_misc = self.extract_feat(batch_inputs_dict, test_mode=False)
+        batch_gt_bboxes_3d = [
+            data_sample.gt_instances_3d.bboxes_3d
+            for data_sample in batch_data_samples
+        ]
+        aux_loss = self.middle_encoder.aux_loss(*point_misc,
+                                                batch_gt_bboxes_3d)
+        losses = self.bbox_head.loss(x, batch_data_samples)
+        losses.update(aux_loss)
+        return losses
diff --git a/mmdet3d/models/detectors/single_stage.py b/mmdet3d/models/detectors/single_stage.py
new file mode 100755
index 0000000..7719944
--- /dev/null
+++ b/mmdet3d/models/detectors/single_stage.py
@@ -0,0 +1,163 @@
+# Copyright (c) OpenMMLab. All rights reserved.
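+"""Base class for single-stage 3D detectors.
+
+Sketch of the data flow shared by the subclasses below (illustrative;
+``model`` is assumed to be a built ``SingleStage3DDetector``):
+
+.. code-block:: python
+
+    x = model.extract_feat(batch_inputs_dict)                  # backbone (+ neck)
+    losses = model.bbox_head.loss(x, batch_data_samples)       # training
+    results = model.bbox_head.predict(x, batch_data_samples)   # inference
+"""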
+from typing import Dict, List, Tuple, Union + +import torch +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig +from ...structures.det3d_data_sample import OptSampleList, SampleList +from .base import Base3DDetector + + +@MODELS.register_module() +class SingleStage3DDetector(Base3DDetector): + """SingleStage3DDetector. + + This class serves as a base class for single-stage 3D detectors which + directly and densely predict 3D bounding boxes on the output features + of the backbone+neck. + + + Args: + backbone (dict): Config dict of detector's backbone. + neck (dict, optional): Config dict of neck. Defaults to None. + bbox_head (dict, optional): Config dict of box head. Defaults to None. + train_cfg (dict, optional): Config dict of training hyper-parameters. + Defaults to None. + test_cfg (dict, optional): Config dict of test hyper-parameters. + Defaults to None. + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`BaseDataPreprocessor`. it usually includes, + ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``. + init_cfg (dict or ConfigDict, optional): the config to control the + initialization. Defaults to None. + """ + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + bbox_head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.backbone = MODELS.build(backbone) + if neck is not None: + self.neck = MODELS.build(neck) + bbox_head.update(train_cfg=train_cfg) + bbox_head.update(test_cfg=test_cfg) + self.bbox_head = MODELS.build(bbox_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def loss(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> Union[dict, list]: + """Calculate losses from a batch of inputs dict and data samples. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'img' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + dict: A dictionary of loss components. + """ + x = self.extract_feat(batch_inputs_dict) + losses = self.bbox_head.loss(x, batch_data_samples, **kwargs) + return losses + + def predict(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'img' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + rescale (bool): Whether to rescale the results. + Defaults to True. + + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input samples. Each Det3DDataSample usually contain + 'pred_instances_3d'. And the ``pred_instances_3d`` usually + contains following keys. 
+ + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C) where C >=7. + """ + x = self.extract_feat(batch_inputs_dict) + results_list = self.bbox_head.predict(x, batch_data_samples, **kwargs) + predictions = self.add_pred_to_datasample(batch_data_samples, + results_list) + return predictions + + def _forward(self, + batch_inputs_dict: dict, + data_samples: OptSampleList = None, + **kwargs) -> Tuple[List[torch.Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'img' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + tuple[list]: A tuple of features from ``bbox_head`` forward. + """ + x = self.extract_feat(batch_inputs_dict) + results = self.bbox_head.forward(x) + return results + + def extract_feat( + self, batch_inputs_dict: Dict[str, Tensor] + ) -> Union[Tuple[torch.Tensor], Dict[str, Tensor]]: + """Directly extract features from the backbone+neck. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'img' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + Returns: + tuple[Tensor] | dict: For outside 3D object detection, we + typically obtain a tuple of features from the backbone + neck, + and for inside 3D object detection, usually a dict containing + features will be obtained. + """ + points = batch_inputs_dict['points'] + stack_points = torch.stack(points) + x = self.backbone(stack_points) + if self.with_neck: + x = self.neck(x) + return x diff --git a/mmdet3d/models/detectors/single_stage_mono3d.py b/mmdet3d/models/detectors/single_stage_mono3d.py new file mode 100755 index 0000000..5865db3 --- /dev/null +++ b/mmdet3d/models/detectors/single_stage_mono3d.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +from mmdet.models.detectors.single_stage import SingleStageDetector +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils import OptInstanceList + + +@MODELS.register_module() +class SingleStageMono3DDetector(SingleStageDetector): + """Base class for monocular 3D single-stage detectors. + + Monocular 3D single-stage detectors directly and densely predict bounding + boxes on the output features of the backbone+neck. + """ + + def add_pred_to_datasample( + self, + data_samples: SampleList, + data_instances_3d: OptInstanceList = None, + data_instances_2d: OptInstanceList = None, + ) -> SampleList: + """Convert results list to `Det3DDataSample`. + + Args: + data_samples (list[:obj:`Det3DDataSample`]): The input data. + data_instances_3d (list[:obj:`InstanceData`], optional): 3D + Detection results of each image. Defaults to None. + data_instances_2d (list[:obj:`InstanceData`], optional): 2D + Detection results of each image. Defaults to None. 
+
+        Returns:
+            list[:obj:`Det3DDataSample`]: Detection results of the
+            input. Each Det3DDataSample usually contains
+            'pred_instances_3d'. And the ``pred_instances_3d`` normally
+            contains following keys.
+
+            - scores_3d (Tensor): Classification scores, has a shape
+                (num_instance, )
+            - labels_3d (Tensor): Labels of 3D bboxes, has a shape
+                (num_instances, ).
+            - bboxes_3d (Tensor): Contains a tensor with shape
+                (num_instances, C) where C >=7.
+
+            When some models also output 2D predictions, the sample
+            contains ``pred_instances``, and the ``pred_instances`` normally
+            contains following keys.
+
+            - scores (Tensor): Classification scores of image, has a shape
+                (num_instance, )
+            - labels (Tensor): Predicted labels of 2D bboxes, has a shape
+                (num_instances, ).
+            - bboxes (Tensor): Contains a tensor with shape
+                (num_instances, 4).
+        """
+
+        assert (data_instances_2d is not None) or \
+               (data_instances_3d is not None),\
+            'please pass at least one type of data_samples'
+
+        if data_instances_2d is None:
+            data_instances_2d = [
+                InstanceData() for _ in range(len(data_instances_3d))
+            ]
+        if data_instances_3d is None:
+            data_instances_3d = [
+                InstanceData() for _ in range(len(data_instances_2d))
+            ]
+
+        for i, data_sample in enumerate(data_samples):
+            data_sample.pred_instances_3d = data_instances_3d[i]
+            data_sample.pred_instances = data_instances_2d[i]
+        return data_samples
+
+    def extract_feat(self, batch_inputs_dict: dict) -> Tuple[Tensor]:
+        """Extract features.
+
+        Args:
+            batch_inputs_dict (dict): Contains 'img' key
+                with image tensor with shape (N, C, H, W).
+
+        Returns:
+            tuple[Tensor]: Multi-level features that may have
+            different resolutions.
+        """
+        batch_imgs = batch_inputs_dict['imgs']
+        x = self.backbone(batch_imgs)
+        if self.with_neck:
+            x = self.neck(x)
+        return x
+
+    # TODO: Support test time augmentation
+    def aug_test(self, imgs, img_metas, rescale=False):
+        """Test function with test time augmentation."""
+        pass
diff --git a/mmdet3d/models/detectors/smoke_mono3d.py b/mmdet3d/models/detectors/smoke_mono3d.py
new file mode 100755
index 0000000..e1b04d4
--- /dev/null
+++ b/mmdet3d/models/detectors/smoke_mono3d.py
@@ -0,0 +1,43 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from mmdet3d.registry import MODELS
+from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig
+from .single_stage_mono3d import SingleStageMono3DDetector
+
+
+@MODELS.register_module()
+class SMOKEMono3D(SingleStageMono3DDetector):
+    r"""SMOKE `_ for monocular 3D object
+    detection.
+
+    Args:
+        backbone (:obj:`ConfigDict` or dict): The backbone config.
+        neck (:obj:`ConfigDict` or dict): The neck config.
+        bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
+        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
+            of SMOKE. Defaults to None.
+        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
+            of SMOKE. Defaults to None.
+        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
+            :class:`DetDataPreprocessor` to process the input data.
+            Defaults to None.
+        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
+            list[dict], optional): Initialization config dict.
+            Defaults to None.
+ """ + + def __init__(self, + backbone: ConfigType, + neck: ConfigType, + bbox_head: ConfigType, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) diff --git a/mmdet3d/models/detectors/ssd3dnet.py b/mmdet3d/models/detectors/ssd3dnet.py new file mode 100755 index 0000000..4bf85e9 --- /dev/null +++ b/mmdet3d/models/detectors/ssd3dnet.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet3d.registry import MODELS +from .votenet import VoteNet + + +@MODELS.register_module() +class SSD3DNet(VoteNet): + """3DSSDNet model. + + https://arxiv.org/abs/2002.10187.pdf + """ + + def __init__(self, + backbone, + bbox_head=None, + train_cfg=None, + test_cfg=None, + init_cfg=None, + **kwargs): + super(SSD3DNet, self).__init__( + backbone=backbone, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg, + **kwargs) diff --git a/mmdet3d/models/detectors/two_stage.py b/mmdet3d/models/detectors/two_stage.py new file mode 100755 index 0000000..8e003e7 --- /dev/null +++ b/mmdet3d/models/detectors/two_stage.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Union + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig +from ...structures.det3d_data_sample import SampleList +from .base import Base3DDetector + + +@MODELS.register_module() +class TwoStage3DDetector(Base3DDetector): + """Base class of two-stage 3D detector. + + It inherits original ``:class:Base3DDetector``. This class could serve as a + base class for all two-stage 3D detectors. 
+ """ + + def __init__( + self, + backbone: ConfigType, + neck: OptConfigType = None, + rpn_head: OptConfigType = None, + roi_head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None, + data_preprocessor: OptConfigType = None, + ) -> None: + super(TwoStage3DDetector, self).__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.backbone = MODELS.build(backbone) + + if neck is not None: + self.neck = MODELS.build(neck) + + if rpn_head is not None: + rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None + rpn_head_ = rpn_head.copy() + rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn) + rpn_head_num_classes = rpn_head_.get('num_classes', None) + if rpn_head_num_classes is None: + rpn_head_.update(num_classes=1) + self.rpn_head = MODELS.build(rpn_head_) + + if roi_head is not None: + # update train and test cfg here for now + rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None + roi_head.update(train_cfg=rcnn_train_cfg) + roi_head.update(test_cfg=test_cfg.rcnn) + self.roi_head = MODELS.build(roi_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + @property + def with_rpn(self) -> bool: + """bool: whether the detector has RPN""" + return hasattr(self, 'rpn_head') and self.rpn_head is not None + + @property + def with_roi_head(self) -> bool: + """bool: whether the detector has a RoI head""" + return hasattr(self, 'roi_head') and self.roi_head is not None + + def loss(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> Union[dict, list]: + """Calculate losses from a batch of inputs and data samples. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'imgs' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + dict: A dictionary of loss components. + """ + feats_dict = self.extract_feat(batch_inputs_dict) + + losses = dict() + + # RPN forward and loss + if self.with_rpn: + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + rpn_data_samples = copy.deepcopy(batch_data_samples) + + rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict( + feats_dict, + rpn_data_samples, + proposal_cfg=proposal_cfg, + **kwargs) + # avoid get same name with roi_head loss + keys = rpn_losses.keys() + for key in keys: + if 'loss' in key and 'rpn' not in key: + losses[f'rpn_{key}'] = rpn_losses[key] + else: + losses[key] = rpn_losses[key] + else: + # TODO: Not support currently, should have a check at Fast R-CNN + assert batch_data_samples[0].get('proposals', None) is not None + # use pre-defined proposals in InstanceData for the second stage + # to extract ROI features. + rpn_results_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + + roi_losses = self.roi_head.loss(feats_dict, rpn_results_list, + batch_data_samples, **kwargs) + losses.update(roi_losses) + + return losses + + def predict(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'imgs' keys. 
+ + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input samples. Each Det3DDataSample usually contain + 'pred_instances_3d'. And the ``pred_instances_3d`` usually + contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C) where C >=7. + """ + feats_dict = self.extract_feat(batch_inputs_dict) + + if self.with_rpn: + rpn_results_list = self.rpn_head.predict(feats_dict, + batch_data_samples) + + else: + rpn_results_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + + results_list = self.roi_head.predict(feats_dict, rpn_results_list, + batch_data_samples) + + # connvert to Det3DDataSample + results_list = self.add_pred_to_datasample(batch_data_samples, + results_list) + + return results_list + + def _forward(self, batch_inputs_dict: dict, batch_data_samples: SampleList, + **kwargs) -> tuple: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'img' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - imgs (torch.Tensor, optional): Image of each sample. + + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + tuple: A tuple of features from ``rpn_head`` and ``roi_head`` + forward. + """ + feats_dict = self.extract_feat(batch_inputs_dict) + rpn_outs = self.rpn_head.forward(feats_dict['neck_feats']) + + # If there are no pre-defined proposals, use RPN to get proposals + if batch_data_samples[0].get('proposals', None) is None: + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + rpn_results_list = self.rpn_head.predict_by_feat( + *rpn_outs, batch_input_metas=batch_input_metas) + else: + # TODO: Not checked currently. + rpn_results_list = [ + data_sample.proposals for data_sample in batch_data_samples + ] + + # roi_head + roi_outs = self.roi_head._forward(feats_dict, rpn_results_list) + return rpn_outs + roi_outs diff --git a/mmdet3d/models/detectors/votenet.py b/mmdet3d/models/detectors/votenet.py new file mode 100755 index 0000000..ae70889 --- /dev/null +++ b/mmdet3d/models/detectors/votenet.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Union + +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from ..test_time_augs import merge_aug_bboxes_3d +from .single_stage import SingleStage3DDetector + + +@MODELS.register_module() +class VoteNet(SingleStage3DDetector): + r"""`VoteNet `_ for 3D detection. + + Args: + backbone (dict): Config dict of detector's backbone. + bbox_head (dict, optional): Config dict of box head. Defaults to None. + train_cfg (dict, optional): Config dict of training hyper-parameters. + Defaults to None. 
+        test_cfg (dict, optional): Config dict of test hyper-parameters.
+            Defaults to None.
+        init_cfg (dict, optional): The config to control the
+            initialization. Defaults to None.
+        data_preprocessor (dict or ConfigDict, optional): The pre-process
+            config of :class:`BaseDataPreprocessor`. It usually includes,
+            ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
+    """
+
+    def __init__(self,
+                 backbone: dict,
+                 bbox_head: Optional[dict] = None,
+                 train_cfg: Optional[dict] = None,
+                 test_cfg: Optional[dict] = None,
+                 init_cfg: Optional[dict] = None,
+                 data_preprocessor: Optional[dict] = None,
+                 **kwargs):
+        super(VoteNet, self).__init__(
+            backbone=backbone,
+            bbox_head=bbox_head,
+            train_cfg=train_cfg,
+            test_cfg=test_cfg,
+            init_cfg=init_cfg,
+            data_preprocessor=data_preprocessor,
+            **kwargs)
+
+    def loss(self, batch_inputs_dict: Dict[str, Union[List, Tensor]],
+             batch_data_samples: List[Det3DDataSample],
+             **kwargs) -> List[Det3DDataSample]:
+        """
+        Args:
+            batch_inputs_dict (dict): The model input dict which include
+                'points' keys.
+
+                - points (list[torch.Tensor]): Point cloud of each sample.
+
+            batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
+                Samples. It usually includes information such as
+                `gt_instance_3d`.
+
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+        """
+        feat_dict = self.extract_feat(batch_inputs_dict)
+        points = batch_inputs_dict['points']
+        losses = self.bbox_head.loss(points, feat_dict, batch_data_samples,
+                                     **kwargs)
+        return losses
+
+    def predict(self, batch_inputs_dict: Dict[str, Optional[Tensor]],
+                batch_data_samples: List[Det3DDataSample],
+                **kwargs) -> List[Det3DDataSample]:
+        """Forward of testing.
+
+        Args:
+            batch_inputs_dict (dict): The model input dict which include
+                'points' keys.
+
+                - points (list[torch.Tensor]): Point cloud of each sample.
+            batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
+                Samples. It usually includes information such as
+                `gt_instance_3d`.
+
+        Returns:
+            list[:obj:`Det3DDataSample`]: Detection results of the
+            input sample. Each Det3DDataSample usually contains
+            'pred_instances_3d'. And the ``pred_instances_3d`` usually
+            contains following keys.
+
+            - scores_3d (Tensor): Classification scores, has a shape
+                (num_instances, )
+            - labels_3d (Tensor): Labels of bboxes, has a shape
+                (num_instances, ).
+            - bboxes_3d (:obj:`BaseInstance3DBoxes`): Prediction of bboxes,
+                contains a tensor with shape (num_instances, 7).
+        """
+        feats_dict = self.extract_feat(batch_inputs_dict)
+        points = batch_inputs_dict['points']
+        results_list = self.bbox_head.predict(points, feats_dict,
+                                              batch_data_samples, **kwargs)
+        data_3d_samples = self.add_pred_to_datasample(batch_data_samples,
+                                                      results_list)
+        return data_3d_samples
+
+    def aug_test(self, aug_inputs_list: List[dict],
+                 aug_data_samples: List[List[dict]], **kwargs):
+        """Test with augmentation.
+
+        The batch size is always 1 when doing the augmented test.
+
+        Args:
+            aug_inputs_list (List[dict]): The list indicates the same data
+                under different augmentations.
+            aug_data_samples (List[List[dict]]): The outer list
+                indicates different augmentations, and the inner
+                list indicates the samples in a batch.
+        """
+        num_augs = len(aug_inputs_list)
+        if num_augs == 1:
+            return self.predict(aug_inputs_list[0], aug_data_samples[0])
+
+        batch_size = len(aug_data_samples[0])
+        assert batch_size == 1
+        multi_aug_results = []
+        for aug_id in range(num_augs):
+            batch_inputs_dict = aug_inputs_list[aug_id]
+            batch_data_samples = aug_data_samples[aug_id]
+            feats_dict = self.extract_feat(batch_inputs_dict)
+            points = batch_inputs_dict['points']
+            results_list = self.bbox_head.predict(points, feats_dict,
+                                                  batch_data_samples, **kwargs)
+            multi_aug_results.append(results_list[0])
+        aug_input_metas_list = []
+        for aug_index in range(num_augs):
+            metainfo = aug_data_samples[aug_index][0].metainfo
+            aug_input_metas_list.append(metainfo)
+
+        aug_results_list = [item.to_dict() for item in multi_aug_results]
+        # after merging, bboxes will be rescaled to the original image size
+        merged_results_dict = merge_aug_bboxes_3d(aug_results_list,
+                                                  aug_input_metas_list,
+                                                  self.bbox_head.test_cfg)
+
+        merged_results = InstanceData(**merged_results_dict)
+        data_3d_samples = self.add_pred_to_datasample(batch_data_samples,
+                                                      [merged_results])
+        return data_3d_samples
diff --git a/mmdet3d/models/detectors/voxelnet.py b/mmdet3d/models/detectors/voxelnet.py
new file mode 100755
index 0000000..8f55926
--- /dev/null
+++ b/mmdet3d/models/detectors/voxelnet.py
@@ -0,0 +1,48 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Tuple
+
+from torch import Tensor
+
+from mmdet3d.registry import MODELS
+from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig
+from .single_stage import SingleStage3DDetector
+
+
+@MODELS.register_module()
+class VoxelNet(SingleStage3DDetector):
+    r"""`VoxelNet `_ for 3D detection."""
+
+    def __init__(self,
+                 voxel_encoder: ConfigType,
+                 middle_encoder: ConfigType,
+                 backbone: ConfigType,
+                 neck: OptConfigType = None,
+                 bbox_head: OptConfigType = None,
+                 train_cfg: OptConfigType = None,
+                 test_cfg: OptConfigType = None,
+                 data_preprocessor: OptConfigType = None,
+                 init_cfg: OptMultiConfig = None) -> None:
+        super().__init__(
+            backbone=backbone,
+            neck=neck,
+            bbox_head=bbox_head,
+            train_cfg=train_cfg,
+            test_cfg=test_cfg,
+            data_preprocessor=data_preprocessor,
+            init_cfg=init_cfg)
+        self.voxel_encoder = MODELS.build(voxel_encoder)
+        self.middle_encoder = MODELS.build(middle_encoder)
+
+    def extract_feat(self, batch_inputs_dict: dict) -> Tuple[Tensor]:
+        """Extract features from points."""
+        voxel_dict = batch_inputs_dict['voxels']
+        voxel_features = self.voxel_encoder(voxel_dict['voxels'],
+                                            voxel_dict['num_points'],
+                                            voxel_dict['coors'])
+        batch_size = voxel_dict['coors'][-1, 0].item() + 1
+        x = self.middle_encoder(voxel_features, voxel_dict['coors'],
+                                batch_size)
+        x = self.backbone(x)
+        if self.with_neck:
+            x = self.neck(x)
+        return x
diff --git a/mmdet3d/models/language_models/__init__.py b/mmdet3d/models/language_models/__init__.py
new file mode 100644
index 0000000..70f1a22
--- /dev/null
+++ b/mmdet3d/models/language_models/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .bert import BertModel
+
+__all__ = ['BertModel']
diff --git a/mmdet3d/models/language_models/bert.py b/mmdet3d/models/language_models/bert.py
new file mode 100644
index 0000000..461ddcc
--- /dev/null
+++ b/mmdet3d/models/language_models/bert.py
@@ -0,0 +1,233 @@
+# Copyright (c) OpenMMLab. All rights reserved.
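+"""BERT-based language encoder.
+
+Usage sketch (illustrative; requires ``transformers`` and a BERT checkpoint
+resolvable by the tokenizer path configured below):
+
+.. code-block:: python
+
+    from mmdet3d.registry import MODELS
+
+    text_encoder = MODELS.build(dict(type='BertModel', max_tokens=256))
+    out = text_encoder(['a car parked on the street'])
+    # out['embedded'] has shape (num_captions, num_tokens, hidden_size)
+"""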
+from collections import OrderedDict
+from typing import Sequence
+
+import torch
+from mmengine.model import BaseModel
+from torch import nn
+
+try:
+    from transformers import AutoTokenizer, BertConfig
+    from transformers import BertModel as HFBertModel
+except ImportError:
+    AutoTokenizer = None
+    BertConfig = None
+    HFBertModel = None
+
+# from mmdet.registry import MODELS
+from mmdet3d.registry import MODELS
+
+
+def generate_masks_with_special_tokens_and_transfer_map(
+        tokenized, special_tokens_list):
+    """Generate attention mask between each pair of special tokens.
+
+    Only token pairs in between two special tokens are attended to
+    and thus the attention mask for these pairs is positive.
+
+    Args:
+        tokenized (dict): The tokenized inputs, containing 'input_ids'
+            with shape [bs, num_token].
+        special_tokens_list (list): Ids of the special tokens.
+
+    Returns:
+        Tuple(Tensor, Tensor):
+        - attention_mask is the attention mask between tokens.
+          Only token pairs in between two special tokens are positive.
+          Shape: [bs, num_token, num_token].
+        - position_ids is the position id of tokens within each valid
+          sentence. The id starts from 0 whenever a special token is
+          encountered. Shape: [bs, num_token]
+    """
+    input_ids = tokenized['input_ids']
+    bs, num_token = input_ids.shape
+    # special_tokens_mask:
+    # bs, num_token. 1 for special tokens. 0 for normal tokens
+    special_tokens_mask = torch.zeros((bs, num_token),
+                                      device=input_ids.device).bool()
+
+    for special_token in special_tokens_list:
+        special_tokens_mask |= input_ids == special_token
+
+    # idxs: each row is a list of indices of special tokens
+    idxs = torch.nonzero(special_tokens_mask)
+
+    # generate attention mask and positional ids
+    attention_mask = (
+        torch.eye(num_token,
+                  device=input_ids.device).bool().unsqueeze(0).repeat(
+                      bs, 1, 1))
+    position_ids = torch.zeros((bs, num_token), device=input_ids.device)
+    previous_col = 0
+    for i in range(idxs.shape[0]):
+        row, col = idxs[i]
+        if (col == 0) or (col == num_token - 1):
+            attention_mask[row, col, col] = True
+            position_ids[row, col] = 0
+        else:
+            attention_mask[row, previous_col + 1:col + 1,
+                           previous_col + 1:col + 1] = True
+            position_ids[row, previous_col + 1:col + 1] = torch.arange(
+                0, col - previous_col, device=input_ids.device)
+        previous_col = col
+
+    return attention_mask, position_ids.to(torch.long)
+
+
+@MODELS.register_module()
+class BertModel(BaseModel):
+    """BERT model for language embedding only encoder.
+
+    Args:
+        name (str, optional): name of the pretrained BERT model from
+            HuggingFace. Defaults to bert-base-uncased.
+        max_tokens (int, optional): maximum number of tokens to be
+            used for BERT. Defaults to 256.
+        pad_to_max (bool, optional): whether to pad the tokens to max_tokens.
+            Defaults to True.
+        use_sub_sentence_represent (bool, optional): whether to use sub
+            sentence represent introduced in `Grounding DINO
+            `. Defaults to False.
+        special_tokens_list (list, optional): special tokens used to split
+            subsentence. It cannot be None when `use_sub_sentence_represent`
+            is True. Defaults to None.
+        add_pooling_layer (bool, optional): whether to add a pooling
+            layer in the BERT encoder. Defaults to False.
+        num_layers_of_embedded (int, optional): number of layers of
+            the embedded model. Defaults to 1.
+        use_checkpoint (bool, optional): whether to use gradient checkpointing.
+            Defaults to False.
+ """ + + def __init__(self, + name: str = 'bert-base-uncased', + max_tokens: int = 256, + pad_to_max: bool = True, + use_sub_sentence_represent: bool = False, + special_tokens_list: list = None, + add_pooling_layer: bool = False, + num_layers_of_embedded: int = 1, + use_checkpoint: bool = False, + **kwargs) -> None: + + super().__init__(**kwargs) + self.max_tokens = max_tokens + self.pad_to_max = pad_to_max + + if AutoTokenizer is None: + raise RuntimeError( + 'transformers is not installed, please install it by: ' + 'pip install transformers.') + text_encoder_type='./text' + self.tokenizer = AutoTokenizer.from_pretrained(text_encoder_type) + self.language_backbone = nn.Sequential( + OrderedDict([('body', + BertEncoder( + name, + add_pooling_layer=add_pooling_layer, + num_layers_of_embedded=num_layers_of_embedded, + use_checkpoint=use_checkpoint))])) + + self.use_sub_sentence_represent = use_sub_sentence_represent + if self.use_sub_sentence_represent: + assert special_tokens_list is not None, \ + 'special_tokens should not be None \ + if use_sub_sentence_represent is True' + + self.special_tokens = self.tokenizer.convert_tokens_to_ids( + special_tokens_list) + + def forward(self, captions: Sequence[str], **kwargs) -> dict: + """Forward function.""" + device = next(self.language_backbone.parameters()).device + tokenized = self.tokenizer.batch_encode_plus( + captions, + max_length=self.max_tokens, + padding='max_length' if self.pad_to_max else 'longest', + return_special_tokens_mask=True, + return_tensors='pt', + truncation=True).to(device) + input_ids = tokenized.input_ids + if self.use_sub_sentence_represent: + attention_mask, position_ids = \ + generate_masks_with_special_tokens_and_transfer_map( + tokenized, self.special_tokens) + token_type_ids = tokenized['token_type_ids'] + + else: + attention_mask = tokenized.attention_mask + position_ids = None + token_type_ids = None + + tokenizer_input = { + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'position_ids': position_ids, + 'token_type_ids': token_type_ids + } + language_dict_features = self.language_backbone(tokenizer_input) + if self.use_sub_sentence_represent: + language_dict_features['position_ids'] = position_ids + language_dict_features[ + 'text_token_mask'] = tokenized.attention_mask.bool() + return language_dict_features + + +class BertEncoder(nn.Module): + """BERT encoder for language embedding. + + Args: + name (str): name of the pretrained BERT model from HuggingFace. + Defaults to bert-base-uncased. + add_pooling_layer (bool): whether to add a pooling layer. + num_layers_of_embedded (int): number of layers of the embedded model. + Defaults to 1. + use_checkpoint (bool): whether to use gradient checkpointing. + Defaults to False. 
+ """ + + def __init__(self, + name: str, + add_pooling_layer: bool = False, + num_layers_of_embedded: int = 1, + use_checkpoint: bool = False): + super().__init__() + if BertConfig is None: + raise RuntimeError( + 'transformers is not installed, please install it by: ' + 'pip install transformers.') + text_encoder_type='./text' + config = BertConfig.from_pretrained(text_encoder_type) + config.gradient_checkpointing = use_checkpoint + # only encoder + self.model = HFBertModel.from_pretrained( + text_encoder_type, add_pooling_layer=add_pooling_layer, config=config) + self.language_dim = config.hidden_size + self.num_layers_of_embedded = num_layers_of_embedded + + def forward(self, x) -> dict: + mask = x['attention_mask'] + + outputs = self.model( + input_ids=x['input_ids'], + attention_mask=mask, + position_ids=x['position_ids'], + token_type_ids=x['token_type_ids'], + output_hidden_states=True, + ) + + # outputs has 13 layers, 1 input layer and 12 hidden layers + encoded_layers = outputs.hidden_states[1:] + features = torch.stack(encoded_layers[-self.num_layers_of_embedded:], + 1).mean(1) + # language embedding has shape [len(phrase), seq_len, language_dim] + features = features / self.num_layers_of_embedded + if mask.dim() == 2: + embedded = features * mask.unsqueeze(-1).float() + else: + embedded = features + + results = { + 'embedded': embedded, + 'masks': mask, + 'hidden': encoded_layers[-1] + } + return results diff --git a/mmdet3d/models/layers/__init__.py b/mmdet3d/models/layers/__init__.py new file mode 100755 index 0000000..6b5fb2a --- /dev/null +++ b/mmdet3d/models/layers/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .box3d_nms import (aligned_3d_nms, box3d_multiclass_nms, circle_nms, + nms_bev, nms_normal_bev) +from .dgcnn_modules import DGCNNFAModule, DGCNNFPModule, DGCNNGFModule +from .edge_fusion_module import EdgeFusionModule +from .fusion_layers import (PointFusion, VoteFusion, apply_3d_transformation, + bbox_2d_transform, coord_2d_transform) +from .mlp import MLP +from .norm import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm2d +from .paconv import PAConv, PAConvCUDA +from .pointnet_modules import (PAConvCUDASAModule, PAConvCUDASAModuleMSG, + PAConvSAModule, PAConvSAModuleMSG, + PointFPModule, PointSAModule, PointSAModuleMSG, + build_sa_module) +from .sparse_block import (SparseBasicBlock, SparseBottleneck, + make_sparse_convmodule) +from .torchsparse_block import TorchSparseConvModule, TorchSparseResidualBlock +from .transformer import GroupFree3DMHA +from .vote_module import VoteModule + +__all__ = [ + 'VoteModule', 'GroupFree3DMHA', 'EdgeFusionModule', 'DGCNNFAModule', + 'DGCNNFPModule', 'DGCNNGFModule', 'NaiveSyncBatchNorm1d', + 'NaiveSyncBatchNorm2d', 'PAConv', 'PAConvCUDA', 'SparseBasicBlock', + 'SparseBottleneck', 'make_sparse_convmodule', 'PointFusion', 'VoteFusion', + 'apply_3d_transformation', 'bbox_2d_transform', 'coord_2d_transform', + 'MLP', 'box3d_multiclass_nms', 'aligned_3d_nms', 'circle_nms', 'nms_bev', + 'nms_normal_bev', 'build_sa_module', 'PointSAModuleMSG', 'PointSAModule', + 'PointFPModule', 'PAConvSAModule', 'PAConvSAModuleMSG', + 'PAConvCUDASAModule', 'PAConvCUDASAModuleMSG', 'TorchSparseConvModule', + 'TorchSparseResidualBlock' +] diff --git a/mmdet3d/models/layers/box3d_nms.py b/mmdet3d/models/layers/box3d_nms.py new file mode 100755 index 0000000..d3d2f78 --- /dev/null +++ b/mmdet3d/models/layers/box3d_nms.py @@ -0,0 +1,295 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
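+"""NMS utilities for 3D boxes.
+
+Usage sketch for the multi-class BEV NMS defined below (illustrative; the
+tensors and thresholds are placeholders):
+
+.. code-block:: python
+
+    from mmengine import ConfigDict
+
+    nms_cfg = ConfigDict(use_rotate_nms=True, nms_thr=0.01)
+    bboxes, scores, labels = box3d_multiclass_nms(
+        mlvl_bboxes,          # (N, 7) 3D boxes
+        mlvl_bboxes_for_nms,  # (N, 5) BEV boxes [x1, y1, x2, y2, ry]
+        mlvl_scores,          # (N, C + 1) class scores
+        score_thr=0.05,
+        max_num=500,
+        cfg=nms_cfg)
+"""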
+from typing import Optional, Tuple + +import numba +import numpy as np +import torch +from mmcv.ops import nms, nms_rotated +from torch import Tensor + + +def box3d_multiclass_nms( + mlvl_bboxes: Tensor, + mlvl_bboxes_for_nms: Tensor, + mlvl_scores: Tensor, + score_thr: float, + max_num: int, + cfg: dict, + mlvl_dir_scores: Optional[Tensor] = None, + mlvl_attr_scores: Optional[Tensor] = None, + mlvl_bboxes2d: Optional[Tensor] = None) -> Tuple[Tensor]: + """Multi-class NMS for 3D boxes. The IoU used for NMS is defined as the 2D + IoU between BEV boxes. + + Args: + mlvl_bboxes (Tensor): Multi-level boxes with shape (N, M). + M is the dimensions of boxes. + mlvl_bboxes_for_nms (Tensor): Multi-level boxes with shape (N, 5) + ([x1, y1, x2, y2, ry]). N is the number of boxes. + The coordinate system of the BEV boxes is counterclockwise. + mlvl_scores (Tensor): Multi-level boxes with shape (N, C + 1). + N is the number of boxes. C is the number of classes. + score_thr (float): Score threshold to filter boxes with low confidence. + max_num (int): Maximum number of boxes will be kept. + cfg (dict): Configuration dict of NMS. + mlvl_dir_scores (Tensor, optional): Multi-level scores of direction + classifier. Defaults to None. + mlvl_attr_scores (Tensor, optional): Multi-level scores of attribute + classifier. Defaults to None. + mlvl_bboxes2d (Tensor, optional): Multi-level 2D bounding boxes. + Defaults to None. + + Returns: + Tuple[Tensor]: Return results after nms, including 3D bounding boxes, + scores, labels, direction scores, attribute scores (optional) and + 2D bounding boxes (optional). + """ + # do multi class nms + # the fg class id range: [0, num_classes-1] + num_classes = mlvl_scores.shape[1] - 1 + bboxes = [] + scores = [] + labels = [] + dir_scores = [] + attr_scores = [] + bboxes2d = [] + for i in range(0, num_classes): + # get bboxes and scores of this class + cls_inds = mlvl_scores[:, i] > score_thr + if not cls_inds.any(): + continue + + _scores = mlvl_scores[cls_inds, i] + _bboxes_for_nms = mlvl_bboxes_for_nms[cls_inds, :] + + if cfg.use_rotate_nms: + nms_func = nms_bev + else: + nms_func = nms_normal_bev + + selected = nms_func(_bboxes_for_nms, _scores, cfg.nms_thr) + _mlvl_bboxes = mlvl_bboxes[cls_inds, :] + bboxes.append(_mlvl_bboxes[selected]) + scores.append(_scores[selected]) + cls_label = mlvl_bboxes.new_full((len(selected), ), + i, + dtype=torch.long) + labels.append(cls_label) + + if mlvl_dir_scores is not None: + _mlvl_dir_scores = mlvl_dir_scores[cls_inds] + dir_scores.append(_mlvl_dir_scores[selected]) + if mlvl_attr_scores is not None: + _mlvl_attr_scores = mlvl_attr_scores[cls_inds] + attr_scores.append(_mlvl_attr_scores[selected]) + if mlvl_bboxes2d is not None: + _mlvl_bboxes2d = mlvl_bboxes2d[cls_inds] + bboxes2d.append(_mlvl_bboxes2d[selected]) + + if bboxes: + bboxes = torch.cat(bboxes, dim=0) + scores = torch.cat(scores, dim=0) + labels = torch.cat(labels, dim=0) + if mlvl_dir_scores is not None: + dir_scores = torch.cat(dir_scores, dim=0) + if mlvl_attr_scores is not None: + attr_scores = torch.cat(attr_scores, dim=0) + if mlvl_bboxes2d is not None: + bboxes2d = torch.cat(bboxes2d, dim=0) + if bboxes.shape[0] > max_num: + _, inds = scores.sort(descending=True) + inds = inds[:max_num] + bboxes = bboxes[inds, :] + labels = labels[inds] + scores = scores[inds] + if mlvl_dir_scores is not None: + dir_scores = dir_scores[inds] + if mlvl_attr_scores is not None: + attr_scores = attr_scores[inds] + if mlvl_bboxes2d is not None: + bboxes2d = bboxes2d[inds] + else: + 
bboxes = mlvl_scores.new_zeros((0, mlvl_bboxes.size(-1))) + scores = mlvl_scores.new_zeros((0, )) + labels = mlvl_scores.new_zeros((0, ), dtype=torch.long) + if mlvl_dir_scores is not None: + dir_scores = mlvl_scores.new_zeros((0, )) + if mlvl_attr_scores is not None: + attr_scores = mlvl_scores.new_zeros((0, )) + if mlvl_bboxes2d is not None: + bboxes2d = mlvl_scores.new_zeros((0, 4)) + + results = (bboxes, scores, labels) + + if mlvl_dir_scores is not None: + results = results + (dir_scores, ) + if mlvl_attr_scores is not None: + results = results + (attr_scores, ) + if mlvl_bboxes2d is not None: + results = results + (bboxes2d, ) + + return results + + +def aligned_3d_nms(boxes: Tensor, scores: Tensor, classes: Tensor, + thresh: float) -> Tensor: + """3D NMS for aligned boxes. + + Args: + boxes (Tensor): Aligned box with shape [N, 6]. + scores (Tensor): Scores of each box. + classes (Tensor): Class of each box. + thresh (float): IoU threshold for nms. + + Returns: + Tensor: Indices of selected boxes. + """ + x1 = boxes[:, 0] + y1 = boxes[:, 1] + z1 = boxes[:, 2] + x2 = boxes[:, 3] + y2 = boxes[:, 4] + z2 = boxes[:, 5] + area = (x2 - x1) * (y2 - y1) * (z2 - z1) + zero = boxes.new_zeros(1, ) + + score_sorted = torch.argsort(scores) + pick = [] + while (score_sorted.shape[0] != 0): + last = score_sorted.shape[0] + i = score_sorted[-1] + pick.append(i) + + xx1 = torch.max(x1[i], x1[score_sorted[:last - 1]]) + yy1 = torch.max(y1[i], y1[score_sorted[:last - 1]]) + zz1 = torch.max(z1[i], z1[score_sorted[:last - 1]]) + xx2 = torch.min(x2[i], x2[score_sorted[:last - 1]]) + yy2 = torch.min(y2[i], y2[score_sorted[:last - 1]]) + zz2 = torch.min(z2[i], z2[score_sorted[:last - 1]]) + classes1 = classes[i] + classes2 = classes[score_sorted[:last - 1]] + inter_l = torch.max(zero, xx2 - xx1) + inter_w = torch.max(zero, yy2 - yy1) + inter_h = torch.max(zero, zz2 - zz1) + + inter = inter_l * inter_w * inter_h + iou = inter / (area[i] + area[score_sorted[:last - 1]] - inter) + iou = iou * (classes1 == classes2).float() + score_sorted = score_sorted[torch.nonzero( + iou <= thresh, as_tuple=False).flatten()] + + indices = boxes.new_tensor(pick, dtype=torch.long) + return indices + + +@numba.jit(nopython=True) +def circle_nms(dets: Tensor, thresh: float, post_max_size: int = 83) -> Tensor: + """Circular NMS. + + An object is only counted as positive if no other center with a higher + confidence exists within a radius r using a bird-eye view distance metric. + + Args: + dets (Tensor): Detection results with the shape of [N, 3]. + thresh (float): Value of threshold. + post_max_size (int): Max number of prediction to be kept. + Defaults to 83. + + Returns: + Tensor: Indexes of the detections to be kept. 
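+
+    Example:
+        An illustrative call, not taken from the original file. Note that the
+        ``numba``-jitted function operates on NumPy arrays despite the Tensor
+        type hints, and ``thresh`` is compared against the squared distance
+        between box centers:
+
+        >>> import numpy as np
+        >>> dets = np.array([[0.0, 0.0, 0.9],
+        ...                  [0.5, 0.0, 0.8],
+        ...                  [5.0, 5.0, 0.7]], dtype=np.float32)
+        >>> keep = circle_nms(dets, thresh=1.0)  # keeps indices 0 and 2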
+ """ + x1 = dets[:, 0] + y1 = dets[:, 1] + scores = dets[:, 2] + order = scores.argsort()[::-1].astype(np.int32) # highest->lowest + ndets = dets.shape[0] + suppressed = np.zeros((ndets), dtype=np.int32) + keep = [] + for _i in range(ndets): + i = order[_i] # start with highest score box + if suppressed[ + i] == 1: # if any box have enough iou with this, remove it + continue + keep.append(i) + for _j in range(_i + 1, ndets): + j = order[_j] + if suppressed[j] == 1: + continue + # calculate center distance between i and j box + dist = (x1[i] - x1[j])**2 + (y1[i] - y1[j])**2 + + # ovr = inter / areas[j] + if dist <= thresh: + suppressed[j] = 1 + + if post_max_size < len(keep): + return keep[:post_max_size] + + return keep + + +# This function duplicates functionality of mmcv.ops.iou_3d.nms_bev +# from mmcv<=1.5, but using cuda ops from mmcv.ops.nms.nms_rotated. +# Nms api will be unified in mmdetection3d one day. +def nms_bev(boxes: Tensor, + scores: Tensor, + thresh: float, + pre_max_size: Optional[int] = None, + post_max_size: Optional[int] = None) -> Tensor: + """NMS function GPU implementation (for BEV boxes). The overlap of two + boxes for IoU calculation is defined as the exact overlapping area of the + two boxes. In this function, one can also set ``pre_max_size`` and + ``post_max_size``. + + Args: + boxes (Tensor): Input boxes with the shape of [N, 5] + ([x1, y1, x2, y2, ry]). + scores (Tensor): Scores of boxes with the shape of [N]. + thresh (float): Overlap threshold of NMS. + pre_max_size (int, optional): Max size of boxes before NMS. + Defaults to None. + post_max_size (int, optional): Max size of boxes after NMS. + Defaults to None. + + Returns: + Tensor: Indexes after NMS. + """ + assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]' + order = scores.sort(0, descending=True)[1] + if pre_max_size is not None: + order = order[:pre_max_size] + boxes = boxes[order].contiguous() + scores = scores[order] + + # xyxyr -> back to xywhr + # note: better skip this step before nms_bev call in the future + boxes = torch.stack( + ((boxes[:, 0] + boxes[:, 2]) / 2, (boxes[:, 1] + boxes[:, 3]) / 2, + boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1], boxes[:, 4]), + dim=-1) + + keep = nms_rotated(boxes, scores, thresh)[1] + keep = order[keep] + if post_max_size is not None: + keep = keep[:post_max_size] + return keep + + +# This function duplicates functionality of mmcv.ops.iou_3d.nms_normal_bev +# from mmcv<=1.5, but using cuda ops from mmcv.ops.nms.nms. +# Nms api will be unified in mmdetection3d one day. +def nms_normal_bev(boxes: Tensor, scores: Tensor, thresh: float) -> Tensor: + """Normal NMS function GPU implementation (for BEV boxes). The overlap of + two boxes for IoU calculation is defined as the exact overlapping area of + the two boxes WITH their yaw angle set to 0. + + Args: + boxes (Tensor): Input boxes with shape (N, 5). + scores (Tensor): Scores of predicted boxes with shape (N). + thresh (float): Overlap threshold of NMS. + + Returns: + Tensor: Remaining indices with scores in descending order. + """ + assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]' + return nms(boxes[:, :-1], scores, thresh)[1] diff --git a/mmdet3d/models/layers/dgcnn_modules/__init__.py b/mmdet3d/models/layers/dgcnn_modules/__init__.py new file mode 100755 index 0000000..67beb09 --- /dev/null +++ b/mmdet3d/models/layers/dgcnn_modules/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .dgcnn_fa_module import DGCNNFAModule +from .dgcnn_fp_module import DGCNNFPModule +from .dgcnn_gf_module import DGCNNGFModule + +__all__ = ['DGCNNFAModule', 'DGCNNFPModule', 'DGCNNGFModule'] diff --git a/mmdet3d/models/layers/dgcnn_modules/dgcnn_fa_module.py b/mmdet3d/models/layers/dgcnn_modules/dgcnn_fa_module.py new file mode 100755 index 0000000..81420b1 --- /dev/null +++ b/mmdet3d/models/layers/dgcnn_modules/dgcnn_fa_module.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import Tensor +from torch import nn as nn + +from mmdet3d.utils import ConfigType, OptMultiConfig + + +class DGCNNFAModule(BaseModule): + """Point feature aggregation module used in DGCNN. + + Aggregate all the features of points. + + Args: + mlp_channels (List[int]): List of mlp channels. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN1d'). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='ReLU'). + init_cfg (:obj:`ConfigDict` or dict or List[:obj:`Contigdict` or dict], + optional): Initialization config dict. Defaults to None. + """ + + def __init__(self, + mlp_channels: List[int], + norm_cfg: ConfigType = dict(type='BN1d'), + act_cfg: ConfigType = dict(type='ReLU'), + init_cfg: OptMultiConfig = None) -> None: + super(DGCNNFAModule, self).__init__(init_cfg=init_cfg) + self.mlps = nn.Sequential() + for i in range(len(mlp_channels) - 1): + self.mlps.add_module( + f'layer{i}', + ConvModule( + mlp_channels[i], + mlp_channels[i + 1], + kernel_size=(1, ), + stride=(1, ), + conv_cfg=dict(type='Conv1d'), + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, points: List[Tensor]) -> Tensor: + """forward. + + Args: + points (List[Tensor]): Tensor of the features to be aggregated. + + Returns: + Tensor: (B, N, M) M = mlp[-1]. Tensor of the output points. + """ + + if len(points) > 1: + new_points = torch.cat(points[1:], dim=-1) + new_points = new_points.transpose(1, 2).contiguous() # (B, C, N) + new_points_copy = new_points + + new_points = self.mlps(new_points) + + new_fa_points = new_points.max(dim=-1, keepdim=True)[0] + new_fa_points = new_fa_points.repeat(1, 1, new_points.shape[-1]) + + new_points = torch.cat([new_fa_points, new_points_copy], dim=1) + new_points = new_points.transpose(1, 2).contiguous() + else: + new_points = points + + return new_points diff --git a/mmdet3d/models/layers/dgcnn_modules/dgcnn_fp_module.py b/mmdet3d/models/layers/dgcnn_modules/dgcnn_fp_module.py new file mode 100755 index 0000000..1e25a5d --- /dev/null +++ b/mmdet3d/models/layers/dgcnn_modules/dgcnn_fp_module.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import Tensor +from torch import nn as nn + +from mmdet3d.utils import ConfigType, OptMultiConfig + + +class DGCNNFPModule(BaseModule): + """Point feature propagation module used in DGCNN. + + Propagate the features from one set to another. + + Args: + mlp_channels (List[int]): List of mlp channels. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN1d'). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='ReLU'). 
+ init_cfg (:obj:`ConfigDict` or dict or List[:obj:`Contigdict` or dict], + optional): Initialization config dict. Defaults to None. + """ + + def __init__(self, + mlp_channels: List[int], + norm_cfg: ConfigType = dict(type='BN1d'), + act_cfg: ConfigType = dict(type='ReLU'), + init_cfg: OptMultiConfig = None) -> None: + super(DGCNNFPModule, self).__init__(init_cfg=init_cfg) + self.mlps = nn.Sequential() + for i in range(len(mlp_channels) - 1): + self.mlps.add_module( + f'layer{i}', + ConvModule( + mlp_channels[i], + mlp_channels[i + 1], + kernel_size=(1, ), + stride=(1, ), + conv_cfg=dict(type='Conv1d'), + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, points: Tensor) -> Tensor: + """Forward. + + Args: + points (Tensor): (B, N, C) Tensor of the input points. + + Returns: + Tensor: (B, N, M) M = mlp[-1]. Tensor of the new points. + """ + + if points is not None: + new_points = points.transpose(1, 2).contiguous() # (B, C, N) + new_points = self.mlps(new_points) + new_points = new_points.transpose(1, 2).contiguous() + else: + new_points = points + + return new_points diff --git a/mmdet3d/models/layers/dgcnn_modules/dgcnn_gf_module.py b/mmdet3d/models/layers/dgcnn_modules/dgcnn_gf_module.py new file mode 100755 index 0000000..fc92669 --- /dev/null +++ b/mmdet3d/models/layers/dgcnn_modules/dgcnn_gf_module.py @@ -0,0 +1,222 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +from mmcv.cnn import ConvModule +from mmcv.ops.group_points import GroupAll, QueryAndGroup, grouping_operation +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.utils import ConfigType + + +class BaseDGCNNGFModule(nn.Module): + """Base module for point graph feature module used in DGCNN. + + Args: + radii (List[float]): List of radius in each knn or ball query. + sample_nums (List[int]): Number of samples in each knn or ball query. + mlp_channels (List[List[int]]): Specify of the dgcnn before the global + pooling for each graph feature module. + knn_modes (List[str]): Type of KNN method, valid mode + ['F-KNN', 'D-KNN']. Defaults to ['F-KNN']. + dilated_group (bool): Whether to use dilated ball query. + Defaults to False. + use_xyz (bool): Whether to use xyz as point features. + Defaults to True. + pool_mode (str): Type of pooling method. Defaults to 'max'. + normalize_xyz (bool): If ball query, whether to normalize local XYZ + with radius. Defaults to False. + grouper_return_grouped_xyz (bool): Whether to return grouped xyz in + `QueryAndGroup`. Defaults to False. + grouper_return_grouped_idx (bool): Whether to return grouped idx in + `QueryAndGroup`. Defaults to False. + """ + + def __init__(self, + radii: List[float], + sample_nums: List[int], + mlp_channels: List[List[int]], + knn_modes: List[str] = ['F-KNN'], + dilated_group: bool = False, + use_xyz: bool = True, + pool_mode: str = 'max', + normalize_xyz: bool = False, + grouper_return_grouped_xyz: bool = False, + grouper_return_grouped_idx: bool = False) -> None: + super(BaseDGCNNGFModule, self).__init__() + + assert len(sample_nums) == len( + mlp_channels + ), 'Num_samples and mlp_channels should have the same length.' + assert pool_mode in ['max', 'avg' + ], "Pool_mode should be one of ['max', 'avg']." + assert isinstance(knn_modes, list) or isinstance( + knn_modes, tuple), 'The type of knn_modes should be list or tuple.' 
+ + if isinstance(mlp_channels, tuple): + mlp_channels = list(map(list, mlp_channels)) + self.mlp_channels = mlp_channels + + self.pool_mode = pool_mode + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + self.knn_modes = knn_modes + + for i in range(len(sample_nums)): + sample_num = sample_nums[i] + if sample_num is not None: + if self.knn_modes[i] == 'D-KNN': + grouper = QueryAndGroup( + radii[i], + sample_num, + use_xyz=use_xyz, + normalize_xyz=normalize_xyz, + return_grouped_xyz=grouper_return_grouped_xyz, + return_grouped_idx=True) + else: + grouper = QueryAndGroup( + radii[i], + sample_num, + use_xyz=use_xyz, + normalize_xyz=normalize_xyz, + return_grouped_xyz=grouper_return_grouped_xyz, + return_grouped_idx=grouper_return_grouped_idx) + else: + grouper = GroupAll(use_xyz) + self.groupers.append(grouper) + + def _pool_features(self, features: Tensor) -> Tensor: + """Perform feature aggregation using pooling operation. + + Args: + features (Tensor): (B, C, N, K) Features of locally grouped + points before pooling. + + Returns: + Tensor: (B, C, N) Pooled features aggregating local information. + """ + if self.pool_mode == 'max': + # (B, C, N, 1) + new_features = F.max_pool2d( + features, kernel_size=[1, features.size(3)]) + elif self.pool_mode == 'avg': + # (B, C, N, 1) + new_features = F.avg_pool2d( + features, kernel_size=[1, features.size(3)]) + else: + raise NotImplementedError + + return new_features.squeeze(-1).contiguous() + + def forward(self, points: Tensor) -> Tensor: + """forward. + + Args: + points (Tensor): (B, N, C) Input points. + + Returns: + Tensor: (B, N, C1) New points generated from each graph + feature module. + """ + new_points_list = [points] + + for i in range(len(self.groupers)): + + new_points = new_points_list[i] + new_points_trans = new_points.transpose( + 1, 2).contiguous() # (B, C, N) + + if self.knn_modes[i] == 'D-KNN': + # (B, N, C) -> (B, N, K) + idx = self.groupers[i](new_points[..., -3:].contiguous(), + new_points[..., -3:].contiguous())[-1] + + grouped_results = grouping_operation( + new_points_trans, idx) # (B, C, N) -> (B, C, N, K) + grouped_results -= new_points_trans.unsqueeze(-1) + else: + grouped_results = self.groupers[i]( + new_points, new_points) # (B, N, C) -> (B, C, N, K) + + new_points = new_points_trans.unsqueeze(-1).repeat( + 1, 1, 1, grouped_results.shape[-1]) + new_points = torch.cat([grouped_results, new_points], dim=1) + + # (B, mlp[-1], N, K) + new_points = self.mlps[i](new_points) + + # (B, mlp[-1], N) + new_points = self._pool_features(new_points) + new_points = new_points.transpose(1, 2).contiguous() + new_points_list.append(new_points) + + return new_points + + +class DGCNNGFModule(BaseDGCNNGFModule): + """Point graph feature module used in DGCNN. + + Args: + mlp_channels (List[int]): Specify of the dgcnn before the global + pooling for each graph feature module. + num_sample (int, optional): Number of samples in each knn or ball + query. Defaults to None. + knn_mode (str): Type of KNN method, valid mode ['F-KNN', 'D-KNN']. + Defaults to 'F-KNN'. + radius (float, optional): Radius to group with. Defaults to None. + dilated_group (bool): Whether to use dilated ball query. + Defaults to False. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN2d'). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='ReLU'). + use_xyz (bool): Whether to use xyz as point features. Defaults to True. 
+ pool_mode (str): Type of pooling method. Defaults to 'max'. + normalize_xyz (bool): If ball query, whether to normalize local XYZ + with radius. Defaults to False. + bias (bool or str): If specified as `auto`, it will be decided by + `norm_cfg`. `bias` will be set as True if `norm_cfg` is None, + otherwise False. Defaults to 'auto'. + """ + + def __init__(self, + mlp_channels: List[int], + num_sample: Optional[int] = None, + knn_mode: str = 'F-KNN', + radius: Optional[float] = None, + dilated_group: bool = False, + norm_cfg: ConfigType = dict(type='BN2d'), + act_cfg: ConfigType = dict(type='ReLU'), + use_xyz: bool = True, + pool_mode: str = 'max', + normalize_xyz: bool = False, + bias: Union[bool, str] = 'auto') -> None: + super(DGCNNGFModule, self).__init__( + mlp_channels=[mlp_channels], + sample_nums=[num_sample], + knn_modes=[knn_mode], + radii=[radius], + use_xyz=use_xyz, + pool_mode=pool_mode, + normalize_xyz=normalize_xyz, + dilated_group=dilated_group) + + for i in range(len(self.mlp_channels)): + mlp_channel = self.mlp_channels[i] + + mlp = nn.Sequential() + for i in range(len(mlp_channel) - 1): + mlp.add_module( + f'layer{i}', + ConvModule( + mlp_channel[i], + mlp_channel[i + 1], + kernel_size=(1, 1), + stride=(1, 1), + conv_cfg=dict(type='Conv2d'), + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=bias)) + self.mlps.append(mlp) diff --git a/mmdet3d/models/layers/edge_fusion_module.py b/mmdet3d/models/layers/edge_fusion_module.py new file mode 100755 index 0000000..fdac05e --- /dev/null +++ b/mmdet3d/models/layers/edge_fusion_module.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.utils import ConfigType + + +class EdgeFusionModule(BaseModule): + """Edge Fusion Module for feature map. + + Args: + out_channels (int): The number of output channels. + feat_channels (int): The number of channels in feature map + during edge feature fusion. + kernel_size (int): Kernel size of convolution. Defaults to 3. + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='ReLU'). + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN1d'). + """ + + def __init__( + self, + out_channels: int, + feat_channels: int, + kernel_size: int = 3, + act_cfg: ConfigType = dict(type='ReLU'), + norm_cfg: ConfigType = dict(type='BN1d') + ) -> None: + super(EdgeFusionModule, self).__init__() + self.edge_convs = nn.Sequential( + ConvModule( + feat_channels, + feat_channels, + kernel_size=kernel_size, + padding=kernel_size // 2, + conv_cfg=dict(type='Conv1d'), + norm_cfg=norm_cfg, + act_cfg=act_cfg), + nn.Conv1d(feat_channels, out_channels, kernel_size=1)) + self.feat_channels = feat_channels + + def forward(self, features: Tensor, fused_features: Tensor, + edge_indices: Tensor, edge_lens: List[int], output_h: int, + output_w: int) -> Tensor: + """Forward pass. + + Args: + features (Tensor): Different representative features for fusion. + fused_features (Tensor): Different representative features + to be fused. + edge_indices (Tensor): Batch image edge indices. + edge_lens (List[int]): List of edge length of each image. + output_h (int): Height of output feature map. + output_w (int): Width of output feature map. + + Returns: + Tensor: Fused feature maps. 
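+
+        Example:
+            An illustrative sketch (not from the original patch) with dummy
+            tensors; the edge indices are ``(x, y)`` pixel locations on the
+            ``output_w`` x ``output_h`` feature map:
+
+            >>> import torch
+            >>> module = EdgeFusionModule(out_channels=8, feat_channels=16)
+            >>> feats = torch.rand(1, 16, 4, 4)
+            >>> fused = torch.rand(1, 8, 4, 4)
+            >>> edge_indices = torch.tensor([[[0, 0], [1, 0], [2, 1]]])
+            >>> out = module(feats, fused, edge_indices,
+            ...              edge_lens=[3], output_h=4, output_w=4)
+            >>> out.shape
+            torch.Size([1, 8, 4, 4])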
+ """ + batch_size = features.shape[0] + # normalize + grid_edge_indices = edge_indices.view(batch_size, -1, 1, 2).float() + grid_edge_indices[..., 0] = \ + grid_edge_indices[..., 0] / (output_w - 1) * 2 - 1 + grid_edge_indices[..., 1] = \ + grid_edge_indices[..., 1] / (output_h - 1) * 2 - 1 + + # apply edge fusion + edge_features = F.grid_sample( + features, grid_edge_indices, align_corners=True).squeeze(-1) + edge_output = self.edge_convs(edge_features) + + for k in range(batch_size): + edge_indice_k = edge_indices[k, :edge_lens[k]] + fused_features[k, :, edge_indice_k[:, 1], + edge_indice_k[:, 0]] += edge_output[ + k, :, :edge_lens[k]] + + return fused_features diff --git a/mmdet3d/models/layers/fusion_layers/__init__.py b/mmdet3d/models/layers/fusion_layers/__init__.py new file mode 100755 index 0000000..6df4741 --- /dev/null +++ b/mmdet3d/models/layers/fusion_layers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .coord_transform import (apply_3d_transformation, bbox_2d_transform, + coord_2d_transform) +from .point_fusion import PointFusion +from .vote_fusion import VoteFusion + +__all__ = [ + 'PointFusion', 'VoteFusion', 'apply_3d_transformation', + 'bbox_2d_transform', 'coord_2d_transform' +] diff --git a/mmdet3d/models/layers/fusion_layers/coord_transform.py b/mmdet3d/models/layers/fusion_layers/coord_transform.py new file mode 100755 index 0000000..5bcb6cb --- /dev/null +++ b/mmdet3d/models/layers/fusion_layers/coord_transform.py @@ -0,0 +1,224 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial +from typing import Tuple + +import torch +from torch import Tensor + +from mmdet3d.structures.points import get_points_type + + +def apply_3d_transformation(pcd: Tensor, + coord_type: str, + img_meta: dict, + reverse: bool = False) -> Tensor: + """Apply transformation to input point cloud. + + Args: + pcd (Tensor): The point cloud to be transformed. + coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'. + img_meta(dict): Meta info regarding data transformation. + reverse (bool): Reversed transformation or not. Defaults to False. + + Note: + The elements in img_meta['transformation_3d_flow']: + + - "T" stands for translation; + - "S" stands for scale; + - "R" stands for rotation; + - "HF" stands for horizontal flip; + - "VF" stands for vertical flip. + + Returns: + Tensor: The transformed point cloud. + """ + + dtype = pcd.dtype + device = pcd.device + + pcd_rotate_mat = ( + torch.tensor(img_meta['pcd_rotation'], dtype=dtype, device=device) + if 'pcd_rotation' in img_meta else torch.eye( + 3, dtype=dtype, device=device)) + + pcd_scale_factor = ( + img_meta['pcd_scale_factor'] if 'pcd_scale_factor' in img_meta else 1.) 
+ + pcd_trans_factor = ( + torch.tensor(img_meta['pcd_trans'], dtype=dtype, device=device) + if 'pcd_trans' in img_meta else torch.zeros( + (3), dtype=dtype, device=device)) + + pcd_horizontal_flip = img_meta[ + 'pcd_horizontal_flip'] if 'pcd_horizontal_flip' in \ + img_meta else False + + pcd_vertical_flip = img_meta[ + 'pcd_vertical_flip'] if 'pcd_vertical_flip' in \ + img_meta else False + + flow = img_meta['transformation_3d_flow'] \ + if 'transformation_3d_flow' in img_meta else [] + + pcd = pcd.clone() # prevent inplace modification + pcd = get_points_type(coord_type)(pcd) + + horizontal_flip_func = partial(pcd.flip, bev_direction='horizontal') \ + if pcd_horizontal_flip else lambda: None + vertical_flip_func = partial(pcd.flip, bev_direction='vertical') \ + if pcd_vertical_flip else lambda: None + if reverse: + scale_func = partial(pcd.scale, scale_factor=1.0 / pcd_scale_factor) + translate_func = partial(pcd.translate, trans_vector=-pcd_trans_factor) + # pcd_rotate_mat @ pcd_rotate_mat.inverse() is not + # exactly an identity matrix + # use angle to create the inverse rot matrix neither. + rotate_func = partial(pcd.rotate, rotation=pcd_rotate_mat.inverse()) + + # reverse the pipeline + flow = flow[::-1] + else: + scale_func = partial(pcd.scale, scale_factor=pcd_scale_factor) + translate_func = partial(pcd.translate, trans_vector=pcd_trans_factor) + rotate_func = partial(pcd.rotate, rotation=pcd_rotate_mat) + + flow_mapping = { + 'T': translate_func, + 'S': scale_func, + 'R': rotate_func, + 'HF': horizontal_flip_func, + 'VF': vertical_flip_func + } + for op in flow: + assert op in flow_mapping, f'This 3D data '\ + f'transformation op ({op}) is not supported' + func = flow_mapping[op] + func() + + return pcd.coord + + +def extract_2d_info( + img_meta: dict, + tensor: Tensor) -> Tuple[int, int, int, int, Tensor, bool, Tensor]: + """Extract image augmentation information from img_meta. + + Args: + img_meta (dict): Meta info regarding data transformation. + tensor (Tensor): Input tensor used to create new ones. + + Returns: + Tuple[int, int, int, int, torch.Tensor, bool, torch.Tensor]: + The extracted information. + """ + img_shape = img_meta['img_shape'] + ori_shape = img_meta['ori_shape'] + img_h, img_w = img_shape + ori_h, ori_w = ori_shape + + img_scale_factor = ( + tensor.new_tensor(img_meta['scale_factor'][:2]) + if 'scale_factor' in img_meta else tensor.new_tensor([1.0, 1.0])) + img_flip = img_meta['flip'] if 'flip' in img_meta else False + img_crop_offset = ( + tensor.new_tensor(img_meta['img_crop_offset']) + if 'img_crop_offset' in img_meta else tensor.new_tensor([0.0, 0.0])) + + return (img_h, img_w, ori_h, ori_w, img_scale_factor, img_flip, + img_crop_offset) + + +def bbox_2d_transform(img_meta: dict, bbox_2d: Tensor, + ori2new: bool) -> Tensor: + """Transform 2d bbox according to img_meta. + + Args: + img_meta (dict): Meta info regarding data transformation. + bbox_2d (Tensor): Shape (..., >4) The input 2d bboxes to transform. + ori2new (bool): Origin img coord system to new or not. + + Returns: + Tensor: The transformed 2d bboxes. 
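+
+    Example:
+        A hypothetical call for illustration; ``img_meta`` only needs the
+        keys actually read by :func:`extract_2d_info`, and the boxes are
+        ``[x1, y1, x2, y2, score, label]`` rows:
+
+        >>> import torch
+        >>> img_meta = dict(img_shape=(600, 800), ori_shape=(300, 400),
+        ...                 scale_factor=(2.0, 2.0, 2.0, 2.0), flip=False)
+        >>> bboxes = torch.tensor([[10., 20., 50., 60., 0.9, 1.0]])
+        >>> new_bboxes = bbox_2d_transform(img_meta, bboxes, ori2new=True)
+        >>> new_bboxes[0, :4].tolist()  # scaled by the 2x factor
+        [20.0, 40.0, 100.0, 120.0]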
+ """ + + img_h, img_w, ori_h, ori_w, img_scale_factor, img_flip, \ + img_crop_offset = extract_2d_info(img_meta, bbox_2d) + + bbox_2d_new = bbox_2d.clone() + + if ori2new: + bbox_2d_new[:, 0] = bbox_2d_new[:, 0] * img_scale_factor[0] + bbox_2d_new[:, 2] = bbox_2d_new[:, 2] * img_scale_factor[0] + bbox_2d_new[:, 1] = bbox_2d_new[:, 1] * img_scale_factor[1] + bbox_2d_new[:, 3] = bbox_2d_new[:, 3] * img_scale_factor[1] + + bbox_2d_new[:, 0] = bbox_2d_new[:, 0] + img_crop_offset[0] + bbox_2d_new[:, 2] = bbox_2d_new[:, 2] + img_crop_offset[0] + bbox_2d_new[:, 1] = bbox_2d_new[:, 1] + img_crop_offset[1] + bbox_2d_new[:, 3] = bbox_2d_new[:, 3] + img_crop_offset[1] + + if img_flip: + bbox_2d_r = img_w - bbox_2d_new[:, 0] + bbox_2d_l = img_w - bbox_2d_new[:, 2] + bbox_2d_new[:, 0] = bbox_2d_l + bbox_2d_new[:, 2] = bbox_2d_r + else: + if img_flip: + bbox_2d_r = img_w - bbox_2d_new[:, 0] + bbox_2d_l = img_w - bbox_2d_new[:, 2] + bbox_2d_new[:, 0] = bbox_2d_l + bbox_2d_new[:, 2] = bbox_2d_r + + bbox_2d_new[:, 0] = bbox_2d_new[:, 0] - img_crop_offset[0] + bbox_2d_new[:, 2] = bbox_2d_new[:, 2] - img_crop_offset[0] + bbox_2d_new[:, 1] = bbox_2d_new[:, 1] - img_crop_offset[1] + bbox_2d_new[:, 3] = bbox_2d_new[:, 3] - img_crop_offset[1] + + bbox_2d_new[:, 0] = bbox_2d_new[:, 0] / img_scale_factor[0] + bbox_2d_new[:, 2] = bbox_2d_new[:, 2] / img_scale_factor[0] + bbox_2d_new[:, 1] = bbox_2d_new[:, 1] / img_scale_factor[1] + bbox_2d_new[:, 3] = bbox_2d_new[:, 3] / img_scale_factor[1] + + return bbox_2d_new + + +def coord_2d_transform(img_meta: dict, coord_2d: Tensor, + ori2new: bool) -> Tensor: + """Transform 2d pixel coordinates according to img_meta. + + Args: + img_meta (dict): Meta info regarding data transformation. + coord_2d (Tensor): Shape (..., 2) The input 2d coords to transform. + ori2new (bool): Origin img coord system to new or not. + + Returns: + Tensor: The transformed 2d coordinates. + """ + + img_h, img_w, ori_h, ori_w, img_scale_factor, img_flip, \ + img_crop_offset = extract_2d_info(img_meta, coord_2d) + + coord_2d_new = coord_2d.clone() + + if ori2new: + # TODO here we assume this order of transformation + coord_2d_new[..., 0] = coord_2d_new[..., 0] * img_scale_factor[0] + coord_2d_new[..., 1] = coord_2d_new[..., 1] * img_scale_factor[1] + + coord_2d_new[..., 0] += img_crop_offset[0] + coord_2d_new[..., 1] += img_crop_offset[1] + + # flip uv coordinates and bbox + if img_flip: + coord_2d_new[..., 0] = img_w - coord_2d_new[..., 0] + else: + if img_flip: + coord_2d_new[..., 0] = img_w - coord_2d_new[..., 0] + + coord_2d_new[..., 0] -= img_crop_offset[0] + coord_2d_new[..., 1] -= img_crop_offset[1] + + coord_2d_new[..., 0] = coord_2d_new[..., 0] / img_scale_factor[0] + coord_2d_new[..., 1] = coord_2d_new[..., 1] / img_scale_factor[1] + + return coord_2d_new diff --git a/mmdet3d/models/layers/fusion_layers/point_fusion.py b/mmdet3d/models/layers/fusion_layers/point_fusion.py new file mode 100755 index 0000000..170f2ae --- /dev/null +++ b/mmdet3d/models/layers/fusion_layers/point_fusion.py @@ -0,0 +1,418 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple, Union + +import torch +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.structures.bbox_3d import (get_proj_mat_by_coord_type, + points_cam2img, points_img2cam) +from mmdet3d.utils import OptConfigType, OptMultiConfig +from . 
import apply_3d_transformation + + +def point_sample(img_meta: dict, + img_features: Tensor, + points: Tensor, + proj_mat: Tensor, + coord_type: str, + img_scale_factor: Tensor, + img_crop_offset: Tensor, + img_flip: bool, + img_pad_shape: Tuple[int], + img_shape: Tuple[int], + aligned: bool = True, + padding_mode: str = 'zeros', + align_corners: bool = True, + valid_flag: bool = False) -> Tensor: + """Obtain image features using points. + + Args: + img_meta (dict): Meta info. + img_features (Tensor): 1 x C x H x W image features. + points (Tensor): Nx3 point cloud in LiDAR coordinates. + proj_mat (Tensor): 4x4 transformation matrix. + coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'. + img_scale_factor (Tensor): Scale factor with shape of + (w_scale, h_scale). + img_crop_offset (Tensor): Crop offset used to crop image during + data augmentation with shape of (w_offset, h_offset). + img_flip (bool): Whether the image is flipped. + img_pad_shape (Tuple[int]): Int tuple indicates the h & w after + padding. This is necessary to obtain features in feature map. + img_shape (Tuple[int]): Int tuple indicates the h & w before padding + after scaling. This is necessary for flipping coordinates. + aligned (bool): Whether to use bilinear interpolation when + sampling image features for each point. Defaults to True. + padding_mode (str): Padding mode when padding values for + features of out-of-image points. Defaults to 'zeros'. + align_corners (bool): Whether to align corners when + sampling image features for each point. Defaults to True. + valid_flag (bool): Whether to filter out the points that outside + the image and with depth smaller than 0. Defaults to False. + + Returns: + Tensor: NxC image features sampled by point coordinates. + """ + + # apply transformation based on info in img_meta + points = apply_3d_transformation( + points, coord_type, img_meta, reverse=True) + + # project points to image coordinate + if valid_flag: + proj_pts = points_cam2img(points, proj_mat, with_depth=True) + pts_2d = proj_pts[..., :2] + depths = proj_pts[..., 2] + else: + pts_2d = points_cam2img(points, proj_mat) + + # img transformation: scale -> crop -> flip + # the image is resized by img_scale_factor + img_coors = pts_2d[:, 0:2] * img_scale_factor # Nx2 + img_coors -= img_crop_offset + + # grid sample, the valid grid range should be in [-1,1] + coor_x, coor_y = torch.split(img_coors, 1, dim=1) # each is Nx1 + + if img_flip: + # by default we take it as horizontal flip + # use img_shape before padding for flip + ori_h, ori_w = img_shape + coor_x = ori_w - coor_x + + h, w = img_pad_shape + norm_coor_y = coor_y / h * 2 - 1 + norm_coor_x = coor_x / w * 2 - 1 + grid = torch.cat([norm_coor_x, norm_coor_y], + dim=1).unsqueeze(0).unsqueeze(0) # Nx2 -> 1x1xNx2 + + # align_corner=True provides higher performance + mode = 'bilinear' if aligned else 'nearest' + point_features = F.grid_sample( + img_features, + grid, + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners) # 1xCx1xN feats + + if valid_flag: + # (N, ) + valid = (coor_x.squeeze() < w) & (coor_x.squeeze() > 0) & ( + coor_y.squeeze() < h) & (coor_y.squeeze() > 0) & ( + depths > 0) + valid_features = point_features.squeeze().t() + valid_features[~valid] = 0 + return valid_features, valid # (N, C), (N,) + + return point_features.squeeze().t() + + +@MODELS.register_module() +class PointFusion(BaseModule): + """Fuse image features from multi-scale features. + + Args: + img_channels (List[int] or int): Channels of image features. 
+ It could be a list if the input is multi-scale image features. + pts_channels (int): Channels of point features + mid_channels (int): Channels of middle layers + out_channels (int): Channels of output fused features + img_levels (List[int] or int): Number of image levels. Defaults to 3. + coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'. Defaults to 'LIDAR'. + conv_cfg (:obj:`ConfigDict` or dict): Config dict for convolution + layers of middle layers. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layers of middle layers. Defaults to None. + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to None. + init_cfg (:obj:`ConfigDict` or dict or List[:obj:`Contigdict` or dict], + optional): Initialization config dict. Defaults to None. + activate_out (bool): Whether to apply relu activation to output + features. Defaults to True. + fuse_out (bool): Whether to apply conv layer to the fused features. + Defaults to False. + dropout_ratio (int or float): Dropout ratio of image features to + prevent overfitting. Defaults to 0. + aligned (bool): Whether to apply aligned feature fusion. + Defaults to True. + align_corners (bool): Whether to align corner when sampling features + according to points. Defaults to True. + padding_mode (str): Mode used to pad the features of points that do not + have corresponding image features. Defaults to 'zeros'. + lateral_conv (bool): Whether to apply lateral convs to image features. + Defaults to True. + """ + + def __init__(self, + img_channels: Union[List[int], int], + pts_channels: int, + mid_channels: int, + out_channels: int, + img_levels: Union[List[int], int] = 3, + coord_type: str = 'LIDAR', + conv_cfg: OptConfigType = None, + norm_cfg: OptConfigType = None, + act_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None, + activate_out: bool = True, + fuse_out: bool = False, + dropout_ratio: Union[int, float] = 0, + aligned: bool = True, + align_corners: bool = True, + padding_mode: str = 'zeros', + lateral_conv: bool = True) -> None: + super(PointFusion, self).__init__(init_cfg=init_cfg) + if isinstance(img_levels, int): + img_levels = [img_levels] + if isinstance(img_channels, int): + img_channels = [img_channels] * len(img_levels) + assert isinstance(img_levels, list) + assert isinstance(img_channels, list) + assert len(img_channels) == len(img_levels) + + self.img_levels = img_levels + self.coord_type = coord_type + self.act_cfg = act_cfg + self.activate_out = activate_out + self.fuse_out = fuse_out + self.dropout_ratio = dropout_ratio + self.img_channels = img_channels + self.aligned = aligned + self.align_corners = align_corners + self.padding_mode = padding_mode + + self.lateral_convs = None + if lateral_conv: + self.lateral_convs = nn.ModuleList() + for i in range(len(img_channels)): + l_conv = ConvModule( + img_channels[i], + mid_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=self.act_cfg, + inplace=False) + self.lateral_convs.append(l_conv) + self.img_transform = nn.Sequential( + nn.Linear(mid_channels * len(img_channels), out_channels), + nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01), + ) + else: + self.img_transform = nn.Sequential( + nn.Linear(sum(img_channels), out_channels), + nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01), + ) + self.pts_transform = nn.Sequential( + nn.Linear(pts_channels, out_channels), + nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01), + ) + + if self.fuse_out: + self.fuse_conv = 
nn.Sequential( + nn.Linear(mid_channels, out_channels), + # For pts the BN is initialized differently by default + # TODO: check whether this is necessary + nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01), + nn.ReLU(inplace=False)) + + if init_cfg is None: + self.init_cfg = [ + dict(type='Xavier', layer='Conv2d', distribution='uniform'), + dict(type='Xavier', layer='Linear', distribution='uniform') + ] + + def forward(self, img_feats: List[Tensor], pts: List[Tensor], + pts_feats: Tensor, img_metas: List[dict]) -> Tensor: + """Forward function. + + Args: + img_feats (List[Tensor]): Image features. + pts: (List[Tensor]): A batch of points with shape N x 3. + pts_feats (Tensor): A tensor consist of point features of the + total batch. + img_metas (List[dict]): Meta information of images. + + Returns: + Tensor: Fused features of each point. + """ + img_pts = self.obtain_mlvl_feats(img_feats, pts, img_metas) + img_pre_fuse = self.img_transform(img_pts) + if self.training and self.dropout_ratio > 0: + img_pre_fuse = F.dropout(img_pre_fuse, self.dropout_ratio) + pts_pre_fuse = self.pts_transform(pts_feats) + + fuse_out = img_pre_fuse + pts_pre_fuse + if self.activate_out: + fuse_out = F.relu(fuse_out) + if self.fuse_out: + fuse_out = self.fuse_conv(fuse_out) + + return fuse_out + + def obtain_mlvl_feats(self, img_feats: List[Tensor], pts: List[Tensor], + img_metas: List[dict]) -> Tensor: + """Obtain multi-level features for each point. + + Args: + img_feats (List[Tensor]): Multi-scale image features produced + by image backbone in shape (N, C, H, W). + pts (List[Tensor]): Points of each sample. + img_metas (List[dict]): Meta information for each sample. + + Returns: + Tensor: Corresponding image features of each point. + """ + if self.lateral_convs is not None: + img_ins = [ + lateral_conv(img_feats[i]) + for i, lateral_conv in zip(self.img_levels, self.lateral_convs) + ] + else: + img_ins = img_feats + img_feats_per_point = [] + # Sample multi-level features + for i in range(len(img_metas)): + mlvl_img_feats = [] + for level in range(len(self.img_levels)): + mlvl_img_feats.append( + self.sample_single(img_ins[level][i:i + 1], pts[i][:, :3], + img_metas[i])) + mlvl_img_feats = torch.cat(mlvl_img_feats, dim=-1) + img_feats_per_point.append(mlvl_img_feats) + + img_pts = torch.cat(img_feats_per_point, dim=0) + return img_pts + + def sample_single(self, img_feats: Tensor, pts: Tensor, + img_meta: dict) -> Tensor: + """Sample features from single level image feature map. + + Args: + img_feats (Tensor): Image feature map in shape (1, C, H, W). + pts (Tensor): Points of a single sample. + img_meta (dict): Meta information of the single sample. + + Returns: + Tensor: Single level image features of each point. 
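+
+        Example:
+            A rough sketch with dummy data, added for illustration; the
+            identity ``lidar2img`` matrix is a placeholder rather than a real
+            calibration, so points falling outside the image simply receive
+            zero-padded features:
+
+            >>> import torch
+            >>> fusion = PointFusion(img_channels=64, pts_channels=128,
+            ...                      mid_channels=64, out_channels=128,
+            ...                      img_levels=[0])
+            >>> img_meta = dict(lidar2img=torch.eye(4),
+            ...                 input_shape=(32, 32), img_shape=(32, 32))
+            >>> pts = torch.rand(50, 3) + 1  # keep depths positive
+            >>> feats = fusion.sample_single(torch.rand(1, 64, 32, 32),
+            ...                              pts, img_meta)
+            >>> feats.shape  # one feature vector per point
+            torch.Size([50, 64])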
+ """ + # TODO: image transformation also extracted + img_scale_factor = ( + pts.new_tensor(img_meta['scale_factor'][:2]) + if 'scale_factor' in img_meta.keys() else 1) + img_flip = img_meta['flip'] if 'flip' in img_meta.keys() else False + img_crop_offset = ( + pts.new_tensor(img_meta['img_crop_offset']) + if 'img_crop_offset' in img_meta.keys() else 0) + proj_mat = get_proj_mat_by_coord_type(img_meta, self.coord_type) + img_pts = point_sample( + img_meta=img_meta, + img_features=img_feats, + points=pts, + proj_mat=pts.new_tensor(proj_mat), + coord_type=self.coord_type, + img_scale_factor=img_scale_factor, + img_crop_offset=img_crop_offset, + img_flip=img_flip, + img_pad_shape=img_meta['input_shape'][:2], + img_shape=img_meta['img_shape'][:2], + aligned=self.aligned, + padding_mode=self.padding_mode, + align_corners=self.align_corners, + ) + return img_pts + + +def voxel_sample(voxel_features: Tensor, + voxel_range: List[float], + voxel_size: List[float], + depth_samples: Tensor, + proj_mat: Tensor, + downsample_factor: int, + img_scale_factor: Tensor, + img_crop_offset: Tensor, + img_flip: bool, + img_pad_shape: Tuple[int], + img_shape: Tuple[int], + aligned: bool = True, + padding_mode: str = 'zeros', + align_corners: bool = True) -> Tensor: + """Obtain image features using points. + + Args: + voxel_features (Tensor): 1 x C x Nx x Ny x Nz voxel features. + voxel_range (List[float]): The range of voxel features. + voxel_size (List[float]): The voxel size of voxel features. + depth_samples (Tensor): N depth samples in LiDAR coordinates. + proj_mat (Tensor): ORIGINAL LiDAR2img projection matrix for N views. + downsample_factor (int): The downsample factor in rescaling. + img_scale_factor (Tensor): Scale factor with shape of + (w_scale, h_scale). + img_crop_offset (Tensor): Crop offset used to crop image during + data augmentation with shape of (w_offset, h_offset). + img_flip (bool): Whether the image is flipped. + img_pad_shape (Tuple[int]): Int tuple indicates the h & w after + padding. This is necessary to obtain features in feature map. + img_shape (Tuple[int]): Int tuple indicates the h & w before padding + after scaling. This is necessary for flipping coordinates. + aligned (bool): Whether to use bilinear interpolation when + sampling image features for each point. Defaults to True. + padding_mode (str): Padding mode when padding values for + features of out-of-image points. Defaults to 'zeros'. + align_corners (bool): Whether to align corners when + sampling image features for each point. Defaults to True. + + Returns: + Tensor: 1xCxDxHxW frustum features sampled from voxel features. 
+ """ + # construct frustum grid + device = voxel_features.device + h, w = img_pad_shape + h_out = round(h / downsample_factor) + w_out = round(w / downsample_factor) + ws = (torch.linspace(0, w_out - 1, w_out) * downsample_factor).to(device) + hs = (torch.linspace(0, h_out - 1, h_out) * downsample_factor).to(device) + depths = depth_samples[::downsample_factor] + num_depths = len(depths) + ds_3d, ys_3d, xs_3d = torch.meshgrid(depths, hs, ws) + # grid: (D, H_out, W_out, 3) -> (D*H_out*W_out, 3) + grid = torch.stack([xs_3d, ys_3d, ds_3d], dim=-1).view(-1, 3) + # recover the coordinates in the canonical space + # reverse order of augmentations: flip -> crop -> scale + if img_flip: + # by default we take it as horizontal flip + # use img_shape before padding for flip + ori_h, ori_w = img_shape + grid[:, 0] = ori_w - grid[:, 0] + grid[:, :2] += img_crop_offset + grid[:, :2] /= img_scale_factor + # grid3d: (D*H_out*W_out, 3) in LiDAR coordinate system + grid3d = points_img2cam(grid, proj_mat) + # convert the 3D point coordinates to voxel coordinates + voxel_range = torch.tensor(voxel_range).to(device).view(1, 6) + voxel_size = torch.tensor(voxel_size).to(device).view(1, 3) + # suppose the voxel grid is generated with AlignedAnchorGenerator + # -0.5 given each grid is located at the center of the grid + # TODO: study whether here needs -0.5 + grid3d = (grid3d - voxel_range[:, :3]) / voxel_size - 0.5 + grid_size = (voxel_range[:, 3:] - voxel_range[:, :3]) / voxel_size + # normalize grid3d to (-1, 1) + grid3d = grid3d / grid_size * 2 - 1 + # (x, y, z) -> (z, y, x) for grid_sampling + grid3d = grid3d.view(1, num_depths, h_out, w_out, 3)[..., [2, 1, 0]] + # align_corner=True provides higher performance + mode = 'bilinear' if aligned else 'nearest' + frustum_features = F.grid_sample( + voxel_features, + grid3d, + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners) # 1xCxDxHxW feats + + return frustum_features diff --git a/mmdet3d/models/layers/fusion_layers/vote_fusion.py b/mmdet3d/models/layers/fusion_layers/vote_fusion.py new file mode 100755 index 0000000..e9b4084 --- /dev/null +++ b/mmdet3d/models/layers/fusion_layers/vote_fusion.py @@ -0,0 +1,207 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +import torch +from torch import Tensor +from torch import nn as nn + +from mmdet3d.registry import MODELS +from mmdet3d.structures import points_cam2img +from . import apply_3d_transformation, bbox_2d_transform, coord_2d_transform + +EPS = 1e-6 + + +@MODELS.register_module() +class VoteFusion(nn.Module): + """Fuse 2d features from 3d seeds. + + Args: + num_classes (int): Number of classes. + max_imvote_per_pixel (int): Max number of imvotes. + """ + + def __init__(self, + num_classes: int = 10, + max_imvote_per_pixel: int = 3) -> None: + super(VoteFusion, self).__init__() + self.num_classes = num_classes + self.max_imvote_per_pixel = max_imvote_per_pixel + + def forward(self, imgs: List[Tensor], bboxes_2d_rescaled: List[Tensor], + seeds_3d_depth: List[Tensor], + img_metas: List[dict]) -> Tuple[Tensor]: + """Forward function. + + Args: + imgs (List[Tensor]): Image features. + bboxes_2d_rescaled (List[Tensor]): 2D bboxes. + seeds_3d_depth (List[Tensor]): 3D seeds. + img_metas (List[dict]): Meta information of images. + + Returns: + Tuple[Tensor]: + + - img_features: Concatenated cues of each point. + - masks: Validity mask of each feature. 
+ """ + img_features = [] + masks = [] + for i, data in enumerate( + zip(imgs, bboxes_2d_rescaled, seeds_3d_depth, img_metas)): + img, bbox_2d_rescaled, seed_3d_depth, img_meta = data + bbox_num = bbox_2d_rescaled.shape[0] + seed_num = seed_3d_depth.shape[0] + + img_shape = img_meta['img_shape'] + # first reverse the data transformations + xyz_depth = apply_3d_transformation( + seed_3d_depth, 'DEPTH', img_meta, reverse=True) + + # project points from depth to image + depth2img = xyz_depth.new_tensor(img_meta['depth2img']) + uvz_origin = points_cam2img(xyz_depth, depth2img, True) + z_cam = uvz_origin[..., 2] + uv_origin = (uvz_origin[..., :2] - 1).round() + + # rescale 2d coordinates and bboxes + uv_rescaled = coord_2d_transform(img_meta, uv_origin, True) + bbox_2d_origin = bbox_2d_transform(img_meta, bbox_2d_rescaled, + False) + + if bbox_num == 0: + imvote_num = seed_num * self.max_imvote_per_pixel + + # use zero features + two_cues = torch.zeros((15, imvote_num), + device=seed_3d_depth.device) + mask_zero = torch.zeros( + imvote_num - seed_num, device=seed_3d_depth.device).bool() + mask_one = torch.ones( + seed_num, device=seed_3d_depth.device).bool() + mask = torch.cat([mask_one, mask_zero], dim=0) + else: + # expand bboxes and seeds + bbox_expanded = bbox_2d_origin.view(1, bbox_num, -1).expand( + seed_num, -1, -1) + seed_2d_expanded = uv_origin.view(seed_num, 1, + -1).expand(-1, bbox_num, -1) + seed_2d_expanded_x, seed_2d_expanded_y = \ + seed_2d_expanded.split(1, dim=-1) + + bbox_expanded_l, bbox_expanded_t, bbox_expanded_r, \ + bbox_expanded_b, bbox_expanded_conf, bbox_expanded_cls = \ + bbox_expanded.split(1, dim=-1) + bbox_expanded_midx = (bbox_expanded_l + bbox_expanded_r) / 2 + bbox_expanded_midy = (bbox_expanded_t + bbox_expanded_b) / 2 + + seed_2d_in_bbox_x = (seed_2d_expanded_x > bbox_expanded_l) * \ + (seed_2d_expanded_x < bbox_expanded_r) + seed_2d_in_bbox_y = (seed_2d_expanded_y > bbox_expanded_t) * \ + (seed_2d_expanded_y < bbox_expanded_b) + seed_2d_in_bbox = seed_2d_in_bbox_x * seed_2d_in_bbox_y + + # semantic cues, dim=class_num + sem_cue = torch.zeros_like(bbox_expanded_conf).expand( + -1, -1, self.num_classes) + sem_cue = sem_cue.scatter(-1, bbox_expanded_cls.long(), + bbox_expanded_conf) + + # bbox center - uv + delta_u = bbox_expanded_midx - seed_2d_expanded_x + delta_v = bbox_expanded_midy - seed_2d_expanded_y + + seed_3d_expanded = seed_3d_depth.view(seed_num, 1, -1).expand( + -1, bbox_num, -1) + + z_cam = z_cam.view(seed_num, 1, 1).expand(-1, bbox_num, -1) + imvote = torch.cat( + [delta_u, delta_v, + torch.zeros_like(delta_v)], dim=-1).view(-1, 3) + imvote = imvote * z_cam.reshape(-1, 1) + imvote = imvote @ torch.inverse(depth2img.t()) + + # apply transformation to lifted imvotes + imvote = apply_3d_transformation( + imvote, 'DEPTH', img_meta, reverse=False) + + seed_3d_expanded = seed_3d_expanded.reshape(imvote.shape) + + # ray angle + ray_angle = seed_3d_expanded + imvote + ray_angle /= torch.sqrt(torch.sum(ray_angle**2, -1) + + EPS).unsqueeze(-1) + + # imvote lifted to 3d + xz = ray_angle[:, [0, 2]] / (ray_angle[:, [1]] + EPS) \ + * seed_3d_expanded[:, [1]] - seed_3d_expanded[:, [0, 2]] + + # geometric cues, dim=5 + geo_cue = torch.cat([xz, ray_angle], + dim=-1).view(seed_num, -1, 5) + + two_cues = torch.cat([geo_cue, sem_cue], dim=-1) + # mask to 0 if seed not in bbox + two_cues = two_cues * seed_2d_in_bbox.float() + + feature_size = two_cues.shape[-1] + # if bbox number is too small, append zeros + if bbox_num < self.max_imvote_per_pixel: + append_num = 
self.max_imvote_per_pixel - bbox_num + append_zeros = torch.zeros( + (seed_num, append_num, 1), + device=seed_2d_in_bbox.device).bool() + seed_2d_in_bbox = torch.cat( + [seed_2d_in_bbox, append_zeros], dim=1) + append_zeros = torch.zeros( + (seed_num, append_num, feature_size), + device=two_cues.device) + two_cues = torch.cat([two_cues, append_zeros], dim=1) + append_zeros = torch.zeros((seed_num, append_num, 1), + device=two_cues.device) + bbox_expanded_conf = torch.cat( + [bbox_expanded_conf, append_zeros], dim=1) + + # sort the valid seed-bbox pair according to confidence + pair_score = seed_2d_in_bbox.float() + bbox_expanded_conf + # and find the largests + mask, indices = pair_score.topk( + self.max_imvote_per_pixel, + dim=1, + largest=True, + sorted=True) + + indices_img = indices.expand(-1, -1, feature_size) + two_cues = two_cues.gather(dim=1, index=indices_img) + two_cues = two_cues.transpose(1, 0) + two_cues = two_cues.reshape(-1, feature_size).transpose( + 1, 0).contiguous() + + # since conf is ~ (0, 1), floor gives us validity + mask = mask.floor().int() + mask = mask.transpose(1, 0).reshape(-1).bool() + + # clear the padding + img = img[:, :img_shape[0], :img_shape[1]] + img_flatten = img.reshape(3, -1).float() + img_flatten /= 255. + + # take the normalized pixel value as texture cue + uv_rescaled[:, 0] = torch.clamp(uv_rescaled[:, 0].round(), 0, + img_shape[1] - 1) + uv_rescaled[:, 1] = torch.clamp(uv_rescaled[:, 1].round(), 0, + img_shape[0] - 1) + uv_flatten = uv_rescaled[:, 1].round() * \ + img_shape[1] + uv_rescaled[:, 0].round() + uv_expanded = uv_flatten.unsqueeze(0).expand(3, -1).long() + txt_cue = torch.gather(img_flatten, dim=-1, index=uv_expanded) + txt_cue = txt_cue.unsqueeze(1).expand(-1, + self.max_imvote_per_pixel, + -1).reshape(3, -1) + + # append texture cue + img_feature = torch.cat([two_cues, txt_cue], dim=0) + img_features.append(img_feature) + masks.append(mask) + + return torch.stack(img_features, 0), torch.stack(masks, 0) diff --git a/mmdet3d/models/layers/mlp.py b/mmdet3d/models/layers/mlp.py new file mode 100755 index 0000000..837d1f1 --- /dev/null +++ b/mmdet3d/models/layers/mlp.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import Tensor +from torch import nn as nn + +from mmdet3d.utils import ConfigType, OptMultiConfig + + +class MLP(BaseModule): + """A simple MLP module. + + Pass features (B, C, N) through an MLP. + + Args: + in_channels (int): Number of channels of input features. + Defaults to 18. + conv_channels (Tuple[int]): Out channels of the convolution. + Defaults to (256, 256). + conv_cfg (:obj:`ConfigDict` or dict): Config dict for convolution + layer. Defaults to dict(type='Conv1d'). + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN1d'). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='ReLU'). + init_cfg (:obj:`ConfigDict` or dict or List[:obj:`Contigdict` or dict], + optional): Initialization config dict. Defaults to None. 
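+
+    Example:
+        An illustrative sketch, not part of the original file; note that the
+        constructor argument is ``in_channel`` (singular):
+
+        >>> import torch
+        >>> mlp = MLP(in_channel=18, conv_channels=(64, 64))
+        >>> feats = torch.rand(2, 18, 100)
+        >>> mlp(feats).shape
+        torch.Size([2, 64, 100])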
+ """ + + def __init__(self, + in_channel: int = 18, + conv_channels: Tuple[int] = (256, 256), + conv_cfg: ConfigType = dict(type='Conv1d'), + norm_cfg: ConfigType = dict(type='BN1d'), + act_cfg: ConfigType = dict(type='ReLU'), + init_cfg: OptMultiConfig = None) -> None: + super(MLP, self).__init__(init_cfg=init_cfg) + self.mlp = nn.Sequential() + prev_channels = in_channel + for i, conv_channel in enumerate(conv_channels): + self.mlp.add_module( + f'layer{i}', + ConvModule( + prev_channels, + conv_channels[i], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=True, + inplace=True)) + prev_channels = conv_channels[i] + + def forward(self, img_features: Tensor) -> Tensor: + return self.mlp(img_features) diff --git a/mmdet3d/models/layers/norm.py b/mmdet3d/models/layers/norm.py new file mode 100755 index 0000000..a5c721e --- /dev/null +++ b/mmdet3d/models/layers/norm.py @@ -0,0 +1,152 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.registry import MODELS +from torch import Tensor +from torch import distributed as dist +from torch import nn as nn +from torch.autograd.function import Function + + +class AllReduce(Function): + + @staticmethod + def forward(ctx, input: Tensor) -> Tensor: + input_list = [ + torch.zeros_like(input) for k in range(dist.get_world_size()) + ] + # Use allgather instead of allreduce in-place operations is unreliable + dist.all_gather(input_list, input, async_op=False) + inputs = torch.stack(input_list, dim=0) + return torch.sum(inputs, dim=0) + + @staticmethod + def backward(ctx, grad_output: Tensor) -> Tensor: + dist.all_reduce(grad_output, async_op=False) + return grad_output + + +@MODELS.register_module('naiveSyncBN1d') +class NaiveSyncBatchNorm1d(nn.BatchNorm1d): + """Synchronized Batch Normalization for 3D Tensors. + + Note: + This implementation is modified from + https://github.com/facebookresearch/detectron2/ + + `torch.nn.SyncBatchNorm` has known unknown bugs. + It produces significantly worse AP (and sometimes goes NaN) + when the batch size on each worker is quite different + (e.g., when scale augmentation is used). + In 3D detection, different workers has points of different shapes, + which also cause instability. + + Use this implementation before `nn.SyncBatchNorm` is fixed. + It is slower than `nn.SyncBatchNorm`. + """ + + def __init__(self, *args: list, **kwargs: dict) -> None: + super(NaiveSyncBatchNorm1d, self).__init__(*args, **kwargs) + + def forward(self, input: Tensor) -> Tensor: + """ + Args: + input (Tensor): Has shape (N, C) or (N, C, L), where N is + the batch size, C is the number of features or + channels, and L is the sequence length + + Returns: + Tensor: Has shape (N, C) or (N, C, L), same shape as input. 
+ """ + assert input.dtype == torch.float32, \ + f'input should be in float32 type, got {input.dtype}' + using_dist = dist.is_available() and dist.is_initialized() + if (not using_dist) or dist.get_world_size() == 1 \ + or not self.training: + return super().forward(input) + assert input.shape[0] > 0, 'SyncBN does not support empty inputs' + is_two_dim = input.dim() == 2 + if is_two_dim: + input = input.unsqueeze(2) + + C = input.shape[1] + mean = torch.mean(input, dim=[0, 2]) + meansqr = torch.mean(input * input, dim=[0, 2]) + + vec = torch.cat([mean, meansqr], dim=0) + vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size()) + + mean, meansqr = torch.split(vec, C) + var = meansqr - mean * mean + self.running_mean += self.momentum * ( + mean.detach() - self.running_mean) + self.running_var += self.momentum * (var.detach() - self.running_var) + + invstd = torch.rsqrt(var + self.eps) + scale = self.weight * invstd + bias = self.bias - mean * scale + scale = scale.reshape(1, -1, 1) + bias = bias.reshape(1, -1, 1) + output = input * scale + bias + if is_two_dim: + output = output.squeeze(2) + return output + + +@MODELS.register_module('naiveSyncBN2d') +class NaiveSyncBatchNorm2d(nn.BatchNorm2d): + """Synchronized Batch Normalization for 4D Tensors. + + Note: + This implementation is modified from + https://github.com/facebookresearch/detectron2/ + + `torch.nn.SyncBatchNorm` has known unknown bugs. + It produces significantly worse AP (and sometimes goes NaN) + when the batch size on each worker is quite different + (e.g., when scale augmentation is used). + This phenomenon also occurs when the multi-modality feature fusion + modules of multi-modality detectors use SyncBN. + + Use this implementation before `nn.SyncBatchNorm` is fixed. + It is slower than `nn.SyncBatchNorm`. + """ + + def __init__(self, *args: list, **kwargs: dict) -> None: + super(NaiveSyncBatchNorm2d, self).__init__(*args, **kwargs) + + def forward(self, input: Tensor) -> Tensor: + """ + Args: + Input (Tensor): Feature has shape (N, C, H, W). + + Returns: + Tensor: Has shape (N, C, H, W), same shape as input. + """ + assert input.dtype == torch.float32, \ + f'input should be in float32 type, got {input.dtype}' + using_dist = dist.is_available() and dist.is_initialized() + if (not using_dist) or \ + dist.get_world_size() == 1 or \ + not self.training: + return super().forward(input) + + assert input.shape[0] > 0, 'SyncBN does not support empty inputs' + C = input.shape[1] + mean = torch.mean(input, dim=[0, 2, 3]) + meansqr = torch.mean(input * input, dim=[0, 2, 3]) + + vec = torch.cat([mean, meansqr], dim=0) + vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size()) + + mean, meansqr = torch.split(vec, C) + var = meansqr - mean * mean + self.running_mean += self.momentum * ( + mean.detach() - self.running_mean) + self.running_var += self.momentum * (var.detach() - self.running_var) + + invstd = torch.rsqrt(var + self.eps) + scale = self.weight * invstd + bias = self.bias - mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + return input * scale + bias diff --git a/mmdet3d/models/layers/paconv/__init__.py b/mmdet3d/models/layers/paconv/__init__.py new file mode 100755 index 0000000..d71c766 --- /dev/null +++ b/mmdet3d/models/layers/paconv/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .paconv import PAConv, PAConvCUDA + +__all__ = ['PAConv', 'PAConvCUDA'] diff --git a/mmdet3d/models/layers/paconv/paconv.py b/mmdet3d/models/layers/paconv/paconv.py new file mode 100755 index 0000000..04aaa2a --- /dev/null +++ b/mmdet3d/models/layers/paconv/paconv.py @@ -0,0 +1,402 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import List, Tuple, Union + +import torch +from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer +from mmcv.ops import assign_score_withk as assign_score_cuda +from mmengine.model import constant_init +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.utils import ConfigType +from .utils import assign_kernel_withoutk, assign_score, calc_euclidian_dist + + +class ScoreNet(nn.Module): + r"""ScoreNet that outputs coefficient scores to assemble kernel weights in + the weight bank according to the relative position of point pairs. + + Args: + mlp_channels (List[int]): Hidden unit sizes of SharedMLP layers. + last_bn (bool): Whether to use BN on the last output of mlps. + Defaults to False. + score_norm (str): Normalization function of output scores. + Can be 'softmax', 'sigmoid' or 'identity'. Defaults to 'softmax'. + temp_factor (float): Temperature factor to scale the output + scores before softmax. Defaults to 1.0. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN2d'). + bias (bool or str): If specified as `auto`, it will be decided by + `norm_cfg`. `bias` will be set as True if `norm_cfg` is None, + otherwise False. Defaults to 'auto'. + + Note: + The official code applies xavier_init to all Conv layers in ScoreNet, + see `PAConv `_. However in our experiments, we + did not find much difference in applying such xavier initialization + or not. So we neglect this initialization in our implementation. + """ + + def __init__(self, + mlp_channels: List[int], + last_bn: bool = False, + score_norm: str = 'softmax', + temp_factor: float = 1.0, + norm_cfg: ConfigType = dict(type='BN2d'), + bias: Union[bool, str] = 'auto') -> None: + super(ScoreNet, self).__init__() + + assert score_norm in ['softmax', 'sigmoid', 'identity'], \ + f'unsupported score_norm function {score_norm}' + + self.score_norm = score_norm + self.temp_factor = temp_factor + + self.mlps = nn.Sequential() + for i in range(len(mlp_channels) - 2): + self.mlps.add_module( + f'layer{i}', + ConvModule( + mlp_channels[i], + mlp_channels[i + 1], + kernel_size=(1, 1), + stride=(1, 1), + conv_cfg=dict(type='Conv2d'), + norm_cfg=norm_cfg, + bias=bias)) + + # for the last mlp that outputs scores, no relu and possibly no bn + i = len(mlp_channels) - 2 + self.mlps.add_module( + f'layer{i}', + ConvModule( + mlp_channels[i], + mlp_channels[i + 1], + kernel_size=(1, 1), + stride=(1, 1), + conv_cfg=dict(type='Conv2d'), + norm_cfg=norm_cfg if last_bn else None, + act_cfg=None, + bias=bias)) + + def forward(self, xyz_features: Tensor) -> Tensor: + """Forward. + + Args: + xyz_features (Tensor): (B, C, N, K) Features constructed from xyz + coordinates of point pairs. May contain relative positions, + Euclidean distance, etc. + + Returns: + Tensor: (B, N, K, M) Predicted scores for `M` kernels. 
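+
+        Example:
+            A toy-shaped sketch; the channel sizes below are assumptions for
+            illustration (7 input channels match ``w_neighbor_dist``, 8 is
+            the assumed number of kernels):
+
+            >>> import torch
+            >>> scorenet = ScoreNet(mlp_channels=[7, 16, 16, 8])
+            >>> xyz_feats = torch.rand(2, 7, 128, 32)  # (B, C, N, K)
+            >>> scorenet(xyz_feats).shape
+            torch.Size([2, 128, 32, 8])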
+ """ + scores = self.mlps(xyz_features) # (B, M, N, K) + + # perform score normalization + if self.score_norm == 'softmax': + scores = F.softmax(scores / self.temp_factor, dim=1) + elif self.score_norm == 'sigmoid': + scores = torch.sigmoid(scores / self.temp_factor) + else: # 'identity' + scores = scores + + scores = scores.permute(0, 2, 3, 1) # (B, N, K, M) + + return scores + + +class PAConv(nn.Module): + """Non-CUDA version of PAConv. + + PAConv stores a trainable weight bank containing several kernel weights. + Given input points and features, it computes coefficient scores to assemble + those kernels to form conv kernels, and then runs convolution on the input. + + Args: + in_channels (int): Input channels of point features. + out_channels (int): Output channels of point features. + num_kernels (int): Number of kernel weights in the weight bank. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN2d', momentum=0.1). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='ReLU', inplace=True). + scorenet_input (str): Type of input to ScoreNet. + Can be 'identity', 'w_neighbor' or 'w_neighbor_dist'. + Defaults to 'w_neighbor_dist'. + weight_bank_init (str): Init method of weight bank kernels. + Can be 'kaiming' or 'xavier'. Defaults to 'kaiming'. + kernel_input (str): Input features to be multiplied with kernel + weights. Can be 'identity' or 'w_neighbor'. + Defaults to 'w_neighbor'. + scorenet_cfg (dict): Config of the ScoreNet module, which may contain + the following keys and values: + + - mlp_channels (List[int]): Hidden units of MLPs. + - score_norm (str): Normalization function of output scores. + Can be 'softmax', 'sigmoid' or 'identity'. + - temp_factor (float): Temperature factor to scale the output + scores before softmax. + - last_bn (bool): Whether to use BN on the last output of mlps. + Defaults to dict(mlp_channels=[16, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False). 
+ """ + + def __init__( + self, + in_channels: int, + out_channels: int, + num_kernels: int, + norm_cfg: ConfigType = dict(type='BN2d', momentum=0.1), + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + scorenet_input: str = 'w_neighbor_dist', + weight_bank_init: str = 'kaiming', + kernel_input: str = 'w_neighbor', + scorenet_cfg: dict = dict( + mlp_channels=[16, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False) + ) -> None: + super(PAConv, self).__init__() + + # determine weight kernel size according to used features + if kernel_input == 'identity': + # only use grouped_features + kernel_mul = 1 + elif kernel_input == 'w_neighbor': + # concat of (grouped_features - center_features, grouped_features) + kernel_mul = 2 + else: + raise NotImplementedError( + f'unsupported kernel_input {kernel_input}') + self.kernel_input = kernel_input + in_channels = kernel_mul * in_channels + + # determine mlp channels in ScoreNet according to used xyz features + if scorenet_input == 'identity': + # only use relative position (grouped_xyz - center_xyz) + self.scorenet_in_channels = 3 + elif scorenet_input == 'w_neighbor': + # (grouped_xyz - center_xyz, grouped_xyz) + self.scorenet_in_channels = 6 + elif scorenet_input == 'w_neighbor_dist': + # (center_xyz, grouped_xyz - center_xyz, Euclidean distance) + self.scorenet_in_channels = 7 + else: + raise NotImplementedError( + f'unsupported scorenet_input {scorenet_input}') + self.scorenet_input = scorenet_input + + # construct kernel weights in weight bank + # self.weight_bank is of shape [C, num_kernels * out_c] + # where C can be in_c or (2 * in_c) + if weight_bank_init == 'kaiming': + weight_init = nn.init.kaiming_normal_ + elif weight_bank_init == 'xavier': + weight_init = nn.init.xavier_normal_ + else: + raise NotImplementedError( + f'unsupported weight bank init method {weight_bank_init}') + + self.num_kernels = num_kernels # the parameter `m` in the paper + weight_bank = weight_init( + torch.empty(self.num_kernels, in_channels, out_channels)) + weight_bank = weight_bank.permute(1, 0, 2).reshape( + in_channels, self.num_kernels * out_channels).contiguous() + self.weight_bank = nn.Parameter(weight_bank, requires_grad=True) + + # construct ScoreNet + scorenet_cfg_ = copy.deepcopy(scorenet_cfg) + scorenet_cfg_['mlp_channels'].insert(0, self.scorenet_in_channels) + scorenet_cfg_['mlp_channels'].append(self.num_kernels) + self.scorenet = ScoreNet(**scorenet_cfg_) + + self.bn = build_norm_layer(norm_cfg, out_channels)[1] if \ + norm_cfg is not None else None + self.activate = build_activation_layer(act_cfg) if \ + act_cfg is not None else None + + # set some basic attributes of Conv layers + self.in_channels = in_channels + self.out_channels = out_channels + + self.init_weights() + + def init_weights(self) -> None: + """Initialize weights of shared MLP layers and BN layers.""" + if self.bn is not None: + constant_init(self.bn, val=1, bias=0) + + def _prepare_scorenet_input(self, points_xyz: Tensor) -> Tensor: + """Prepare input point pairs features for self.ScoreNet. + + Args: + points_xyz (Tensor): (B, 3, npoint, K) Coordinates of the + grouped points. + + Returns: + Tensor: (B, C, npoint, K) The generated features per point pair. 
+ """ + B, _, npoint, K = points_xyz.size() + center_xyz = points_xyz[..., :1].repeat(1, 1, 1, K) + xyz_diff = points_xyz - center_xyz # [B, 3, npoint, K] + if self.scorenet_input == 'identity': + xyz_features = xyz_diff + elif self.scorenet_input == 'w_neighbor': + xyz_features = torch.cat((xyz_diff, points_xyz), dim=1) + else: # w_neighbor_dist + euclidian_dist = calc_euclidian_dist( + center_xyz.permute(0, 2, 3, 1).reshape(B * npoint * K, 3), + points_xyz.permute(0, 2, 3, 1).reshape(B * npoint * K, 3)).\ + reshape(B, 1, npoint, K) + xyz_features = torch.cat((center_xyz, xyz_diff, euclidian_dist), + dim=1) + return xyz_features + + def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]: + """Forward. + + Args: + inputs (Tuple[Tensor]): + + - features (Tensor): (B, in_c, npoint, K) + Features of the queried points. + - points_xyz (Tensor): (B, 3, npoint, K) + Coordinates of the grouped points. + + Returns: + Tuple[Tensor]: + + - new_features: (B, out_c, npoint, K) Features after PAConv. + - points_xyz: Same as input. + """ + features, points_xyz = inputs + B, _, npoint, K = features.size() + + if self.kernel_input == 'w_neighbor': + center_features = features[..., :1].repeat(1, 1, 1, K) + features_diff = features - center_features + # to (B, 2 * in_c, npoint, K) + features = torch.cat((features_diff, features), dim=1) + + # prepare features for between each point and its grouping center + xyz_features = self._prepare_scorenet_input(points_xyz) + + # scores to assemble kernel weights + scores = self.scorenet(xyz_features) # [B, npoint, K, m] + + # first compute out features over all kernels + # features is [B, C, npoint, K], weight_bank is [C, m * out_c] + new_features = torch.matmul( + features.permute(0, 2, 3, 1), + self.weight_bank).view(B, npoint, K, self.num_kernels, + -1) # [B, npoint, K, m, out_c] + + # then aggregate using scores + new_features = assign_score(scores, new_features) + # to [B, out_c, npoint, K] + new_features = new_features.permute(0, 3, 1, 2).contiguous() + + if self.bn is not None: + new_features = self.bn(new_features) + if self.activate is not None: + new_features = self.activate(new_features) + + # in order to keep input output consistency + # so that we can wrap PAConv in Sequential + return (new_features, points_xyz) + + +class PAConvCUDA(PAConv): + """CUDA version of PAConv that implements a cuda op to efficiently perform + kernel assembling. + + Different from vanilla PAConv, the input features of this function is not + grouped by centers. Instead, they will be queried on-the-fly by the + additional input `points_idx`. This avoids the large intermediate matrix. + See the `paper `_ appendix Sec. D for + more detailed descriptions. 
+ """ + + def __init__( + self, + in_channels: int, + out_channels: int, + num_kernels: int, + norm_cfg: ConfigType = dict(type='BN2d', momentum=0.1), + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + scorenet_input: str = 'w_neighbor_dist', + weight_bank_init: str = 'kaiming', + kernel_input: str = 'w_neighbor', + scorenet_cfg: dict = dict( + mlp_channels=[8, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False) + ) -> None: + super(PAConvCUDA, self).__init__( + in_channels=in_channels, + out_channels=out_channels, + num_kernels=num_kernels, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + scorenet_input=scorenet_input, + weight_bank_init=weight_bank_init, + kernel_input=kernel_input, + scorenet_cfg=scorenet_cfg) + + assert self.kernel_input == 'w_neighbor', \ + 'CUDA implemented PAConv only supports w_neighbor kernel_input' + + def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]: + """Forward. + + Args: + inputs (Tuple[Tensor]): + + - features (Tensor): (B, in_c, N) + Features of all points in the current point cloud. + Different from non-CUDA version PAConv, here the features + are not grouped by each center to form a K dim. + - points_xyz (Tensor): (B, 3, npoint, K) + Coordinates of the grouped points. + - points_idx (Tensor): (B, npoint, K) + Index of the grouped points. + + Returns: + Tuple[Tensor]: + + - new_features: (B, out_c, npoint, K) Features after PAConv. + - points_xyz: Same as input. + - points_idx: Same as input. + """ + features, points_xyz, points_idx = inputs + + # prepare features for between each point and its grouping center + xyz_features = self._prepare_scorenet_input(points_xyz) + + # scores to assemble kernel weights + scores = self.scorenet(xyz_features) # [B, npoint, K, m] + + # pre-compute features for points and centers separately + # features is [B, in_c, N], weight_bank is [C, m * out_dim] + point_feat, center_feat = assign_kernel_withoutk( + features, self.weight_bank, self.num_kernels) + + # aggregate features using custom cuda op + new_features = assign_score_cuda( + scores, point_feat, center_feat, points_idx, + 'sum').contiguous() # [B, out_c, npoint, K] + + if self.bn is not None: + new_features = self.bn(new_features) + if self.activate is not None: + new_features = self.activate(new_features) + + # in order to keep input output consistency + return (new_features, points_xyz, points_idx) diff --git a/mmdet3d/models/layers/paconv/utils.py b/mmdet3d/models/layers/paconv/utils.py new file mode 100755 index 0000000..e126b1a --- /dev/null +++ b/mmdet3d/models/layers/paconv/utils.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +import torch +from torch import Tensor + + +def calc_euclidian_dist(xyz1: Tensor, xyz2: Tensor) -> Tensor: + """Calculate the Euclidean distance between two sets of points. + + Args: + xyz1 (Tensor): (N, 3) The first set of points. + xyz2 (Tensor): (N, 3) The second set of points. + + Returns: + Tensor: (N, ) The Euclidean distance between each point pair. + """ + assert xyz1.shape[0] == xyz2.shape[0], 'number of points are not the same' + assert xyz1.shape[1] == xyz2.shape[1] == 3, \ + 'points coordinates dimension is not 3' + return torch.norm(xyz1 - xyz2, dim=-1) + + +def assign_score(scores: Tensor, point_features: Tensor) -> Tensor: + """Perform weighted sum to aggregate output features according to scores. + This function is used in non-CUDA version of PAConv. 
+ + Compared to the cuda op assigh_score_withk, this pytorch implementation + pre-computes output features for the neighbors of all centers, and then + performs aggregation. It consumes more GPU memories. + + Args: + scores (Tensor): (B, npoint, K, M) Predicted scores to + aggregate weight matrices in the weight bank. + `npoint` is the number of sampled centers. + `K` is the number of queried neighbors. + `M` is the number of weight matrices in the weight bank. + point_features (Tensor): (B, npoint, K, M, out_dim) + Pre-computed point features to be aggregated. + + Returns: + Tensor: (B, npoint, K, out_dim) The aggregated features. + """ + B, npoint, K, M = scores.size() + scores = scores.view(B, npoint, K, 1, M) + output = torch.matmul(scores, point_features).view(B, npoint, K, -1) + return output + + +def assign_kernel_withoutk(features: Tensor, kernels: Tensor, + M: int) -> Tuple[Tensor]: + """Pre-compute features with weight matrices in weight bank. This function + is used before cuda op assign_score_withk in CUDA version PAConv. + + Args: + features (Tensor): (B, in_dim, N) Input features of all points. + `N` is the number of points in current point cloud. + kernels (Tensor): (2 * in_dim, M * out_dim) Weight matrices in + the weight bank, transformed from (M, 2 * in_dim, out_dim). + `2 * in_dim` is because the input features are concatenation of + (point_features - center_features, point_features). + M (int): Number of weight matrices in the weight bank. + + Returns: + Tuple[Tensor]: Both of shape (B, N, M, out_dim). + + - point_features: Pre-computed features for points. + - center_features: Pre-computed features for centers. + """ + B, in_dim, N = features.size() + feat_trans = features.permute(0, 2, 1) # [B, N, in_dim] + out_feat_half1 = torch.matmul(feat_trans, kernels[:in_dim]).view( + B, N, M, -1) # [B, N, M, out_dim] + out_feat_half2 = torch.matmul(feat_trans, kernels[in_dim:]).view( + B, N, M, -1) # [B, N, M, out_dim] + + # TODO: why this hard-coded if condition? + # when the network input is only xyz without additional features + # xyz will be used as features, so that features.size(1) == 3 % 2 != 0 + # we need to compensate center_features because otherwise + # `point_features - center_features` will result in all zeros? + if features.size(1) % 2 != 0: + out_feat_half_coord = torch.matmul( + feat_trans[:, :, :3], # [B, N, 3] + kernels[in_dim:in_dim + 3]).view(B, N, M, -1) # [B, N, M, out_dim] + else: + out_feat_half_coord = torch.zeros_like(out_feat_half2) + + point_features = out_feat_half1 + out_feat_half2 + center_features = out_feat_half1 + out_feat_half_coord + return point_features, center_features diff --git a/mmdet3d/models/layers/pointnet_modules/__init__.py b/mmdet3d/models/layers/pointnet_modules/__init__.py new file mode 100755 index 0000000..13d6e1d --- /dev/null +++ b/mmdet3d/models/layers/pointnet_modules/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .builder import build_sa_module +from .paconv_sa_module import (PAConvCUDASAModule, PAConvCUDASAModuleMSG, + PAConvSAModule, PAConvSAModuleMSG) +from .point_fp_module import PointFPModule +from .point_sa_module import PointSAModule, PointSAModuleMSG +from .stack_point_sa_module import StackedSAModuleMSG + +__all__ = [ + 'build_sa_module', 'PointSAModuleMSG', 'PointSAModule', 'PointFPModule', + 'PAConvSAModule', 'PAConvSAModuleMSG', 'PAConvCUDASAModule', + 'PAConvCUDASAModuleMSG', 'StackedSAModuleMSG' +] diff --git a/mmdet3d/models/layers/pointnet_modules/builder.py b/mmdet3d/models/layers/pointnet_modules/builder.py new file mode 100755 index 0000000..2274f9c --- /dev/null +++ b/mmdet3d/models/layers/pointnet_modules/builder.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +from mmengine.registry import Registry +from torch import nn as nn + +SA_MODULES = Registry( + name='point_sa_module', + locations=['mmdet3d.models.layers.pointnet_modules']) + + +def build_sa_module(cfg: Union[dict, None], *args, **kwargs) -> nn.Module: + """Build PointNet2 set abstraction (SA) module. + + Args: + cfg (dict or None): The SA module config, which should contain: + + - type (str): Module type. + - module args: Args needed to instantiate an SA module. + args (argument list): Arguments passed to the `__init__` + method of the corresponding module. + kwargs (keyword arguments): Keyword arguments passed to the `__init__` + method of the corresponding SA module . + + Returns: + nn.Module: Created SA module. + """ + if cfg is None: + cfg_ = dict(type='PointSAModule') + else: + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + module_type = cfg_.pop('type') + if module_type not in SA_MODULES: + raise KeyError(f'Unrecognized module type {module_type}') + else: + sa_module = SA_MODULES.get(module_type) + + module = sa_module(*args, **kwargs, **cfg_) + + return module diff --git a/mmdet3d/models/layers/pointnet_modules/paconv_sa_module.py b/mmdet3d/models/layers/pointnet_modules/paconv_sa_module.py new file mode 100755 index 0000000..a6e55d0 --- /dev/null +++ b/mmdet3d/models/layers/pointnet_modules/paconv_sa_module.py @@ -0,0 +1,383 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.layers.paconv import PAConv, PAConvCUDA +from mmdet3d.utils import ConfigType +from .builder import SA_MODULES +from .point_sa_module import BasePointSAModule + + +@SA_MODULES.register_module() +class PAConvSAModuleMSG(BasePointSAModule): + r"""Point set abstraction module with multi-scale grouping (MSG) used in + PAConv networks. + + Replace the MLPs in `PointSAModuleMSG` with PAConv layers. + See the `paper `_ for more details. + + Args: + num_point (int): Number of points. + radii (List[float]): List of radius in each ball query. + sample_nums (List[int]): Number of samples in each ball query. + mlp_channels (List[List[int]]): Specify of the pointnet before + the global pooling for each scale. + paconv_num_kernels (List[List[int]]): Number of kernel weights in the + weight banks of each layer's PAConv. + fps_mod (List[str]): Type of FPS method, valid mod + ['F-FPS', 'D-FPS', 'FS']. Defaults to ['D-FPS']. + + - F-FPS: Using feature distances for FPS. + - D-FPS: Using Euclidean distances of points for FPS. 
+ - FS: Using F-FPS and D-FPS simultaneously. + fps_sample_range_list (List[int]): Range of points to apply FPS. + Defaults to [-1]. + dilated_group (bool): Whether to use dilated ball query. + Defaults to False. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN2d', momentum=0.1). + use_xyz (bool): Whether to use xyz. Defaults to True. + pool_mod (str): Type of pooling method. Defaults to 'max'. + normalize_xyz (bool): Whether to normalize local XYZ with radius. + Defaults to False. + bias (bool or str): If specified as `auto`, it will be decided by + `norm_cfg`. `bias` will be set as True if `norm_cfg` is None, + otherwise False. Defaults to 'auto'. + paconv_kernel_input (str): Input features to be multiplied + with kernel weights. Can be 'identity' or 'w_neighbor'. + Defaults to 'w_neighbor'. + scorenet_input (str): Type of the input to ScoreNet. + Defaults to 'w_neighbor_dist'. Can be the following values: + + - 'identity': Use xyz coordinates as input. + - 'w_neighbor': Use xyz coordinates and the difference with center + points as input. + - 'w_neighbor_dist': Use xyz coordinates, the difference with + center points and the Euclidean distance as input. + scorenet_cfg (dict): Config of the ScoreNet module, which + may contain the following keys and values: + + - mlp_channels (List[int]): Hidden units of MLPs. + - score_norm (str): Normalization function of output scores. + Can be 'softmax', 'sigmoid' or 'identity'. + - temp_factor (float): Temperature factor to scale the output + scores before softmax. + - last_bn (bool): Whether to use BN on the last output of mlps. + Defaults to dict(mlp_channels=[16, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False). + """ + + def __init__( + self, + num_point: int, + radii: List[float], + sample_nums: List[int], + mlp_channels: List[List[int]], + paconv_num_kernels: List[List[int]], + fps_mod: List[str] = ['D-FPS'], + fps_sample_range_list: List[int] = [-1], + dilated_group: bool = False, + norm_cfg: ConfigType = dict(type='BN2d', momentum=0.1), + use_xyz: bool = True, + pool_mod: str = 'max', + normalize_xyz: bool = False, + bias: Union[bool, str] = 'auto', + paconv_kernel_input: str = 'w_neighbor', + scorenet_input: str = 'w_neighbor_dist', + scorenet_cfg: dict = dict( + mlp_channels=[16, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False) + ) -> None: + super(PAConvSAModuleMSG, self).__init__( + num_point=num_point, + radii=radii, + sample_nums=sample_nums, + mlp_channels=mlp_channels, + fps_mod=fps_mod, + fps_sample_range_list=fps_sample_range_list, + dilated_group=dilated_group, + use_xyz=use_xyz, + pool_mod=pool_mod, + normalize_xyz=normalize_xyz, + grouper_return_grouped_xyz=True) + + assert len(paconv_num_kernels) == len(mlp_channels) + for i in range(len(mlp_channels)): + assert len(paconv_num_kernels[i]) == len(mlp_channels[i]) - 1, \ + 'PAConv number of kernel weights wrong' + + # in PAConv, bias only exists in ScoreNet + scorenet_cfg['bias'] = bias + + for i in range(len(self.mlp_channels)): + mlp_channel = self.mlp_channels[i] + if use_xyz: + mlp_channel[0] += 3 + + num_kernels = paconv_num_kernels[i] + + mlp = nn.Sequential() + for i in range(len(mlp_channel) - 1): + mlp.add_module( + f'layer{i}', + PAConv( + mlp_channel[i], + mlp_channel[i + 1], + num_kernels[i], + norm_cfg=norm_cfg, + kernel_input=paconv_kernel_input, + scorenet_input=scorenet_input, + scorenet_cfg=scorenet_cfg)) + self.mlps.append(mlp) + + +@SA_MODULES.register_module() 
+class PAConvSAModule(PAConvSAModuleMSG): + r"""Point set abstraction module with single-scale grouping (SSG) used in + PAConv networks. + + Replace the MLPs in `PointSAModule` with PAConv layers. See the `paper + `_ for more details. + """ + + def __init__( + self, + mlp_channels: List[int], + paconv_num_kernels: List[int], + num_point: Optional[int] = None, + radius: Optional[float] = None, + num_sample: Optional[int] = None, + norm_cfg: ConfigType = dict(type='BN2d', momentum=0.1), + use_xyz: bool = True, + pool_mod: str = 'max', + fps_mod: List[str] = ['D-FPS'], + fps_sample_range_list: List[int] = [-1], + normalize_xyz: bool = False, + paconv_kernel_input: str = 'w_neighbor', + scorenet_input: str = 'w_neighbor_dist', + scorenet_cfg: dict = dict( + mlp_channels=[16, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False) + ) -> None: + super(PAConvSAModule, self).__init__( + mlp_channels=[mlp_channels], + paconv_num_kernels=[paconv_num_kernels], + num_point=num_point, + radii=[radius], + sample_nums=[num_sample], + norm_cfg=norm_cfg, + use_xyz=use_xyz, + pool_mod=pool_mod, + fps_mod=fps_mod, + fps_sample_range_list=fps_sample_range_list, + normalize_xyz=normalize_xyz, + paconv_kernel_input=paconv_kernel_input, + scorenet_input=scorenet_input, + scorenet_cfg=scorenet_cfg) + + +@SA_MODULES.register_module() +class PAConvCUDASAModuleMSG(BasePointSAModule): + r"""Point set abstraction module with multi-scale grouping (MSG) used in + PAConv networks. + + Replace the non CUDA version PAConv with CUDA implemented PAConv for + efficient computation. See the `paper `_ + for more details. + """ + + def __init__( + self, + num_point: int, + radii: List[float], + sample_nums: List[int], + mlp_channels: List[List[int]], + paconv_num_kernels: List[List[int]], + fps_mod: List[str] = ['D-FPS'], + fps_sample_range_list: List[int] = [-1], + dilated_group: bool = False, + norm_cfg: ConfigType = dict(type='BN2d', momentum=0.1), + use_xyz: bool = True, + pool_mod: str = 'max', + normalize_xyz: bool = False, + bias: Union[bool, str] = 'auto', + paconv_kernel_input: str = 'w_neighbor', + scorenet_input: str = 'w_neighbor_dist', + scorenet_cfg: dict = dict( + mlp_channels=[8, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False) + ) -> None: + super(PAConvCUDASAModuleMSG, self).__init__( + num_point=num_point, + radii=radii, + sample_nums=sample_nums, + mlp_channels=mlp_channels, + fps_mod=fps_mod, + fps_sample_range_list=fps_sample_range_list, + dilated_group=dilated_group, + use_xyz=use_xyz, + pool_mod=pool_mod, + normalize_xyz=normalize_xyz, + grouper_return_grouped_xyz=True, + grouper_return_grouped_idx=True) + + assert len(paconv_num_kernels) == len(mlp_channels) + for i in range(len(mlp_channels)): + assert len(paconv_num_kernels[i]) == len(mlp_channels[i]) - 1, \ + 'PAConv number of kernel weights wrong' + + # in PAConv, bias only exists in ScoreNet + scorenet_cfg['bias'] = bias + + # we need to manually concat xyz for CUDA implemented PAConv + self.use_xyz = use_xyz + + for i in range(len(self.mlp_channels)): + mlp_channel = self.mlp_channels[i] + if use_xyz: + mlp_channel[0] += 3 + + num_kernels = paconv_num_kernels[i] + + # can't use `nn.Sequential` for PAConvCUDA because its input and + # output have different shapes + mlp = nn.ModuleList() + for i in range(len(mlp_channel) - 1): + mlp.append( + PAConvCUDA( + mlp_channel[i], + mlp_channel[i + 1], + num_kernels[i], + norm_cfg=norm_cfg, + kernel_input=paconv_kernel_input, + scorenet_input=scorenet_input, + 
scorenet_cfg=scorenet_cfg)) + self.mlps.append(mlp) + + def forward( + self, + points_xyz: Tensor, + features: Optional[Tensor] = None, + indices: Optional[Tensor] = None, + target_xyz: Optional[Tensor] = None, + ) -> Tuple[Tensor]: + """Forward. + + Args: + points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. + features (Tensor, optional): (B, C, N) features of each point. + Defaults to None. + indices (Tensor, optional): (B, num_point) Index of the features. + Defaults to None. + target_xyz (Tensor, optional): (B, M, 3) new coords of the outputs. + Defaults to None. + + Returns: + Tuple[Tensor]: + + - new_xyz: (B, M, 3) where M is the number of points. + New features xyz. + - new_features: (B, M, sum_k(mlps[k][-1])) where M is the + number of points. New feature descriptors. + - indices: (B, M) where M is the number of points. + Index of the features. + """ + new_features_list = [] + + # sample points, (B, num_point, 3), (B, num_point) + new_xyz, indices = self._sample_points(points_xyz, features, indices, + target_xyz) + + for i in range(len(self.groupers)): + xyz = points_xyz + new_features = features + for j in range(len(self.mlps[i])): + # we don't use grouped_features here to avoid large GPU memory + # _, (B, 3, num_point, nsample), (B, num_point, nsample) + _, grouped_xyz, grouped_idx = self.groupers[i](xyz, new_xyz, + new_features) + + # concat xyz as additional features + if self.use_xyz and j == 0: + # (B, C+3, N) + new_features = torch.cat( + (points_xyz.permute(0, 2, 1), new_features), dim=1) + + # (B, out_c, num_point, nsample) + grouped_new_features = self.mlps[i][j]( + (new_features, grouped_xyz, grouped_idx.long()))[0] + + # different from PointNet++ and non CUDA version of PAConv + # CUDA version of PAConv needs to aggregate local features + # every time after it passes through a Conv layer + # in order to transform to valid input shape + # (B, out_c, num_point) + new_features = self._pool_features(grouped_new_features) + + # constrain the points to be grouped for next PAConv layer + # because new_features only contains sampled centers now + # (B, num_point, 3) + xyz = new_xyz + + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1), indices + + +@SA_MODULES.register_module() +class PAConvCUDASAModule(PAConvCUDASAModuleMSG): + r"""Point set abstraction module with single-scale grouping (SSG) used in + PAConv networks. + + Replace the non CUDA version PAConv with CUDA implemented PAConv for + efficient computation. See the `paper `_ + for more details. 
+ """ + + def __init__( + self, + mlp_channels: List[int], + paconv_num_kernels: List[int], + num_point: Optional[int] = None, + radius: Optional[float] = None, + num_sample: Optional[int] = None, + norm_cfg: ConfigType = dict(type='BN2d', momentum=0.1), + use_xyz: bool = True, + pool_mod: str = 'max', + fps_mod: List[str] = ['D-FPS'], + fps_sample_range_list: List[int] = [-1], + normalize_xyz: bool = False, + paconv_kernel_input: str = 'w_neighbor', + scorenet_input: str = 'w_neighbor_dist', + scorenet_cfg: dict = dict( + mlp_channels=[8, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False) + ) -> None: + super(PAConvCUDASAModule, self).__init__( + mlp_channels=[mlp_channels], + paconv_num_kernels=[paconv_num_kernels], + num_point=num_point, + radii=[radius], + sample_nums=[num_sample], + norm_cfg=norm_cfg, + use_xyz=use_xyz, + pool_mod=pool_mod, + fps_mod=fps_mod, + fps_sample_range_list=fps_sample_range_list, + normalize_xyz=normalize_xyz, + paconv_kernel_input=paconv_kernel_input, + scorenet_input=scorenet_input, + scorenet_cfg=scorenet_cfg) diff --git a/mmdet3d/models/layers/pointnet_modules/point_fp_module.py b/mmdet3d/models/layers/pointnet_modules/point_fp_module.py new file mode 100755 index 0000000..3635490 --- /dev/null +++ b/mmdet3d/models/layers/pointnet_modules/point_fp_module.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch +from mmcv.cnn import ConvModule +from mmcv.ops import three_interpolate, three_nn +from mmengine.model import BaseModule +from torch import Tensor +from torch import nn as nn + +from mmdet3d.utils import ConfigType, OptMultiConfig + + +class PointFPModule(BaseModule): + """Point feature propagation module used in PointNets. + + Propagate the features from one set to another. + + Args: + mlp_channels (list[int]): List of mlp channels. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN2d'). + init_cfg (:obj:`ConfigDict` or dict or List[:obj:`Contigdict` or dict], + optional): Initialization config dict. Defaults to None. + """ + + def __init__(self, + mlp_channels: List[int], + norm_cfg: ConfigType = dict(type='BN2d'), + init_cfg: OptMultiConfig = None) -> None: + super(PointFPModule, self).__init__(init_cfg=init_cfg) + self.mlps = nn.Sequential() + for i in range(len(mlp_channels) - 1): + self.mlps.add_module( + f'layer{i}', + ConvModule( + mlp_channels[i], + mlp_channels[i + 1], + kernel_size=(1, 1), + stride=(1, 1), + conv_cfg=dict(type='Conv2d'), + norm_cfg=norm_cfg)) + + def forward(self, target: Tensor, source: Tensor, target_feats: Tensor, + source_feats: Tensor) -> Tensor: + """Forward. + + Args: + target (Tensor): (B, n, 3) Tensor of the xyz positions of + the target features. + source (Tensor): (B, m, 3) Tensor of the xyz positions of + the source features. + target_feats (Tensor): (B, C1, n) Tensor of the features to be + propagated to. + source_feats (Tensor): (B, C2, m) Tensor of features + to be propagated. + + Return: + Tensor: (B, M, N) M = mlp[-1], Tensor of the target features. 
+ """ + if source is not None: + dist, idx = three_nn(target, source) + dist_reciprocal = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_reciprocal, dim=2, keepdim=True) + weight = dist_reciprocal / norm + + interpolated_feats = three_interpolate(source_feats, idx, weight) + else: + interpolated_feats = source_feats.expand(*source_feats.size()[0:2], + target.size(1)) + + if target_feats is not None: + new_features = torch.cat([interpolated_feats, target_feats], + dim=1) # (B, C2 + C1, n) + else: + new_features = interpolated_feats + + new_features = new_features.unsqueeze(-1) + new_features = self.mlps(new_features) + + return new_features.squeeze(-1) diff --git a/mmdet3d/models/layers/pointnet_modules/point_sa_module.py b/mmdet3d/models/layers/pointnet_modules/point_sa_module.py new file mode 100755 index 0000000..61661af --- /dev/null +++ b/mmdet3d/models/layers/pointnet_modules/point_sa_module.py @@ -0,0 +1,354 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +from mmcv.cnn import ConvModule +from mmcv.ops import GroupAll +from mmcv.ops import PointsSampler as Points_Sampler +from mmcv.ops import QueryAndGroup, gather_points +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.models.layers import PAConv +from mmdet3d.utils import ConfigType +from .builder import SA_MODULES + + +class BasePointSAModule(nn.Module): + """Base module for point set abstraction module used in PointNets. + + Args: + num_point (int): Number of points. + radii (List[float]): List of radius in each ball query. + sample_nums (List[int]): Number of samples in each ball query. + mlp_channels (List[List[int]]): Specify of the pointnet before + the global pooling for each scale. + fps_mod (List[str]): Type of FPS method, valid mod + ['F-FPS', 'D-FPS', 'FS']. Defaults to ['D-FPS']. + + - F-FPS: using feature distances for FPS. + - D-FPS: using Euclidean distances of points for FPS. + - FS: using F-FPS and D-FPS simultaneously. + fps_sample_range_list (List[int]): Range of points to apply FPS. + Defaults to [-1]. + dilated_group (bool): Whether to use dilated ball query. + Defaults to False. + use_xyz (bool): Whether to use xyz. Defaults to True. + pool_mod (str): Type of pooling method. Defaults to 'max'. + normalize_xyz (bool): Whether to normalize local XYZ with radius. + Defaults to False. + grouper_return_grouped_xyz (bool): Whether to return grouped xyz + in `QueryAndGroup`. Defaults to False. + grouper_return_grouped_idx (bool): Whether to return grouped idx + in `QueryAndGroup`. Defaults to False. 
+ """ + + def __init__(self, + num_point: int, + radii: List[float], + sample_nums: List[int], + mlp_channels: List[List[int]], + fps_mod: List[str] = ['D-FPS'], + fps_sample_range_list: List[int] = [-1], + dilated_group: bool = False, + use_xyz: bool = True, + pool_mod: str = 'max', + normalize_xyz: bool = False, + grouper_return_grouped_xyz: bool = False, + grouper_return_grouped_idx: bool = False) -> None: + super(BasePointSAModule, self).__init__() + + assert len(radii) == len(sample_nums) == len(mlp_channels) + assert pool_mod in ['max', 'avg'] + assert isinstance(fps_mod, list) or isinstance(fps_mod, tuple) + assert isinstance(fps_sample_range_list, list) or isinstance( + fps_sample_range_list, tuple) + assert len(fps_mod) == len(fps_sample_range_list) + + if isinstance(mlp_channels, tuple): + mlp_channels = list(map(list, mlp_channels)) + self.mlp_channels = mlp_channels + + if isinstance(num_point, int): + self.num_point = [num_point] + elif isinstance(num_point, list) or isinstance(num_point, tuple): + self.num_point = num_point + elif num_point is None: + self.num_point = None + else: + raise NotImplementedError('Error type of num_point!') + + self.pool_mod = pool_mod + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + self.fps_mod_list = fps_mod + self.fps_sample_range_list = fps_sample_range_list + + if self.num_point is not None: + self.points_sampler = Points_Sampler(self.num_point, + self.fps_mod_list, + self.fps_sample_range_list) + else: + self.points_sampler = None + + for i in range(len(radii)): + radius = radii[i] + sample_num = sample_nums[i] + if num_point is not None: + if dilated_group and i != 0: + min_radius = radii[i - 1] + else: + min_radius = 0 + grouper = QueryAndGroup( + radius, + sample_num, + min_radius=min_radius, + use_xyz=use_xyz, + normalize_xyz=normalize_xyz, + return_grouped_xyz=grouper_return_grouped_xyz, + return_grouped_idx=grouper_return_grouped_idx) + else: + grouper = GroupAll(use_xyz) + self.groupers.append(grouper) + + def _sample_points(self, points_xyz: Tensor, features: Tensor, + indices: Tensor, target_xyz: Tensor) -> Tuple[Tensor]: + """Perform point sampling based on inputs. + + If `indices` is specified, directly sample corresponding points. + Else if `target_xyz` is specified, use is as sampled points. + Otherwise sample points using `self.points_sampler`. + + Args: + points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. + features (Tensor): (B, C, N) Features of each point. + indices (Tensor): (B, num_point) Index of the features. + target_xyz (Tensor): (B, M, 3) new_xyz coordinates of the outputs. + + Returns: + Tuple[Tensor]: + + - new_xyz: (B, num_point, 3) Sampled xyz coordinates of points. + - indices: (B, num_point) Sampled points' index. + """ + xyz_flipped = points_xyz.transpose(1, 2).contiguous() + if indices is not None: + assert (indices.shape[1] == self.num_point[0]) + new_xyz = gather_points(xyz_flipped, indices).transpose( + 1, 2).contiguous() if self.num_point is not None else None + elif target_xyz is not None: + new_xyz = target_xyz.contiguous() + else: + if self.num_point is not None: + indices = self.points_sampler(points_xyz, features) + new_xyz = gather_points(xyz_flipped, + indices).transpose(1, 2).contiguous() + else: + new_xyz = None + + return new_xyz, indices + + def _pool_features(self, features: Tensor) -> Tensor: + """Perform feature aggregation using pooling operation. + + Args: + features (Tensor): (B, C, N, K) Features of locally grouped + points before pooling. 
+ + Returns: + Tensor: (B, C, N) Pooled features aggregating local information. + """ + if self.pool_mod == 'max': + # (B, C, N, 1) + new_features = F.max_pool2d( + features, kernel_size=[1, features.size(3)]) + elif self.pool_mod == 'avg': + # (B, C, N, 1) + new_features = F.avg_pool2d( + features, kernel_size=[1, features.size(3)]) + else: + raise NotImplementedError + + return new_features.squeeze(-1).contiguous() + + def forward( + self, + points_xyz: Tensor, + features: Optional[Tensor] = None, + indices: Optional[Tensor] = None, + target_xyz: Optional[Tensor] = None, + ) -> Tuple[Tensor]: + """Forward. + + Args: + points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. + features (Tensor, optional): (B, C, N) Features of each point. + Defaults to None. + indices (Tensor, optional): (B, num_point) Index of the features. + Defaults to None. + target_xyz (Tensor, optional): (B, M, 3) New coords of the outputs. + Defaults to None. + + Returns: + Tuple[Tensor]: + + - new_xyz: (B, M, 3) Where M is the number of points. + New features xyz. + - new_features: (B, M, sum_k(mlps[k][-1])) Where M is the + number of points. New feature descriptors. + - indices: (B, M) Where M is the number of points. + Index of the features. + """ + new_features_list = [] + + # sample points, (B, num_point, 3), (B, num_point) + new_xyz, indices = self._sample_points(points_xyz, features, indices, + target_xyz) + + for i in range(len(self.groupers)): + # grouped_results may contain: + # - grouped_features: (B, C, num_point, nsample) + # - grouped_xyz: (B, 3, num_point, nsample) + # - grouped_idx: (B, num_point, nsample) + grouped_results = self.groupers[i](points_xyz, new_xyz, features) + + # (B, mlp[-1], num_point, nsample) + new_features = self.mlps[i](grouped_results) + + # this is a bit hack because PAConv outputs two values + # we take the first one as feature + if isinstance(self.mlps[i][0], PAConv): + assert isinstance(new_features, tuple) + new_features = new_features[0] + + # (B, mlp[-1], num_point) + new_features = self._pool_features(new_features) + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1), indices + + +@SA_MODULES.register_module() +class PointSAModuleMSG(BasePointSAModule): + """Point set abstraction module with multi-scale grouping (MSG) used in + PointNets. + + Args: + num_point (int): Number of points. + radii (List[float]): List of radius in each ball query. + sample_nums (List[int]): Number of samples in each ball query. + mlp_channels (List[List[int]]): Specify of the pointnet before + the global pooling for each scale. + fps_mod (List[str]): Type of FPS method, valid mod + ['F-FPS', 'D-FPS', 'FS']. Defaults to ['D-FPS']. + + - F-FPS: using feature distances for FPS. + - D-FPS: using Euclidean distances of points for FPS. + - FS: using F-FPS and D-FPS simultaneously. + fps_sample_range_list (List[int]): Range of points to apply FPS. + Defaults to [-1]. + dilated_group (bool): Whether to use dilated ball query. + Defaults to False. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN2d'). + use_xyz (bool): Whether to use xyz. Defaults to True. + pool_mod (str): Type of pooling method. Defaults to 'max'. + normalize_xyz (bool): Whether to normalize local XYZ with radius. + Defaults to False. + bias (bool or str): If specified as `auto`, it will be decided by + `norm_cfg`. `bias` will be set as True if `norm_cfg` is None, + otherwise False. Defaults to 'auto'. 
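+
+    Example:
+        An illustrative multi-scale call; the sampling and grouping ops come
+        from ``mmcv.ops`` and are assumed to run on a CUDA device, and every
+        size below is a toy value (with ``use_xyz=True`` the per-scale input
+        channels become ``C + 3``):
+
+        >>> import torch
+        >>> sa = PointSAModuleMSG(
+        ...     num_point=128,
+        ...     radii=[0.2, 0.4],
+        ...     sample_nums=[16, 32],
+        ...     mlp_channels=[[16, 32], [16, 64]]).cuda()
+        >>> xyz = torch.rand(2, 1024, 3).cuda()     # (B, N, 3)
+        >>> feats = torch.rand(2, 16, 1024).cuda()  # (B, C, N)
+        >>> new_xyz, new_feats, idx = sa(xyz, feats)
+        >>> new_feats.shape  # 32 + 64 output channels are concatenated
+        torch.Size([2, 96, 128])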
+ """ + + def __init__(self, + num_point: int, + radii: List[float], + sample_nums: List[int], + mlp_channels: List[List[int]], + fps_mod: List[str] = ['D-FPS'], + fps_sample_range_list: List[int] = [-1], + dilated_group: bool = False, + norm_cfg: ConfigType = dict(type='BN2d'), + use_xyz: bool = True, + pool_mod: str = 'max', + normalize_xyz: bool = False, + bias: Union[bool, str] = 'auto') -> None: + super(PointSAModuleMSG, self).__init__( + num_point=num_point, + radii=radii, + sample_nums=sample_nums, + mlp_channels=mlp_channels, + fps_mod=fps_mod, + fps_sample_range_list=fps_sample_range_list, + dilated_group=dilated_group, + use_xyz=use_xyz, + pool_mod=pool_mod, + normalize_xyz=normalize_xyz) + + for i in range(len(self.mlp_channels)): + mlp_channel = self.mlp_channels[i] + if use_xyz: + mlp_channel[0] += 3 + + mlp = nn.Sequential() + for i in range(len(mlp_channel) - 1): + mlp.add_module( + f'layer{i}', + ConvModule( + mlp_channel[i], + mlp_channel[i + 1], + kernel_size=(1, 1), + stride=(1, 1), + conv_cfg=dict(type='Conv2d'), + norm_cfg=norm_cfg, + bias=bias)) + self.mlps.append(mlp) + + +@SA_MODULES.register_module() +class PointSAModule(PointSAModuleMSG): + """Point set abstraction module with single-scale grouping (SSG) used in + PointNets. + + Args: + mlp_channels (List[int]): Specify of the pointnet before + the global pooling for each scale. + num_point (int, optional): Number of points. Defaults to None. + radius (float, optional): Radius to group with. Defaults to None. + num_sample (int, optional): Number of samples in each ball query. + Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Default to dict(type='BN2d'). + use_xyz (bool): Whether to use xyz. Defaults to True. + pool_mod (str): Type of pooling method. Defaults to 'max'. + fps_mod (List[str]): Type of FPS method, valid mod + ['F-FPS', 'D-FPS', 'FS']. Defaults to ['D-FPS']. + fps_sample_range_list (List[int]): Range of points to apply FPS. + Defaults to [-1]. + normalize_xyz (bool): Whether to normalize local XYZ with radius. + Defaults to False. + """ + + def __init__(self, + mlp_channels: List[int], + num_point: Optional[int] = None, + radius: Optional[float] = None, + num_sample: Optional[int] = None, + norm_cfg: ConfigType = dict(type='BN2d'), + use_xyz: bool = True, + pool_mod: str = 'max', + fps_mod: List[str] = ['D-FPS'], + fps_sample_range_list: List[int] = [-1], + normalize_xyz: bool = False) -> None: + super(PointSAModule, self).__init__( + mlp_channels=[mlp_channels], + num_point=num_point, + radii=[radius], + sample_nums=[num_sample], + norm_cfg=norm_cfg, + use_xyz=use_xyz, + pool_mod=pool_mod, + fps_mod=fps_mod, + fps_sample_range_list=fps_sample_range_list, + normalize_xyz=normalize_xyz) diff --git a/mmdet3d/models/layers/pointnet_modules/stack_point_sa_module.py b/mmdet3d/models/layers/pointnet_modules/stack_point_sa_module.py new file mode 100755 index 0000000..6283932 --- /dev/null +++ b/mmdet3d/models/layers/pointnet_modules/stack_point_sa_module.py @@ -0,0 +1,199 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.ops import ball_query, grouping_operation +from mmengine.model import BaseModule +from torch import Tensor + +from mmdet3d.registry import MODELS + + +class StackQueryAndGroup(BaseModule): + """Find nearby points in spherical space. + + Args: + radius (float): List of radius in each ball query. 
+ sample_nums (int): Number of samples in each ball query. + use_xyz (bool): Whether to use xyz. Default: True. + init_cfg (dict, optional): Initialize config of + model. Defaults to None. + """ + + def __init__(self, + radius: float, + sample_nums: int, + use_xyz: bool = True, + init_cfg: dict = None): + super().__init__(init_cfg=init_cfg) + self.radius, self.sample_nums, self.use_xyz = \ + radius, sample_nums, use_xyz + + def forward(self, + xyz: torch.Tensor, + xyz_batch_cnt: torch.Tensor, + new_xyz: torch.Tensor, + new_xyz_batch_cnt: torch.Tensor, + features: torch.Tensor = None) -> Tuple[Tensor, Tensor]: + """Forward. + + Args: + xyz (Tensor): Tensor of the xyz coordinates + of the features shape with (N1 + N2 ..., 3). + xyz_batch_cnt: (Tensor): Stacked input xyz coordinates nums in + each batch, just like (N1, N2, ...). + new_xyz (Tensor): New coords of the outputs shape with + (M1 + M2 ..., 3). + new_xyz_batch_cnt: (Tensor): Stacked new xyz coordinates nums + in each batch, just like (M1, M2, ...). + features (Tensor, optional): Features of each point with shape + (N1 + N2 ..., C). C is features channel number. Default: None. + """ + assert xyz.shape[0] == xyz_batch_cnt.sum( + ), f'xyz: {str(xyz.shape)}, xyz_batch_cnt: str(new_xyz_batch_cnt)' + assert new_xyz.shape[0] == new_xyz_batch_cnt.sum(), \ + 'new_xyz: str(new_xyz.shape), new_xyz_batch_cnt: ' \ + 'str(new_xyz_batch_cnt)' + + # idx: (M1 + M2 ..., nsample) + idx = ball_query(0, self.radius, self.sample_nums, xyz, new_xyz, + xyz_batch_cnt, new_xyz_batch_cnt) + empty_ball_mask = (idx[:, 0] == -1) + idx[empty_ball_mask] = 0 + grouped_xyz = grouping_operation( + xyz, idx, xyz_batch_cnt, + new_xyz_batch_cnt) # (M1 + M2, 3, nsample) + grouped_xyz -= new_xyz.unsqueeze(-1) + + grouped_xyz[empty_ball_mask] = 0 + if features is not None: + grouped_features = grouping_operation( + features, idx, xyz_batch_cnt, + new_xyz_batch_cnt) # (M1 + M2, C, nsample) + grouped_features[empty_ball_mask] = 0 + if self.use_xyz: + new_features = torch.cat( + [grouped_xyz, grouped_features], + dim=1) # (M1 + M2 ..., C + 3, nsample) + else: + new_features = grouped_features + else: + assert self.use_xyz, 'Cannot have not features and not' \ + ' use xyz as a feature!' + new_features = grouped_xyz + return new_features, idx + + +@MODELS.register_module() +class StackedSAModuleMSG(BaseModule): + """Stack point set abstraction module. + + Args: + in_channels (int): Input channels. + radius (list[float]): List of radius in each ball query. + sample_nums (list[int]): Number of samples in each ball query. + mlp_channels (list[list[int]]): Specify mlp channels of the + pointnet before the global pooling for each scale to encode + point features. + use_xyz (bool): Whether to use xyz. Default: True. + pool_mod (str): Type of pooling method. + Default: 'max_pool'. + norm_cfg (dict): Type of normalization method. Defaults to + dict(type='BN2d', eps=1e-5, momentum=0.01). + init_cfg (dict, optional): Initialize config of + model. Defaults to None. 
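+
+    Example:
+        An illustrative call in the stacked-batch format; the CUDA ball query
+        from ``mmcv.ops`` is assumed and all sizes are toy values:
+
+        >>> import torch
+        >>> sa = StackedSAModuleMSG(
+        ...     in_channels=16,
+        ...     radius=[0.4],
+        ...     sample_nums=[16],
+        ...     mlp_channels=[[32, 64]]).cuda()
+        >>> xyz = torch.rand(512, 3).cuda()             # N1 + N2 = 256 + 256
+        >>> xyz_batch_cnt = torch.tensor([256, 256], dtype=torch.int32).cuda()
+        >>> new_xyz = torch.rand(128, 3).cuda()         # M1 + M2 = 64 + 64
+        >>> new_xyz_batch_cnt = torch.tensor([64, 64], dtype=torch.int32).cuda()
+        >>> feats = torch.rand(512, 16).cuda()          # (N1 + N2, C)
+        >>> _, new_feats = sa(xyz, xyz_batch_cnt, new_xyz,
+        ...                   new_xyz_batch_cnt, feats)
+        >>> new_feats.shape
+        torch.Size([128, 64])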
+ """ + + def __init__(self, + in_channels: int, + radius: List[float], + sample_nums: List[int], + mlp_channels: List[List[int]], + use_xyz: bool = True, + pool_mod='max', + norm_cfg: dict = dict(type='BN2d', eps=1e-5, momentum=0.01), + init_cfg: dict = None, + **kwargs) -> None: + super(StackedSAModuleMSG, self).__init__(init_cfg=init_cfg) + assert len(radius) == len(sample_nums) == len(mlp_channels) + + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radius)): + cin = in_channels + if use_xyz: + cin += 3 + cur_radius = radius[i] + nsample = sample_nums[i] + mlp_spec = mlp_channels[i] + + self.groupers.append( + StackQueryAndGroup(cur_radius, nsample, use_xyz=use_xyz)) + + mlp = nn.Sequential() + for i in range(len(mlp_spec)): + cout = mlp_spec[i] + mlp.add_module( + f'layer{i}', + ConvModule( + cin, + cout, + kernel_size=(1, 1), + stride=(1, 1), + conv_cfg=dict(type='Conv2d'), + norm_cfg=norm_cfg, + bias=False)) + cin = cout + self.mlps.append(mlp) + self.pool_mod = pool_mod + + def forward(self, + xyz: Tensor, + xyz_batch_cnt: Tensor, + new_xyz: Tensor, + new_xyz_batch_cnt: Tensor, + features: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: + """Forward. + + Args: + xyz (Tensor): Tensor of the xyz coordinates + of the features shape with (N1 + N2 ..., 3). + xyz_batch_cnt: (Tensor): Stacked input xyz coordinates nums in + each batch, just like (N1, N2, ...). + new_xyz (Tensor): New coords of the outputs shape with + (M1 + M2 ..., 3). + new_xyz_batch_cnt: (Tensor): Stacked new xyz coordinates nums + in each batch, just like (M1, M2, ...). + features (Tensor, optional): Features of each point with shape + (N1 + N2 ..., C). C is features channel number. Default: None. + + Returns: + Return new points coordinates and features: + - new_xyz (Tensor): Target points coordinates with shape + (N1 + N2 ..., 3). + - new_features (Tensor): Target points features with shape + (M1 + M2 ..., sum_k(mlps[k][-1])). + """ + new_features_list = [] + for k in range(len(self.groupers)): + grouped_features, ball_idxs = self.groupers[k]( + xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, + features) # (M1 + M2, Cin, nsample) + grouped_features = grouped_features.permute(1, 0, + 2).unsqueeze(dim=0) + new_features = self.mlps[k](grouped_features) + # (M1 + M2 ..., Cout, nsample) + if self.pool_mod == 'max': + new_features = new_features.max(-1).values + elif self.pool_mod == 'avg': + new_features = new_features.mean(-1) + else: + raise NotImplementedError + new_features = new_features.squeeze(dim=0).permute(1, 0) + new_features_list.append(new_features) + + new_features = torch.cat(new_features_list, dim=1) + + return new_xyz, new_features diff --git a/mmdet3d/models/layers/sparse_block.py b/mmdet3d/models/layers/sparse_block.py new file mode 100755 index 0000000..14fc4de --- /dev/null +++ b/mmdet3d/models/layers/sparse_block.py @@ -0,0 +1,209 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Tuple, Union + +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmdet.models.backbones.resnet import BasicBlock, Bottleneck +from torch import nn + +from mmdet3d.utils import OptConfigType +from .spconv import IS_SPCONV2_AVAILABLE + +if IS_SPCONV2_AVAILABLE: + from spconv.pytorch import SparseConvTensor, SparseModule, SparseSequential +else: + from mmcv.ops import SparseConvTensor, SparseModule, SparseSequential + + +def replace_feature(out: SparseConvTensor, + new_features: SparseConvTensor) -> SparseConvTensor: + if 'replace_feature' in out.__dir__(): + # spconv 2.x behaviour + return out.replace_feature(new_features) + else: + out.features = new_features + return out + + +class SparseBottleneck(Bottleneck, SparseModule): + """Sparse bottleneck block for PartA^2. + + Bottleneck block implemented with submanifold sparse convolution. + + Args: + inplanes (int): Inplanes of block. + planes (int): Planes of block. + stride (int or Tuple[int]): Stride of the first block. Defaults to 1. + downsample (Module, optional): Down sample module for block. + Defaults to None. + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + normalization layer. Defaults to None. + """ + + expansion = 4 + + def __init__(self, + inplanes: int, + planes: int, + stride: Union[int, Tuple[int]] = 1, + downsample: nn.Module = None, + conv_cfg: OptConfigType = None, + norm_cfg: OptConfigType = None) -> None: + + SparseModule.__init__(self) + Bottleneck.__init__( + self, + inplanes, + planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + def forward(self, x: SparseConvTensor) -> SparseConvTensor: + identity = x.features + + out = self.conv1(x) + out = replace_feature(out, self.bn1(out.features)) + out = replace_feature(out, self.relu(out.features)) + + out = self.conv2(out) + out = replace_feature(out, self.bn2(out.features)) + out = replace_feature(out, self.relu(out.features)) + + out = self.conv3(out) + out = replace_feature(out, self.bn3(out.features)) + + if self.downsample is not None: + identity = self.downsample(x) + + out = replace_feature(out, out.features + identity) + out = replace_feature(out, self.relu(out.features)) + + return out + + +class SparseBasicBlock(BasicBlock, SparseModule): + """Sparse basic block for PartA^2. + + Sparse basic block implemented with submanifold sparse convolution. + + Args: + inplanes (int): Inplanes of block. + planes (int): Planes of block. + stride (int or Tuple[int]): Stride of the first block. Defaults to 1. + downsample (Module, optional): Down sample module for block. + Defaults to None. + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + normalization layer. Defaults to None. 
+ """ + + expansion = 1 + + def __init__(self, + inplanes: int, + planes: int, + stride: Union[int, Tuple[int]] = 1, + downsample: nn.Module = None, + conv_cfg: OptConfigType = None, + norm_cfg: OptConfigType = None) -> None: + SparseModule.__init__(self) + BasicBlock.__init__( + self, + inplanes, + planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + def forward(self, x: SparseConvTensor) -> SparseConvTensor: + identity = x.features + + assert x.features.dim() == 2, f'x.features.dim()={x.features.dim()}' + out = self.conv1(x) + out = replace_feature(out, self.norm1(out.features)) + out = replace_feature(out, self.relu(out.features)) + + out = self.conv2(out) + out = replace_feature(out, self.norm2(out.features)) + + if self.downsample is not None: + identity = self.downsample(x) + + out = replace_feature(out, out.features + identity) + out = replace_feature(out, self.relu(out.features)) + + return out + + +def make_sparse_convmodule( + in_channels: int, + out_channels: int, + kernel_size: Union[int, Tuple[int]], + indice_key: str, + stride: Union[int, Tuple[int]] = 1, + padding: Union[int, Tuple[int]] = 0, + conv_type: str = 'SubMConv3d', + norm_cfg: OptConfigType = None, + order: Tuple[str] = ('conv', 'norm', 'act') +) -> SparseSequential: + """Make sparse convolution module. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of out channels. + kernel_size (int | Tuple[int]): Kernel size of convolution. + indice_key (str): The indice key used for sparse tensor. + stride (int or tuple[int]): The stride of convolution. + padding (int or tuple[int]): The padding number of input. + conv_type (str): Sparse conv type in spconv. Defaults to 'SubMConv3d'. + norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + normalization layer. Defaults to None. + order (Tuple[str]): The order of conv/norm/activation layers. It is a + sequence of "conv", "norm" and "act". Common examples are + ("conv", "norm", "act") and ("act", "conv", "norm"). + Defaults to ('conv', 'norm', 'act'). + + Returns: + spconv.SparseSequential: sparse convolution module. + """ + assert isinstance(order, tuple) and len(order) <= 3 + assert set(order) | {'conv', 'norm', 'act'} == {'conv', 'norm', 'act'} + + conv_cfg = dict(type=conv_type, indice_key=indice_key) + + layers = list() + for layer in order: + if layer == 'conv': + if conv_type not in [ + 'SparseInverseConv3d', 'SparseInverseConv2d', + 'SparseInverseConv1d' + ]: + layers.append( + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + bias=False)) + else: + layers.append( + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + bias=False)) + elif layer == 'norm': + layers.append(build_norm_layer(norm_cfg, out_channels)[1]) + elif layer == 'act': + layers.append(nn.ReLU(inplace=True)) + + layers = SparseSequential(*layers) + return layers diff --git a/mmdet3d/models/layers/spconv/__init__.py b/mmdet3d/models/layers/spconv/__init__.py new file mode 100755 index 0000000..37b533e --- /dev/null +++ b/mmdet3d/models/layers/spconv/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .overwrite_spconv import register_spconv2 + +try: + import spconv +except ImportError: + IS_SPCONV2_AVAILABLE = False +else: + if hasattr(spconv, '__version__') and spconv.__version__ >= '2.0.0': + IS_SPCONV2_AVAILABLE = register_spconv2() + else: + IS_SPCONV2_AVAILABLE = False + +__all__ = ['IS_SPCONV2_AVAILABLE'] diff --git a/mmdet3d/models/layers/spconv/overwrite_spconv/__init__.py b/mmdet3d/models/layers/spconv/overwrite_spconv/__init__.py new file mode 100755 index 0000000..2e93d9c --- /dev/null +++ b/mmdet3d/models/layers/spconv/overwrite_spconv/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .write_spconv2 import register_spconv2 + +__all__ = ['register_spconv2'] diff --git a/mmdet3d/models/layers/spconv/overwrite_spconv/write_spconv2.py b/mmdet3d/models/layers/spconv/overwrite_spconv/write_spconv2.py new file mode 100755 index 0000000..fa2ae51 --- /dev/null +++ b/mmdet3d/models/layers/spconv/overwrite_spconv/write_spconv2.py @@ -0,0 +1,104 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +from typing import List, OrderedDict + +from mmengine.registry import MODELS +from torch.nn.parameter import Parameter + + +def register_spconv2() -> bool: + """This func registers spconv2.0 spconv ops to overwrite the default mmcv + spconv ops.""" + try: + from spconv.pytorch import (SparseConv2d, SparseConv3d, SparseConv4d, + SparseConvTranspose2d, + SparseConvTranspose3d, SparseInverseConv2d, + SparseInverseConv3d, SparseModule, + SubMConv2d, SubMConv3d, SubMConv4d) + except ImportError: + return False + else: + MODELS._register_module(SparseConv2d, 'SparseConv2d', force=True) + MODELS._register_module(SparseConv3d, 'SparseConv3d', force=True) + MODELS._register_module(SparseConv4d, 'SparseConv4d', force=True) + + MODELS._register_module( + SparseConvTranspose2d, 'SparseConvTranspose2d', force=True) + MODELS._register_module( + SparseConvTranspose3d, 'SparseConvTranspose3d', force=True) + + MODELS._register_module( + SparseInverseConv2d, 'SparseInverseConv2d', force=True) + MODELS._register_module( + SparseInverseConv3d, 'SparseInverseConv3d', force=True) + + MODELS._register_module(SubMConv2d, 'SubMConv2d', force=True) + MODELS._register_module(SubMConv3d, 'SubMConv3d', force=True) + MODELS._register_module(SubMConv4d, 'SubMConv4d', force=True) + SparseModule._version = 2 + SparseModule._load_from_state_dict = _load_from_state_dict + return True + + +def _load_from_state_dict(self, state_dict: OrderedDict, prefix: str, + local_metadata: dict, strict: bool, + missing_keys: List[str], unexpected_keys: List[str], + error_msgs: List[str]) -> None: + """Rewrite this func to compat the convolutional kernel weights between + spconv 1.x in MMCV and 2.x in spconv2.x. + + Kernel weights in MMCV spconv has shape in (D,H,W,in_channel,out_channel) , + while those in spcon2.x is in (out_channel,D,H,W,in_channel). 
+ """ + version = local_metadata.get('version', None) + for hook in self._load_state_dict_pre_hooks.values(): + hook(state_dict, prefix, local_metadata, strict, missing_keys, + unexpected_keys, error_msgs) + + local_name_params = itertools.chain(self._parameters.items(), + self._buffers.items()) + local_state = {k: v.data for k, v in local_name_params if v is not None} + + for name, param in local_state.items(): + key = prefix + name + if key in state_dict: + input_param = state_dict[key] + + # Backward compatibility: loading 1-dim tensor from + # 0.3.* to version 0.4+ + if len(param.shape) == 0 and len(input_param.shape) == 1: + input_param = input_param[0] + if version != 2: + dims = [len(input_param.shape) - 1] + list( + range(len(input_param.shape) - 1)) + input_param = input_param.permute(*dims) + if input_param.shape != param.shape: + # local shape should match the one in checkpoint + error_msgs.append( + f'size mismatch for {key}: copying a param with ' + f'shape {key, input_param.shape} from checkpoint,' + f'the shape in current model is {param.shape}.') + continue + + if isinstance(input_param, Parameter): + # backwards compatibility for serialized parameters + input_param = input_param.data + try: + param.copy_(input_param) + except Exception: + error_msgs.append( + f'While copying the parameter named "{key}", whose ' + f'dimensions in the model are {param.size()} and whose ' + f'dimensions in the checkpoint are {input_param.size()}.') + elif strict: + missing_keys.append(key) + + if strict: + for key, input_param in state_dict.items(): + if key.startswith(prefix): + input_name = key[len(prefix):] + input_name = input_name.split( + '.', 1)[0] # get the name of param/buffer/child + if input_name not in self._modules \ + and input_name not in local_state: + unexpected_keys.append(key) diff --git a/mmdet3d/models/layers/torchsparse/__init__.py b/mmdet3d/models/layers/torchsparse/__init__.py new file mode 100755 index 0000000..1232c73 --- /dev/null +++ b/mmdet3d/models/layers/torchsparse/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .torchsparse_wrapper import register_torchsparse + +try: + import torchsparse # noqa +except ImportError: + IS_TORCHSPARSE_AVAILABLE = False +else: + IS_TORCHSPARSE_AVAILABLE = register_torchsparse() + +__all__ = ['IS_TORCHSPARSE_AVAILABLE'] diff --git a/mmdet3d/models/layers/torchsparse/torchsparse_wrapper.py b/mmdet3d/models/layers/torchsparse/torchsparse_wrapper.py new file mode 100755 index 0000000..c79c1b7 --- /dev/null +++ b/mmdet3d/models/layers/torchsparse/torchsparse_wrapper.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.registry import MODELS + + +def register_torchsparse() -> bool: + """This func registers torchsparse modules.""" + try: + from torchsparse.nn import (BatchNorm, Conv3d, GroupNorm, LeakyReLU, + ReLU) + except ImportError: + return False + else: + MODELS._register_module(Conv3d, 'TorchSparseConv3d') + MODELS._register_module(BatchNorm, 'TorchSparseBatchNorm') + MODELS._register_module(GroupNorm, 'TorchSparseGroupNorm') + MODELS._register_module(ReLU, 'TorchSparseReLU') + MODELS._register_module(LeakyReLU, 'TorchSparseLeakyReLU') + return True diff --git a/mmdet3d/models/layers/torchsparse_block.py b/mmdet3d/models/layers/torchsparse_block.py new file mode 100755 index 0000000..251de07 --- /dev/null +++ b/mmdet3d/models/layers/torchsparse_block.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Sequence, Union + +from mmengine.model import BaseModule +from torch import nn + +from mmdet3d.utils import OptConfigType +from .torchsparse import IS_TORCHSPARSE_AVAILABLE + +if IS_TORCHSPARSE_AVAILABLE: + import torchsparse.nn as spnn + from torchsparse.tensor import SparseTensor +else: + SparseTensor = None + + +class TorchSparseConvModule(BaseModule): + """A torchsparse conv block that bundles conv/norm/activation layers. + + Args: + in_channels (int): In channels of block. + out_channels (int): Out channels of block. + kernel_size (int or Tuple[int]): Kernel_size of block. + stride (int or Tuple[int]): Stride of the first block. Defaults to 1. + dilation (int): Dilation of block. Defaults to 1. + transposed (bool): Whether use transposed convolution operator. + Defaults to False. + init_cfg (:obj:`ConfigDict` or dict, optional): Initialization config. + Defaults to None. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: Union[int, Sequence[int]], + stride: Union[int, Sequence[int]] = 1, + dilation: int = 1, + bias: bool = False, + transposed: bool = False, + init_cfg: OptConfigType = None, + ) -> None: + super().__init__(init_cfg) + self.net = nn.Sequential( + spnn.Conv3d(in_channels, out_channels, kernel_size, stride, + dilation, bias, transposed), + spnn.BatchNorm(out_channels), + spnn.ReLU(inplace=True), + ) + + def forward(self, x: SparseTensor) -> SparseTensor: + out = self.net(x) + return out + + +class TorchSparseResidualBlock(BaseModule): + """Torchsparse residual basic block for MinkUNet. + + Args: + in_channels (int): In channels of block. + out_channels (int): Out channels of block. + kernel_size (int or Tuple[int]): Kernel_size of block. + stride (int or Tuple[int]): Stride of the first block. Defaults to 1. + dilation (int): Dilation of block. Defaults to 1. + init_cfg (:obj:`ConfigDict` or dict, optional): Initialization config. + Defaults to None. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: Union[int, Sequence[int]], + stride: Union[int, Sequence[int]] = 1, + dilation: int = 1, + bias: bool = False, + init_cfg: OptConfigType = None, + ) -> None: + super().__init__(init_cfg) + self.net = nn.Sequential( + spnn.Conv3d(in_channels, out_channels, kernel_size, stride, + dilation, bias), + spnn.BatchNorm(out_channels), + spnn.ReLU(inplace=True), + spnn.Conv3d( + out_channels, + out_channels, + kernel_size, + stride=1, + dilation=dilation, + bias=bias), + spnn.BatchNorm(out_channels), + ) + if in_channels == out_channels and stride == 1: + self.downsample = nn.Identity() + else: + self.downsample = nn.Sequential( + spnn.Conv3d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + dilation=dilation, + bias=bias), + spnn.BatchNorm(out_channels), + ) + + self.relu = spnn.ReLU(inplace=True) + + def forward(self, x: SparseTensor) -> SparseTensor: + out = self.relu(self.net(x) + self.downsample(x)) + return out diff --git a/mmdet3d/models/layers/transformer.py b/mmdet3d/models/layers/transformer.py new file mode 100755 index 0000000..d2c9663 --- /dev/null +++ b/mmdet3d/models/layers/transformer.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
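TorchSparseConvModule and TorchSparseResidualBlock above are thin bundles of torchsparse layers. A rough usage sketch follows, assuming a CUDA device and a torchsparse release whose SparseTensor accepts (feats, coords) with integer (x, y, z, batch_idx) coordinates; the coordinate layout varies across torchsparse versions, so treat this as illustrative only.

import torch
from torchsparse.tensor import SparseTensor  # version-dependent, see note above

from mmdet3d.models.layers.torchsparse_block import TorchSparseConvModule

coords = torch.randint(0, 64, (1000, 4), dtype=torch.int32).cuda()  # assumed (x, y, z, batch_idx) layout
feats = torch.rand(1000, 4).cuda()
x = SparseTensor(feats, coords)

block = TorchSparseConvModule(in_channels=4, out_channels=32, kernel_size=3).cuda()
out = block(x)  # SparseTensor carrying 32-channel features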
+from typing import Optional + +from mmcv.cnn.bricks.transformer import MultiheadAttention +from mmengine.registry import MODELS +from torch import Tensor +from torch import nn as nn + +from mmdet3d.utils import ConfigType, OptMultiConfig + + +@MODELS.register_module() +class GroupFree3DMHA(MultiheadAttention): + """A wrapper for torch.nn.MultiheadAttention for GroupFree3D. + + This module implements MultiheadAttention with identity connection, + and positional encoding used in DETR is also passed as input. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. Same as + `nn.MultiheadAttention`. + attn_drop (float): A Dropout layer on attn_output_weights. + Defaults to 0.0. + proj_drop (float): A Dropout layer. Defaults to 0.0. + dropout_layer (ConfigType): The dropout_layer used when adding + the shortcut. Defaults to dict(type='DropOut', drop_prob=0.). + init_cfg (:obj:`ConfigDict` or dict or List[:obj:`Contigdict` or dict], + optional): Initialization config dict. Defaults to None. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) or (n, batch, embed_dim). + Defaults to False. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + attn_drop: float = 0., + proj_drop: float = 0., + dropout_layer: ConfigType = dict( + type='DropOut', drop_prob=0.), + init_cfg: OptMultiConfig = None, + batch_first: bool = False, + **kwargs) -> None: + super(GroupFree3DMHA, + self).__init__(embed_dims, num_heads, attn_drop, proj_drop, + dropout_layer, init_cfg, batch_first, **kwargs) + + def forward(self, + query: Tensor, + key: Tensor, + value: Tensor, + identity: Tensor, + query_pos: Optional[Tensor] = None, + key_pos: Optional[Tensor] = None, + attn_mask: Optional[Tensor] = None, + key_padding_mask: Optional[Tensor] = None, + **kwargs) -> Tensor: + """Forward function for `GroupFree3DMHA`. + + **kwargs allow passing a more general data flow when combining + with other operations in `transformerlayer`. + + Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims]. Same in `nn.MultiheadAttention.forward`. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims]. Same in `nn.MultiheadAttention.forward`. + If None, the ``query`` will be used. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. + If None, the `key` will be used. + identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. If None, `x` will be used. + query_pos (Tensor, optional): The positional encoding for query, + with the same shape as `x`. Defaults to None. + If not None, it will be added to `x` before forward function. + key_pos (Tensor, optional): The positional encoding for `key`, + with the same shape as `key`. Defaults to None. If not None, + it will be added to `key` before forward function. If None, + and `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor, optional): ByteTensor mask with shape + [num_queries, num_keys]. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + key_padding_mask (Tensor, optional): ByteTensor with shape + [bs, num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + + Returns: + Tensor: Forwarded results with shape [num_queries, bs, embed_dims]. 
+ """ + + if hasattr(self, 'operation_name'): + if self.operation_name == 'self_attn': + value = value + query_pos + elif self.operation_name == 'cross_attn': + value = value + key_pos + else: + raise NotImplementedError( + f'{self.__class__.name} ' + f"can't be used as {self.operation_name}") + else: + value = value + query_pos + + return super(GroupFree3DMHA, self).forward( + query=query, + key=key, + value=value, + identity=identity, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask, + **kwargs) + + +@MODELS.register_module() +class ConvBNPositionalEncoding(nn.Module): + """Absolute position embedding with Conv learning. + + Args: + input_channel (int): Input features dim. + num_pos_feats (int): Output position features dim. + Defaults to 288 to be consistent with seed features dim. + """ + + def __init__(self, input_channel: int, num_pos_feats: int = 288) -> None: + super(ConvBNPositionalEncoding, self).__init__() + self.position_embedding_head = nn.Sequential( + nn.Conv1d(input_channel, num_pos_feats, kernel_size=1), + nn.BatchNorm1d(num_pos_feats), nn.ReLU(inplace=True), + nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1)) + + def forward(self, xyz: Tensor) -> Tensor: + """Forward pass. + + Args: + xyz (Tensor): (B, N, 3) The coordinates to embed. + + Returns: + Tensor: (B, num_pos_feats, N) The embedded position features. + """ + xyz = xyz.permute(0, 2, 1) + position_embedding = self.position_embedding_head(xyz) + return position_embedding diff --git a/mmdet3d/models/layers/vote_module.py b/mmdet3d/models/layers/vote_module.py new file mode 100755 index 0000000..8759aec --- /dev/null +++ b/mmdet3d/models/layers/vote_module.py @@ -0,0 +1,190 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +import torch +from mmcv.cnn import ConvModule +from mmengine import is_tuple_of +from torch import Tensor +from torch import nn as nn + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType, OptConfigType + + +class VoteModule(nn.Module): + """Vote module. + + Generate votes from seed point features. + + Args: + in_channels (int): Number of channels of seed point features. + vote_per_seed (int): Number of votes generated from each seed point. + Defaults to 1. + gt_per_seed (int): Number of ground truth votes generated from each + seed point. Defaults to 3. + num_points (int): Number of points to be used for voting. + Defaults to 1. + conv_channels (tuple[int]): Out channels of vote generating + convolution. Defaults to (16, 16). + conv_cfg (:obj:`ConfigDict` or dict): Config dict for convolution + layer. Defaults to dict(type='Conv1d'). + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN1d'). + norm_feats (bool): Whether to normalize features. Default to True. + with_res_feat (bool): Whether to predict residual features. + Defaults to True. + vote_xyz_range (List[float], optional): The range of points + translation. Defaults to None. + vote_loss (:obj:`ConfigDict` or dict, optional): Config of vote loss. + Defaults to None. 
+ """ + + def __init__(self, + in_channels: int, + vote_per_seed: int = 1, + gt_per_seed: int = 3, + num_points: int = -1, + conv_channels: Tuple[int] = (16, 16), + conv_cfg: ConfigType = dict(type='Conv1d'), + norm_cfg: ConfigType = dict(type='BN1d'), + act_cfg: ConfigType = dict(type='ReLU'), + norm_feats: bool = True, + with_res_feat: bool = True, + vote_xyz_range: List[float] = None, + vote_loss: OptConfigType = None) -> None: + super(VoteModule, self).__init__() + self.in_channels = in_channels + self.vote_per_seed = vote_per_seed + self.gt_per_seed = gt_per_seed + self.num_points = num_points + self.norm_feats = norm_feats + self.with_res_feat = with_res_feat + + assert vote_xyz_range is None or is_tuple_of(vote_xyz_range, float) + self.vote_xyz_range = vote_xyz_range + + if vote_loss is not None: + self.vote_loss = MODELS.build(vote_loss) + + prev_channels = in_channels + vote_conv_list = list() + for k in range(len(conv_channels)): + vote_conv_list.append( + ConvModule( + prev_channels, + conv_channels[k], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=True, + inplace=True)) + prev_channels = conv_channels[k] + self.vote_conv = nn.Sequential(*vote_conv_list) + + # conv_out predicts coordinate and residual features + if with_res_feat: + out_channel = (3 + in_channels) * self.vote_per_seed + else: + out_channel = 3 * self.vote_per_seed + self.conv_out = nn.Conv1d(prev_channels, out_channel, 1) + + def forward(self, seed_points: Tensor, + seed_feats: Tensor) -> Tuple[Tensor]: + """Forward. + + Args: + seed_points (Tensor): Coordinate of the seed points in shape + (B, N, 3). + seed_feats (Tensor): Features of the seed points in shape + (B, C, N). + + Returns: + Tuple[torch.Tensor]: + + - vote_points: Voted xyz based on the seed points + with shape (B, M, 3), ``M=num_seed*vote_per_seed``. + - vote_features: Voted features based on the seed points with + shape (B, C, M) where ``M=num_seed*vote_per_seed``, + ``C=vote_feature_dim``. 
+ """ + if self.num_points != -1: + assert self.num_points < seed_points.shape[1], \ + f'Number of vote points ({self.num_points}) should be '\ + f'smaller than seed points size ({seed_points.shape[1]})' + seed_points = seed_points[:, :self.num_points] + seed_feats = seed_feats[..., :self.num_points] + + batch_size, feat_channels, num_seed = seed_feats.shape + num_vote = num_seed * self.vote_per_seed + x = self.vote_conv(seed_feats) + # (batch_size, (3+out_dim)*vote_per_seed, num_seed) + votes = self.conv_out(x) + + votes = votes.transpose(2, 1).view(batch_size, num_seed, + self.vote_per_seed, -1) + + offset = votes[:, :, :, 0:3] + if self.vote_xyz_range is not None: + limited_offset_list = [] + for axis in range(len(self.vote_xyz_range)): + limited_offset_list.append(offset[..., axis].clamp( + min=-self.vote_xyz_range[axis], + max=self.vote_xyz_range[axis])) + limited_offset = torch.stack(limited_offset_list, -1) + vote_points = (seed_points.unsqueeze(2) + + limited_offset).contiguous() + else: + vote_points = (seed_points.unsqueeze(2) + offset).contiguous() + vote_points = vote_points.view(batch_size, num_vote, 3) + offset = offset.reshape(batch_size, num_vote, 3).transpose(2, 1) + + if self.with_res_feat: + res_feats = votes[:, :, :, 3:] + vote_feats = (seed_feats.transpose(2, 1).unsqueeze(2) + + res_feats).contiguous() + vote_feats = vote_feats.view(batch_size, + num_vote, feat_channels).transpose( + 2, 1).contiguous() + + if self.norm_feats: + features_norm = torch.norm(vote_feats, p=2, dim=1) + vote_feats = vote_feats.div(features_norm.unsqueeze(1)) + else: + vote_feats = seed_feats + return vote_points, vote_feats, offset + + def get_loss(self, seed_points: Tensor, vote_points: Tensor, + seed_indices: Tensor, vote_targets_mask: Tensor, + vote_targets: Tensor) -> Tensor: + """Calculate loss of voting module. + + Args: + seed_points (Tensor): Coordinate of the seed points. + vote_points (Tensor): Coordinate of the vote points. + seed_indices (Tensor): Indices of seed points in raw points. + vote_targets_mask (Tensor): Mask of valid vote targets. + vote_targets (Tensor): Targets of votes. + + Returns: + Tensor: Weighted vote loss. + """ + batch_size, num_seed = seed_points.shape[:2] + + seed_gt_votes_mask = torch.gather(vote_targets_mask, 1, + seed_indices).float() + + seed_indices_expand = seed_indices.unsqueeze(-1).repeat( + 1, 1, 3 * self.gt_per_seed) + seed_gt_votes = torch.gather(vote_targets, 1, seed_indices_expand) + seed_gt_votes += seed_points.repeat(1, 1, self.gt_per_seed) + + weight = seed_gt_votes_mask / (torch.sum(seed_gt_votes_mask) + 1e-6) + distance = self.vote_loss( + vote_points.view(batch_size * num_seed, -1, 3), + seed_gt_votes.view(batch_size * num_seed, -1, 3), + dst_weight=weight.view(batch_size * num_seed, 1))[1] + vote_loss = torch.sum(torch.min(distance, dim=1)[0]) + + return vote_loss diff --git a/mmdet3d/models/losses/__init__.py b/mmdet3d/models/losses/__init__.py new file mode 100755 index 0000000..6956c72 --- /dev/null +++ b/mmdet3d/models/losses/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmdet.models.losses import FocalLoss, SmoothL1Loss, binary_cross_entropy + +from .axis_aligned_iou_loss import AxisAlignedIoULoss, axis_aligned_iou_loss +from .chamfer_distance import ChamferDistance, chamfer_distance +from .lovasz_loss import LovaszLoss +from .multibin_loss import MultiBinLoss +from .paconv_regularization_loss import PAConvRegularizationLoss +from .rotated_iou_loss import RotatedIoU3DLoss, rotated_iou_3d_loss +from .uncertain_smooth_l1_loss import UncertainL1Loss, UncertainSmoothL1Loss + +__all__ = [ + 'FocalLoss', 'SmoothL1Loss', 'binary_cross_entropy', 'ChamferDistance', + 'chamfer_distance', 'axis_aligned_iou_loss', 'AxisAlignedIoULoss', + 'PAConvRegularizationLoss', 'UncertainL1Loss', 'UncertainSmoothL1Loss', + 'MultiBinLoss', 'RotatedIoU3DLoss', 'rotated_iou_3d_loss', 'LovaszLoss' +] diff --git a/mmdet3d/models/losses/axis_aligned_iou_loss.py b/mmdet3d/models/losses/axis_aligned_iou_loss.py new file mode 100755 index 0000000..45e25c9 --- /dev/null +++ b/mmdet3d/models/losses/axis_aligned_iou_loss.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +from mmdet.models.losses.utils import weighted_loss +from torch import Tensor +from torch import nn as nn + +from mmdet3d.registry import MODELS +from mmdet3d.structures import AxisAlignedBboxOverlaps3D + + +@weighted_loss +def axis_aligned_iou_loss(pred: Tensor, target: Tensor) -> Tensor: + """Calculate the IoU loss (1-IoU) of two set of axis aligned bounding + boxes. Note that predictions and targets are one-to-one corresponded. + + Args: + pred (Tensor): Bbox predictions with shape [..., 3]. + target (Tensor): Bbox targets (gt) with shape [..., 3]. + + Returns: + Tensor: IoU loss between predictions and targets. + """ + + axis_aligned_iou = AxisAlignedBboxOverlaps3D()( + pred, target, is_aligned=True) + iou_loss = 1 - axis_aligned_iou + return iou_loss + + +@MODELS.register_module() +class AxisAlignedIoULoss(nn.Module): + """Calculate the IoU loss (1-IoU) of axis aligned bounding boxes. + + Args: + reduction (str): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. + """ + + def __init__(self, + reduction: str = 'mean', + loss_weight: float = 1.0) -> None: + super(AxisAlignedIoULoss, self).__init__() + assert reduction in ['none', 'sum', 'mean'] + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + avg_factor: Optional[float] = None, + reduction_override: Optional[str] = None, + **kwargs) -> Tensor: + """Forward function of loss calculation. + + Args: + pred (Tensor): Bbox predictions with shape [..., 3]. + target (Tensor): Bbox targets (gt) with shape [..., 3]. + weight (Tensor, optional): Weight of loss. + Defaults to None. + avg_factor (float, optional): Average factor that is used to + average the loss. Defaults to None. + reduction_override (str, optional): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to None. + + Returns: + Tensor: IoU loss between predictions and targets. 
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if (weight is not None) and (not torch.any(weight > 0)) and ( + reduction != 'none'): + return (pred * weight).sum() + return axis_aligned_iou_loss( + pred, + target, + weight=weight, + avg_factor=avg_factor, + reduction=reduction) * self.loss_weight diff --git a/mmdet3d/models/losses/chamfer_distance.py b/mmdet3d/models/losses/chamfer_distance.py new file mode 100755 index 0000000..1098213 --- /dev/null +++ b/mmdet3d/models/losses/chamfer_distance.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple, Union + +import torch +from torch import Tensor +from torch import nn as nn +from torch.nn.functional import l1_loss, mse_loss, smooth_l1_loss + +from mmdet3d.registry import MODELS + + +def chamfer_distance( + src: Tensor, + dst: Tensor, + src_weight: Union[Tensor, float] = 1.0, + dst_weight: Union[Tensor, float] = 1.0, + criterion_mode: str = 'l2', + reduction: str = 'mean') -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Calculate Chamfer Distance of two sets. + + Args: + src (Tensor): Source set with shape [B, N, C] to + calculate Chamfer Distance. + dst (Tensor): Destination set with shape [B, M, C] to + calculate Chamfer Distance. + src_weight (Tensor or float): Weight of source loss. Defaults to 1.0. + dst_weight (Tensor or float): Weight of destination loss. + Defaults to 1.0. + criterion_mode (str): Criterion mode to calculate distance. + The valid modes are 'smooth_l1', 'l1' or 'l2'. Defaults to 'l2'. + reduction (str): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to 'mean'. + + Returns: + tuple: Source and Destination loss with the corresponding indices. + + - loss_src (Tensor): The min distance + from source to destination. + - loss_dst (Tensor): The min distance + from destination to source. + - indices1 (Tensor): Index the min distance point + for each point in source to destination. + - indices2 (Tensor): Index the min distance point + for each point in destination to source. + """ + + if criterion_mode == 'smooth_l1': + criterion = smooth_l1_loss + elif criterion_mode == 'l1': + criterion = l1_loss + elif criterion_mode == 'l2': + criterion = mse_loss + else: + raise NotImplementedError + + src_expand = src.unsqueeze(2).repeat(1, 1, dst.shape[1], 1) + dst_expand = dst.unsqueeze(1).repeat(1, src.shape[1], 1, 1) + + distance = criterion(src_expand, dst_expand, reduction='none').sum(-1) + src2dst_distance, indices1 = torch.min(distance, dim=2) # (B,N) + dst2src_distance, indices2 = torch.min(distance, dim=1) # (B,M) + + loss_src = (src2dst_distance * src_weight) + loss_dst = (dst2src_distance * dst_weight) + + if reduction == 'sum': + loss_src = torch.sum(loss_src) + loss_dst = torch.sum(loss_dst) + elif reduction == 'mean': + loss_src = torch.mean(loss_src) + loss_dst = torch.mean(loss_dst) + elif reduction == 'none': + pass + else: + raise NotImplementedError + + return loss_src, loss_dst, indices1, indices2 + + +@MODELS.register_module() +class ChamferDistance(nn.Module): + """Calculate Chamfer Distance of two sets. + + Args: + mode (str): Criterion mode to calculate distance. + The valid modes are 'smooth_l1', 'l1' or 'l2'. Defaults to 'l2'. + reduction (str): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to 'mean'. + loss_src_weight (float): Weight of loss_source. Defaults to l.0. 
+ loss_dst_weight (float): Weight of loss_target. Defaults to 1.0. + """ + + def __init__(self, + mode: str = 'l2', + reduction: str = 'mean', + loss_src_weight: float = 1.0, + loss_dst_weight: float = 1.0) -> None: + super(ChamferDistance, self).__init__() + + assert mode in ['smooth_l1', 'l1', 'l2'] + assert reduction in ['none', 'sum', 'mean'] + self.mode = mode + self.reduction = reduction + self.loss_src_weight = loss_src_weight + self.loss_dst_weight = loss_dst_weight + + def forward( + self, + source: Tensor, + target: Tensor, + src_weight: Union[Tensor, float] = 1.0, + dst_weight: Union[Tensor, float] = 1.0, + reduction_override: Optional[str] = None, + return_indices: bool = False, + **kwargs + ) -> Union[Tuple[Tensor, Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]: + """Forward function of loss calculation. + + Args: + source (Tensor): Source set with shape [B, N, C] to + calculate Chamfer Distance. + target (Tensor): Destination set with shape [B, M, C] to + calculate Chamfer Distance. + src_weight (Tensor | float): + Weight of source loss. Defaults to 1.0. + dst_weight (Tensor | float): + Weight of destination loss. Defaults to 1.0. + reduction_override (str, optional): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to None. + return_indices (bool): Whether to return indices. + Defaults to False. + + Returns: + tuple[Tensor]: If ``return_indices=True``, return losses of + source and target with their corresponding indices in the + order of ``(loss_source, loss_target, indices1, indices2)``. + If ``return_indices=False``, return + ``(loss_source, loss_target)``. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + loss_source, loss_target, indices1, indices2 = chamfer_distance( + source, target, src_weight, dst_weight, self.mode, reduction) + + loss_source *= self.loss_src_weight + loss_target *= self.loss_dst_weight + + if return_indices: + return loss_source, loss_target, indices1, indices2 + else: + return loss_source, loss_target diff --git a/mmdet3d/models/losses/lovasz_loss.py b/mmdet3d/models/losses/lovasz_loss.py new file mode 100755 index 0000000..a9bcc27 --- /dev/null +++ b/mmdet3d/models/losses/lovasz_loss.py @@ -0,0 +1,356 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Directly borrowed from mmsegmentation. + +Modified from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytor +ch/lovasz_losses.py Lovasz-Softmax and Jaccard hinge loss in PyTorch Maxim +Berman 2018 ESAT-PSI KU Leuven (MIT License) +""" + +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmdet.models import weight_reduce_loss +from mmengine.utils import is_list_of + +from mmdet3d.registry import MODELS + + +def lovasz_grad(gt_sorted: torch.Tensor) -> torch.Tensor: + """Computes gradient of the Lovasz extension w.r.t sorted errors. + + See Alg. 1 in paper. + `The Lovasz-Softmax loss. `_. + + Args: + gt_sorted (torch.Tensor): Sorted ground truth. + + Return: + torch.Tensor: Gradient of the Lovasz extension. + """ + p = len(gt_sorted) + gts = gt_sorted.sum() + intersection = gts - gt_sorted.float().cumsum(0) + union = gts + (1 - gt_sorted).float().cumsum(0) + jaccard = 1. 
- intersection / union + if p > 1: # cover 1-pixel case + jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] + return jaccard + + +def flatten_binary_logits( + logits: torch.Tensor, + labels: torch.Tensor, + ignore_index: Optional[int] = None +) -> Tuple[torch.Tensor, torch.Tensor]: + """Flatten predictions and labels in the batch (binary case). Remove + tensors whose labels equal to 'ignore_index'. + + Args: + probs (torch.Tensor): Predictions to be modified. + labels (torch.Tensor): Labels to be modified. + ignore_index (int, optional): The label index to be ignored. + Defaults to None. + + Return: + tuple(torch.Tensor, torch.Tensor): Modified predictions and labels. + """ + logits = logits.view(-1) + labels = labels.view(-1) + if ignore_index is None: + return logits, labels + valid = (labels != ignore_index) + vlogits = logits[valid] + vlabels = labels[valid] + return vlogits, vlabels + + +def flatten_probs( + probs: torch.Tensor, + labels: torch.Tensor, + ignore_index: Optional[int] = None +) -> Tuple[torch.Tensor, torch.Tensor]: + """Flatten predictions and labels in the batch. Remove tensors whose labels + equal to 'ignore_index'. + + Args: + probs (torch.Tensor): Predictions to be modified. + labels (torch.Tensor): Labels to be modified. + ignore_index (int, optional): The label index to be ignored. + Defaults to None. + + Return: + tuple(torch.Tensor, torch.Tensor): Modified predictions and labels. + """ + if probs.dim() != 2: # for input with P*C + if probs.dim() == 3: + # assumes output of a sigmoid layer + B, H, W = probs.size() + probs = probs.view(B, 1, H, W) + B, C, H, W = probs.size() + probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, + C) # B*H*W, C=P,C + labels = labels.view(-1) + if ignore_index is None: + return probs, labels + valid = (labels != ignore_index) + vprobs = probs[valid.nonzero().squeeze()] + vlabels = labels[valid] + return vprobs, vlabels + + +def lovasz_hinge_flat(logits: torch.Tensor, + labels: torch.Tensor) -> torch.Tensor: + """Binary Lovasz hinge loss. + + Args: + logits (torch.Tensor): Logits at each prediction + (between -infty and +infty) with shape [P]. + labels (torch.Tensor): Binary ground truth labels (0 or 1) + with shape [P]. + + Returns: + torch.Tensor: The calculated loss. + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1. - logits * signs) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + loss = torch.dot(F.relu(errors_sorted), grad) + return loss + + +def lovasz_hinge(logits: torch.Tensor, + labels: torch.Tensor, + classes: Optional[Union[str, List[int]]] = None, + per_sample: bool = False, + class_weight: Optional[List[float]] = None, + reduction: str = 'mean', + avg_factor: Optional[int] = None, + ignore_index: int = 255) -> torch.Tensor: + """Binary Lovasz hinge loss. + + Args: + logits (torch.Tensor): Logits at each pixel + (between -infty and +infty) with shape [B, H, W]. + labels (torch.Tensor): Binary ground truth masks (0 or 1) + with shape [B, H, W]. + classes (Union[str, list[int]], optional): Placeholder, to be + consistent with other loss. Defaults to None. + per_sample (bool): If per_sample is True, compute the loss per + sample instead of per batch. Defaults to False. + class_weight (list[float], optional): Placeholder, to be consistent + with other loss. Defaults to None. + reduction (str): The method used to reduce the loss. 
Options + are "none", "mean" and "sum". This parameter only works when + per_sample is True. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. This parameter only works when per_sample is True. + Defaults to None. + ignore_index (Union[int, None]): The label index to be ignored. + Defaults to 255. + + Returns: + torch.Tensor: The calculated loss. + """ + if per_sample: + loss = [ + lovasz_hinge_flat(*flatten_binary_logits( + logit.unsqueeze(0), label.unsqueeze(0), ignore_index)) + for logit, label in zip(logits, labels) + ] + loss = weight_reduce_loss( + torch.stack(loss), None, reduction, avg_factor) + else: + loss = lovasz_hinge_flat( + *flatten_binary_logits(logits, labels, ignore_index)) + return loss + + +def lovasz_softmax_flat( + probs: torch.Tensor, + labels: torch.Tensor, + classes: Union[str, List[int]] = 'present', + class_weight: Optional[List[float]] = None) -> torch.Tensor: + """Multi-class Lovasz-Softmax loss. + + Args: + probs (torch.Tensor): Class probabilities at each prediction + (between 0 and 1) with shape [P, C] + labels (torch.Tensor): Ground truth labels (between 0 and C - 1) + with shape [P]. + classes (Union[str, list[int]]): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Defaults to 'present'. + class_weight (list[float], optional): The weight for each class. + Defaults to None. + + Returns: + torch.Tensor: The calculated loss. + """ + if probs.numel() == 0: + # only void pixels, the gradients should be 0 + return probs * 0. + C = probs.size(1) + losses = [] + class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes + for c in class_to_sum: + fg = (labels == c).float() # foreground for class c + if (classes == 'present' and fg.sum() == 0): + continue + if C == 1: + if len(classes) > 1: + raise ValueError('Sigmoid output possible only with 1 class') + class_pred = probs[:, 0] + else: + class_pred = probs[:, c] + errors = (fg - class_pred).abs() + errors_sorted, perm = torch.sort(errors, 0, descending=True) + perm = perm.data + fg_sorted = fg[perm] + loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted)) + if class_weight is not None: + loss *= class_weight[c] + losses.append(loss) + return torch.stack(losses).mean() + + +def lovasz_softmax(probs: torch.Tensor, + labels: torch.Tensor, + classes: Union[str, List[int]] = 'present', + per_sample: bool = False, + class_weight: List[float] = None, + reduction: str = 'mean', + avg_factor: Optional[int] = None, + ignore_index: int = 255) -> torch.Tensor: + """Multi-class Lovasz-Softmax loss. + + Args: + probs (torch.Tensor): Class probabilities at each + prediction (between 0 and 1) with shape [B, C, H, W]. + labels (torch.Tensor): Ground truth labels (between 0 and + C - 1) with shape [B, H, W]. + classes (Union[str, list[int]]): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Defaults to 'present'. + per_sample (bool): If per_sample is True, compute the loss per + sample instead of per batch. Defaults to False. + class_weight (list[float], optional): The weight for each class. + Defaults to None. + reduction (str): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_sample is True. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. 
This parameter only works when per_sample is True. + Defaults to None. + ignore_index (Union[int, None]): The label index to be ignored. + Defaults to 255. + + Returns: + torch.Tensor: The calculated loss. + """ + + if per_sample: + loss = [ + lovasz_softmax_flat( + *flatten_probs( + prob.unsqueeze(0), label.unsqueeze(0), ignore_index), + classes=classes, + class_weight=class_weight) + for prob, label in zip(probs, labels) + ] + loss = weight_reduce_loss( + torch.stack(loss), None, reduction, avg_factor) + else: + loss = lovasz_softmax_flat( + *flatten_probs(probs, labels, ignore_index), + classes=classes, + class_weight=class_weight) + return loss + + +@MODELS.register_module() +class LovaszLoss(nn.Module): + """LovaszLoss. + + This loss is proposed in `The Lovasz-Softmax loss: A tractable surrogate + for the optimization of the intersection-over-union measure in neural + networks `_. + + Args: + loss_type (str): Binary or multi-class loss. + Defaults to 'multi_class'. Options are "binary" and "multi_class". + classes (Union[str, list[int]]): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Defaults to 'present'. + per_sample (bool): If per_sample is True, compute the loss per + sample instead of per batch. Defaults to False. + reduction (str): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_sample is True. Defaults to 'mean'. + class_weight ([list[float], optional): Weight of each class. + Defaults to None. + loss_weight (float): Weight of the loss. Defaults to 1.0. + """ + + def __init__(self, + loss_type: str = 'multi_class', + classes: Union[str, List[int]] = 'present', + per_sample: bool = False, + reduction: str = 'mean', + class_weight: Optional[List[float]] = None, + loss_weight: float = 1.0): + super().__init__() + assert loss_type in ('binary', 'multi_class'), "loss_type should be \ + 'binary' or 'multi_class'." + + if loss_type == 'binary': + self.cls_criterion = lovasz_hinge + else: + self.cls_criterion = lovasz_softmax + assert classes in ('all', 'present') or is_list_of(classes, int) + if not per_sample: + assert reduction == 'none', "reduction should be 'none' when \ + per_sample is False." + + self.classes = classes + self.per_sample = per_sample + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = class_weight + + def forward(self, + cls_score: torch.Tensor, + label: torch.Tensor, + avg_factor: int = None, + reduction_override: str = None, + **kwargs) -> torch.Tensor: + """Forward function.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + + # if multi-class loss, transform logits to probs + if self.cls_criterion == lovasz_softmax: + cls_score = F.softmax(cls_score, dim=1) + + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + self.classes, + self.per_sample, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls diff --git a/mmdet3d/models/losses/multibin_loss.py b/mmdet3d/models/losses/multibin_loss.py new file mode 100755 index 0000000..91a1271 --- /dev/null +++ b/mmdet3d/models/losses/multibin_loss.py @@ -0,0 +1,107 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
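LovaszLoss above is registered in MODELS, so it is normally built from a config dict. A small CPU sketch (shapes and class count are illustrative; note that the per_sample=False path asserts reduction='none' at construction time):

import torch
from mmdet3d.registry import MODELS

criterion = MODELS.build(
    dict(
        type='LovaszLoss',
        loss_type='multi_class',
        classes='present',
        per_sample=False,
        reduction='none',
        loss_weight=1.0))

logits = torch.randn(4, 19, 32, 32)         # (B, C, H, W) raw scores; softmax is applied inside
labels = torch.randint(0, 19, (4, 32, 32))  # (B, H, W) semantic labels
loss = criterion(logits, labels)            # scalar tensor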
+from typing import Optional + +import torch +from mmdet.models.losses.utils import weighted_loss +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.registry import MODELS + + +@weighted_loss +def multibin_loss(pred_orientations: Tensor, + gt_orientations: Tensor, + num_dir_bins: int = 4) -> Tensor: + """Multi-Bin Loss. + + Args: + pred_orientations(Tensor): Predicted local vector + orientation in [axis_cls, head_cls, sin, cos] format. + shape (N, num_dir_bins * 4) + gt_orientations(Tensor): Corresponding gt bboxes, + shape (N, num_dir_bins * 2). + num_dir_bins(int): Number of bins to encode + direction angle. + Defaults to 4. + + Returns: + Tensor: Loss tensor. + """ + cls_losses = 0 + reg_losses = 0 + reg_cnt = 0 + for i in range(num_dir_bins): + # bin cls loss + cls_ce_loss = F.cross_entropy( + pred_orientations[:, (i * 2):(i * 2 + 2)], + gt_orientations[:, i].long(), + reduction='mean') + # regression loss + valid_mask_i = (gt_orientations[:, i] == 1) + cls_losses += cls_ce_loss + if valid_mask_i.sum() > 0: + start = num_dir_bins * 2 + i * 2 + end = start + 2 + pred_offset = F.normalize(pred_orientations[valid_mask_i, + start:end]) + gt_offset_sin = torch.sin(gt_orientations[valid_mask_i, + num_dir_bins + i]) + gt_offset_cos = torch.cos(gt_orientations[valid_mask_i, + num_dir_bins + i]) + reg_loss = \ + F.l1_loss(pred_offset[:, 0], gt_offset_sin, + reduction='none') + \ + F.l1_loss(pred_offset[:, 1], gt_offset_cos, + reduction='none') + + reg_losses += reg_loss.sum() + reg_cnt += valid_mask_i.sum() + + return cls_losses / num_dir_bins + reg_losses / reg_cnt + + +@MODELS.register_module() +class MultiBinLoss(nn.Module): + """Multi-Bin Loss for orientation. + + Args: + reduction (str): The method to reduce the loss. + Options are 'none', 'mean' and 'sum'. Defaults to 'none'. + loss_weight (float): The weight of loss. Defaults + to 1.0. + """ + + def __init__(self, + reduction: str = 'none', + loss_weight: float = 1.0) -> None: + super(MultiBinLoss, self).__init__() + assert reduction in ['none', 'sum', 'mean'] + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred: Tensor, + target: Tensor, + num_dir_bins: int, + reduction_override: Optional[str] = None) -> Tensor: + """Forward function. + + Args: + pred (Tensor): The prediction. + target (Tensor): The learning target of the prediction. + num_dir_bins (int): Number of bins to encode direction angle. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + + Returns: + Tensor: Loss tensor. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * multibin_loss( + pred, target, num_dir_bins=num_dir_bins, reduction=reduction) + return loss diff --git a/mmdet3d/models/losses/paconv_regularization_loss.py b/mmdet3d/models/losses/paconv_regularization_loss.py new file mode 100755 index 0000000..2d88761 --- /dev/null +++ b/mmdet3d/models/losses/paconv_regularization_loss.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
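multibin_loss above mixes a per-bin binary classification term with a sin/cos regression term for the bins marked valid in the target. A small CPU sketch of the tensor layout (inferred from the implementation above, so treat the column conventions as an assumption):

import torch
from mmdet3d.registry import MODELS

num_dir_bins = 4
criterion = MODELS.build(dict(type='MultiBinLoss', loss_weight=1.0))

# pred: first 2 * num_dir_bins columns are per-bin classification logits,
# the remaining 2 * num_dir_bins columns are (sin, cos) offsets per bin.
pred = torch.randn(8, num_dir_bins * 4)

# target: first num_dir_bins columns are 0/1 bin indicators,
# last num_dir_bins columns are the residual angles.
target = torch.zeros(8, num_dir_bins * 2)
target[:, 0] = 1              # every sample falls into bin 0
target[:, num_dir_bins] = 0.3  # residual angle for bin 0

loss = criterion(pred, target, num_dir_bins=num_dir_bins)  # scalar tensor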
+from typing import List, Optional + +import torch +from mmdet.models.losses.utils import weight_reduce_loss +from torch import Tensor +from torch import nn as nn + +from mmdet3d.registry import MODELS +from ..layers import PAConv, PAConvCUDA + + +def weight_correlation(conv: nn.Module) -> Tensor: + """Calculate correlations between kernel weights in Conv's weight bank as + regularization loss. The cosine similarity is used as metrics. + + Args: + conv (nn.Module): A Conv modules to be regularized. + Currently we only support `PAConv` and `PAConvCUDA`. + + Returns: + Tensor: Correlations between each kernel weights in weight bank. + """ + assert isinstance(conv, (PAConv, PAConvCUDA)), \ + f'unsupported module type {type(conv)}' + kernels = conv.weight_bank # [C_in, num_kernels * C_out] + in_channels = conv.in_channels + out_channels = conv.out_channels + num_kernels = conv.num_kernels + + # [num_kernels, Cin * Cout] + flatten_kernels = kernels.view(in_channels, num_kernels, out_channels).\ + permute(1, 0, 2).reshape(num_kernels, -1) + # [num_kernels, num_kernels] + inner_product = torch.matmul(flatten_kernels, flatten_kernels.T) + # [num_kernels, 1] + kernel_norms = torch.sum(flatten_kernels**2, dim=-1, keepdim=True)**0.5 + # [num_kernels, num_kernels] + kernel_norms = torch.matmul(kernel_norms, kernel_norms.T) + cosine_sims = inner_product / kernel_norms + # take upper triangular part excluding diagonal since we only compute + # correlation between different kernels once + # the square is to ensure positive loss, refer to: + # https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/tool/train.py#L208 + corr = torch.sum(torch.triu(cosine_sims, diagonal=1)**2) + + return corr + + +def paconv_regularization_loss(modules: List[nn.Module], + reduction: str) -> Tensor: + """Computes correlation loss of PAConv weight kernels as regularization. + + Args: + modules (List[nn.Module] | :obj:`generator`): + A list or a python generator of torch.nn.Modules. + reduction (str): Method to reduce losses among PAConv modules. + The valid reduction method are 'none', 'sum' or 'mean'. + + Returns: + Tensor: Correlation loss of kernel weights. + """ + corr_loss = [] + for module in modules: + if isinstance(module, (PAConv, PAConvCUDA)): + corr_loss.append(weight_correlation(module)) + corr_loss = torch.stack(corr_loss) + + # perform reduction + corr_loss = weight_reduce_loss(corr_loss, reduction=reduction) + + return corr_loss + + +@MODELS.register_module() +class PAConvRegularizationLoss(nn.Module): + """Calculate correlation loss of kernel weights in PAConv's weight bank. + + This is used as a regularization term in PAConv model training. + + Args: + reduction (str): Method to reduce losses. The reduction is performed + among all PAConv modules instead of prediction tensors. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. + """ + + def __init__(self, + reduction: str = 'mean', + loss_weight: float = 1.0) -> None: + super(PAConvRegularizationLoss, self).__init__() + assert reduction in ['none', 'sum', 'mean'] + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + modules: List[nn.Module], + reduction_override: Optional[str] = None, + **kwargs) -> Tensor: + """Forward function of loss calculation. + + Args: + modules (List[nn.Module] | :obj:`generator`): + A list or a python generator of torch.nn.Modules. + reduction_override (str, optional): Method to reduce losses. 
+ The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to None. + + Returns: + Tensor: Correlation loss of kernel weights. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + return self.loss_weight * paconv_regularization_loss( + modules, reduction=reduction) diff --git a/mmdet3d/models/losses/rotated_iou_loss.py b/mmdet3d/models/losses/rotated_iou_loss.py new file mode 100755 index 0000000..1a737bd --- /dev/null +++ b/mmdet3d/models/losses/rotated_iou_loss.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +from mmcv.ops import diff_iou_rotated_3d +from mmdet.models.losses.utils import weighted_loss +from torch import Tensor +from torch import nn as nn + +from mmdet3d.registry import MODELS + + +@weighted_loss +def rotated_iou_3d_loss(pred: Tensor, target: Tensor) -> Tensor: + """Calculate the IoU loss (1-IoU) of two sets of rotated bounding boxes. + + Note that predictions and targets are one-to-one corresponded. + + Args: + pred (Tensor): Bbox predictions with shape [N, 7] + (x, y, z, w, l, h, alpha). + target (Tensor): Bbox targets (gt) with shape [N, 7] + (x, y, z, w, l, h, alpha). + + Returns: + Tensor: IoU loss between predictions and targets. + """ + iou_loss = 1 - diff_iou_rotated_3d(pred.unsqueeze(0), + target.unsqueeze(0))[0] + return iou_loss + + +@MODELS.register_module() +class RotatedIoU3DLoss(nn.Module): + """Calculate the IoU loss (1-IoU) of rotated bounding boxes. + + Args: + reduction (str): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. + """ + + def __init__(self, + reduction: str = 'mean', + loss_weight: float = 1.0) -> None: + super().__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + avg_factor: Optional[float] = None, + reduction_override: Optional[str] = None, + **kwargs) -> Tensor: + """Forward function of loss calculation. + + Args: + pred (Tensor): Bbox predictions with shape [..., 7] + (x, y, z, w, l, h, alpha). + target (Tensor): Bbox targets (gt) with shape [..., 7] + (x, y, z, w, l, h, alpha). + weight (Tensor, optional): Weight of loss. + Defaults to None. + avg_factor (float, optional): Average factor that is used to + average the loss. Defaults to None. + reduction_override (str, optional): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to None. + + Returns: + Tensor: IoU loss between predictions and targets. + """ + if weight is not None and not torch.any(weight > 0): + return pred.sum() * weight.sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if weight is not None and weight.dim() > 1: + weight = weight.mean(-1) + loss = self.loss_weight * rotated_iou_3d_loss( + pred, + target, + weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + + return loss diff --git a/mmdet3d/models/losses/uncertain_smooth_l1_loss.py b/mmdet3d/models/losses/uncertain_smooth_l1_loss.py new file mode 100755 index 0000000..4cd90f3 --- /dev/null +++ b/mmdet3d/models/losses/uncertain_smooth_l1_loss.py @@ -0,0 +1,199 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
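RotatedIoU3DLoss above relies on mmcv.ops.diff_iou_rotated_3d, which is a CUDA op, so the sketch below only runs on a GPU build of mmcv; boxes are (x, y, z, w, l, h, alpha) and matched one-to-one.

import torch
from mmdet3d.registry import MODELS

criterion = MODELS.build(dict(type='RotatedIoU3DLoss', reduction='mean'))

pred = torch.tensor([[0.0, 0.0, 0.0, 2.0, 4.0, 1.5, 0.0]]).cuda()
target = torch.tensor([[0.2, 0.0, 0.0, 2.0, 4.0, 1.5, 0.1]]).cuda()

loss = criterion(pred, target)  # scalar: mean of (1 - rotated 3D IoU)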
+from typing import Optional + +import torch +from mmdet.models.losses.utils import weighted_loss +from torch import Tensor +from torch import nn as nn + +from mmdet3d.registry import MODELS + + +@weighted_loss +def uncertain_smooth_l1_loss(pred: Tensor, + target: Tensor, + sigma: Tensor, + alpha: float = 1.0, + beta: float = 1.0) -> Tensor: + """Smooth L1 loss with uncertainty. + + Args: + pred (Tensor): The prediction. + target (Tensor): The learning target of the prediction. + sigma (Tensor): The sigma for uncertainty. + alpha (float): The coefficient of log(sigma). + Defaults to 1.0. + beta (float): The threshold in the piecewise function. + Defaults to 1.0. + + Returns: + Tensor: Calculated loss + """ + assert beta > 0 + assert target.numel() > 0 + assert pred.size() == target.size() == sigma.size(), 'The size of pred ' \ + f'{pred.size()}, target {target.size()}, and sigma {sigma.size()} ' \ + 'are inconsistent.' + diff = torch.abs(pred - target) + loss = torch.where(diff < beta, 0.5 * diff * diff / beta, + diff - 0.5 * beta) + loss = torch.exp(-sigma) * loss + alpha * sigma + + return loss + + +@weighted_loss +def uncertain_l1_loss(pred: Tensor, + target: Tensor, + sigma: Tensor, + alpha: float = 1.0) -> Tensor: + """L1 loss with uncertainty. + + Args: + pred (Tensor): The prediction. + target (Tensor): The learning target of the prediction. + sigma (Tensor): The sigma for uncertainty. + alpha (float): The coefficient of log(sigma). + Defaults to 1.0. + + Returns: + Tensor: Calculated loss + """ + assert target.numel() > 0 + assert pred.size() == target.size() == sigma.size(), 'The size of pred ' \ + f'{pred.size()}, target {target.size()}, and sigma {sigma.size()} ' \ + 'are inconsistent.' + loss = torch.abs(pred - target) + loss = torch.exp(-sigma) * loss + alpha * sigma + return loss + + +@MODELS.register_module() +class UncertainSmoothL1Loss(nn.Module): + r"""Smooth L1 loss with uncertainty. + + Please refer to `PGD `_ and + `Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry + and Semantics `_ for more details. + + Args: + alpha (float): The coefficient of log(sigma). + Defaults to 1.0. + beta (float): The threshold in the piecewise function. + Defaults to 1.0. + reduction (str): The method to reduce the loss. + Options are 'none', 'mean' and 'sum'. Defaults to 'mean'. + loss_weight (float): The weight of loss. Defaults to 1.0 + """ + + def __init__(self, + alpha: float = 1.0, + beta: float = 1.0, + reduction: str = 'mean', + loss_weight: float = 1.0) -> None: + super(UncertainSmoothL1Loss, self).__init__() + assert reduction in ['none', 'sum', 'mean'] + self.alpha = alpha + self.beta = beta + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred: Tensor, + target: Tensor, + sigma: Tensor, + weight: Optional[Tensor] = None, + avg_factor: Optional[float] = None, + reduction_override: Optional[str] = None, + **kwargs) -> Tensor: + """Forward function. + + Args: + pred (Tensor): The prediction. + target (Tensor): The learning target of the prediction. + sigma (Tensor): The sigma for uncertainty. + weight (Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (float, optional): Average factor that is used to + average the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. 
+ + Returns: + Tensor: Calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * uncertain_smooth_l1_loss( + pred, + target, + weight, + sigma=sigma, + alpha=self.alpha, + beta=self.beta, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_bbox + + +@MODELS.register_module() +class UncertainL1Loss(nn.Module): + """L1 loss with uncertainty. + + Args: + alpha (float): The coefficient of log(sigma). + Defaults to 1.0. + reduction (str): The method to reduce the loss. + Options are 'none', 'mean' and 'sum'. Defaults to 'mean'. + loss_weight (float): The weight of loss. Defaults to 1.0. + """ + + def __init__(self, + alpha: float = 1.0, + reduction: str = 'mean', + loss_weight: float = 1.0) -> None: + super(UncertainL1Loss, self).__init__() + assert reduction in ['none', 'sum', 'mean'] + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred: Tensor, + target: Tensor, + sigma: Tensor, + weight: Optional[Tensor] = None, + avg_factor: Optional[float] = None, + reduction_override: Optional[str] = None) -> Tensor: + """Forward function. + + Args: + pred (Tensor): The prediction. + target (Tensor): The learning target of the prediction. + sigma (Tensor): The sigma for uncertainty. + weight (Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (float, optional): Average factor that is used to + average the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + + Returns: + Tensor: Calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * uncertain_l1_loss( + pred, + target, + weight, + sigma=sigma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + return loss_bbox diff --git a/mmdet3d/models/middle_encoders/__init__.py b/mmdet3d/models/middle_encoders/__init__.py new file mode 100755 index 0000000..96f5d20 --- /dev/null +++ b/mmdet3d/models/middle_encoders/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .pillar_scatter import PointPillarsScatter +from .sparse_encoder import SparseEncoder, SparseEncoderSASSD +from .sparse_unet import SparseUNet +from .voxel_set_abstraction import VoxelSetAbstraction + +__all__ = [ + 'PointPillarsScatter', 'SparseEncoder', 'SparseEncoderSASSD', 'SparseUNet', + 'VoxelSetAbstraction' +] diff --git a/mmdet3d/models/middle_encoders/pillar_scatter.py b/mmdet3d/models/middle_encoders/pillar_scatter.py new file mode 100755 index 0000000..40110c7 --- /dev/null +++ b/mmdet3d/models/middle_encoders/pillar_scatter.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class PointPillarsScatter(nn.Module): + """Point Pillar's Scatter. + + Converts learned features from dense tensor to sparse pseudo image. + + Args: + in_channels (int): Channels of input features. + output_shape (list[int]): Required output shape of features. 
+ """ + + def __init__(self, in_channels, output_shape): + super().__init__() + self.output_shape = output_shape + self.ny = output_shape[0] + self.nx = output_shape[1] + self.in_channels = in_channels + self.fp16_enabled = False + + def forward(self, voxel_features, coors, batch_size=None): + """Foraward function to scatter features.""" + # TODO: rewrite the function in a batch manner + # no need to deal with different batch cases + if batch_size is not None: + return self.forward_batch(voxel_features, coors, batch_size) + else: + return self.forward_single(voxel_features, coors) + + def forward_single(self, voxel_features, coors): + """Scatter features of single sample. + + Args: + voxel_features (torch.Tensor): Voxel features in shape (N, M, C). + coors (torch.Tensor): Coordinates of each voxel. + The first column indicates the sample ID. + """ + # Create the canvas for this sample + canvas = torch.zeros( + self.in_channels, + self.nx * self.ny, + dtype=voxel_features.dtype, + device=voxel_features.device) + + indices = coors[:, 2] * self.nx + coors[:, 3] + indices = indices.long() + voxels = voxel_features.t() + # Now scatter the blob back to the canvas. + canvas[:, indices] = voxels + # Undo the column stacking to final 4-dim tensor + canvas = canvas.view(1, self.in_channels, self.ny, self.nx) + return canvas + + def forward_batch(self, voxel_features, coors, batch_size): + """Scatter features of single sample. + + Args: + voxel_features (torch.Tensor): Voxel features in shape (N, M, C). + coors (torch.Tensor): Coordinates of each voxel in shape (N, 4). + The first column indicates the sample ID. + batch_size (int): Number of samples in the current batch. + """ + # batch_canvas will be the final output. + batch_canvas = [] + for batch_itt in range(batch_size): + # Create the canvas for this sample + canvas = torch.zeros( + self.in_channels, + self.nx * self.ny, + dtype=voxel_features.dtype, + device=voxel_features.device) + + # Only include non-empty pillars + batch_mask = coors[:, 0] == batch_itt + this_coors = coors[batch_mask, :] + indices = this_coors[:, 2] * self.nx + this_coors[:, 3] + indices = indices.type(torch.long) + voxels = voxel_features[batch_mask, :] + voxels = voxels.t() + + # Now scatter the blob back to the canvas. + canvas[:, indices] = voxels + + # Append to a list for later stacking. + batch_canvas.append(canvas) + + # Stack to 3-dim tensor (batch-size, in_channels, nrows*ncols) + batch_canvas = torch.stack(batch_canvas, 0) + + # Undo the column stacking to final 4-dim tensor + batch_canvas = batch_canvas.view(batch_size, self.in_channels, self.ny, + self.nx) + + return batch_canvas diff --git a/mmdet3d/models/middle_encoders/sparse_encoder.py b/mmdet3d/models/middle_encoders/sparse_encoder.py new file mode 100755 index 0000000..e5331be --- /dev/null +++ b/mmdet3d/models/middle_encoders/sparse_encoder.py @@ -0,0 +1,513 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Tuple + +import torch +from mmcv.ops import points_in_boxes_all, three_interpolate, three_nn +from mmdet.models.losses import sigmoid_focal_loss, smooth_l1_loss +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models.layers import SparseBasicBlock, make_sparse_convmodule +from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE +from mmdet3d.registry import MODELS +from mmdet3d.structures import BaseInstance3DBoxes + +if IS_SPCONV2_AVAILABLE: + from spconv.pytorch import SparseConvTensor, SparseSequential +else: + from mmcv.ops import SparseConvTensor, SparseSequential + + +@MODELS.register_module() +class SparseEncoder(nn.Module): + r"""Sparse encoder for SECOND and Part-A2. + + Args: + in_channels (int): The number of input channels. + sparse_shape (list[int]): The sparse shape of input tensor. + order (list[str], optional): Order of conv module. + Defaults to ('conv', 'norm', 'act'). + norm_cfg (dict, optional): Config of normalization layer. Defaults to + dict(type='BN1d', eps=1e-3, momentum=0.01). + base_channels (int, optional): Out channels for conv_input layer. + Defaults to 16. + output_channels (int, optional): Out channels for conv_out layer. + Defaults to 128. + encoder_channels (tuple[tuple[int]], optional): + Convolutional channels of each encode block. + Defaults to ((16, ), (32, 32, 32), (64, 64, 64), (64, 64, 64)). + encoder_paddings (tuple[tuple[int]], optional): + Paddings of each encode block. + Defaults to ((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1, 1)). + block_type (str, optional): Type of the block to use. + Defaults to 'conv_module'. + return_middle_feats (bool): Whether output middle features. + Default to False. + """ + + def __init__(self, + in_channels, + sparse_shape, + order=('conv', 'norm', 'act'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + base_channels=16, + output_channels=128, + encoder_channels=((16, ), (32, 32, 32), (64, 64, 64), (64, 64, + 64)), + encoder_paddings=((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1, + 1)), + block_type='conv_module', + return_middle_feats=False): + super().__init__() + assert block_type in ['conv_module', 'basicblock'] + self.sparse_shape = sparse_shape + self.in_channels = in_channels + self.order = order + self.base_channels = base_channels + self.output_channels = output_channels + self.encoder_channels = encoder_channels + self.encoder_paddings = encoder_paddings + self.stage_num = len(self.encoder_channels) + self.fp16_enabled = False + self.return_middle_feats = return_middle_feats + # Spconv init all weight on its own + + assert isinstance(order, tuple) and len(order) == 3 + assert set(order) == {'conv', 'norm', 'act'} + + if self.order[0] != 'conv': # pre activate + self.conv_input = make_sparse_convmodule( + in_channels, + self.base_channels, + 3, + norm_cfg=norm_cfg, + padding=1, + indice_key='subm1', + conv_type='SubMConv3d', + order=('conv', )) + else: # post activate + self.conv_input = make_sparse_convmodule( + in_channels, + self.base_channels, + 3, + norm_cfg=norm_cfg, + padding=1, + indice_key='subm1', + conv_type='SubMConv3d') + + encoder_out_channels = self.make_encoder_layers( + make_sparse_convmodule, + norm_cfg, + self.base_channels, + block_type=block_type) + + self.conv_out = make_sparse_convmodule( + encoder_out_channels, + self.output_channels, + kernel_size=(3, 1, 1), + stride=(2, 1, 1), + norm_cfg=norm_cfg, + padding=0, + indice_key='spconv_down2', + conv_type='SparseConv3d') + + def forward(self, voxel_features, coors, batch_size): 
+ """Forward of SparseEncoder. + + Args: + voxel_features (torch.Tensor): Voxel features in shape (N, C). + coors (torch.Tensor): Coordinates in shape (N, 4), + the columns in the order of (batch_idx, z_idx, y_idx, x_idx). + batch_size (int): Batch size. + + Returns: + torch.Tensor | tuple[torch.Tensor, list]: Return spatial features + include: + + - spatial_features (torch.Tensor): Spatial features are out from + the last layer. + - encode_features (List[SparseConvTensor], optional): Middle layer + output features. When self.return_middle_feats is True, the + module returns middle features. + """ + coors = coors.int() + input_sp_tensor = SparseConvTensor(voxel_features, coors, + self.sparse_shape, batch_size) + x = self.conv_input(input_sp_tensor) + + encode_features = [] + for encoder_layer in self.encoder_layers: + x = encoder_layer(x) + encode_features.append(x) + + # for detection head + # [200, 176, 5] -> [200, 176, 2] + out = self.conv_out(encode_features[-1]) + spatial_features = out.dense() + + N, C, D, H, W = spatial_features.shape + spatial_features = spatial_features.view(N, C * D, H, W) + + if self.return_middle_feats: + return spatial_features, encode_features + else: + return spatial_features + + def make_encoder_layers(self, + make_block, + norm_cfg, + in_channels, + block_type='conv_module', + conv_cfg=dict(type='SubMConv3d')): + """make encoder layers using sparse convs. + + Args: + make_block (method): A bounded function to build blocks. + norm_cfg (dict[str]): Config of normalization layer. + in_channels (int): The number of encoder input channels. + block_type (str, optional): Type of the block to use. + Defaults to 'conv_module'. + conv_cfg (dict, optional): Config of conv layer. Defaults to + dict(type='SubMConv3d'). + + Returns: + int: The number of encoder output channels. + """ + assert block_type in ['conv_module', 'basicblock'] + self.encoder_layers = SparseSequential() + + for i, blocks in enumerate(self.encoder_channels): + blocks_list = [] + for j, out_channels in enumerate(tuple(blocks)): + padding = tuple(self.encoder_paddings[i])[j] + # each stage started with a spconv layer + # except the first stage + if i != 0 and j == 0 and block_type == 'conv_module': + blocks_list.append( + make_block( + in_channels, + out_channels, + 3, + norm_cfg=norm_cfg, + stride=2, + padding=padding, + indice_key=f'spconv{i + 1}', + conv_type='SparseConv3d')) + elif block_type == 'basicblock': + if j == len(blocks) - 1 and i != len( + self.encoder_channels) - 1: + blocks_list.append( + make_block( + in_channels, + out_channels, + 3, + norm_cfg=norm_cfg, + stride=2, + padding=padding, + indice_key=f'spconv{i + 1}', + conv_type='SparseConv3d')) + else: + blocks_list.append( + SparseBasicBlock( + out_channels, + out_channels, + norm_cfg=norm_cfg, + conv_cfg=conv_cfg)) + else: + blocks_list.append( + make_block( + in_channels, + out_channels, + 3, + norm_cfg=norm_cfg, + padding=padding, + indice_key=f'subm{i + 1}', + conv_type='SubMConv3d')) + in_channels = out_channels + stage_name = f'encoder_layer{i + 1}' + stage_layers = SparseSequential(*blocks_list) + self.encoder_layers.add_module(stage_name, stage_layers) + return out_channels + + +@MODELS.register_module() +class SparseEncoderSASSD(SparseEncoder): + r"""Sparse encoder for `SASSD `_ + + Args: + in_channels (int): The number of input channels. + sparse_shape (list[int]): The sparse shape of input tensor. + order (list[str], optional): Order of conv module. + Defaults to ('conv', 'norm', 'act'). 
+ norm_cfg (dict, optional): Config of normalization layer. Defaults to + dict(type='BN1d', eps=1e-3, momentum=0.01). + base_channels (int, optional): Out channels for conv_input layer. + Defaults to 16. + output_channels (int, optional): Out channels for conv_out layer. + Defaults to 128. + encoder_channels (tuple[tuple[int]], optional): + Convolutional channels of each encode block. + Defaults to ((16, ), (32, 32, 32), (64, 64, 64), (64, 64, 64)). + encoder_paddings (tuple[tuple[int]], optional): + Paddings of each encode block. + Defaults to ((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1, 1)). + block_type (str, optional): Type of the block to use. + Defaults to 'conv_module'. + """ + + def __init__(self, + in_channels: int, + sparse_shape: List[int], + order: Tuple[str] = ('conv', 'norm', 'act'), + norm_cfg: dict = dict(type='BN1d', eps=1e-3, momentum=0.01), + base_channels: int = 16, + output_channels: int = 128, + encoder_channels: Tuple[tuple] = ((16, ), (32, 32, 32), + (64, 64, 64), (64, 64, 64)), + encoder_paddings: Tuple[tuple] = ((1, ), (1, 1, 1), (1, 1, 1), + ((0, 1, 1), 1, 1)), + block_type: str = 'conv_module'): + super(SparseEncoderSASSD, self).__init__( + in_channels=in_channels, + sparse_shape=sparse_shape, + order=order, + norm_cfg=norm_cfg, + base_channels=base_channels, + output_channels=output_channels, + encoder_channels=encoder_channels, + encoder_paddings=encoder_paddings, + block_type=block_type) + + self.point_fc = nn.Linear(112, 64, bias=False) + self.point_cls = nn.Linear(64, 1, bias=False) + self.point_reg = nn.Linear(64, 3, bias=False) + + def forward(self, + voxel_features: Tensor, + coors: Tensor, + batch_size: Tensor, + test_mode: bool = False) -> Tuple[Tensor, tuple]: + """Forward of SparseEncoder. + + Args: + voxel_features (torch.Tensor): Voxel features in shape (N, C). + coors (torch.Tensor): Coordinates in shape (N, 4), + the columns in the order of (batch_idx, z_idx, y_idx, x_idx). + batch_size (int): Batch size. + test_mode (bool, optional): Whether in test mode. + Defaults to False. + + Returns: + Tensor: Backbone features. + tuple[torch.Tensor]: Mean feature value of the points, + Classification result of the points, + Regression offsets of the points. 
+ """ + coors = coors.int() + input_sp_tensor = SparseConvTensor(voxel_features, coors, + self.sparse_shape, batch_size) + x = self.conv_input(input_sp_tensor) + + encode_features = [] + for encoder_layer in self.encoder_layers: + x = encoder_layer(x) + encode_features.append(x) + + # for detection head + # [200, 176, 5] -> [200, 176, 2] + out = self.conv_out(encode_features[-1]) + spatial_features = out.dense() + + N, C, D, H, W = spatial_features.shape + spatial_features = spatial_features.view(N, C * D, H, W) + + if test_mode: + return spatial_features, None + + points_mean = torch.zeros_like(voxel_features) + points_mean[:, 0] = coors[:, 0] + points_mean[:, 1:] = voxel_features[:, :3] + + # auxiliary network + p0 = self.make_auxiliary_points( + encode_features[0], + points_mean, + offset=(0, -40., -3.), + voxel_size=(.1, .1, .2)) + + p1 = self.make_auxiliary_points( + encode_features[1], + points_mean, + offset=(0, -40., -3.), + voxel_size=(.2, .2, .4)) + + p2 = self.make_auxiliary_points( + encode_features[2], + points_mean, + offset=(0, -40., -3.), + voxel_size=(.4, .4, .8)) + + pointwise = torch.cat([p0, p1, p2], dim=-1) + pointwise = self.point_fc(pointwise) + point_cls = self.point_cls(pointwise) + point_reg = self.point_reg(pointwise) + point_misc = (points_mean, point_cls, point_reg) + + return spatial_features, point_misc + + def get_auxiliary_targets(self, + points_feats: Tensor, + gt_bboxes_3d: List[BaseInstance3DBoxes], + enlarge: float = 1.0) -> Tuple[Tensor, Tensor]: + """Get auxiliary target. + + Args: + points_feats (torch.Tensor): Mean features of the points. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth + boxes for each sample. + enlarge (float, optional): Enlaged scale. Defaults to 1.0. + + Returns: + tuple[torch.Tensor]: Label of the points and + center offsets of the points. + """ + center_offsets = list() + pts_labels = list() + for i in range(len(gt_bboxes_3d)): + boxes3d = gt_bboxes_3d[i].tensor.detach().clone() + idx = torch.nonzero(points_feats[:, 0] == i).view(-1) + point_xyz = points_feats[idx, 1:].detach().clone() + + boxes3d[:, 3:6] *= enlarge + + pts_in_flag, center_offset = self.calculate_pts_offsets( + point_xyz, boxes3d) + pts_label = pts_in_flag.max(0)[0].byte() + pts_labels.append(pts_label) + center_offsets.append(center_offset) + + center_offsets = torch.cat(center_offsets) + pts_labels = torch.cat(pts_labels).to(center_offsets.device) + + return pts_labels, center_offsets + + def calculate_pts_offsets(self, points: Tensor, + bboxes_3d: Tensor) -> Tuple[Tensor, Tensor]: + """Find all boxes in which each point is, as well as the offsets from + the box centers. + + Args: + points (torch.Tensor): [M, 3], [x, y, z] in LiDAR coordinate + bboxes_3d (torch.Tensor): [T, 7], + num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz], + (x, y, z) is the bottom center. + + Returns: + tuple[torch.Tensor]: Point indices of boxes with the shape of + (T, M). Default background = 0. + And offsets from the box centers of points, + if it belows to the box, with the shape of (M, 3). + Default background = 0. 
+ """ + boxes_num = len(bboxes_3d) + pts_num = len(points) + + box_indices = points_in_boxes_all(points[None, ...], bboxes_3d[None, + ...]) + pts_indices = box_indices.squeeze(0).transpose(0, 1) + center_offsets = torch.zeros_like(points).to(points.device) + + for i in range(boxes_num): + for j in range(pts_num): + if pts_indices[i][j] == 1: + center_offsets[j][0] = points[j][0] - bboxes_3d[i][0] + center_offsets[j][1] = points[j][1] - bboxes_3d[i][1] + center_offsets[j][2] = ( + points[j][2] - + (bboxes_3d[i][2] + bboxes_3d[i][2] / 2.0)) + return pts_indices, center_offsets + + def aux_loss(self, points: Tensor, point_cls: Tensor, point_reg: Tensor, + gt_bboxes_3d: Tensor) -> dict: + """Calculate auxiliary loss. + + Args: + points (torch.Tensor): Mean feature value of the points. + point_cls (torch.Tensor): Classification result of the points. + point_reg (torch.Tensor): Regression offsets of the points. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth + boxes for each sample. + + Returns: + dict: Auxiliary loss. + """ + num_boxes = len(gt_bboxes_3d) + pts_labels, center_targets = self.get_auxiliary_targets( + points, gt_bboxes_3d) + + rpn_cls_target = pts_labels.long() + pos = (pts_labels > 0).float() + neg = (pts_labels == 0).float() + + pos_normalizer = pos.sum().clamp(min=1.0) + + cls_weights = pos + neg + reg_weights = pos + reg_weights = reg_weights / pos_normalizer + + aux_loss_cls = sigmoid_focal_loss( + point_cls, + rpn_cls_target, + weight=cls_weights, + avg_factor=pos_normalizer) + + aux_loss_cls /= num_boxes + + weight = reg_weights[..., None] + aux_loss_reg = smooth_l1_loss(point_reg, center_targets, beta=1 / 9.) + aux_loss_reg = torch.sum(aux_loss_reg * weight)[None] + aux_loss_reg /= num_boxes + + aux_loss_cls, aux_loss_reg = [aux_loss_cls], [aux_loss_reg] + + return dict(aux_loss_cls=aux_loss_cls, aux_loss_reg=aux_loss_reg) + + def make_auxiliary_points( + self, + source_tensor: Tensor, + target: Tensor, + offset: Tuple = (0., -40., -3.), + voxel_size: Tuple = (.05, .05, .1) + ) -> Tensor: + """Make auxiliary points for loss computation. + + Args: + source_tensor (torch.Tensor): (M, C) features to be propigated. + target (torch.Tensor): (N, 4) bxyz positions of the + target features. + offset (tuple[float], optional): Voxelization offset. + Defaults to (0., -40., -3.) + voxel_size (tuple[float], optional): Voxelization size. + Defaults to (.05, .05, .1) + + Returns: + torch.Tensor: (N, C) tensor of the features of the target features. + """ + # Tansfer tensor to points + source = source_tensor.indices.float() + offset = torch.Tensor(offset).to(source.device) + voxel_size = torch.Tensor(voxel_size).to(source.device) + source[:, 1:] = ( + source[:, [3, 2, 1]] * voxel_size + offset + .5 * voxel_size) + + source_feats = source_tensor.features[None, ...].transpose(1, 2) + + # Interplate auxiliary points + dist, idx = three_nn(target[None, ...], source[None, ...]) + dist_recip = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_recip, dim=2, keepdim=True) + weight = dist_recip / norm + new_features = three_interpolate(source_feats.contiguous(), idx, + weight) + + return new_features.squeeze(0).transpose(0, 1) diff --git a/mmdet3d/models/middle_encoders/sparse_unet.py b/mmdet3d/models/middle_encoders/sparse_unet.py new file mode 100755 index 0000000..2d13507 --- /dev/null +++ b/mmdet3d/models/middle_encoders/sparse_unet.py @@ -0,0 +1,299 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE + +if IS_SPCONV2_AVAILABLE: + from spconv.pytorch import SparseConvTensor, SparseSequential +else: + from mmcv.ops import SparseConvTensor, SparseSequential + +from mmengine.model import BaseModule + +from mmdet3d.models.layers import SparseBasicBlock, make_sparse_convmodule +from mmdet3d.models.layers.sparse_block import replace_feature +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class SparseUNet(BaseModule): + r"""SparseUNet for PartA^2. + + See the `paper `_ for more details. + + Args: + in_channels (int): The number of input channels. + sparse_shape (list[int]): The sparse shape of input tensor. + norm_cfg (dict): Config of normalization layer. + base_channels (int): Out channels for conv_input layer. + output_channels (int): Out channels for conv_out layer. + encoder_channels (tuple[tuple[int]]): + Convolutional channels of each encode block. + encoder_paddings (tuple[tuple[int]]): Paddings of each encode block. + decoder_channels (tuple[tuple[int]]): + Convolutional channels of each decode block. + decoder_paddings (tuple[tuple[int]]): Paddings of each decode block. + """ + + def __init__(self, + in_channels, + sparse_shape, + order=('conv', 'norm', 'act'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + base_channels=16, + output_channels=128, + encoder_channels=((16, ), (32, 32, 32), (64, 64, 64), (64, 64, + 64)), + encoder_paddings=((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1, + 1)), + decoder_channels=((64, 64, 64), (64, 64, 32), (32, 32, 16), + (16, 16, 16)), + decoder_paddings=((1, 0), (1, 0), (0, 0), (0, 1)), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.sparse_shape = sparse_shape + self.in_channels = in_channels + self.order = order + self.base_channels = base_channels + self.output_channels = output_channels + self.encoder_channels = encoder_channels + self.encoder_paddings = encoder_paddings + self.decoder_channels = decoder_channels + self.decoder_paddings = decoder_paddings + self.stage_num = len(self.encoder_channels) + self.fp16_enabled = False + # Spconv init all weight on its own + + assert isinstance(order, tuple) and len(order) == 3 + assert set(order) == {'conv', 'norm', 'act'} + + if self.order[0] != 'conv': # pre activate + self.conv_input = make_sparse_convmodule( + in_channels, + self.base_channels, + 3, + norm_cfg=norm_cfg, + padding=1, + indice_key='subm1', + conv_type='SubMConv3d', + order=('conv', )) + else: # post activate + self.conv_input = make_sparse_convmodule( + in_channels, + self.base_channels, + 3, + norm_cfg=norm_cfg, + padding=1, + indice_key='subm1', + conv_type='SubMConv3d') + + encoder_out_channels = self.make_encoder_layers( + make_sparse_convmodule, norm_cfg, self.base_channels) + self.make_decoder_layers(make_sparse_convmodule, norm_cfg, + encoder_out_channels) + + self.conv_out = make_sparse_convmodule( + encoder_out_channels, + self.output_channels, + kernel_size=(3, 1, 1), + stride=(2, 1, 1), + norm_cfg=norm_cfg, + padding=0, + indice_key='spconv_down2', + conv_type='SparseConv3d') + + def forward(self, voxel_features, coors, batch_size): + """Forward of SparseUNet. + + Args: + voxel_features (torch.float32): Voxel features in shape [N, C]. + coors (torch.int32): Coordinates in shape [N, 4], + the columns in the order of (batch_idx, z_idx, y_idx, x_idx). + batch_size (int): Batch size. + + Returns: + dict[str, torch.Tensor]: Backbone features. 
+ """ + coors = coors.int() + input_sp_tensor = SparseConvTensor(voxel_features, coors, + self.sparse_shape, batch_size) + x = self.conv_input(input_sp_tensor) + + encode_features = [] + for encoder_layer in self.encoder_layers: + x = encoder_layer(x) + encode_features.append(x) + + # for detection head + # [200, 176, 5] -> [200, 176, 2] + out = self.conv_out(encode_features[-1]) + spatial_features = out.dense() + + N, C, D, H, W = spatial_features.shape + spatial_features = spatial_features.view(N, C * D, H, W) + + # for segmentation head, with output shape: + # [400, 352, 11] <- [200, 176, 5] + # [800, 704, 21] <- [400, 352, 11] + # [1600, 1408, 41] <- [800, 704, 21] + # [1600, 1408, 41] <- [1600, 1408, 41] + decode_features = [] + x = encode_features[-1] + for i in range(self.stage_num, 0, -1): + x = self.decoder_layer_forward(encode_features[i - 1], x, + getattr(self, f'lateral_layer{i}'), + getattr(self, f'merge_layer{i}'), + getattr(self, f'upsample_layer{i}')) + decode_features.append(x) + + seg_features = decode_features[-1].features + + ret = dict( + spatial_features=spatial_features, seg_features=seg_features) + + return ret + + def decoder_layer_forward(self, x_lateral, x_bottom, lateral_layer, + merge_layer, upsample_layer): + """Forward of upsample and residual block. + + Args: + x_lateral (:obj:`SparseConvTensor`): Lateral tensor. + x_bottom (:obj:`SparseConvTensor`): Feature from bottom layer. + lateral_layer (SparseBasicBlock): Convolution for lateral tensor. + merge_layer (SparseSequential): Convolution for merging features. + upsample_layer (SparseSequential): Convolution for upsampling. + + Returns: + :obj:`SparseConvTensor`: Upsampled feature. + """ + x = lateral_layer(x_lateral) + x = replace_feature(x, torch.cat((x_bottom.features, x.features), + dim=1)) + x_merge = merge_layer(x) + x = self.reduce_channel(x, x_merge.features.shape[1]) + x = replace_feature(x, x_merge.features + x.features) + x = upsample_layer(x) + return x + + @staticmethod + def reduce_channel(x, out_channels): + """reduce channel for element-wise addition. + + Args: + x (:obj:`SparseConvTensor`): Sparse tensor, ``x.features`` + are in shape (N, C1). + out_channels (int): The number of channel after reduction. + + Returns: + :obj:`SparseConvTensor`: Channel reduced feature. + """ + features = x.features + n, in_channels = features.shape + assert (in_channels % out_channels + == 0) and (in_channels >= out_channels) + x = replace_feature(x, features.view(n, out_channels, -1).sum(dim=2)) + return x + + def make_encoder_layers(self, make_block, norm_cfg, in_channels): + """make encoder layers using sparse convs. + + Args: + make_block (method): A bounded function to build blocks. + norm_cfg (dict[str]): Config of normalization layer. + in_channels (int): The number of encoder input channels. + + Returns: + int: The number of encoder output channels. 
+ """ + self.encoder_layers = SparseSequential() + + for i, blocks in enumerate(self.encoder_channels): + blocks_list = [] + for j, out_channels in enumerate(tuple(blocks)): + padding = tuple(self.encoder_paddings[i])[j] + # each stage started with a spconv layer + # except the first stage + if i != 0 and j == 0: + blocks_list.append( + make_block( + in_channels, + out_channels, + 3, + norm_cfg=norm_cfg, + stride=2, + padding=padding, + indice_key=f'spconv{i + 1}', + conv_type='SparseConv3d')) + else: + blocks_list.append( + make_block( + in_channels, + out_channels, + 3, + norm_cfg=norm_cfg, + padding=padding, + indice_key=f'subm{i + 1}', + conv_type='SubMConv3d')) + in_channels = out_channels + stage_name = f'encoder_layer{i + 1}' + stage_layers = SparseSequential(*blocks_list) + self.encoder_layers.add_module(stage_name, stage_layers) + return out_channels + + def make_decoder_layers(self, make_block, norm_cfg, in_channels): + """make decoder layers using sparse convs. + + Args: + make_block (method): A bounded function to build blocks. + norm_cfg (dict[str]): Config of normalization layer. + in_channels (int): The number of encoder input channels. + + Returns: + int: The number of encoder output channels. + """ + block_num = len(self.decoder_channels) + for i, block_channels in enumerate(self.decoder_channels): + paddings = self.decoder_paddings[i] + setattr( + self, f'lateral_layer{block_num - i}', + SparseBasicBlock( + in_channels, + block_channels[0], + conv_cfg=dict( + type='SubMConv3d', indice_key=f'subm{block_num - i}'), + norm_cfg=norm_cfg)) + setattr( + self, f'merge_layer{block_num - i}', + make_block( + in_channels * 2, + block_channels[1], + 3, + norm_cfg=norm_cfg, + padding=paddings[0], + indice_key=f'subm{block_num - i}', + conv_type='SubMConv3d')) + if block_num - i != 1: + setattr( + self, f'upsample_layer{block_num - i}', + make_block( + in_channels, + block_channels[2], + 3, + norm_cfg=norm_cfg, + indice_key=f'spconv{block_num - i}', + conv_type='SparseInverseConv3d')) + else: + # use submanifold conv instead of inverse conv + # in the last block + setattr( + self, f'upsample_layer{block_num - i}', + make_block( + in_channels, + block_channels[2], + 3, + norm_cfg=norm_cfg, + padding=paddings[1], + indice_key='subm1', + conv_type='SubMConv3d')) + in_channels = block_channels[2] diff --git a/mmdet3d/models/middle_encoders/voxel_set_abstraction.py b/mmdet3d/models/middle_encoders/voxel_set_abstraction.py new file mode 100755 index 0000000..e2161fa --- /dev/null +++ b/mmdet3d/models/middle_encoders/voxel_set_abstraction.py @@ -0,0 +1,334 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Optional + +import mmengine +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.ops.furthest_point_sample import furthest_point_sample +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS +from mmdet3d.utils import InstanceList + + +def bilinear_interpolate_torch(inputs, x, y): + """Bilinear interpolate for inputs.""" + x0 = torch.floor(x).long() + x1 = x0 + 1 + + y0 = torch.floor(y).long() + y1 = y0 + 1 + + x0 = torch.clamp(x0, 0, inputs.shape[1] - 1) + x1 = torch.clamp(x1, 0, inputs.shape[1] - 1) + y0 = torch.clamp(y0, 0, inputs.shape[0] - 1) + y1 = torch.clamp(y1, 0, inputs.shape[0] - 1) + + Ia = inputs[y0, x0] + Ib = inputs[y1, x0] + Ic = inputs[y0, x1] + Id = inputs[y1, x1] + + wa = (x1.type_as(x) - x) * (y1.type_as(y) - y) + wb = (x1.type_as(x) - x) * (y - y0.type_as(y)) + wc = (x - x0.type_as(x)) * (y1.type_as(y) - y) + wd = (x - x0.type_as(x)) * (y - y0.type_as(y)) + ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t( + torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd) + return ans + + +@MODELS.register_module() +class VoxelSetAbstraction(BaseModule): + """Voxel set abstraction module for PVRCNN and PVRCNN++. + + Args: + num_keypoints (int): The number of key points sampled from + raw points cloud. + fused_out_channel (int): Key points feature output channels + num after fused. Default to 128. + voxel_size (list[float]): Size of voxels. Defaults to + [0.05, 0.05, 0.1]. + point_cloud_range (list[float]): Point cloud range. Defaults to + [0, -40, -3, 70.4, 40, 1]. + voxel_sa_cfgs_list (List[dict or ConfigDict], optional): List of SA + module cfg. Used to gather key points features from multi-wise + voxel features. Default to None. + rawpoints_sa_cfgs (dict or ConfigDict, optional): SA module cfg. + Used to gather key points features from raw points. Default to + None. + bev_feat_channel (int): Bev features channels num. + Default to 256. + bev_scale_factor (int): Bev features scale factor. Default to 8. + voxel_center_as_source (bool): Whether used voxel centers as points + cloud key points. Defaults to False. + norm_cfg (dict[str]): Config of normalization layer. Default + used dict(type='BN1d', eps=1e-5, momentum=0.1). + bias (bool | str, optional): If specified as `auto`, it will be + decided by `norm_cfg`. `bias` will be set as True if + `norm_cfg` is None, otherwise False. Default: 'auto'. 
+ """ + + def __init__(self, + num_keypoints: int, + fused_out_channel: int = 128, + voxel_size: list = [0.05, 0.05, 0.1], + point_cloud_range: list = [0, -40, -3, 70.4, 40, 1], + voxel_sa_cfgs_list: Optional[list] = None, + rawpoints_sa_cfgs: Optional[dict] = None, + bev_feat_channel: int = 256, + bev_scale_factor: int = 8, + voxel_center_as_source: bool = False, + norm_cfg: dict = dict(type='BN2d', eps=1e-5, momentum=0.1), + bias: str = 'auto') -> None: + super().__init__() + self.num_keypoints = num_keypoints + self.fused_out_channel = fused_out_channel + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.voxel_center_as_source = voxel_center_as_source + + gathered_channel = 0 + + if rawpoints_sa_cfgs is not None: + self.rawpoints_sa_layer = MODELS.build(rawpoints_sa_cfgs) + gathered_channel += sum( + [x[-1] for x in rawpoints_sa_cfgs.mlp_channels]) + else: + self.rawpoints_sa_layer = None + + if voxel_sa_cfgs_list is not None: + self.voxel_sa_configs_list = voxel_sa_cfgs_list + self.voxel_sa_layers = nn.ModuleList() + for voxel_sa_config in voxel_sa_cfgs_list: + cur_layer = MODELS.build(voxel_sa_config) + self.voxel_sa_layers.append(cur_layer) + gathered_channel += sum( + [x[-1] for x in voxel_sa_config.mlp_channels]) + else: + self.voxel_sa_layers = None + + if bev_feat_channel is not None and bev_scale_factor is not None: + self.bev_cfg = mmengine.Config( + dict( + bev_feat_channels=bev_feat_channel, + bev_scale_factor=bev_scale_factor)) + gathered_channel += bev_feat_channel + else: + self.bev_cfg = None + self.point_feature_fusion_layer = nn.Sequential( + ConvModule( + gathered_channel, + fused_out_channel, + kernel_size=(1, 1), + stride=(1, 1), + conv_cfg=dict(type='Conv2d'), + norm_cfg=norm_cfg, + bias=bias)) + + def interpolate_from_bev_features(self, keypoints: torch.Tensor, + bev_features: torch.Tensor, + batch_size: int, + bev_scale_factor: int) -> torch.Tensor: + """Gather key points features from bev feature map by interpolate. + + Args: + keypoints (torch.Tensor): Sampled key points with shape + (N1 + N2 + ..., NDim). + bev_features (torch.Tensor): Bev feature map from the first + stage with shape (B, C, H, W). + batch_size (int): Input batch size. + bev_scale_factor (int): Bev feature map scale factor. + + Returns: + torch.Tensor: Key points features gather from bev feature + map with shape (N1 + N2 + ..., C) + """ + x_idxs = (keypoints[..., 0] - + self.point_cloud_range[0]) / self.voxel_size[0] + y_idxs = (keypoints[..., 1] - + self.point_cloud_range[1]) / self.voxel_size[1] + + x_idxs = x_idxs / bev_scale_factor + y_idxs = y_idxs / bev_scale_factor + + point_bev_features_list = [] + for k in range(batch_size): + cur_x_idxs = x_idxs[k, ...] + cur_y_idxs = y_idxs[k, ...] + cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C) + point_bev_features = bilinear_interpolate_torch( + cur_bev_features, cur_x_idxs, cur_y_idxs) + point_bev_features_list.append(point_bev_features) + + point_bev_features = torch.cat( + point_bev_features_list, dim=0) # (N1 + N2 + ..., C) + return point_bev_features.view(batch_size, keypoints.shape[1], -1) + + def get_voxel_centers(self, coors: torch.Tensor, + scale_factor: float) -> torch.Tensor: + """Get voxel centers coordinate. + + Args: + coors (torch.Tensor): Coordinates of voxels shape is Nx(1+NDim), + where 1 represents the batch index. + scale_factor (float): Scale factor. + + Returns: + torch.Tensor: Voxel centers coordinate with shape (N, 3). 
+ """ + assert coors.shape[1] == 4 + voxel_centers = coors[:, [3, 2, 1]].float() # (xyz) + voxel_size = torch.tensor( + self.voxel_size, + device=voxel_centers.device).float() * scale_factor + pc_range = torch.tensor( + self.point_cloud_range[0:3], device=voxel_centers.device).float() + voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range + return voxel_centers + + def sample_key_points(self, points: List[torch.Tensor], + coors: torch.Tensor) -> torch.Tensor: + """Sample key points from raw points cloud. + + Args: + points (List[torch.Tensor]): Point cloud of each sample. + coors (torch.Tensor): Coordinates of voxels shape is Nx(1+NDim), + where 1 represents the batch index. + + Returns: + torch.Tensor: (B, M, 3) Key points of each sample. + M is num_keypoints. + """ + assert points is not None or coors is not None + if self.voxel_center_as_source: + _src_points = self.get_voxel_centers(coors=coors, scale_factor=1) + batch_size = coors[-1, 0].item() + 1 + src_points = [ + _src_points[coors[:, 0] == b] for b in range(batch_size) + ] + else: + src_points = [p[..., :3] for p in points] + + keypoints_list = [] + for points_to_sample in src_points: + num_points = points_to_sample.shape[0] + cur_pt_idxs = furthest_point_sample( + points_to_sample.unsqueeze(dim=0).contiguous(), + self.num_keypoints).long()[0] + + if num_points < self.num_keypoints: + times = int(self.num_keypoints / num_points) + 1 + non_empty = cur_pt_idxs[:num_points] + cur_pt_idxs = non_empty.repeat(times)[:self.num_keypoints] + + keypoints = points_to_sample[cur_pt_idxs] + + keypoints_list.append(keypoints) + keypoints = torch.stack(keypoints_list, dim=0) # (B, M, 3) + return keypoints + + def forward(self, batch_inputs_dict: dict, feats_dict: dict, + rpn_results_list: InstanceList) -> dict: + """Extract point-wise features from multi-input. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points', 'voxels' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + - voxels (dict[torch.Tensor]): Voxels of the batch sample. + feats_dict (dict): Contains features from the first + stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + + Returns: + dict: Contain Point-wise features, include: + - keypoints (torch.Tensor): Sampled key points. + - keypoint_features (torch.Tensor): Gathered key points + features from multi input. + - fusion_keypoint_features (torch.Tensor): Fusion + keypoint_features by point_feature_fusion_layer. 
+ """ + points = batch_inputs_dict['points'] + voxel_encode_features = feats_dict['multi_scale_3d_feats'] + bev_encode_features = feats_dict['spatial_feats'] + if self.voxel_center_as_source: + voxels_coors = batch_inputs_dict['voxels']['coors'] + else: + voxels_coors = None + keypoints = self.sample_key_points(points, voxels_coors) + + point_features_list = [] + batch_size = len(points) + + if self.bev_cfg is not None: + point_bev_features = self.interpolate_from_bev_features( + keypoints, bev_encode_features, batch_size, + self.bev_cfg.bev_scale_factor) + point_features_list.append(point_bev_features.contiguous()) + + batch_size, num_keypoints, _ = keypoints.shape + key_xyz = keypoints.view(-1, 3) + key_xyz_batch_cnt = key_xyz.new_zeros(batch_size).int().fill_( + num_keypoints) + + if self.rawpoints_sa_layer is not None: + batch_points = torch.cat(points, dim=0) + batch_cnt = [len(p) for p in points] + xyz = batch_points[:, :3].contiguous() + features = None + if batch_points.size(1) > 0: + features = batch_points[:, 3:].contiguous() + xyz_batch_cnt = xyz.new_tensor(batch_cnt, dtype=torch.int32) + + pooled_points, pooled_features = self.rawpoints_sa_layer( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=key_xyz.contiguous(), + new_xyz_batch_cnt=key_xyz_batch_cnt, + features=features.contiguous(), + ) + + point_features_list.append(pooled_features.contiguous().view( + batch_size, num_keypoints, -1)) + if self.voxel_sa_layers is not None: + for k, voxel_sa_layer in enumerate(self.voxel_sa_layers): + cur_coords = voxel_encode_features[k].indices + xyz = self.get_voxel_centers( + coors=cur_coords, + scale_factor=self.voxel_sa_configs_list[k].scale_factor + ).contiguous() + xyz_batch_cnt = xyz.new_zeros(batch_size).int() + for bs_idx in range(batch_size): + xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum() + + pooled_points, pooled_features = voxel_sa_layer( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=key_xyz.contiguous(), + new_xyz_batch_cnt=key_xyz_batch_cnt, + features=voxel_encode_features[k].features.contiguous(), + ) + point_features_list.append(pooled_features.contiguous().view( + batch_size, num_keypoints, -1)) + + point_features = torch.cat( + point_features_list, dim=-1).view(batch_size * num_keypoints, -1, + 1) + + fusion_point_features = self.point_feature_fusion_layer( + point_features.unsqueeze(dim=-1)).squeeze(dim=-1) + + batch_idxs = torch.arange( + batch_size * num_keypoints, device=keypoints.device + ) // num_keypoints # batch indexes of each key points + batch_keypoints_xyz = torch.cat( + (batch_idxs.to(key_xyz.dtype).unsqueeze(dim=-1), key_xyz), dim=-1) + + return dict( + keypoint_features=point_features.squeeze(dim=-1), + fusion_keypoint_features=fusion_point_features.squeeze(dim=-1), + keypoints=batch_keypoints_xyz) diff --git a/mmdet3d/models/necks/__init__.py b/mmdet3d/models/necks/__init__.py new file mode 100755 index 0000000..53b885c --- /dev/null +++ b/mmdet3d/models/necks/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmdet.models.necks.fpn import FPN + +from .dla_neck import DLANeck +from .imvoxel_neck import IndoorImVoxelNeck, OutdoorImVoxelNeck +from .pointnet2_fp_neck import PointNetFPNeck +from .second_fpn import SECONDFPN + +__all__ = [ + 'FPN', 'SECONDFPN', 'OutdoorImVoxelNeck', 'PointNetFPNeck', 'DLANeck', + 'IndoorImVoxelNeck' +] diff --git a/mmdet3d/models/necks/dla_neck.py b/mmdet3d/models/necks/dla_neck.py new file mode 100755 index 0000000..6ff194b --- /dev/null +++ b/mmdet3d/models/necks/dla_neck.py @@ -0,0 +1,233 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import numpy as np +from mmcv.cnn import ConvModule, build_conv_layer +from mmengine.model import BaseModule +from torch import nn as nn + +from mmdet3d.registry import MODELS + + +def fill_up_weights(up): + """Simulated bilinear upsampling kernel. + + Args: + up (nn.Module): ConvTranspose2d module. + """ + w = up.weight.data + f = math.ceil(w.size(2) / 2) + c = (2 * f - 1 - f % 2) / (2. * f) + for i in range(w.size(2)): + for j in range(w.size(3)): + w[0, 0, i, j] = \ + (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) + for c in range(1, w.size(0)): + w[c, 0, :, :] = w[0, 0, :, :] + + +class IDAUpsample(BaseModule): + """Iterative Deep Aggregation (IDA) Upsampling module to upsample features + of different scales to a similar scale. + + Args: + out_channels (int): Number of output channels for DeformConv. + in_channels (List[int]): List of input channels of multi-scale + feature maps. + kernel_sizes (List[int]): List of size of the convolving + kernel of different scales. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + use_dcn (bool, optional): If True, use DCNv2. Default: True. + """ + + def __init__( + self, + out_channels, + in_channels, + kernel_sizes, + norm_cfg=None, + use_dcn=True, + init_cfg=None, + ): + super(IDAUpsample, self).__init__(init_cfg) + self.use_dcn = use_dcn + self.projs = nn.ModuleList() + self.ups = nn.ModuleList() + self.nodes = nn.ModuleList() + + for i in range(1, len(in_channels)): + in_channel = in_channels[i] + up_kernel_size = int(kernel_sizes[i]) + proj = ConvModule( + in_channel, + out_channels, + 3, + padding=1, + bias=True, + conv_cfg=dict(type='DCNv2') if self.use_dcn else None, + norm_cfg=norm_cfg) + node = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + bias=True, + conv_cfg=dict(type='DCNv2') if self.use_dcn else None, + norm_cfg=norm_cfg) + up = build_conv_layer( + dict(type='deconv'), + out_channels, + out_channels, + up_kernel_size * 2, + stride=up_kernel_size, + padding=up_kernel_size // 2, + output_padding=0, + groups=out_channels, + bias=False) + + self.projs.append(proj) + self.ups.append(up) + self.nodes.append(node) + + def forward(self, mlvl_features, start_level, end_level): + """Forward function. + + Args: + mlvl_features (list[torch.Tensor]): Features from multiple layers. + start_level (int): Start layer for feature upsampling. + end_level (int): End layer for feature upsampling. + """ + for i in range(start_level, end_level - 1): + upsample = self.ups[i - start_level] + project = self.projs[i - start_level] + mlvl_features[i + 1] = upsample(project(mlvl_features[i + 1])) + node = self.nodes[i - start_level] + mlvl_features[i + 1] = node(mlvl_features[i + 1] + + mlvl_features[i]) + + +class DLAUpsample(BaseModule): + """Deep Layer Aggregation (DLA) Upsampling module for different scales + feature extraction, upsampling and fusion, It consists of groups of + IDAupsample modules. 
+ + Args: + start_level (int): The start layer. + channels (List[int]): List of input channels of multi-scale + feature maps. + scales(List[int]): List of scale of different layers' feature. + in_channels (NoneType, optional): List of input channels of + different scales. Default: None. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + use_dcn (bool, optional): Whether to use dcn in IDAup module. + Default: True. + """ + + def __init__(self, + start_level, + channels, + scales, + in_channels=None, + norm_cfg=None, + use_dcn=True, + init_cfg=None): + super(DLAUpsample, self).__init__(init_cfg) + self.start_level = start_level + if in_channels is None: + in_channels = channels + self.channels = channels + channels = list(channels) + scales = np.array(scales, dtype=int) + for i in range(len(channels) - 1): + j = -i - 2 + setattr( + self, 'ida_{}'.format(i), + IDAUpsample(channels[j], in_channels[j:], + scales[j:] // scales[j], norm_cfg, use_dcn)) + scales[j + 1:] = scales[j] + in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] + + def forward(self, mlvl_features): + """Forward function. + + Args: + mlvl_features(list[torch.Tensor]): Features from multi-scale + layers. + + Returns: + tuple[torch.Tensor]: Up-sampled features of different layers. + """ + outs = [mlvl_features[-1]] + for i in range(len(mlvl_features) - self.start_level - 1): + ida = getattr(self, 'ida_{}'.format(i)) + ida(mlvl_features, len(mlvl_features) - i - 2, len(mlvl_features)) + outs.insert(0, mlvl_features[-1]) + return outs + + +@MODELS.register_module() +class DLANeck(BaseModule): + """DLA Neck. + + Args: + in_channels (list[int], optional): List of input channels + of multi-scale feature map. + start_level (int, optional): The scale level where upsampling + starts. Default: 2. + end_level (int, optional): The scale level where upsampling + ends. Default: 5. + norm_cfg (dict, optional): Config dict for normalization + layer. Default: None. + use_dcn (bool, optional): Whether to use dcn in IDAup module. + Default: True. 
+ """ + + def __init__(self, + in_channels=[16, 32, 64, 128, 256, 512], + start_level=2, + end_level=5, + norm_cfg=None, + use_dcn=True, + init_cfg=None): + super(DLANeck, self).__init__(init_cfg) + self.start_level = start_level + self.end_level = end_level + scales = [2**i for i in range(len(in_channels[self.start_level:]))] + self.dla_up = DLAUpsample( + start_level=self.start_level, + channels=in_channels[self.start_level:], + scales=scales, + norm_cfg=norm_cfg, + use_dcn=use_dcn) + self.ida_up = IDAUpsample( + in_channels[self.start_level], + in_channels[self.start_level:self.end_level], + [2**i for i in range(self.end_level - self.start_level)], norm_cfg, + use_dcn) + + def forward(self, x): + mlvl_features = [x[i] for i in range(len(x))] + mlvl_features = self.dla_up(mlvl_features) + outs = [] + for i in range(self.end_level - self.start_level): + outs.append(mlvl_features[i].clone()) + self.ida_up(outs, 0, len(outs)) + return [outs[-1]] + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.ConvTranspose2d): + # In order to be consistent with the source code, + # reset the ConvTranspose2d initialization parameters + m.reset_parameters() + # Simulated bilinear upsampling kernel + fill_up_weights(m) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + # In order to be consistent with the source code, + # reset the Conv2d initialization parameters + m.reset_parameters() diff --git a/mmdet3d/models/necks/imvoxel_neck.py b/mmdet3d/models/necks/imvoxel_neck.py new file mode 100755 index 0000000..94facbf --- /dev/null +++ b/mmdet3d/models/necks/imvoxel_neck.py @@ -0,0 +1,230 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class OutdoorImVoxelNeck(BaseModule): + """Neck for ImVoxelNet outdoor scenario. + + Args: + in_channels (int): Number of channels in an input tensor. + out_channels (int): Number of channels in all output tensors. + """ + + def __init__(self, in_channels, out_channels): + super(OutdoorImVoxelNeck, self).__init__() + self.model = nn.Sequential( + ResModule(in_channels, in_channels), + ConvModule( + in_channels=in_channels, + out_channels=in_channels * 2, + kernel_size=3, + stride=(1, 1, 2), + padding=1, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU', inplace=True)), + ResModule(in_channels * 2, in_channels * 2), + ConvModule( + in_channels=in_channels * 2, + out_channels=in_channels * 4, + kernel_size=3, + stride=(1, 1, 2), + padding=1, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU', inplace=True)), + ResModule(in_channels * 4, in_channels * 4), + ConvModule( + in_channels=in_channels * 4, + out_channels=out_channels, + kernel_size=3, + padding=(1, 1, 0), + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU', inplace=True))) + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): of shape (N, C_in, N_x, N_y, N_z). + + Returns: + list[torch.Tensor]: of shape (N, C_out, N_y, N_x). + """ + x = self.model.forward(x) + assert x.shape[-1] == 1 + # Anchor3DHead axis order is (y, x). 
+ return [x[..., 0].transpose(-1, -2)] + + def init_weights(self): + """Initialize weights of neck.""" + pass + + +@MODELS.register_module() +class IndoorImVoxelNeck(BaseModule): + """Neck for ImVoxelNet outdoor scenario. + + Args: + in_channels (int): Number of channels in an input tensor. + out_channels (int): Number of channels in all output tensors. + n_blocks (list[int]): Number of blocks for each feature level. + """ + + def __init__(self, in_channels, out_channels, n_blocks): + super(IndoorImVoxelNeck, self).__init__() + self.n_scales = len(n_blocks) + n_channels = in_channels + for i in range(len(n_blocks)): + stride = 1 if i == 0 else 2 + self.__setattr__(f'down_layer_{i}', + self._make_layer(stride, n_channels, n_blocks[i])) + n_channels = n_channels * stride + if i > 0: + self.__setattr__( + f'up_block_{i}', + self._make_up_block(n_channels, n_channels // 2)) + self.__setattr__(f'out_block_{i}', + self._make_block(n_channels, out_channels)) + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): of shape (N, C_in, N_x, N_y, N_z). + + Returns: + list[torch.Tensor]: of shape (N, C_out, N_xi, N_yi, N_zi). + """ + down_outs = [] + for i in range(self.n_scales): + x = self.__getattr__(f'down_layer_{i}')(x) + down_outs.append(x) + outs = [] + for i in range(self.n_scales - 1, -1, -1): + if i < self.n_scales - 1: + x = self.__getattr__(f'up_block_{i + 1}')(x) + x = down_outs[i] + x + out = self.__getattr__(f'out_block_{i}')(x) + outs.append(out) + return outs[::-1] + + @staticmethod + def _make_layer(stride, n_channels, n_blocks): + """Make a layer from several residual blocks. + + Args: + stride (int): Stride of the first residual block. + n_channels (int): Number of channels of the first residual block. + n_blocks (int): Number of residual blocks. + + Returns: + torch.nn.Module: With several residual blocks. + """ + blocks = [] + for i in range(n_blocks): + if i == 0 and stride != 1: + blocks.append(ResModule(n_channels, n_channels * 2, stride)) + n_channels = n_channels * 2 + else: + blocks.append(ResModule(n_channels, n_channels)) + return nn.Sequential(*blocks) + + @staticmethod + def _make_block(in_channels, out_channels): + """Make a convolutional block. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + + Returns: + torch.nn.Module: Convolutional block. + """ + return nn.Sequential( + nn.Conv3d(in_channels, out_channels, 3, 1, 1, bias=False), + nn.BatchNorm3d(out_channels), nn.ReLU(inplace=True)) + + @staticmethod + def _make_up_block(in_channels, out_channels): + """Make upsampling convolutional block. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + + Returns: + torch.nn.Module: Upsampling convolutional block. + """ + + return nn.Sequential( + nn.ConvTranspose3d(in_channels, out_channels, 2, 2, bias=False), + nn.BatchNorm3d(out_channels), nn.ReLU(inplace=True), + nn.Conv3d(out_channels, out_channels, 3, 1, 1, bias=False), + nn.BatchNorm3d(out_channels), nn.ReLU(inplace=True)) + + +class ResModule(nn.Module): + """3d residual block for ImVoxelNeck. + + Args: + in_channels (int): Number of channels in input tensor. + out_channels (int): Number of channels in output tensor. + stride (int, optional): Stride of the block. Defaults to 1. 
+ """ + + def __init__(self, in_channels, out_channels, stride=1): + super().__init__() + self.conv0 = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + padding=1, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU', inplace=True)) + self.conv1 = ConvModule( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + padding=1, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=None) + if stride != 1: + self.downsample = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, + padding=0, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=None) + self.stride = stride + self.activation = nn.ReLU(inplace=True) + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): of shape (N, C, N_x, N_y, N_z). + + Returns: + torch.Tensor: 5d feature map. + """ + identity = x + x = self.conv0(x) + x = self.conv1(x) + if self.stride != 1: + identity = self.downsample(identity) + x = x + identity + x = self.activation(x) + return x diff --git a/mmdet3d/models/necks/pointnet2_fp_neck.py b/mmdet3d/models/necks/pointnet2_fp_neck.py new file mode 100755 index 0000000..535b0be --- /dev/null +++ b/mmdet3d/models/necks/pointnet2_fp_neck.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.model import BaseModule +from torch import nn as nn + +from mmdet3d.models.layers.pointnet_modules import PointFPModule +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class PointNetFPNeck(BaseModule): + r"""PointNet FP Module used in PointRCNN. + + Refer to the `official code `_. + + .. code-block:: none + + sa_n ---------------------------------------- + | + ... --------------------------------- | + | | + sa_1 ------------- | | + | | | + sa_0 -> fp_0 -> fp_module ->fp_1 -> ... -> fp_module -> fp_n + + sa_n including sa_xyz (torch.Tensor) and sa_features (torch.Tensor) + fp_n including fp_xyz (torch.Tensor) and fp_features (torch.Tensor) + + Args: + fp_channels (tuple[tuple[int]]): Tuple of mlp channels in FP modules. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, fp_channels, init_cfg=None): + super(PointNetFPNeck, self).__init__(init_cfg=init_cfg) + + self.num_fp = len(fp_channels) + self.FP_modules = nn.ModuleList() + for cur_fp_mlps in fp_channels: + self.FP_modules.append(PointFPModule(mlp_channels=cur_fp_mlps)) + + def _extract_input(self, feat_dict): + """Extract inputs from features dictionary. + + Args: + feat_dict (dict): Feature dict from backbone, which may contain + the following keys and values: + + - sa_xyz (list[torch.Tensor]): Points of each sa module + in shape (N, 3). + - sa_features (list[torch.Tensor]): Output features of + each sa module in shape (N, M). + + Returns: + list[torch.Tensor]: Coordinates of multiple levels of points. + list[torch.Tensor]: Features of multiple levels of points. + """ + sa_xyz = feat_dict['sa_xyz'] + sa_features = feat_dict['sa_features'] + assert len(sa_xyz) == len(sa_features) + + return sa_xyz, sa_features + + def forward(self, feat_dict): + """Forward pass. + + Args: + feat_dict (dict): Feature dict from backbone. + + Returns: + dict[str, torch.Tensor]: Outputs of the Neck. + + - fp_xyz (torch.Tensor): The coordinates of fp features. + - fp_features (torch.Tensor): The features from the last + feature propagation layers. 
+ """ + sa_xyz, sa_features = self._extract_input(feat_dict) + + fp_feature = sa_features[-1] + fp_xyz = sa_xyz[-1] + + for i in range(self.num_fp): + # consume the points in a bottom-up manner + fp_feature = self.FP_modules[i](sa_xyz[-(i + 2)], sa_xyz[-(i + 1)], + sa_features[-(i + 2)], fp_feature) + fp_xyz = sa_xyz[-(i + 2)] + + ret = dict(fp_xyz=fp_xyz, fp_features=fp_feature) + return ret diff --git a/mmdet3d/models/necks/second_fpn.py b/mmdet3d/models/necks/second_fpn.py new file mode 100755 index 0000000..43b0342 --- /dev/null +++ b/mmdet3d/models/necks/second_fpn.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmcv.cnn import build_conv_layer, build_norm_layer, build_upsample_layer +from mmengine.model import BaseModule +from torch import nn as nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class SECONDFPN(BaseModule): + """FPN used in SECOND/PointPillars/PartA2/MVXNet. + + Args: + in_channels (list[int]): Input channels of multi-scale feature maps. + out_channels (list[int]): Output channels of feature maps. + upsample_strides (list[int]): Strides used to upsample the + feature maps. + norm_cfg (dict): Config dict of normalization layers. + upsample_cfg (dict): Config dict of upsample layers. + conv_cfg (dict): Config dict of conv layers. + use_conv_for_no_stride (bool): Whether to use conv when stride is 1. + """ + + def __init__(self, + in_channels=[128, 128, 256], + out_channels=[256, 256, 256], + upsample_strides=[1, 2, 4], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + conv_cfg=dict(type='Conv2d', bias=False), + use_conv_for_no_stride=False, + init_cfg=None): + # if for GroupNorm, + # cfg is dict(type='GN', num_groups=num_groups, eps=1e-3, affine=True) + super(SECONDFPN, self).__init__(init_cfg=init_cfg) + assert len(out_channels) == len(upsample_strides) == len(in_channels) + self.in_channels = in_channels + self.out_channels = out_channels + self.fp16_enabled = False + + deblocks = [] + for i, out_channel in enumerate(out_channels): + stride = upsample_strides[i] + if stride > 1 or (stride == 1 and not use_conv_for_no_stride): + upsample_layer = build_upsample_layer( + upsample_cfg, + in_channels=in_channels[i], + out_channels=out_channel, + kernel_size=upsample_strides[i], + stride=upsample_strides[i]) + else: + stride = np.round(1 / stride).astype(np.int64) + upsample_layer = build_conv_layer( + conv_cfg, + in_channels=in_channels[i], + out_channels=out_channel, + kernel_size=stride, + stride=stride) + + deblock = nn.Sequential(upsample_layer, + build_norm_layer(norm_cfg, out_channel)[1], + nn.ReLU(inplace=True)) + deblocks.append(deblock) + self.deblocks = nn.ModuleList(deblocks) + + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='ConvTranspose2d'), + dict(type='Constant', layer='NaiveSyncBatchNorm2d', val=1.0) + ] + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): 4D Tensor in (N, C, H, W) shape. + + Returns: + list[torch.Tensor]: Multi-level feature maps. 
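+
+        Example:
+            >>> # A minimal sketch with the default channel setting; the
+            >>> # spatial sizes are illustrative assumptions. The input is
+            >>> # a list/tuple with one multi-scale feature map per entry
+            >>> # of ``in_channels``.
+            >>> import torch
+            >>> neck = SECONDFPN()
+            >>> x = [torch.rand(1, 128, 100, 100),
+            ...      torch.rand(1, 128, 50, 50),
+            ...      torch.rand(1, 256, 25, 25)]
+            >>> neck(x)[0].shape
+            torch.Size([1, 768, 100, 100])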
+ """ + assert len(x) == len(self.in_channels) + ups = [deblock(x[i]) for i, deblock in enumerate(self.deblocks)] + + if len(ups) > 1: + out = torch.cat(ups, dim=1) + else: + out = ups[0] + return [out] diff --git a/mmdet3d/models/roi_heads/__init__.py b/mmdet3d/models/roi_heads/__init__.py new file mode 100755 index 0000000..0e90b1a --- /dev/null +++ b/mmdet3d/models/roi_heads/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_3droi_head import Base3DRoIHead +from .bbox_heads import PartA2BboxHead +from .h3d_roi_head import H3DRoIHead +from .mask_heads import PointwiseSemanticHead, PrimitiveHead +from .part_aggregation_roi_head import PartAggregationROIHead +from .point_rcnn_roi_head import PointRCNNRoIHead +from .pv_rcnn_roi_head import PVRCNNRoiHead +from .roi_extractors import Single3DRoIAwareExtractor, SingleRoIExtractor + +__all__ = [ + 'Base3DRoIHead', 'PartAggregationROIHead', 'PointwiseSemanticHead', + 'Single3DRoIAwareExtractor', 'PartA2BboxHead', 'SingleRoIExtractor', + 'H3DRoIHead', 'PrimitiveHead', 'PointRCNNRoIHead', 'PVRCNNRoiHead' +] diff --git a/mmdet3d/models/roi_heads/base_3droi_head.py b/mmdet3d/models/roi_heads/base_3droi_head.py new file mode 100755 index 0000000..9a6fb6b --- /dev/null +++ b/mmdet3d/models/roi_heads/base_3droi_head.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet.models.roi_heads import BaseRoIHead + +from mmdet3d.registry import MODELS, TASK_UTILS + + +class Base3DRoIHead(BaseRoIHead): + """Base class for 3d RoIHeads.""" + + def __init__(self, + bbox_head=None, + bbox_roi_extractor=None, + mask_head=None, + mask_roi_extractor=None, + train_cfg=None, + test_cfg=None, + init_cfg=None): + super(Base3DRoIHead, self).__init__( + bbox_head=bbox_head, + bbox_roi_extractor=bbox_roi_extractor, + mask_head=mask_head, + mask_roi_extractor=mask_roi_extractor, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + + def init_bbox_head(self, bbox_roi_extractor: dict, + bbox_head: dict) -> None: + """Initialize box head and box roi extractor. + + Args: + bbox_roi_extractor (dict or ConfigDict): Config of box + roi extractor. + bbox_head (dict or ConfigDict): Config of box in box head. + """ + self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor) + self.bbox_head = MODELS.build(bbox_head) + + def init_assigner_sampler(self): + """Initialize assigner and sampler.""" + self.bbox_assigner = None + self.bbox_sampler = None + if self.train_cfg: + if isinstance(self.train_cfg.assigner, dict): + self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner) + elif isinstance(self.train_cfg.assigner, list): + self.bbox_assigner = [ + TASK_UTILS.build(res) for res in self.train_cfg.assigner + ] + self.bbox_sampler = TASK_UTILS.build(self.train_cfg.sampler) + + def init_mask_head(self): + """Initialize mask head, skip since ``PartAggregationROIHead`` does not + have one.""" + pass diff --git a/mmdet3d/models/roi_heads/bbox_heads/__init__.py b/mmdet3d/models/roi_heads/bbox_heads/__init__.py new file mode 100755 index 0000000..994465e --- /dev/null +++ b/mmdet3d/models/roi_heads/bbox_heads/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmdet.models.roi_heads.bbox_heads import (BBoxHead, ConvFCBBoxHead, + DoubleConvFCBBoxHead, + Shared2FCBBoxHead, + Shared4Conv1FCBBoxHead) + +from .h3d_bbox_head import H3DBboxHead +from .parta2_bbox_head import PartA2BboxHead +from .point_rcnn_bbox_head import PointRCNNBboxHead +from .pv_rcnn_bbox_head import PVRCNNBBoxHead + +__all__ = [ + 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', + 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'PartA2BboxHead', + 'H3DBboxHead', 'PointRCNNBboxHead', 'PVRCNNBBoxHead' +] diff --git a/mmdet3d/models/roi_heads/bbox_heads/h3d_bbox_head.py b/mmdet3d/models/roi_heads/bbox_heads/h3d_bbox_head.py new file mode 100755 index 0000000..8168a5e --- /dev/null +++ b/mmdet3d/models/roi_heads/bbox_heads/h3d_bbox_head.py @@ -0,0 +1,990 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import torch +from mmcv.cnn import ConvModule +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.models import aligned_3d_nms +from mmdet3d.models.layers.pointnet_modules import build_sa_module +from mmdet3d.models.losses import chamfer_distance +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures import (BaseInstance3DBoxes, DepthInstance3DBoxes, + Det3DDataSample) + + +@MODELS.register_module() +class H3DBboxHead(BaseModule): + r"""Bbox head of `H3DNet `_. + + Args: + num_classes (int): The number of classes. + surface_matching_cfg (dict): Config for surface primitive matching. + line_matching_cfg (dict): Config for line primitive matching. + bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for encoding and + decoding boxes. + train_cfg (dict): Config for training. Defaults to None. + test_cfg (dict): Config for testing. Defaults to None. + gt_per_seed (int): Number of ground truth votes generated + from each seed point. Defaults to 1. + num_proposal (int): Number of proposal votes generated. + Defaults to 256. + primitive_feat_refine_streams (int): The number of mlps to + refine primitive feature. Defaults to 2. + primitive_refine_channels (tuple[int]): Convolution channels of + prediction layer. Defaults to [128, 128, 128]. + upper_thresh (float): Threshold for line matching. Defaults to 100. + surface_thresh (float): Threshold for surface matching. + Defaults to 0.5. + line_thresh (float): Threshold for line matching. Defaults to 0.5. + conv_cfg (dict): Config of convolution in prediction layer. + Defaults to None. + norm_cfg (dict): Config of BN in prediction layer. Defaults to None. + objectness_loss (dict): Config of objectness loss. Defaults to None. + center_loss (dict): Config of center loss. Defaults to None. + dir_class_loss (dict): Config of direction classification loss. + Defaults to None. + dir_res_loss (dict): Config of direction residual regression loss. + Defaults to None. + size_class_loss (dict): Config of size classification loss. + Defaults to None. + size_res_loss (dict): Config of size residual regression loss. + Defaults to None. + semantic_loss (dict): Config of point-wise semantic segmentation loss. + Defaults to None. + cues_objectness_loss (dict): Config of cues objectness loss. + Defaults to None. + cues_semantic_loss (dict): Config of cues semantic loss. + Defaults to None. + proposal_objectness_loss (dict): Config of proposal objectness + loss. Defaults to None. 
+ primitive_center_loss (dict): Config of primitive center regression + loss. Defaults to None. + """ + + def __init__(self, + num_classes: int, + suface_matching_cfg: dict, + line_matching_cfg: dict, + bbox_coder: dict, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + gt_per_seed: int = 1, + num_proposal: int = 256, + primitive_feat_refine_streams: int = 2, + primitive_refine_channels: List[int] = [128, 128, 128], + upper_thresh: float = 100.0, + surface_thresh: float = 0.5, + line_thresh: float = 0.5, + conv_cfg: dict = dict(type='Conv1d'), + norm_cfg: dict = dict(type='BN1d'), + objectness_loss: Optional[dict] = None, + center_loss: Optional[dict] = None, + dir_class_loss: Optional[dict] = None, + dir_res_loss: Optional[dict] = None, + size_class_loss: Optional[dict] = None, + size_res_loss: Optional[dict] = None, + semantic_loss: Optional[dict] = None, + cues_objectness_loss: Optional[dict] = None, + cues_semantic_loss: Optional[dict] = None, + proposal_objectness_loss: Optional[dict] = None, + primitive_center_loss: Optional[dict] = None, + init_cfg: dict = None): + super(H3DBboxHead, self).__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.gt_per_seed = gt_per_seed + self.num_proposal = num_proposal + self.with_angle = bbox_coder['with_rot'] + self.upper_thresh = upper_thresh + self.surface_thresh = surface_thresh + self.line_thresh = line_thresh + + self.loss_objectness = MODELS.build(objectness_loss) + self.loss_center = MODELS.build(center_loss) + self.loss_dir_class = MODELS.build(dir_class_loss) + self.loss_dir_res = MODELS.build(dir_res_loss) + self.loss_size_class = MODELS.build(size_class_loss) + self.loss_size_res = MODELS.build(size_res_loss) + self.loss_semantic = MODELS.build(semantic_loss) + + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.num_sizes = self.bbox_coder.num_sizes + self.num_dir_bins = self.bbox_coder.num_dir_bins + + self.loss_cues_objectness = MODELS.build(cues_objectness_loss) + self.loss_cues_semantic = MODELS.build(cues_semantic_loss) + self.loss_proposal_objectness = MODELS.build(proposal_objectness_loss) + self.loss_primitive_center = MODELS.build(primitive_center_loss) + + assert suface_matching_cfg['mlp_channels'][-1] == \ + line_matching_cfg['mlp_channels'][-1] + + # surface center matching + self.surface_center_matcher = build_sa_module(suface_matching_cfg) + # line center matching + self.line_center_matcher = build_sa_module(line_matching_cfg) + + # Compute the matching scores + matching_feat_dims = suface_matching_cfg['mlp_channels'][-1] + self.matching_conv = ConvModule( + matching_feat_dims, + matching_feat_dims, + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True, + inplace=True) + self.matching_pred = nn.Conv1d(matching_feat_dims, 2, 1) + + # Compute the semantic matching scores + self.semantic_matching_conv = ConvModule( + matching_feat_dims, + matching_feat_dims, + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True, + inplace=True) + self.semantic_matching_pred = nn.Conv1d(matching_feat_dims, 2, 1) + + # Surface feature aggregation + self.surface_feats_aggregation = list() + for k in range(primitive_feat_refine_streams): + self.surface_feats_aggregation.append( + ConvModule( + matching_feat_dims, + matching_feat_dims, + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True, + inplace=True)) + self.surface_feats_aggregation = nn.Sequential( + *self.surface_feats_aggregation) + + # 
Line feature aggregation + self.line_feats_aggregation = list() + for k in range(primitive_feat_refine_streams): + self.line_feats_aggregation.append( + ConvModule( + matching_feat_dims, + matching_feat_dims, + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True, + inplace=True)) + self.line_feats_aggregation = nn.Sequential( + *self.line_feats_aggregation) + + # surface center(6) + line center(12) + prev_channel = 18 * matching_feat_dims + self.bbox_pred = nn.ModuleList() + for k in range(len(primitive_refine_channels)): + self.bbox_pred.append( + ConvModule( + prev_channel, + primitive_refine_channels[k], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True, + inplace=False)) + prev_channel = primitive_refine_channels[k] + + # Final object detection + # Objectness scores (2), center residual (3), + # heading class+residual (num_heading_bin*2), size class + + # residual(num_size_cluster*4) + conv_out_channel = (2 + 3 + bbox_coder['num_dir_bins'] * 2 + + bbox_coder['num_sizes'] * 4 + self.num_classes) + self.bbox_pred.append(nn.Conv1d(prev_channel, conv_out_channel, 1)) + + def forward(self, feats_dict: dict): + """Forward pass. + + Args: + feats_dict (dict): Feature dict from backbone. + + Returns: + dict: Predictions of head. + """ + ret_dict = {} + aggregated_points = feats_dict['aggregated_points'] + original_feature = feats_dict['aggregated_features'] + batch_size = original_feature.shape[0] + object_proposal = original_feature.shape[2] + + # Extract surface center, features and semantic predictions + z_center = feats_dict['pred_z_center'] + xy_center = feats_dict['pred_xy_center'] + z_semantic = feats_dict['sem_cls_scores_z'] + xy_semantic = feats_dict['sem_cls_scores_xy'] + z_feature = feats_dict['aggregated_features_z'] + xy_feature = feats_dict['aggregated_features_xy'] + # Extract line points and features + line_center = feats_dict['pred_line_center'] + line_feature = feats_dict['aggregated_features_line'] + + surface_center_pred = torch.cat((z_center, xy_center), dim=1) + ret_dict['surface_center_pred'] = surface_center_pred + ret_dict['surface_sem_pred'] = torch.cat((z_semantic, xy_semantic), + dim=1) + + # Extract the surface and line centers of rpn proposals + rpn_proposals = feats_dict['rpn_proposals'] + rpn_proposals_bbox = DepthInstance3DBoxes( + rpn_proposals.reshape(-1, 7).clone(), + box_dim=rpn_proposals.shape[-1], + with_yaw=self.with_angle, + origin=(0.5, 0.5, 0.5)) + + obj_surface_center, obj_line_center = \ + rpn_proposals_bbox.get_surface_line_center() + obj_surface_center = obj_surface_center.reshape( + batch_size, -1, 6, 3).transpose(1, 2).reshape(batch_size, -1, 3) + obj_line_center = obj_line_center.reshape(batch_size, -1, 12, + 3).transpose(1, 2).reshape( + batch_size, -1, 3) + ret_dict['surface_center_object'] = obj_surface_center + ret_dict['line_center_object'] = obj_line_center + + # aggregate primitive z and xy features to rpn proposals + surface_center_feature_pred = torch.cat((z_feature, xy_feature), dim=2) + surface_center_feature_pred = torch.cat( + (surface_center_feature_pred.new_zeros( + (batch_size, 6, surface_center_feature_pred.shape[2])), + surface_center_feature_pred), + dim=1) + + surface_xyz, surface_features, _ = self.surface_center_matcher( + surface_center_pred, + surface_center_feature_pred, + target_xyz=obj_surface_center) + + # aggregate primitive line features to rpn proposals + line_feature = torch.cat((line_feature.new_zeros( + (batch_size, 12, line_feature.shape[2])), line_feature), + dim=1) + 
line_xyz, line_features, _ = self.line_center_matcher( + line_center, line_feature, target_xyz=obj_line_center) + + # combine the surface and line features + combine_features = torch.cat((surface_features, line_features), dim=2) + + matching_features = self.matching_conv(combine_features) + matching_score = self.matching_pred(matching_features) + ret_dict['matching_score'] = matching_score.transpose(2, 1) + + semantic_matching_features = self.semantic_matching_conv( + combine_features) + semantic_matching_score = self.semantic_matching_pred( + semantic_matching_features) + ret_dict['semantic_matching_score'] = \ + semantic_matching_score.transpose(2, 1) + + surface_features = self.surface_feats_aggregation(surface_features) + line_features = self.line_feats_aggregation(line_features) + + # Combine all surface and line features + surface_features = surface_features.view(batch_size, -1, + object_proposal) + line_features = line_features.view(batch_size, -1, object_proposal) + + combine_feature = torch.cat((surface_features, line_features), dim=1) + + # Final bbox predictions + bbox_predictions = self.bbox_pred[0](combine_feature) + bbox_predictions += original_feature + for conv_module in self.bbox_pred[1:]: + bbox_predictions = conv_module(bbox_predictions) + + refine_decode_res = self.bbox_coder.split_pred( + bbox_predictions[:, :self.num_classes + 2], + bbox_predictions[:, self.num_classes + 2:], aggregated_points) + for key in refine_decode_res.keys(): + ret_dict[key + '_optimized'] = refine_decode_res[key] + return ret_dict + + def loss( + self, + points: List[Tensor], + feats_dict: dict, + rpn_targets: Tuple = None, + batch_data_samples: List[Det3DDataSample] = None, + ): + """ + Args: + points (list[tensor]): Points cloud of multiple samples. + feats_dict (dict): Predictions from backbone or FPN. + rpn_targets (Tuple, Optional): The target of sample from RPN. + Defaults to None. + batch_data_samples (list[:obj:`Det3DDataSample`], Optional): + Each item contains the meta information of each sample + and corresponding annotations. Defaults to None. + + Returns: + dict: A dictionary of loss components. + """ + preds = self(feats_dict) + feats_dict.update(preds) + + (vote_targets, vote_target_masks, size_class_targets, size_res_targets, + dir_class_targets, dir_res_targets, center_targets, _, mask_targets, + valid_gt_masks, objectness_targets, objectness_weights, + box_loss_weights, valid_gt_weights) = rpn_targets + + losses = {} + + # calculate refined proposal loss + refined_proposal_loss = self.get_proposal_stage_loss( + feats_dict, + size_class_targets, + size_res_targets, + dir_class_targets, + dir_res_targets, + center_targets, + mask_targets, + objectness_targets, + objectness_weights, + box_loss_weights, + valid_gt_weights, + suffix='_optimized') + for key in refined_proposal_loss.keys(): + losses[key + '_optimized'] = refined_proposal_loss[key] + + batch_gt_instance_3d = [] + batch_input_metas = [] + + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instance_3d.append(data_sample.gt_instances_3d) + + temp_loss = self.loss_by_feat(points, feats_dict, batch_gt_instance_3d) + losses.update(temp_loss) + return losses + + def loss_by_feat(self, points: List[torch.Tensor], feats_dict: dict, + batch_gt_instances_3d: List[InstanceData], + **kwargs) -> dict: + """Compute loss. + + Args: + points (list[torch.Tensor]): Input points. + feats_dict (dict): Predictions from forward of vote head. 
+ batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes`` and ``labels`` + attributes. + + Returns: + dict: Losses of H3DNet. + """ + bbox3d_optimized = self.bbox_coder.decode( + feats_dict, suffix='_optimized') + + targets = self.get_targets(points, feats_dict, batch_gt_instances_3d) + + (cues_objectness_label, cues_sem_label, proposal_objectness_label, + cues_mask, cues_match_mask, proposal_objectness_mask, + cues_matching_label, obj_surface_line_center) = targets + + # match scores for each geometric primitive + objectness_scores = feats_dict['matching_score'] + # match scores for the semantics of primitives + objectness_scores_sem = feats_dict['semantic_matching_score'] + + primitive_objectness_loss = self.loss_cues_objectness( + objectness_scores.transpose(2, 1), + cues_objectness_label, + weight=cues_mask, + avg_factor=cues_mask.sum() + 1e-6) + + primitive_sem_loss = self.loss_cues_semantic( + objectness_scores_sem.transpose(2, 1), + cues_sem_label, + weight=cues_mask, + avg_factor=cues_mask.sum() + 1e-6) + + objectness_scores = feats_dict['obj_scores_optimized'] + objectness_loss_refine = self.loss_proposal_objectness( + objectness_scores.transpose(2, 1), proposal_objectness_label) + primitive_matching_loss = (objectness_loss_refine * + cues_match_mask).sum() / ( + cues_match_mask.sum() + 1e-6) * 0.5 + primitive_sem_matching_loss = ( + objectness_loss_refine * proposal_objectness_mask).sum() / ( + proposal_objectness_mask.sum() + 1e-6) * 0.5 + + # Get the object surface center here + batch_size, object_proposal = bbox3d_optimized.shape[:2] + refined_bbox = DepthInstance3DBoxes( + bbox3d_optimized.reshape(-1, 7).clone(), + box_dim=bbox3d_optimized.shape[-1], + with_yaw=self.with_angle, + origin=(0.5, 0.5, 0.5)) + + pred_obj_surface_center, pred_obj_line_center = \ + refined_bbox.get_surface_line_center() + pred_obj_surface_center = pred_obj_surface_center.reshape( + batch_size, -1, 6, 3).transpose(1, 2).reshape(batch_size, -1, 3) + pred_obj_line_center = pred_obj_line_center.reshape( + batch_size, -1, 12, 3).transpose(1, 2).reshape(batch_size, -1, 3) + pred_surface_line_center = torch.cat( + (pred_obj_surface_center, pred_obj_line_center), 1) + + square_dist = self.loss_primitive_center(pred_surface_line_center, + obj_surface_line_center) + + match_dist = torch.sqrt(square_dist.sum(dim=-1) + 1e-6) + primitive_centroid_reg_loss = torch.sum( + match_dist * cues_matching_label) / ( + cues_matching_label.sum() + 1e-6) + + refined_loss = dict( + primitive_objectness_loss=primitive_objectness_loss, + primitive_sem_loss=primitive_sem_loss, + primitive_matching_loss=primitive_matching_loss, + primitive_sem_matching_loss=primitive_sem_matching_loss, + primitive_centroid_reg_loss=primitive_centroid_reg_loss) + + return refined_loss + + def predict(self, + points: List[torch.Tensor], + feats_dict: Dict[str, torch.Tensor], + batch_data_samples: List[Det3DDataSample], + suffix='_optimized', + **kwargs) -> List[InstanceData]: + """ + Args: + points (list[tensor]): Point clouds of multiple samples. + feats_dict (dict): Features from FPN or backbone.. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes meta information of data. + suffix (str): suffix for tensor in feats_dict. + Defaults to '_optimized'. + + Returns: + list[:obj:`InstanceData`]: List of processed predictions. Each + InstanceData contains 3d Bounding boxes and corresponding + scores and labels. 
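+
+        Note:
+            This method runs the head's ``forward`` internally and updates
+            ``feats_dict`` in place with the refined predictions before they
+            are decoded into boxes by ``predict_by_feat``.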
+ """ + preds_dict = self(feats_dict) + # `preds_dict` can be used in H3DNET + feats_dict.update(preds_dict) + + batch_size = len(batch_data_samples) + batch_input_metas = [] + for batch_index in range(batch_size): + metainfo = batch_data_samples[batch_index].metainfo + batch_input_metas.append(metainfo) + + results_list = self.predict_by_feat( + points, feats_dict, batch_input_metas, suffix=suffix, **kwargs) + return results_list + + def predict_by_feat(self, + points: List[torch.Tensor], + feats_dict: dict, + batch_input_metas: List[dict], + suffix='_optimized', + **kwargs) -> List[InstanceData]: + """Generate bboxes from vote head predictions. + + Args: + points (List[torch.Tensor]): Input points of multiple samples. + feats_dict (dict): Predictions from previous components. + batch_input_metas (list[dict]): Each item + contains the meta information of each sample. + suffix (str): suffix for tensor in feats_dict. + Defaults to '_optimized'. + + Returns: + list[:obj:`InstanceData`]: Return list of processed + predictions. Each InstanceData cantains + 3d Bounding boxes and corresponding scores and labels. + """ + + # decode boxes + obj_scores = F.softmax( + feats_dict['obj_scores' + suffix], dim=-1)[..., -1] + + sem_scores = F.softmax(feats_dict['sem_scores'], dim=-1) + + prediction_collection = {} + prediction_collection['center'] = feats_dict['center' + suffix] + prediction_collection['dir_class'] = feats_dict['dir_class'] + prediction_collection['dir_res'] = feats_dict['dir_res' + suffix] + prediction_collection['size_class'] = feats_dict['size_class'] + prediction_collection['size_res'] = feats_dict['size_res' + suffix] + + bbox3d = self.bbox_coder.decode(prediction_collection) + + batch_size = bbox3d.shape[0] + results_list = list() + points = torch.stack(points) + for b in range(batch_size): + temp_results = InstanceData() + bbox_selected, score_selected, labels = self.multiclass_nms_single( + obj_scores[b], sem_scores[b], bbox3d[b], points[b, ..., :3], + batch_input_metas[b]) + bbox = batch_input_metas[b]['box_type_3d']( + bbox_selected, + box_dim=bbox_selected.shape[-1], + with_yaw=self.bbox_coder.with_rot) + + temp_results.bboxes_3d = bbox + temp_results.scores_3d = score_selected + temp_results.labels_3d = labels + results_list.append(temp_results) + + return results_list + + def multiclass_nms_single(self, obj_scores: Tensor, sem_scores: Tensor, + bbox: Tensor, points: Tensor, + input_meta: dict) -> Tuple: + """Multi-class nms in single batch. + + Args: + obj_scores (torch.Tensor): Objectness score of bounding boxes. + sem_scores (torch.Tensor): semantic class score of bounding boxes. + bbox (torch.Tensor): Predicted bounding boxes. + points (torch.Tensor): Input points. + input_meta (dict): Point cloud and image's meta info. + + Returns: + tuple[torch.Tensor]: Bounding boxes, scores and labels. 
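+
+        Note:
+            When ``self.test_cfg.per_class_proposal`` is True, every kept box
+            is duplicated once per class and scored by the product of its
+            objectness score and the corresponding semantic class score.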
+ """ + bbox = input_meta['box_type_3d']( + bbox, + box_dim=bbox.shape[-1], + with_yaw=self.bbox_coder.with_rot, + origin=(0.5, 0.5, 0.5)) + box_indices = bbox.points_in_boxes_all(points) + + corner3d = bbox.corners + minmax_box3d = corner3d.new(torch.Size((corner3d.shape[0], 6))) + minmax_box3d[:, :3] = torch.min(corner3d, dim=1)[0] + minmax_box3d[:, 3:] = torch.max(corner3d, dim=1)[0] + + nonempty_box_mask = box_indices.T.sum(1) > 5 + + bbox_classes = torch.argmax(sem_scores, -1) + nms_selected = aligned_3d_nms(minmax_box3d[nonempty_box_mask], + obj_scores[nonempty_box_mask], + bbox_classes[nonempty_box_mask], + self.test_cfg.nms_thr) + + # filter empty boxes and boxes with low score + scores_mask = (obj_scores > self.test_cfg.score_thr) + nonempty_box_inds = torch.nonzero( + nonempty_box_mask, as_tuple=False).flatten() + nonempty_mask = torch.zeros_like(bbox_classes).scatter( + 0, nonempty_box_inds[nms_selected], 1) + selected = (nonempty_mask.bool() & scores_mask.bool()) + + if self.test_cfg.per_class_proposal: + bbox_selected, score_selected, labels = [], [], [] + for k in range(sem_scores.shape[-1]): + bbox_selected.append(bbox[selected].tensor) + score_selected.append(obj_scores[selected] * + sem_scores[selected][:, k]) + labels.append( + torch.zeros_like(bbox_classes[selected]).fill_(k)) + bbox_selected = torch.cat(bbox_selected, 0) + score_selected = torch.cat(score_selected, 0) + labels = torch.cat(labels, 0) + else: + bbox_selected = bbox[selected].tensor + score_selected = obj_scores[selected] + labels = bbox_classes[selected] + + return bbox_selected, score_selected, labels + + def get_proposal_stage_loss(self, + bbox_preds, + size_class_targets, + size_res_targets, + dir_class_targets, + dir_res_targets, + center_targets, + mask_targets, + objectness_targets, + objectness_weights, + box_loss_weights, + valid_gt_weights, + suffix=''): + """Compute loss for the aggregation module. + + Args: + bbox_preds (dict): Predictions from forward of vote head. + size_class_targets (torch.Tensor): Ground truth + size class of each prediction bounding box. + size_res_targets (torch.Tensor): Ground truth + size residual of each prediction bounding box. + dir_class_targets (torch.Tensor): Ground truth + direction class of each prediction bounding box. + dir_res_targets (torch.Tensor): Ground truth + direction residual of each prediction bounding box. + center_targets (torch.Tensor): Ground truth center + of each prediction bounding box. + mask_targets (torch.Tensor): Validation of each + prediction bounding box. + objectness_targets (torch.Tensor): Ground truth + objectness label of each prediction bounding box. + objectness_weights (torch.Tensor): Weights of objectness + loss for each prediction bounding box. + box_loss_weights (torch.Tensor): Weights of regression + loss for each prediction bounding box. + valid_gt_weights (torch.Tensor): Validation of each + ground truth bounding box. + + Returns: + dict: Losses of aggregation module. 
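+
+        Note:
+            ``suffix`` selects which group of predictions in ``bbox_preds``
+            is supervised, e.g. ``''`` for the initial proposals and
+            ``'_optimized'`` for the refined predictions of this head.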
+ """ + # calculate objectness loss + objectness_loss = self.loss_objectness( + bbox_preds['obj_scores' + suffix].transpose(2, 1), + objectness_targets, + weight=objectness_weights) + + # calculate center loss + source2target_loss, target2source_loss = self.loss_center( + bbox_preds['center' + suffix], + center_targets, + src_weight=box_loss_weights, + dst_weight=valid_gt_weights) + center_loss = source2target_loss + target2source_loss + + # calculate direction class loss + dir_class_loss = self.loss_dir_class( + bbox_preds['dir_class' + suffix].transpose(2, 1), + dir_class_targets, + weight=box_loss_weights) + + # calculate direction residual loss + batch_size, proposal_num = size_class_targets.shape[:2] + heading_label_one_hot = dir_class_targets.new_zeros( + (batch_size, proposal_num, self.num_dir_bins)) + heading_label_one_hot.scatter_(2, dir_class_targets.unsqueeze(-1), 1) + dir_res_norm = (bbox_preds['dir_res_norm' + suffix] * + heading_label_one_hot).sum(dim=-1) + dir_res_loss = self.loss_dir_res( + dir_res_norm, dir_res_targets, weight=box_loss_weights) + + # calculate size class loss + size_class_loss = self.loss_size_class( + bbox_preds['size_class' + suffix].transpose(2, 1), + size_class_targets, + weight=box_loss_weights) + + # calculate size residual loss + one_hot_size_targets = box_loss_weights.new_zeros( + (batch_size, proposal_num, self.num_sizes)) + one_hot_size_targets.scatter_(2, size_class_targets.unsqueeze(-1), 1) + one_hot_size_targets_expand = one_hot_size_targets.unsqueeze( + -1).repeat(1, 1, 1, 3) + size_residual_norm = (bbox_preds['size_res_norm' + suffix] * + one_hot_size_targets_expand).sum(dim=2) + box_loss_weights_expand = box_loss_weights.unsqueeze(-1).repeat( + 1, 1, 3) + size_res_loss = self.loss_size_res( + size_residual_norm, + size_res_targets, + weight=box_loss_weights_expand) + + # calculate semantic loss + semantic_loss = self.loss_semantic( + bbox_preds['sem_scores' + suffix].transpose(2, 1), + mask_targets, + weight=box_loss_weights) + + losses = dict( + objectness_loss=objectness_loss, + semantic_loss=semantic_loss, + center_loss=center_loss, + dir_class_loss=dir_class_loss, + dir_res_loss=dir_res_loss, + size_class_loss=size_class_loss, + size_res_loss=size_res_loss) + + return losses + + def get_targets( + self, + points, + feats_dict: Optional[dict] = None, + batch_gt_instances_3d: Optional[List[InstanceData]] = None, + ): + """Generate targets of vote head. + + Args: + points (list[torch.Tensor]): Points of each batch. + feats_dict (dict, optional): Predictions of previous + components. Defaults to None. + batch_gt_instances_3d (list[:obj:`InstanceData`], optional): + Batch of gt_instances. It usually includes + ``bboxes_3d`` and ``labels_3d`` attributes. + + Returns: + tuple[torch.Tensor]: Targets of vote head. 
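+
+        Note:
+            The returned tuple contains, in order: ``cues_objectness_label``,
+            ``cues_sem_label``, ``proposal_objectness_label``, ``cues_mask``,
+            ``cues_match_mask``, ``proposal_objectness_mask``,
+            ``cues_matching_label`` and ``obj_surface_line_center``.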
+ """ + # find empty example + valid_gt_masks = list() + gt_num = list() + batch_gt_labels_3d = [ + gt_instances_3d.labels_3d + for gt_instances_3d in batch_gt_instances_3d + ] + batch_gt_bboxes_3d = [ + gt_instances_3d.bboxes_3d + for gt_instances_3d in batch_gt_instances_3d + ] + for index in range(len(batch_gt_labels_3d)): + if len(batch_gt_labels_3d[index]) == 0: + fake_box = batch_gt_bboxes_3d[index].tensor.new_zeros( + 1, batch_gt_bboxes_3d[index].tensor.shape[-1]) + batch_gt_bboxes_3d[index] = batch_gt_bboxes_3d[index].new_box( + fake_box) + batch_gt_labels_3d[index] = batch_gt_labels_3d[ + index].new_zeros(1) + valid_gt_masks.append(batch_gt_labels_3d[index].new_zeros(1)) + gt_num.append(1) + else: + valid_gt_masks.append(batch_gt_labels_3d[index].new_ones( + batch_gt_labels_3d[index].shape)) + gt_num.append(batch_gt_labels_3d[index].shape[0]) + + aggregated_points = [ + feats_dict['aggregated_points'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + surface_center_pred = [ + feats_dict['surface_center_pred'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + line_center_pred = [ + feats_dict['pred_line_center'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + surface_center_object = [ + feats_dict['surface_center_object'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + line_center_object = [ + feats_dict['line_center_object'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + surface_sem_pred = [ + feats_dict['surface_sem_pred'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + line_sem_pred = [ + feats_dict['sem_cls_scores_line'][i] + for i in range(len(batch_gt_labels_3d)) + ] + + (cues_objectness_label, cues_sem_label, proposal_objectness_label, + cues_mask, cues_match_mask, proposal_objectness_mask, + cues_matching_label, obj_surface_line_center) = multi_apply( + self._get_targets_single, points, batch_gt_bboxes_3d, + batch_gt_labels_3d, aggregated_points, surface_center_pred, + line_center_pred, surface_center_object, line_center_object, + surface_sem_pred, line_sem_pred) + + cues_objectness_label = torch.stack(cues_objectness_label) + cues_sem_label = torch.stack(cues_sem_label) + proposal_objectness_label = torch.stack(proposal_objectness_label) + cues_mask = torch.stack(cues_mask) + cues_match_mask = torch.stack(cues_match_mask) + proposal_objectness_mask = torch.stack(proposal_objectness_mask) + cues_matching_label = torch.stack(cues_matching_label) + obj_surface_line_center = torch.stack(obj_surface_line_center) + + return (cues_objectness_label, cues_sem_label, + proposal_objectness_label, cues_mask, cues_match_mask, + proposal_objectness_mask, cues_matching_label, + obj_surface_line_center) + + def _get_targets_single(self, + points: Tensor, + gt_bboxes_3d: BaseInstance3DBoxes, + gt_labels_3d: Tensor, + aggregated_points: Optional[Tensor] = None, + pred_surface_center: Optional[Tensor] = None, + pred_line_center: Optional[Tensor] = None, + pred_obj_surface_center: Optional[Tensor] = None, + pred_obj_line_center: Optional[Tensor] = None, + pred_surface_sem: Optional[Tensor] = None, + pred_line_sem: Optional[Tensor] = None): + """Generate targets for primitive cues for single batch. + + Args: + points (torch.Tensor): Points of each batch. + gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth + boxes of each batch. + gt_labels_3d (torch.Tensor): Labels of each batch. + aggregated_points (torch.Tensor): Aggregated points from + vote aggregation layer. + pred_surface_center (torch.Tensor): Prediction of surface center. 
+ pred_line_center (torch.Tensor): Prediction of line center. + pred_obj_surface_center (torch.Tensor): Objectness prediction + of surface center. + pred_obj_line_center (torch.Tensor): Objectness prediction of + line center. + pred_surface_sem (torch.Tensor): Semantic prediction of + surface center. + pred_line_sem (torch.Tensor): Semantic prediction of line center. + Returns: + tuple[torch.Tensor]: Targets for primitive cues. + """ + device = points.device + gt_bboxes_3d = gt_bboxes_3d.to(device) + num_proposals = aggregated_points.shape[0] + gt_center = gt_bboxes_3d.gravity_center + + dist1, dist2, ind1, _ = chamfer_distance( + aggregated_points.unsqueeze(0), + gt_center.unsqueeze(0), + reduction='none') + # Set assignment + object_assignment = ind1.squeeze(0) + + # Generate objectness label and mask + # objectness_label: 1 if pred object center is within + # self.train_cfg['near_threshold'] of any GT object + # objectness_mask: 0 if pred object center is in gray + # zone (DONOTCARE), 1 otherwise + euclidean_dist1 = torch.sqrt(dist1.squeeze(0) + 1e-6) + proposal_objectness_label = euclidean_dist1.new_zeros( + num_proposals, dtype=torch.long) + proposal_objectness_mask = euclidean_dist1.new_zeros(num_proposals) + + gt_sem = gt_labels_3d[object_assignment] + + obj_surface_center, obj_line_center = \ + gt_bboxes_3d.get_surface_line_center() + obj_surface_center = obj_surface_center.reshape(-1, 6, + 3).transpose(0, 1) + obj_line_center = obj_line_center.reshape(-1, 12, 3).transpose(0, 1) + obj_surface_center = obj_surface_center[:, object_assignment].reshape( + 1, -1, 3) + obj_line_center = obj_line_center[:, + object_assignment].reshape(1, -1, 3) + + surface_sem = torch.argmax(pred_surface_sem, dim=1).float() + line_sem = torch.argmax(pred_line_sem, dim=1).float() + + dist_surface, _, surface_ind, _ = chamfer_distance( + obj_surface_center, + pred_surface_center.unsqueeze(0), + reduction='none') + dist_line, _, line_ind, _ = chamfer_distance( + obj_line_center, pred_line_center.unsqueeze(0), reduction='none') + + surface_sel = pred_surface_center[surface_ind.squeeze(0)] + line_sel = pred_line_center[line_ind.squeeze(0)] + surface_sel_sem = surface_sem[surface_ind.squeeze(0)] + line_sel_sem = line_sem[line_ind.squeeze(0)] + + surface_sel_sem_gt = gt_sem.repeat(6).float() + line_sel_sem_gt = gt_sem.repeat(12).float() + + euclidean_dist_surface = torch.sqrt(dist_surface.squeeze(0) + 1e-6) + euclidean_dist_line = torch.sqrt(dist_line.squeeze(0) + 1e-6) + objectness_label_surface = euclidean_dist_line.new_zeros( + num_proposals * 6, dtype=torch.long) + + objectness_label_line = euclidean_dist_line.new_zeros( + num_proposals * 12, dtype=torch.long) + + objectness_label_surface_sem = euclidean_dist_line.new_zeros( + num_proposals * 6, dtype=torch.long) + objectness_label_line_sem = euclidean_dist_line.new_zeros( + num_proposals * 12, dtype=torch.long) + + euclidean_dist_obj_surface = torch.sqrt(( + (pred_obj_surface_center - surface_sel)**2).sum(dim=-1) + 1e-6) + euclidean_dist_obj_line = torch.sqrt( + torch.sum((pred_obj_line_center - line_sel)**2, dim=-1) + 1e-6) + + # Objectness score just with centers + proposal_objectness_label[ + euclidean_dist1 < self.train_cfg['near_threshold']] = 1 + proposal_objectness_mask[ + euclidean_dist1 < self.train_cfg['near_threshold']] = 1 + proposal_objectness_mask[ + euclidean_dist1 > self.train_cfg['far_threshold']] = 1 + + objectness_label_surface[ + (euclidean_dist_obj_surface < + self.train_cfg['label_surface_threshold']) * + (euclidean_dist_surface < + 
self.train_cfg['mask_surface_threshold'])] = 1 + objectness_label_surface_sem[ + (euclidean_dist_obj_surface < + self.train_cfg['label_surface_threshold']) * + (euclidean_dist_surface < self.train_cfg['mask_surface_threshold']) + * (surface_sel_sem == surface_sel_sem_gt)] = 1 + + objectness_label_line[ + (euclidean_dist_obj_line < self.train_cfg['label_line_threshold']) + * + (euclidean_dist_line < self.train_cfg['mask_line_threshold'])] = 1 + objectness_label_line_sem[ + (euclidean_dist_obj_line < self.train_cfg['label_line_threshold']) + * (euclidean_dist_line < self.train_cfg['mask_line_threshold']) * + (line_sel_sem == line_sel_sem_gt)] = 1 + + objectness_label_surface_obj = proposal_objectness_label.repeat(6) + objectness_mask_surface_obj = proposal_objectness_mask.repeat(6) + objectness_label_line_obj = proposal_objectness_label.repeat(12) + objectness_mask_line_obj = proposal_objectness_mask.repeat(12) + + objectness_mask_surface = objectness_mask_surface_obj + objectness_mask_line = objectness_mask_line_obj + + cues_objectness_label = torch.cat( + (objectness_label_surface, objectness_label_line), 0) + cues_sem_label = torch.cat( + (objectness_label_surface_sem, objectness_label_line_sem), 0) + cues_mask = torch.cat((objectness_mask_surface, objectness_mask_line), + 0) + + objectness_label_surface *= objectness_label_surface_obj + objectness_label_line *= objectness_label_line_obj + cues_matching_label = torch.cat( + (objectness_label_surface, objectness_label_line), 0) + + objectness_label_surface_sem *= objectness_label_surface_obj + objectness_label_line_sem *= objectness_label_line_obj + + cues_match_mask = (torch.sum( + cues_objectness_label.view(18, num_proposals), dim=0) >= + 1).float() + + obj_surface_line_center = torch.cat( + (obj_surface_center, obj_line_center), 1).squeeze(0) + + return (cues_objectness_label, cues_sem_label, + proposal_objectness_label, cues_mask, cues_match_mask, + proposal_objectness_mask, cues_matching_label, + obj_surface_line_center) diff --git a/mmdet3d/models/roi_heads/bbox_heads/parta2_bbox_head.py b/mmdet3d/models/roi_heads/bbox_heads/parta2_bbox_head.py new file mode 100755 index 0000000..2a21e40 --- /dev/null +++ b/mmdet3d/models/roi_heads/bbox_heads/parta2_bbox_head.py @@ -0,0 +1,658 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Tuple + +import numpy as np +import torch +from mmcv.cnn import ConvModule +from mmdet.models.utils import multi_apply +from mmengine.model import normal_init +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.models import make_sparse_convmodule +from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE +from mmdet3d.utils.typing_utils import InstanceList + +if IS_SPCONV2_AVAILABLE: + from spconv.pytorch import (SparseConvTensor, SparseMaxPool3d, + SparseSequential) +else: + from mmcv.ops import SparseConvTensor, SparseMaxPool3d, SparseSequential + +from mmengine.model import BaseModule +from torch import nn as nn + +from mmdet3d.models.layers import nms_bev, nms_normal_bev +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures.bbox_3d import (LiDARInstance3DBoxes, + rotation_3d_in_axis, xywhr2xyxyr) +from mmdet3d.utils.typing_utils import SamplingResultList + + +@MODELS.register_module() +class PartA2BboxHead(BaseModule): + """PartA2 RoI head. + + Args: + num_classes (int): The number of classes to prediction. + seg_in_channels (int): Input channels of segmentation + convolution layer. 
+ part_in_channels (int): Input channels of part convolution layer. + seg_conv_channels (list(int)): Out channels of each + segmentation convolution layer. + part_conv_channels (list(int)): Out channels of each + part convolution layer. + merge_conv_channels (list(int)): Out channels of each + feature merged convolution layer. + down_conv_channels (list(int)): Out channels of each + downsampled convolution layer. + shared_fc_channels (list(int)): Out channels of each shared fc layer. + cls_channels (list(int)): Out channels of each classification layer. + reg_channels (list(int)): Out channels of each regression layer. + dropout_ratio (float): Dropout ratio of classification and + regression layers. + roi_feat_size (int): The size of pooled roi features. + with_corner_loss (bool): Whether to use corner loss or not. + bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for box head. + conv_cfg (dict): Config dict of convolutional layers + norm_cfg (dict): Config dict of normalization layers + loss_bbox (dict): Config dict of box regression loss. + loss_cls (dict, optional): Config dict of classifacation loss. + """ + + def __init__(self, + num_classes: int, + seg_in_channels: int, + part_in_channels: int, + seg_conv_channels: List[int] = None, + part_conv_channels: List[int] = None, + merge_conv_channels: List[int] = None, + down_conv_channels: List[int] = None, + shared_fc_channels: List[int] = None, + cls_channels: List[int] = None, + reg_channels: List[int] = None, + dropout_ratio: float = 0.1, + roi_feat_size: int = 14, + with_corner_loss: bool = True, + bbox_coder: dict = dict(type='DeltaXYZWLHRBBoxCoder'), + conv_cfg: dict = dict(type='Conv1d'), + norm_cfg: dict = dict(type='BN1d', eps=1e-3, momentum=0.01), + loss_bbox: dict = dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_cls: dict = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='none', + loss_weight=1.0), + init_cfg: dict = None) -> None: + super(PartA2BboxHead, self).__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.with_corner_loss = with_corner_loss + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.loss_bbox = MODELS.build(loss_bbox) + self.loss_cls = MODELS.build(loss_cls) + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + + assert down_conv_channels[-1] == shared_fc_channels[0] + + # init layers + part_channel_last = part_in_channels + part_conv = [] + for i, channel in enumerate(part_conv_channels): + part_conv.append( + make_sparse_convmodule( + part_channel_last, + channel, + 3, + padding=1, + norm_cfg=norm_cfg, + indice_key=f'rcnn_part{i}', + conv_type='SubMConv3d')) + part_channel_last = channel + self.part_conv = SparseSequential(*part_conv) + + seg_channel_last = seg_in_channels + seg_conv = [] + for i, channel in enumerate(seg_conv_channels): + seg_conv.append( + make_sparse_convmodule( + seg_channel_last, + channel, + 3, + padding=1, + norm_cfg=norm_cfg, + indice_key=f'rcnn_seg{i}', + conv_type='SubMConv3d')) + seg_channel_last = channel + self.seg_conv = SparseSequential(*seg_conv) + + self.conv_down = SparseSequential() + + merge_conv_channel_last = part_channel_last + seg_channel_last + merge_conv = [] + for i, channel in enumerate(merge_conv_channels): + merge_conv.append( + make_sparse_convmodule( + merge_conv_channel_last, + channel, + 3, + padding=1, + norm_cfg=norm_cfg, + indice_key='rcnn_down0')) + merge_conv_channel_last = channel + + down_conv_channel_last = merge_conv_channel_last + conv_down = [] + for i, channel in 
enumerate(down_conv_channels): + conv_down.append( + make_sparse_convmodule( + down_conv_channel_last, + channel, + 3, + padding=1, + norm_cfg=norm_cfg, + indice_key='rcnn_down1')) + down_conv_channel_last = channel + + self.conv_down.add_module('merge_conv', SparseSequential(*merge_conv)) + self.conv_down.add_module('max_pool3d', + SparseMaxPool3d(kernel_size=2, stride=2)) + self.conv_down.add_module('down_conv', SparseSequential(*conv_down)) + + shared_fc_list = [] + pool_size = roi_feat_size // 2 + pre_channel = shared_fc_channels[0] * pool_size**3 + for k in range(1, len(shared_fc_channels)): + shared_fc_list.append( + ConvModule( + pre_channel, + shared_fc_channels[k], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + inplace=True)) + pre_channel = shared_fc_channels[k] + + if k != len(shared_fc_channels) - 1 and dropout_ratio > 0: + shared_fc_list.append(nn.Dropout(dropout_ratio)) + + self.shared_fc = nn.Sequential(*shared_fc_list) + + # Classification layer + channel_in = shared_fc_channels[-1] + cls_channel = 1 + cls_layers = [] + pre_channel = channel_in + for k in range(0, len(cls_channels)): + cls_layers.append( + ConvModule( + pre_channel, + cls_channels[k], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + inplace=True)) + pre_channel = cls_channels[k] + cls_layers.append( + ConvModule( + pre_channel, + cls_channel, + 1, + padding=0, + conv_cfg=conv_cfg, + act_cfg=None)) + if dropout_ratio >= 0: + cls_layers.insert(1, nn.Dropout(dropout_ratio)) + + self.conv_cls = nn.Sequential(*cls_layers) + + # Regression layer + reg_layers = [] + pre_channel = channel_in + for k in range(0, len(reg_channels)): + reg_layers.append( + ConvModule( + pre_channel, + reg_channels[k], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + inplace=True)) + pre_channel = reg_channels[k] + reg_layers.append( + ConvModule( + pre_channel, + self.bbox_coder.code_size, + 1, + padding=0, + conv_cfg=conv_cfg, + act_cfg=None)) + if dropout_ratio >= 0: + reg_layers.insert(1, nn.Dropout(dropout_ratio)) + + self.conv_reg = nn.Sequential(*reg_layers) + + if init_cfg is None: + self.init_cfg = dict( + type='Xavier', + layer=['Conv2d', 'Conv1d'], + distribution='uniform') + + def init_weights(self): + super().init_weights() + normal_init(self.conv_reg[-1].conv, mean=0, std=0.001) + + def forward(self, seg_feats: Tensor, part_feats: Tensor) -> Tuple[Tensor]: + """Forward pass. + + Args: + seg_feats (torch.Tensor): Point-wise semantic features. + part_feats (torch.Tensor): Point-wise part prediction features. + + Returns: + tuple[torch.Tensor]: Score of class and bbox predictions. 
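+
+        Note:
+            Both inputs are dense pooled RoI grids of shape
+            (num_rois, out_x, out_y, out_z, C); only their non-empty voxels
+            are packed into sparse tensors before the sparse convolutions.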
+ """ + # (B * N, out_x, out_y, out_z, 4) + rcnn_batch_size = part_feats.shape[0] + + # transform to sparse tensors + sparse_shape = part_feats.shape[1:4] + # (non_empty_num, 4) ==> [bs_idx, x_idx, y_idx, z_idx] + sparse_idx = part_feats.sum(dim=-1).nonzero(as_tuple=False) + + part_features = part_feats[sparse_idx[:, 0], sparse_idx[:, 1], + sparse_idx[:, 2], sparse_idx[:, 3]] + seg_features = seg_feats[sparse_idx[:, 0], sparse_idx[:, 1], + sparse_idx[:, 2], sparse_idx[:, 3]] + coords = sparse_idx.int().contiguous() + part_features = SparseConvTensor(part_features, coords, sparse_shape, + rcnn_batch_size) + seg_features = SparseConvTensor(seg_features, coords, sparse_shape, + rcnn_batch_size) + + # forward rcnn network + x_part = self.part_conv(part_features) + x_rpn = self.seg_conv(seg_features) + + merged_feature = torch.cat((x_rpn.features, x_part.features), + dim=1) # (N, C) + shared_feature = SparseConvTensor(merged_feature, coords, sparse_shape, + rcnn_batch_size) + + x = self.conv_down(shared_feature) + + shared_feature = x.dense().view(rcnn_batch_size, -1, 1) + + shared_feature = self.shared_fc(shared_feature) + + cls_score = self.conv_cls(shared_feature).transpose( + 1, 2).contiguous().squeeze(dim=1) # (B, 1) + bbox_pred = self.conv_reg(shared_feature).transpose( + 1, 2).contiguous().squeeze(dim=1) # (B, C) + + return cls_score, bbox_pred + + def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor, + labels: Tensor, bbox_targets: Tensor, pos_gt_bboxes: Tensor, + reg_mask: Tensor, label_weights: Tensor, + bbox_weights: Tensor) -> Dict: + """Computing losses. + + Args: + cls_score (torch.Tensor): Scores of each roi. + bbox_pred (torch.Tensor): Predictions of bboxes. + rois (torch.Tensor): Roi bboxes. + labels (torch.Tensor): Labels of class. + bbox_targets (torch.Tensor): Target of positive bboxes. + pos_gt_bboxes (torch.Tensor): Ground truths of positive bboxes. + reg_mask (torch.Tensor): Mask for positive bboxes. + label_weights (torch.Tensor): Weights of class loss. + bbox_weights (torch.Tensor): Weights of bbox loss. + + Returns: + dict: Computed losses. + + - loss_cls (torch.Tensor): Loss of classes. + - loss_bbox (torch.Tensor): Loss of bboxes. + - loss_corner (torch.Tensor): Loss of corners. 
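+
+        Note:
+            ``loss_corner`` is returned only when ``with_corner_loss`` is
+            True. If there is no positive RoI in the batch, the box (and
+            corner) losses fall back to zero-valued placeholders.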
+ """ + losses = dict() + rcnn_batch_size = cls_score.shape[0] + + # calculate class loss + cls_flat = cls_score.view(-1) + loss_cls = self.loss_cls(cls_flat, labels, label_weights) + losses['loss_cls'] = loss_cls + + # calculate regression loss + code_size = self.bbox_coder.code_size + pos_inds = (reg_mask > 0) + if pos_inds.any() == 0: + # fake a part loss + losses['loss_bbox'] = loss_cls.new_tensor(0) * loss_cls.sum() + if self.with_corner_loss: + losses['loss_corner'] = loss_cls.new_tensor(0) * loss_cls.sum() + else: + pos_bbox_pred = bbox_pred.view(rcnn_batch_size, -1)[pos_inds] + bbox_weights_flat = bbox_weights[pos_inds].view(-1, 1).repeat( + 1, pos_bbox_pred.shape[-1]) + loss_bbox = self.loss_bbox( + pos_bbox_pred.unsqueeze(dim=0), bbox_targets.unsqueeze(dim=0), + bbox_weights_flat.unsqueeze(dim=0)) + losses['loss_bbox'] = loss_bbox + + if self.with_corner_loss: + pos_roi_boxes3d = rois[..., 1:].view(-1, code_size)[pos_inds] + pos_roi_boxes3d = pos_roi_boxes3d.view(-1, code_size) + batch_anchors = pos_roi_boxes3d.clone().detach() + pos_rois_rotation = pos_roi_boxes3d[..., 6].view(-1) + roi_xyz = pos_roi_boxes3d[..., 0:3].view(-1, 3) + batch_anchors[..., 0:3] = 0 + # decode boxes + pred_boxes3d = self.bbox_coder.decode( + batch_anchors, + pos_bbox_pred.view(-1, code_size)).view(-1, code_size) + + pred_boxes3d[..., 0:3] = rotation_3d_in_axis( + pred_boxes3d[..., 0:3].unsqueeze(1), + pos_rois_rotation, + axis=2).squeeze(1) + + pred_boxes3d[:, 0:3] += roi_xyz + + # calculate corner loss + loss_corner = self.get_corner_loss_lidar( + pred_boxes3d, pos_gt_bboxes) + losses['loss_corner'] = loss_corner + + return losses + + def get_targets(self, + sampling_results: SamplingResultList, + rcnn_train_cfg: dict, + concat: bool = True) -> Tuple[Tensor]: + """Generate targets. + + Args: + sampling_results (list[:obj:`SamplingResult`]): + Sampled results from rois. + rcnn_train_cfg (:obj:`ConfigDict`): Training config of rcnn. + concat (bool): Whether to concatenate targets between batches. + + Returns: + tuple[torch.Tensor]: Targets of boxes and class prediction. + """ + pos_bboxes_list = [res.pos_bboxes for res in sampling_results] + pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] + iou_list = [res.iou for res in sampling_results] + targets = multi_apply( + self._get_target_single, + pos_bboxes_list, + pos_gt_bboxes_list, + iou_list, + cfg=rcnn_train_cfg) + + (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) = targets + + if concat: + label = torch.cat(label, 0) + bbox_targets = torch.cat(bbox_targets, 0) + pos_gt_bboxes = torch.cat(pos_gt_bboxes, 0) + reg_mask = torch.cat(reg_mask, 0) + + label_weights = torch.cat(label_weights, 0) + label_weights /= torch.clamp(label_weights.sum(), min=1.0) + + bbox_weights = torch.cat(bbox_weights, 0) + bbox_weights /= torch.clamp(bbox_weights.sum(), min=1.0) + + return (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + + def _get_target_single(self, pos_bboxes: Tensor, pos_gt_bboxes: Tensor, + ious: Tensor, cfg: dict) -> Tuple[Tensor]: + """Generate training targets for a single sample. + + Args: + pos_bboxes (torch.Tensor): Positive boxes with shape + (N, 7). + pos_gt_bboxes (torch.Tensor): Ground truth boxes with shape + (M, 7). + ious (torch.Tensor): IoU between `pos_bboxes` and `pos_gt_bboxes` + in shape (N, M). + cfg (dict): Training configs. + + Returns: + tuple[torch.Tensor]: Target for positive boxes. 
+ (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + """ + cls_pos_mask = ious > cfg.cls_pos_thr + cls_neg_mask = ious < cfg.cls_neg_thr + interval_mask = (cls_pos_mask == 0) & (cls_neg_mask == 0) + + # iou regression target + label = (cls_pos_mask > 0).float() + label[interval_mask] = ious[interval_mask] * 2 - 0.5 + # label weights + label_weights = (label >= 0).float() + + # box regression target + reg_mask = pos_bboxes.new_zeros(ious.size(0)).long() + reg_mask[0:pos_gt_bboxes.size(0)] = 1 + bbox_weights = (reg_mask > 0).float() + if reg_mask.bool().any(): + pos_gt_bboxes_ct = pos_gt_bboxes.clone().detach() + roi_center = pos_bboxes[..., 0:3] + roi_ry = pos_bboxes[..., 6] % (2 * np.pi) + + # canonical transformation + pos_gt_bboxes_ct[..., 0:3] -= roi_center + pos_gt_bboxes_ct[..., 6] -= roi_ry + pos_gt_bboxes_ct[..., 0:3] = rotation_3d_in_axis( + pos_gt_bboxes_ct[..., 0:3].unsqueeze(1), -roi_ry, + axis=2).squeeze(1) + + # flip orientation if rois have opposite orientation + ry_label = pos_gt_bboxes_ct[..., 6] % (2 * np.pi) # 0 ~ 2pi + opposite_flag = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5) + ry_label[opposite_flag] = (ry_label[opposite_flag] + np.pi) % ( + 2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi) + flag = ry_label > np.pi + ry_label[flag] = ry_label[flag] - np.pi * 2 # (-pi/2, pi/2) + ry_label = torch.clamp(ry_label, min=-np.pi / 2, max=np.pi / 2) + pos_gt_bboxes_ct[..., 6] = ry_label + + rois_anchor = pos_bboxes.clone().detach() + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + bbox_targets = self.bbox_coder.encode(rois_anchor, + pos_gt_bboxes_ct) + else: + # no fg bbox + bbox_targets = pos_gt_bboxes.new_empty((0, 7)) + + return (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + + def get_corner_loss_lidar(self, + pred_bbox3d: Tensor, + gt_bbox3d: Tensor, + delta: float = 1.0) -> Tensor: + """Calculate corner loss of given boxes. + + Args: + pred_bbox3d (torch.FloatTensor): Predicted boxes in shape (N, 7). + gt_bbox3d (torch.FloatTensor): Ground truth boxes in shape (N, 7). + delta (float, optional): huber loss threshold. Defaults to 1.0 + + Returns: + torch.FloatTensor: Calculated corner loss in shape (N). + """ + assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0] + + # This is a little bit hack here because we assume the box for + # Part-A2 is in LiDAR coordinates + gt_boxes_structure = LiDARInstance3DBoxes(gt_bbox3d) + pred_box_corners = LiDARInstance3DBoxes(pred_bbox3d).corners + gt_box_corners = gt_boxes_structure.corners + + # This flip only changes the heading direction of GT boxes + gt_bbox3d_flip = gt_boxes_structure.clone() + gt_bbox3d_flip.tensor[:, 6] += np.pi + gt_box_corners_flip = gt_bbox3d_flip.corners + + corner_dist = torch.min( + torch.norm(pred_box_corners - gt_box_corners, dim=2), + torch.norm(pred_box_corners - gt_box_corners_flip, + dim=2)) # (N, 8) + # huber loss + abs_error = corner_dist.abs() + quadratic = abs_error.clamp(max=delta) + linear = (abs_error - quadratic) + corner_loss = 0.5 * quadratic**2 + delta * linear + + return corner_loss.mean(dim=1) + + def get_results(self, + rois: Tensor, + cls_score: Tensor, + bbox_pred: Tensor, + class_labels: Tensor, + class_pred: Tensor, + input_metas: List[dict], + cfg: dict = None) -> InstanceList: + """Generate bboxes from bbox head predictions. + + Args: + rois (torch.Tensor): Roi bounding boxes. + cls_score (torch.Tensor): Scores of bounding boxes. 
+ bbox_pred (torch.Tensor): Bounding boxes predictions + class_labels (torch.Tensor): Label of classes + class_pred (torch.Tensor): Score for nms. + input_metas (list[dict]): Point cloud and image's meta info. + cfg (:obj:`ConfigDict`): Testing config. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + roi_batch_id = rois[..., 0] + roi_boxes = rois[..., 1:] # boxes without batch id + batch_size = int(roi_batch_id.max().item() + 1) + + # decode boxes + roi_ry = roi_boxes[..., 6].view(-1) + roi_xyz = roi_boxes[..., 0:3].view(-1, 3) + local_roi_boxes = roi_boxes.clone().detach() + local_roi_boxes[..., 0:3] = 0 + rcnn_boxes3d = self.bbox_coder.decode(local_roi_boxes, bbox_pred) + rcnn_boxes3d[..., 0:3] = rotation_3d_in_axis( + rcnn_boxes3d[..., 0:3].unsqueeze(1), roi_ry, axis=2).squeeze(1) + rcnn_boxes3d[:, 0:3] += roi_xyz + + # post processing + result_list = [] + for batch_id in range(batch_size): + cur_class_labels = class_labels[batch_id] + cur_cls_score = cls_score[roi_batch_id == batch_id].view(-1) + + cur_box_prob = class_pred[batch_id] + cur_rcnn_boxes3d = rcnn_boxes3d[roi_batch_id == batch_id] + keep = self.multi_class_nms(cur_box_prob, cur_rcnn_boxes3d, + cfg.score_thr, cfg.nms_thr, + input_metas[batch_id], + cfg.use_rotate_nms) + selected_bboxes = cur_rcnn_boxes3d[keep] + selected_label_preds = cur_class_labels[keep] + selected_scores = cur_cls_score[keep] + + results = InstanceData() + results.bboxes_3d = input_metas[batch_id]['box_type_3d']( + selected_bboxes, self.bbox_coder.code_size) + results.scores_3d = selected_scores + results.labels_3d = selected_label_preds + + result_list.append(results) + return result_list + + def multi_class_nms(self, + box_probs: Tensor, + box_preds: Tensor, + score_thr: float, + nms_thr: float, + input_meta: dict, + use_rotate_nms: bool = True) -> Tensor: + """Multi-class NMS for box head. + + Note: + This function has large overlap with the `box3d_multiclass_nms` + implemented in `mmdet3d.core.post_processing`. We are considering + merging these two functions in the future. + + Args: + box_probs (torch.Tensor): Predicted boxes probabitilies in + shape (N,). + box_preds (torch.Tensor): Predicted boxes in shape (N, 7+C). + score_thr (float): Threshold of scores. + nms_thr (float): Threshold for NMS. + input_meta (dict): Meta information of the current sample. + use_rotate_nms (bool, optional): Whether to use rotated nms. + Defaults to True. + + Returns: + torch.Tensor: Selected indices. 
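+
+        The per-class handling can be sketched as follows (illustrative
+        only, not part of this patch; ``nms_2d`` is a hypothetical
+        stand-in for the rotated BEV NMS used by this head)::
+
+            import torch
+
+            def per_class_select(box_probs, boxes_bev, score_thr, nms_thr,
+                                 num_classes, nms_2d):
+                # Broadcast scalar thresholds to one value per class.
+                if not isinstance(score_thr, list):
+                    score_thr = [score_thr] * num_classes
+                if not isinstance(nms_thr, list):
+                    nms_thr = [nms_thr] * num_classes
+                keep = []
+                for k in range(num_classes):
+                    mask = box_probs[:, k] >= score_thr[k]
+                    if mask.sum() == 0:
+                        continue
+                    idxs = mask.nonzero(as_tuple=False).view(-1)
+                    kept = nms_2d(boxes_bev[mask], box_probs[mask, k],
+                                  nms_thr[k])
+                    keep.append(idxs[kept])
+                return (torch.cat(keep) if keep
+                        else box_probs.new_empty(0, dtype=torch.long))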
+ """ + if use_rotate_nms: + nms_func = nms_bev + else: + nms_func = nms_normal_bev + + assert box_probs.shape[ + 1] == self.num_classes, f'box_probs shape: {str(box_probs.shape)}' + selected_list = [] + selected_labels = [] + boxes_for_nms = xywhr2xyxyr(input_meta['box_type_3d']( + box_preds, self.bbox_coder.code_size).bev) + + score_thresh = score_thr if isinstance( + score_thr, list) else [score_thr for x in range(self.num_classes)] + nms_thresh = nms_thr if isinstance( + nms_thr, list) else [nms_thr for x in range(self.num_classes)] + for k in range(0, self.num_classes): + class_scores_keep = box_probs[:, k] >= score_thresh[k] + + if class_scores_keep.int().sum() > 0: + original_idxs = class_scores_keep.nonzero( + as_tuple=False).view(-1) + cur_boxes_for_nms = boxes_for_nms[class_scores_keep] + cur_rank_scores = box_probs[class_scores_keep, k] + + cur_selected = nms_func(cur_boxes_for_nms, cur_rank_scores, + nms_thresh[k]) + + if cur_selected.shape[0] == 0: + continue + selected_list.append(original_idxs[cur_selected]) + selected_labels.append( + torch.full([cur_selected.shape[0]], + k + 1, + dtype=torch.int64, + device=box_preds.device)) + + keep = torch.cat( + selected_list, dim=0) if len(selected_list) > 0 else [] + return keep diff --git a/mmdet3d/models/roi_heads/bbox_heads/point_rcnn_bbox_head.py b/mmdet3d/models/roi_heads/bbox_heads/point_rcnn_bbox_head.py new file mode 100755 index 0000000..ef80e17 --- /dev/null +++ b/mmdet3d/models/roi_heads/bbox_heads/point_rcnn_bbox_head.py @@ -0,0 +1,604 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks import build_conv_layer +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule, normal_init +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.models.layers import nms_bev, nms_normal_bev +from mmdet3d.models.layers.pointnet_modules import build_sa_module +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures.bbox_3d import (LiDARInstance3DBoxes, + rotation_3d_in_axis, xywhr2xyxyr) +from mmdet3d.utils.typing_utils import InstanceList, SamplingResultList + + +@MODELS.register_module() +class PointRCNNBboxHead(BaseModule): + """PointRCNN RoI Bbox head. + + Args: + num_classes (int): The number of classes to prediction. + in_channels (int): Input channels of point features. + mlp_channels (list[int]): the number of mlp channels + pred_layer_cfg (dict, optional): Config of classfication and + regression prediction layers. Defaults to None. + num_points (tuple): The number of points which each SA + module samples. Defaults to (128, 32, -1). + radius (tuple): Sampling radius of each SA module. + Defaults to (0.2, 0.4, 100). + num_samples (tuple): The number of samples for ball query + in each SA module. Defaults to (64, 64, 64). + sa_channels (tuple): Out channels of each mlp in SA module. + Defaults to ((128, 128, 128), (128, 128, 256), (256, 256, 512)). + bbox_coder (dict): Config dict of box coders. + Defaults to dict(type='DeltaXYZWLHRBBoxCoder'). + sa_cfg (dict): Config of set abstraction module, which may + contain the following keys and values: + + - pool_mod (str): Pool method ('max' or 'avg') for SA modules. + - use_xyz (bool): Whether to use xyz as a part of features. + - normalize_xyz (bool): Whether to normalize xyz with radii in + each SA module. 
+ Defaults to dict(type='PointSAModule', pool_mod='max', + use_xyz=True). + conv_cfg (dict): Config dict of convolutional layers. + Defaults to dict(type='Conv1d'). + norm_cfg (dict): Config dict of normalization layers. + Defaults to dict(type='BN1d'). + act_cfg (dict): Config dict of activation layers. + Defaults to dict(type='ReLU'). + bias (str): Type of bias. Defaults to 'auto'. + loss_bbox (dict): Config of regression loss function. + Defaults to dict(type='SmoothL1Loss', beta=1.0 / 9.0, + reduction='sum', loss_weight=1.0). + loss_cls (dict): Config of classification loss function. + Defaults to dict(type='CrossEntropyLoss', use_sigmoid=True, + reduction='sum', loss_weight=1.0). + with_corner_loss (bool): Whether using corner loss. + Defaults to True. + init_cfg (dict, optional): Config of initialization. Defaults to None. + """ + + def __init__(self, + num_classes: dict, + in_channels: dict, + mlp_channels: dict, + pred_layer_cfg: Optional[dict] = None, + num_points: dict = (128, 32, -1), + radius: dict = (0.2, 0.4, 100), + num_samples: dict = (64, 64, 64), + sa_channels: dict = ((128, 128, 128), (128, 128, 256), + (256, 256, 512)), + bbox_coder: dict = dict(type='DeltaXYZWLHRBBoxCoder'), + sa_cfg: dict = dict( + type='PointSAModule', pool_mod='max', use_xyz=True), + conv_cfg: dict = dict(type='Conv1d'), + norm_cfg: dict = dict(type='BN1d'), + act_cfg: dict = dict(type='ReLU'), + bias: str = 'auto', + loss_bbox: dict = dict( + type='SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls: dict = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + with_corner_loss: bool = True, + init_cfg: Optional[dict] = None) -> None: + super(PointRCNNBboxHead, self).__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.num_sa = len(sa_channels) + self.with_corner_loss = with_corner_loss + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.bias = bias + + self.loss_bbox = MODELS.build(loss_bbox) + self.loss_cls = MODELS.build(loss_cls) + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + + self.in_channels = in_channels + mlp_channels = [self.in_channels] + mlp_channels + shared_mlps = nn.Sequential() + for i in range(len(mlp_channels) - 1): + shared_mlps.add_module( + f'layer{i}', + ConvModule( + mlp_channels[i], + mlp_channels[i + 1], + kernel_size=(1, 1), + stride=(1, 1), + inplace=False, + conv_cfg=dict(type='Conv2d'))) + self.xyz_up_layer = nn.Sequential(*shared_mlps) + + c_out = mlp_channels[-1] + self.merge_down_layer = ConvModule( + c_out * 2, + c_out, + kernel_size=(1, 1), + stride=(1, 1), + inplace=False, + conv_cfg=dict(type='Conv2d')) + + pre_channels = c_out + + self.SA_modules = nn.ModuleList() + sa_in_channel = pre_channels + + for sa_index in range(self.num_sa): + cur_sa_mlps = list(sa_channels[sa_index]) + cur_sa_mlps = [sa_in_channel] + cur_sa_mlps + sa_out_channel = cur_sa_mlps[-1] + + cur_num_points = num_points[sa_index] + if cur_num_points <= 0: + cur_num_points = None + self.SA_modules.append( + build_sa_module( + num_point=cur_num_points, + radius=radius[sa_index], + num_sample=num_samples[sa_index], + mlp_channels=cur_sa_mlps, + cfg=sa_cfg)) + sa_in_channel = sa_out_channel + self.cls_convs = self._add_conv_branch( + pred_layer_cfg.in_channels, pred_layer_cfg.cls_conv_channels) + self.reg_convs = self._add_conv_branch( + pred_layer_cfg.in_channels, pred_layer_cfg.reg_conv_channels) + + prev_channel = 
pred_layer_cfg.cls_conv_channels[-1] + self.conv_cls = build_conv_layer( + self.conv_cfg, + in_channels=prev_channel, + out_channels=self.num_classes, + kernel_size=1) + prev_channel = pred_layer_cfg.reg_conv_channels[-1] + self.conv_reg = build_conv_layer( + self.conv_cfg, + in_channels=prev_channel, + out_channels=self.bbox_coder.code_size * self.num_classes, + kernel_size=1) + + if init_cfg is None: + self.init_cfg = dict(type='Xavier', layer=['Conv2d', 'Conv1d']) + + def _add_conv_branch(self, in_channels: int, + conv_channels: tuple) -> nn.Sequential: + """Add shared or separable branch. + + Args: + in_channels (int): Input feature channel. + conv_channels (tuple): Middle feature channels. + """ + conv_spec = [in_channels] + list(conv_channels) + # add branch specific conv layers + conv_layers = nn.Sequential() + for i in range(len(conv_spec) - 1): + conv_layers.add_module( + f'layer{i}', + ConvModule( + conv_spec[i], + conv_spec[i + 1], + kernel_size=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.bias, + inplace=True)) + return conv_layers + + def init_weights(self): + """Initialize weights of the head.""" + super().init_weights() + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): + if m.bias is not None: + nn.init.constant_(m.bias, 0) + normal_init(self.conv_reg.weight, mean=0, std=0.001) + + def forward(self, feats: Tensor) -> Tuple[Tensor]: + """Forward pass. + + Args: + feats (torch.Torch): Features from RCNN modules. + + Returns: + tuple[torch.Tensor]: Score of class and bbox predictions. + """ + input_data = feats.clone().detach() + xyz_input = input_data[..., 0:self.in_channels].transpose( + 1, 2).unsqueeze(dim=3).contiguous().clone().detach() + xyz_features = self.xyz_up_layer(xyz_input) + rpn_features = input_data[..., self.in_channels:].transpose( + 1, 2).unsqueeze(dim=3) + merged_features = torch.cat((xyz_features, rpn_features), dim=1) + merged_features = self.merge_down_layer(merged_features) + l_xyz, l_features = [input_data[..., 0:3].contiguous()], \ + [merged_features.squeeze(dim=3)] + for i in range(len(self.SA_modules)): + li_xyz, li_features, cur_indices = \ + self.SA_modules[i](l_xyz[i], l_features[i]) + l_xyz.append(li_xyz) + l_features.append(li_features) + + shared_features = l_features[-1] + x_cls = shared_features + x_reg = shared_features + x_cls = self.cls_convs(x_cls) + rcnn_cls = self.conv_cls(x_cls) + x_reg = self.reg_convs(x_reg) + rcnn_reg = self.conv_reg(x_reg) + rcnn_cls = rcnn_cls.transpose(1, 2).contiguous().squeeze(dim=1) + rcnn_reg = rcnn_reg.transpose(1, 2).contiguous().squeeze(dim=1) + return rcnn_cls, rcnn_reg + + def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor, + labels: Tensor, bbox_targets: Tensor, pos_gt_bboxes: Tensor, + reg_mask: Tensor, label_weights: Tensor, + bbox_weights: Tensor) -> Dict: + """Computing losses. + + Args: + cls_score (torch.Tensor): Scores of each RoI. + bbox_pred (torch.Tensor): Predictions of bboxes. + rois (torch.Tensor): RoI bboxes. + labels (torch.Tensor): Labels of class. + bbox_targets (torch.Tensor): Target of positive bboxes. + pos_gt_bboxes (torch.Tensor): Ground truths of positive bboxes. + reg_mask (torch.Tensor): Mask for positive bboxes. + label_weights (torch.Tensor): Weights of class loss. + bbox_weights (torch.Tensor): Weights of bbox loss. + + Returns: + dict: Computed losses. + + - loss_cls (torch.Tensor): Loss of classes. + - loss_bbox (torch.Tensor): Loss of bboxes. 
+ - loss_corner (torch.Tensor): Loss of corners. + """ + losses = dict() + rcnn_batch_size = cls_score.shape[0] + # calculate class loss + cls_flat = cls_score.view(-1) + loss_cls = self.loss_cls(cls_flat, labels, label_weights) + losses['loss_cls'] = loss_cls + + # calculate regression loss + code_size = self.bbox_coder.code_size + pos_inds = (reg_mask > 0) + + pos_bbox_pred = bbox_pred.view(rcnn_batch_size, -1)[pos_inds].clone() + bbox_weights_flat = bbox_weights[pos_inds].view(-1, 1).repeat( + 1, pos_bbox_pred.shape[-1]) + loss_bbox = self.loss_bbox( + pos_bbox_pred.unsqueeze(dim=0), + bbox_targets.unsqueeze(dim=0).detach(), + bbox_weights_flat.unsqueeze(dim=0)) + losses['loss_bbox'] = loss_bbox + + if pos_inds.any() != 0 and self.with_corner_loss: + rois = rois.detach() + pos_roi_boxes3d = rois[..., 1:].view(-1, code_size)[pos_inds] + pos_roi_boxes3d = pos_roi_boxes3d.view(-1, code_size) + batch_anchors = pos_roi_boxes3d.clone().detach() + pos_rois_rotation = pos_roi_boxes3d[..., 6].view(-1) + roi_xyz = pos_roi_boxes3d[..., 0:3].view(-1, 3) + batch_anchors[..., 0:3] = 0 + # decode boxes + pred_boxes3d = self.bbox_coder.decode( + batch_anchors, + pos_bbox_pred.view(-1, code_size)).view(-1, code_size) + + pred_boxes3d[..., 0:3] = rotation_3d_in_axis( + pred_boxes3d[..., 0:3].unsqueeze(1), (pos_rois_rotation), + axis=2).squeeze(1) + + pred_boxes3d[:, 0:3] += roi_xyz + + # calculate corner loss + loss_corner = self.get_corner_loss_lidar(pred_boxes3d, + pos_gt_bboxes).mean() + + losses['loss_corner'] = loss_corner + else: + losses['loss_corner'] = loss_cls.new_tensor(0) * loss_cls.sum() + return losses + + def get_corner_loss_lidar(self, + pred_bbox3d: Tensor, + gt_bbox3d: Tensor, + delta: float = 1.0) -> Tensor: + """Calculate corner loss of given boxes. + + Args: + pred_bbox3d (torch.FloatTensor): Predicted boxes in shape (N, 7). + gt_bbox3d (torch.FloatTensor): Ground truth boxes in shape (N, 7). + delta (float, optional): huber loss threshold. Defaults to 1.0 + + Returns: + torch.FloatTensor: Calculated corner loss in shape (N). + """ + assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0] + + # This is a little bit hack here because we assume the box for + # PointRCNN is in LiDAR coordinates + + gt_boxes_structure = LiDARInstance3DBoxes(gt_bbox3d) + pred_box_corners = LiDARInstance3DBoxes(pred_bbox3d).corners + gt_box_corners = gt_boxes_structure.corners + + # This flip only changes the heading direction of GT boxes + gt_bbox3d_flip = gt_boxes_structure.clone() + gt_bbox3d_flip.tensor[:, 6] += np.pi + gt_box_corners_flip = gt_bbox3d_flip.corners + + corner_dist = torch.min( + torch.norm(pred_box_corners - gt_box_corners, dim=2), + torch.norm(pred_box_corners - gt_box_corners_flip, dim=2)) + # huber loss + abs_error = corner_dist.abs() + # quadratic = abs_error.clamp(max=delta) + # linear = (abs_error - quadratic) + # corner_loss = 0.5 * quadratic**2 + delta * linear + loss = torch.where(abs_error < delta, 0.5 * abs_error**2 / delta, + abs_error - 0.5 * delta) + return loss.mean(dim=1) + + def get_targets(self, + sampling_results: SamplingResultList, + rcnn_train_cfg: dict, + concat: bool = True) -> Tuple[Tensor]: + """Generate targets. + + Args: + sampling_results (list[:obj:`SamplingResult`]): + Sampled results from rois. + rcnn_train_cfg (:obj:`ConfigDict`): Training config of rcnn. + concat (bool): Whether to concatenate targets between + batches. Defaults to True. + + Returns: + tuple[torch.Tensor]: Targets of boxes and class prediction. 
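+
+        The concatenation and weight normalisation can be illustrated with
+        dummy tensors (a minimal sketch; shapes are made up)::
+
+            import torch
+
+            # Two samples with 3 and 2 sampled RoIs respectively.
+            label = [torch.rand(3), torch.rand(2)]
+            label_weights = [torch.ones(3), torch.ones(2)]
+
+            label = torch.cat(label, 0)                  # shape (5,)
+            label_weights = torch.cat(label_weights, 0)
+            # With reduction='sum' losses, dividing by the weight sum turns
+            # the summed loss into a per-RoI average.
+            label_weights /= torch.clamp(label_weights.sum(), min=1.0)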
+ """ + pos_bboxes_list = [res.pos_bboxes for res in sampling_results] + pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] + iou_list = [res.iou for res in sampling_results] + targets = multi_apply( + self._get_target_single, + pos_bboxes_list, + pos_gt_bboxes_list, + iou_list, + cfg=rcnn_train_cfg) + (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) = targets + + if concat: + label = torch.cat(label, 0) + bbox_targets = torch.cat(bbox_targets, 0) + pos_gt_bboxes = torch.cat(pos_gt_bboxes, 0) + reg_mask = torch.cat(reg_mask, 0) + + label_weights = torch.cat(label_weights, 0) + label_weights /= torch.clamp(label_weights.sum(), min=1.0) + + bbox_weights = torch.cat(bbox_weights, 0) + bbox_weights /= torch.clamp(bbox_weights.sum(), min=1.0) + + return (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + + def _get_target_single(self, pos_bboxes: Tensor, pos_gt_bboxes: Tensor, + ious: Tensor, cfg: dict) -> Tuple[Tensor]: + """Generate training targets for a single sample. + + Args: + pos_bboxes (torch.Tensor): Positive boxes with shape + (N, 7). + pos_gt_bboxes (torch.Tensor): Ground truth boxes with shape + (M, 7). + ious (torch.Tensor): IoU between `pos_bboxes` and `pos_gt_bboxes` + in shape (N, M). + cfg (dict): Training configs. + + Returns: + tuple[torch.Tensor]: Target for positive boxes. + (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + """ + cls_pos_mask = ious > cfg.cls_pos_thr + cls_neg_mask = ious < cfg.cls_neg_thr + interval_mask = (cls_pos_mask == 0) & (cls_neg_mask == 0) + # iou regression target + label = (cls_pos_mask > 0).float() + label[interval_mask] = (ious[interval_mask] - cfg.cls_neg_thr) / \ + (cfg.cls_pos_thr - cfg.cls_neg_thr) + # label weights + label_weights = (label >= 0).float() + # box regression target + reg_mask = pos_bboxes.new_zeros(ious.size(0)).long() + reg_mask[0:pos_gt_bboxes.size(0)] = 1 + bbox_weights = (reg_mask > 0).float() + if reg_mask.bool().any(): + pos_gt_bboxes_ct = pos_gt_bboxes.clone().detach() + roi_center = pos_bboxes[..., 0:3] + roi_ry = pos_bboxes[..., 6] % (2 * np.pi) + + # canonical transformation + pos_gt_bboxes_ct[..., 0:3] -= roi_center + pos_gt_bboxes_ct[..., 6] -= roi_ry + pos_gt_bboxes_ct[..., 0:3] = rotation_3d_in_axis( + pos_gt_bboxes_ct[..., 0:3].unsqueeze(1), -(roi_ry), + axis=2).squeeze(1) + + # flip orientation if gt have opposite orientation + ry_label = pos_gt_bboxes_ct[..., 6] % (2 * np.pi) # 0 ~ 2pi + is_opposite = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5) + ry_label[is_opposite] = (ry_label[is_opposite] + np.pi) % ( + 2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi) + flag = ry_label > np.pi + ry_label[flag] = ry_label[flag] - np.pi * 2 # (-pi/2, pi/2) + ry_label = torch.clamp(ry_label, min=-np.pi / 2, max=np.pi / 2) + pos_gt_bboxes_ct[..., 6] = ry_label + + rois_anchor = pos_bboxes.clone().detach() + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + bbox_targets = self.bbox_coder.encode(rois_anchor, + pos_gt_bboxes_ct) + else: + # no fg bbox + bbox_targets = pos_gt_bboxes.new_empty((0, 7)) + + return (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + + def get_results(self, + rois: Tensor, + cls_score: Tensor, + bbox_pred: Tensor, + class_labels: Tensor, + input_metas: List[dict], + cfg: dict = None) -> InstanceList: + """Generate bboxes from bbox head predictions. + + Args: + rois (torch.Tensor): RoI bounding boxes. + cls_score (torch.Tensor): Scores of bounding boxes. 
+ bbox_pred (torch.Tensor): Bounding boxes predictions + class_labels (torch.Tensor): Label of classes + input_metas (list[dict]): Point cloud and image's meta info. + cfg (:obj:`ConfigDict`, optional): Testing config. + Defaults to None. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + roi_batch_id = rois[..., 0] + roi_boxes = rois[..., 1:] # boxes without batch id + batch_size = int(roi_batch_id.max().item() + 1) + + # decode boxes + roi_ry = roi_boxes[..., 6].view(-1) + roi_xyz = roi_boxes[..., 0:3].view(-1, 3) + local_roi_boxes = roi_boxes.clone().detach() + local_roi_boxes[..., 0:3] = 0 + rcnn_boxes3d = self.bbox_coder.decode(local_roi_boxes, bbox_pred) + rcnn_boxes3d[..., 0:3] = rotation_3d_in_axis( + rcnn_boxes3d[..., 0:3].unsqueeze(1), roi_ry, axis=2).squeeze(1) + rcnn_boxes3d[:, 0:3] += roi_xyz + + # post processing + result_list = [] + for batch_id in range(batch_size): + cur_class_labels = class_labels[batch_id] + cur_cls_score = cls_score[roi_batch_id == batch_id].view(-1) + + cur_box_prob = cur_cls_score.unsqueeze(1) + cur_rcnn_boxes3d = rcnn_boxes3d[roi_batch_id == batch_id] + keep = self.multi_class_nms(cur_box_prob, cur_rcnn_boxes3d, + cfg.score_thr, cfg.nms_thr, + input_metas[batch_id], + cfg.use_rotate_nms) + selected_bboxes = cur_rcnn_boxes3d[keep] + selected_label_preds = cur_class_labels[keep] + selected_scores = cur_cls_score[keep] + results = InstanceData() + results.bboxes_3d = input_metas[batch_id]['box_type_3d']( + selected_bboxes, selected_bboxes.shape[-1]) + results.scores_3d = selected_scores + results.labels_3d = selected_label_preds + + result_list.append(results) + return result_list + + def multi_class_nms(self, + box_probs: Tensor, + box_preds: Tensor, + score_thr: float, + nms_thr: float, + input_meta: dict, + use_rotate_nms: bool = True) -> Tensor: + """Multi-class NMS for box head. + + Note: + This function has large overlap with the `box3d_multiclass_nms` + implemented in `mmdet3d.core.post_processing`. We are considering + merging these two functions in the future. + + Args: + box_probs (torch.Tensor): Predicted boxes probabilities in + shape (N,). + box_preds (torch.Tensor): Predicted boxes in shape (N, 7+C). + score_thr (float): Threshold of scores. + nms_thr (float): Threshold for NMS. + input_meta (dict): Meta information of the current sample. + use_rotate_nms (bool): Whether to use rotated nms. + Defaults to True. + + Returns: + torch.Tensor: Selected indices. 
+ """ + if use_rotate_nms: + nms_func = nms_bev + else: + nms_func = nms_normal_bev + + assert box_probs.shape[ + 1] == self.num_classes, f'box_probs shape: {str(box_probs.shape)}' + selected_list = [] + selected_labels = [] + boxes_for_nms = xywhr2xyxyr(input_meta['box_type_3d']( + box_preds, self.bbox_coder.code_size).bev) + + score_thresh = score_thr if isinstance( + score_thr, list) else [score_thr for x in range(self.num_classes)] + nms_thresh = nms_thr if isinstance( + nms_thr, list) else [nms_thr for x in range(self.num_classes)] + for k in range(0, self.num_classes): + class_scores_keep = box_probs[:, k] >= score_thresh[k] + + if class_scores_keep.int().sum() > 0: + original_idxs = class_scores_keep.nonzero( + as_tuple=False).view(-1) + cur_boxes_for_nms = boxes_for_nms[class_scores_keep] + cur_rank_scores = box_probs[class_scores_keep, k] + + cur_selected = nms_func(cur_boxes_for_nms, cur_rank_scores, + nms_thresh[k]) + + if cur_selected.shape[0] == 0: + continue + selected_list.append(original_idxs[cur_selected]) + selected_labels.append( + torch.full([cur_selected.shape[0]], + k + 1, + dtype=torch.int64, + device=box_preds.device)) + + keep = torch.cat( + selected_list, dim=0) if len(selected_list) > 0 else [] + return keep diff --git a/mmdet3d/models/roi_heads/bbox_heads/pv_rcnn_bbox_head.py b/mmdet3d/models/roi_heads/bbox_heads/pv_rcnn_bbox_head.py new file mode 100755 index 0000000..abdaf79 --- /dev/null +++ b/mmdet3d/models/roi_heads/bbox_heads/pv_rcnn_bbox_head.py @@ -0,0 +1,509 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +from mmcv.cnn import ConvModule +from mmdet.models.task_modules.samplers import SamplingResult +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import nn as nn + +from mmdet3d.models.layers import nms_bev, nms_normal_bev +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures.bbox_3d import (LiDARInstance3DBoxes, + rotation_3d_in_axis, xywhr2xyxyr) +from mmdet3d.utils import InstanceList + + +@MODELS.register_module() +class PVRCNNBBoxHead(BaseModule): + """PVRCNN BBox head. + + Args: + in_channels (int): The number of input channel. + grid_size (int): The number of grid points in roi bbox. + num_classes (int): The number of classes. + class_agnostic (bool): Whether generate class agnostic prediction. + Defaults to True. + shared_fc_channels (tuple(int)): Out channels of each shared fc layer. + Defaults to (256, 256). + cls_channels (tuple(int)): Out channels of each classification layer. + Defaults to (256, 256). + reg_channels (tuple(int)): Out channels of each regression layer. + Defaults to (256, 256). + dropout_ratio (float): Ratio of dropout layer. Defaults to 0.5. + with_corner_loss (bool): Whether to use corner loss or not. + Defaults to True. + bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for box head. + Defaults to dict(type='DeltaXYZWLHRBBoxCoder'). + norm_cfg (dict): Type of normalization method. + Defaults to dict(type='BN1d', eps=1e-5, momentum=0.1) + loss_bbox (dict): Config dict of box regression loss. + loss_cls (dict): Config dict of classifacation loss. + init_cfg (dict, optional): Initialize config of + model. 
+ """ + + def __init__( + self, + in_channels: int, + grid_size: int, + num_classes: int, + class_agnostic: bool = True, + shared_fc_channels: Tuple[int] = (256, 256), + cls_channels: Tuple[int] = (256, 256), + reg_channels: Tuple[int] = (256, 256), + dropout_ratio: float = 0.3, + with_corner_loss: bool = True, + bbox_coder: dict = dict(type='DeltaXYZWLHRBBoxCoder'), + norm_cfg: dict = dict(type='BN2d', eps=1e-5, momentum=0.1), + loss_bbox: dict = dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_cls: dict = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none', + loss_weight=1.0), + init_cfg: Optional[dict] = dict( + type='Xavier', layer=['Conv2d', 'Conv1d'], distribution='uniform') + ) -> None: + super(PVRCNNBBoxHead, self).__init__(init_cfg=init_cfg) + self.init_cfg = init_cfg + self.num_classes = num_classes + self.with_corner_loss = with_corner_loss + self.class_agnostic = class_agnostic + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.loss_bbox = MODELS.build(loss_bbox) + self.loss_cls = MODELS.build(loss_cls) + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + + cls_out_channels = 1 if class_agnostic else num_classes + self.reg_out_channels = self.bbox_coder.code_size * cls_out_channels + if self.use_sigmoid_cls: + self.cls_out_channels = cls_out_channels + else: + self.cls_out_channels = cls_out_channels + 1 + + self.dropout_ratio = dropout_ratio + self.grid_size = grid_size + + # PVRCNNBBoxHead model in_channels is num of grid points in roi box. + in_channels *= (self.grid_size**3) + + self.in_channels = in_channels + + self.shared_fc_layer = self._make_fc_layers( + in_channels, shared_fc_channels, + range(len(shared_fc_channels) - 1), norm_cfg) + self.cls_layer = self._make_fc_layers( + shared_fc_channels[-1], + cls_channels, + range(1), + norm_cfg, + out_channels=self.cls_out_channels) + self.reg_layer = self._make_fc_layers( + shared_fc_channels[-1], + reg_channels, + range(1), + norm_cfg, + out_channels=self.reg_out_channels) + + def _make_fc_layers(self, + in_channels: int, + fc_channels: list, + dropout_indices: list, + norm_cfg: dict, + out_channels: Optional[int] = None) -> torch.nn.Module: + """Initial a full connection layer. + + Args: + in_channels (int): Module in channels. + fc_channels (list): Full connection layer channels. + dropout_indices (list): Dropout indices. + norm_cfg (dict): Type of normalization method. + out_channels (int, optional): Module out channels. + """ + fc_layers = [] + pre_channel = in_channels + for k in range(len(fc_channels)): + fc_layers.append( + ConvModule( + pre_channel, + fc_channels[k], + kernel_size=(1, 1), + stride=(1, 1), + norm_cfg=norm_cfg, + conv_cfg=dict(type='Conv2d'), + bias=False, + inplace=True)) + pre_channel = fc_channels[k] + if self.dropout_ratio >= 0 and k in dropout_indices: + fc_layers.append(nn.Dropout(self.dropout_ratio)) + if out_channels is not None: + fc_layers.append( + nn.Conv2d(fc_channels[-1], out_channels, 1, bias=True)) + fc_layers = nn.Sequential(*fc_layers) + return fc_layers + + def forward(self, feats: torch.Tensor) -> Tuple[torch.Tensor]: + """Forward pvrcnn bbox head. + + Args: + feats (torch.Tensor): Batch point-wise features. + + Returns: + tuple[torch.Tensor]: Score of class and bbox predictions. 
+ """ + # (B * N, 6, 6, 6, C) + rcnn_batch_size = feats.shape[0] + feats = feats.permute(0, 4, 1, 2, + 3).contiguous().view(rcnn_batch_size, -1, 1, 1) + # (BxN, C*6*6*6) + shared_feats = self.shared_fc_layer(feats) + cls_score = self.cls_layer(shared_feats).transpose( + 1, 2).contiguous().view(-1, self.cls_out_channels) # (B, 1) + bbox_pred = self.reg_layer(shared_feats).transpose( + 1, 2).contiguous().view(-1, self.reg_out_channels) # (B, C) + return cls_score, bbox_pred + + def loss(self, cls_score: torch.Tensor, bbox_pred: torch.Tensor, + rois: torch.Tensor, labels: torch.Tensor, + bbox_targets: torch.Tensor, pos_gt_bboxes: torch.Tensor, + reg_mask: torch.Tensor, label_weights: torch.Tensor, + bbox_weights: torch.Tensor) -> Dict: + """Coumputing losses. + + Args: + cls_score (torch.Tensor): Scores of each roi. + bbox_pred (torch.Tensor): Predictions of bboxes. + rois (torch.Tensor): Roi bboxes. + labels (torch.Tensor): Labels of class. + bbox_targets (torch.Tensor): Target of positive bboxes. + pos_gt_bboxes (torch.Tensor): Ground truths of positive bboxes. + reg_mask (torch.Tensor): Mask for positive bboxes. + label_weights (torch.Tensor): Weights of class loss. + bbox_weights (torch.Tensor): Weights of bbox loss. + + Returns: + dict: Computed losses. + + - loss_cls (torch.Tensor): Loss of classes. + - loss_bbox (torch.Tensor): Loss of bboxes. + - loss_corner (torch.Tensor): Loss of corners. + """ + losses = dict() + rcnn_batch_size = cls_score.shape[0] + + # calculate class loss + cls_flat = cls_score.view(-1) + loss_cls = self.loss_cls(cls_flat, labels, label_weights) + losses['loss_cls'] = loss_cls + + # calculate regression loss + code_size = self.bbox_coder.code_size + pos_inds = (reg_mask > 0) + if pos_inds.any() == 0: + # fake a part loss + losses['loss_bbox'] = 0 * bbox_pred.sum() + if self.with_corner_loss: + losses['loss_corner'] = 0 * bbox_pred.sum() + else: + pos_bbox_pred = bbox_pred.view(rcnn_batch_size, -1)[pos_inds] + bbox_weights_flat = bbox_weights[pos_inds].view(-1, 1).repeat( + 1, pos_bbox_pred.shape[-1]) + loss_bbox = self.loss_bbox( + pos_bbox_pred.unsqueeze(dim=0), bbox_targets.unsqueeze(dim=0), + bbox_weights_flat.unsqueeze(dim=0)) + losses['loss_bbox'] = loss_bbox + + if self.with_corner_loss: + pos_roi_boxes3d = rois[..., 1:].view(-1, code_size)[pos_inds] + pos_roi_boxes3d = pos_roi_boxes3d.view(-1, code_size) + batch_anchors = pos_roi_boxes3d.clone().detach() + pos_rois_rotation = pos_roi_boxes3d[..., 6].view(-1) + roi_xyz = pos_roi_boxes3d[..., 0:3].view(-1, 3) + batch_anchors[..., 0:3] = 0 + # decode boxes + pred_boxes3d = self.bbox_coder.decode( + batch_anchors, + pos_bbox_pred.view(-1, code_size)).view(-1, code_size) + + pred_boxes3d[..., 0:3] = rotation_3d_in_axis( + pred_boxes3d[..., 0:3].unsqueeze(1), + pos_rois_rotation, + axis=2).squeeze(1) + + pred_boxes3d[:, 0:3] += roi_xyz + + # calculate corner loss + loss_corner = self.get_corner_loss_lidar( + pred_boxes3d, pos_gt_bboxes) + losses['loss_corner'] = loss_corner.mean() + + return losses + + def get_targets(self, + sampling_results: SamplingResult, + rcnn_train_cfg: dict, + concat: bool = True) -> Tuple[torch.Tensor]: + """Generate targets. + + Args: + sampling_results (list[:obj:`SamplingResult`]): + Sampled results from rois. + rcnn_train_cfg (:obj:`ConfigDict`): Training config of rcnn. + concat (bool): Whether to concatenate targets between batches. + + Returns: + tuple[torch.Tensor]: Targets of boxes and class prediction. 
+ """ + pos_bboxes_list = [res.pos_bboxes for res in sampling_results] + pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] + iou_list = [res.iou for res in sampling_results] + targets = multi_apply( + self._get_target_single, + pos_bboxes_list, + pos_gt_bboxes_list, + iou_list, + cfg=rcnn_train_cfg) + + (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) = targets + + if concat: + label = torch.cat(label, 0) + bbox_targets = torch.cat(bbox_targets, 0) + pos_gt_bboxes = torch.cat(pos_gt_bboxes, 0) + reg_mask = torch.cat(reg_mask, 0) + + label_weights = torch.cat(label_weights, 0) + label_weights /= torch.clamp(label_weights.sum(), min=1.0) + + bbox_weights = torch.cat(bbox_weights, 0) + bbox_weights /= torch.clamp(bbox_weights.sum(), min=1.0) + + return (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + + def _get_target_single(self, pos_bboxes: torch.Tensor, + pos_gt_bboxes: torch.Tensor, ious: torch.Tensor, + cfg: dict) -> Tuple[torch.Tensor]: + """Generate training targets for a single sample. + + Args: + pos_bboxes (torch.Tensor): Positive boxes with shape + (N, 7). + pos_gt_bboxes (torch.Tensor): Ground truth boxes with shape + (M, 7). + ious (torch.Tensor): IoU between `pos_bboxes` and `pos_gt_bboxes` + in shape (N, M). + cfg (dict): Training configs. + + Returns: + tuple[torch.Tensor]: Target for positive boxes. + (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + """ + cls_pos_mask = ious > cfg.cls_pos_thr + cls_neg_mask = ious < cfg.cls_neg_thr + interval_mask = (cls_pos_mask == 0) & (cls_neg_mask == 0) + + # iou regression target + label = (cls_pos_mask > 0).float() + label[interval_mask] = ious[interval_mask] * 2 - 0.5 + # label weights + label_weights = (label >= 0).float() + + # box regression target + reg_mask = pos_bboxes.new_zeros(ious.size(0)).long() + reg_mask[0:pos_gt_bboxes.size(0)] = 1 + bbox_weights = (reg_mask > 0).float() + if reg_mask.bool().any(): + pos_gt_bboxes_ct = pos_gt_bboxes.clone().detach() + roi_center = pos_bboxes[..., 0:3] + roi_ry = pos_bboxes[..., 6] % (2 * np.pi) + + # canonical transformation + pos_gt_bboxes_ct[..., 0:3] -= roi_center + pos_gt_bboxes_ct[..., 6] -= roi_ry + pos_gt_bboxes_ct[..., 0:3] = rotation_3d_in_axis( + pos_gt_bboxes_ct[..., 0:3].unsqueeze(1), -roi_ry, + axis=2).squeeze(1) + + # flip orientation if rois have opposite orientation + ry_label = pos_gt_bboxes_ct[..., 6] % (2 * np.pi) # 0 ~ 2pi + opposite_flag = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5) + ry_label[opposite_flag] = (ry_label[opposite_flag] + np.pi) % ( + 2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi) + flag = ry_label > np.pi + ry_label[flag] = ry_label[flag] - np.pi * 2 # (-pi/2, pi/2) + ry_label = torch.clamp(ry_label, min=-np.pi / 2, max=np.pi / 2) + pos_gt_bboxes_ct[..., 6] = ry_label + + rois_anchor = pos_bboxes.clone().detach() + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + bbox_targets = self.bbox_coder.encode(rois_anchor, + pos_gt_bboxes_ct) + else: + # no fg bbox + bbox_targets = pos_gt_bboxes.new_empty((0, 7)) + + return (label, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, + bbox_weights) + + def get_corner_loss_lidar(self, + pred_bbox3d: torch.Tensor, + gt_bbox3d: torch.Tensor, + delta: float = 1.0) -> torch.Tensor: + """Calculate corner loss of given boxes. + + Args: + pred_bbox3d (torch.FloatTensor): Predicted boxes in shape (N, 7). + gt_bbox3d (torch.FloatTensor): Ground truth boxes in shape (N, 7). 
+ delta (float, optional): huber loss threshold. Defaults to 1.0 + + Returns: + torch.FloatTensor: Calculated corner loss in shape (N). + """ + assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0] + + # This is a little bit hack here because we assume the box for + # Part-A2 is in LiDAR coordinates + gt_boxes_structure = LiDARInstance3DBoxes(gt_bbox3d) + pred_box_corners = LiDARInstance3DBoxes(pred_bbox3d).corners + gt_box_corners = gt_boxes_structure.corners + + # This flip only changes the heading direction of GT boxes + gt_bbox3d_flip = gt_boxes_structure.clone() + gt_bbox3d_flip.tensor[:, 6] += np.pi + gt_box_corners_flip = gt_bbox3d_flip.corners + + corner_dist = torch.min( + torch.norm(pred_box_corners - gt_box_corners, dim=2), + torch.norm(pred_box_corners - gt_box_corners_flip, + dim=2)) # (N, 8) + # huber loss + abs_error = torch.abs(corner_dist) + corner_loss = torch.where(abs_error < delta, + 0.5 * abs_error**2 / delta, + abs_error - 0.5 * delta) + return corner_loss.mean(dim=1) + + def get_results(self, + rois: torch.Tensor, + cls_preds: torch.Tensor, + bbox_reg: torch.Tensor, + class_labels: torch.Tensor, + input_metas: List[dict], + test_cfg: dict = None) -> InstanceList: + """Generate bboxes from bbox head predictions. + + Args: + rois (torch.Tensor): Roi bounding boxes. + cls_preds (torch.Tensor): Scores of bounding boxes. + bbox_reg (torch.Tensor): Bounding boxes predictions + class_labels (torch.Tensor): Label of classes + input_metas (list[dict]): Point cloud meta info. + test_cfg (:obj:`ConfigDict`): Testing config. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. 
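+
+        The decode step maps box residuals from the canonical RoI frame
+        back to the LiDAR frame with a yaw rotation plus a translation.
+        A minimal sketch (hand-written z-axis rotation, sign conventions
+        simplified; ``decoded_xyz`` would come from the box coder)::
+
+            import torch
+
+            def rotate_z(xyz, angle):
+                # Rotate points (N, 3) about the z axis by angles (N,).
+                cos, sin = torch.cos(angle), torch.sin(angle)
+                x = xyz[:, 0] * cos - xyz[:, 1] * sin
+                y = xyz[:, 0] * sin + xyz[:, 1] * cos
+                return torch.stack([x, y, xyz[:, 2]], dim=-1)
+
+            roi_xyz = torch.tensor([[10.0, 2.0, -1.0]])      # RoI center
+            roi_ry = torch.tensor([0.5])                     # RoI yaw
+            decoded_xyz = torch.tensor([[0.2, -0.1, 0.05]])  # local residual
+            lidar_xyz = rotate_z(decoded_xyz, roi_ry) + roi_xyz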
+ """ + roi_batch_id = rois[..., 0] + roi_boxes = rois[..., 1:] # boxes without batch id + batch_size = int(roi_batch_id.max().item() + 1) + + # decode boxes + roi_ry = roi_boxes[..., 6].view(-1) + roi_xyz = roi_boxes[..., 0:3].view(-1, 3) + local_roi_boxes = roi_boxes.clone().detach() + local_roi_boxes[..., 0:3] = 0 + batch_box_preds = self.bbox_coder.decode(local_roi_boxes, bbox_reg) + batch_box_preds[..., 0:3] = rotation_3d_in_axis( + batch_box_preds[..., 0:3].unsqueeze(1), roi_ry, axis=2).squeeze(1) + batch_box_preds[:, 0:3] += roi_xyz + + # post processing + result_list = [] + for batch_id in range(batch_size): + cur_cls_preds = cls_preds[roi_batch_id == batch_id] + box_preds = batch_box_preds[roi_batch_id == batch_id] + label_preds = class_labels[batch_id] + + cur_cls_preds = cur_cls_preds.sigmoid() + cur_cls_preds, _ = torch.max(cur_cls_preds, dim=-1) + selected = self.class_agnostic_nms( + scores=cur_cls_preds, + bbox_preds=box_preds, + input_meta=input_metas[batch_id], + nms_cfg=test_cfg) + + selected_bboxes = box_preds[selected] + selected_label_preds = label_preds[selected] + selected_scores = cur_cls_preds[selected] + + results = InstanceData() + results.bboxes_3d = input_metas[batch_id]['box_type_3d']( + selected_bboxes, self.bbox_coder.code_size) + results.scores_3d = selected_scores + results.labels_3d = selected_label_preds + + result_list.append(results) + return result_list + + def class_agnostic_nms(self, scores: torch.Tensor, + bbox_preds: torch.Tensor, nms_cfg: dict, + input_meta: dict) -> Tuple[torch.Tensor]: + """Class agnostic NMS for box head. + + Args: + scores (torch.Tensor): Object score of bounding boxes. + bbox_preds (torch.Tensor): Predicted bounding boxes. + nms_cfg (dict): NMS config dict. + input_meta (dict): Contain pcd and img's meta info. + + Returns: + tuple[torch.Tensor]: Bounding boxes, scores and labels. + """ + obj_scores = scores.clone() + if nms_cfg.use_rotate_nms: + nms_func = nms_bev + else: + nms_func = nms_normal_bev + + bbox = input_meta['box_type_3d']( + bbox_preds.clone(), + box_dim=bbox_preds.shape[-1], + with_yaw=True, + origin=(0.5, 0.5, 0.5)) + + if nms_cfg.score_thr is not None: + scores_mask = (obj_scores >= nms_cfg.score_thr) + obj_scores = obj_scores[scores_mask] + bbox = bbox[scores_mask] + selected = [] + if obj_scores.shape[0] > 0: + box_scores_nms, indices = torch.topk( + obj_scores, k=min(4096, obj_scores.shape[0])) + bbox_bev = bbox.bev[indices] + bbox_for_nms = xywhr2xyxyr(bbox_bev) + + keep = nms_func(bbox_for_nms, box_scores_nms, nms_cfg.nms_thr) + selected = indices[keep] + if nms_cfg.score_thr is not None: + original_idxs = scores_mask.nonzero().view(-1) + selected = original_idxs[selected] + return selected diff --git a/mmdet3d/models/roi_heads/h3d_roi_head.py b/mmdet3d/models/roi_heads/h3d_roi_head.py new file mode 100755 index 0000000..521ce13 --- /dev/null +++ b/mmdet3d/models/roi_heads/h3d_roi_head.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List + +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from .base_3droi_head import Base3DRoIHead + + +@MODELS.register_module() +class H3DRoIHead(Base3DRoIHead): + """H3D roi head for H3DNet. + + Args: + primitive_list (List): Configs of primitive heads. + bbox_head (ConfigDict): Config of bbox_head. + train_cfg (ConfigDict): Training config. + test_cfg (ConfigDict): Testing config. 
+ """ + + def __init__(self, + primitive_list: List[dict], + bbox_head: dict = None, + train_cfg: dict = None, + test_cfg: dict = None, + init_cfg: dict = None): + super(H3DRoIHead, self).__init__( + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + # Primitive module + assert len(primitive_list) == 3 + self.primitive_z = MODELS.build(primitive_list[0]) + self.primitive_xy = MODELS.build(primitive_list[1]) + self.primitive_line = MODELS.build(primitive_list[2]) + + def init_mask_head(self): + """Initialize mask head, skip since ``H3DROIHead`` does not have + one.""" + pass + + def init_bbox_head(self, dummy_args, bbox_head): + """Initialize box head. + + Args: + dummy_args (optional): Just to compatible with + the interface in base class + bbox_head (dict): Config for bbox head. + """ + bbox_head['train_cfg'] = self.train_cfg + bbox_head['test_cfg'] = self.test_cfg + self.bbox_head = MODELS.build(bbox_head) + + def init_assigner_sampler(self): + """Initialize assigner and sampler.""" + pass + + def loss(self, points: List[Tensor], feats_dict: dict, + batch_data_samples: List[Det3DDataSample], **kwargs): + """Training forward function of PartAggregationROIHead. + + Args: + points (list[torch.Tensor]): Point cloud of each sample. + feats_dict (dict): Dict of feature. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`. + + Returns: + dict: losses from each head. + """ + losses = dict() + + primitive_loss_inputs = (points, feats_dict, batch_data_samples) + # note the feats_dict would be added new key and value in each head. + loss_z = self.primitive_z.loss(*primitive_loss_inputs) + loss_xy = self.primitive_xy.loss(*primitive_loss_inputs) + loss_line = self.primitive_line.loss(*primitive_loss_inputs) + + losses.update(loss_z) + losses.update(loss_xy) + losses.update(loss_line) + + targets = feats_dict.pop('targets') + + bbox_loss = self.bbox_head.loss( + points, + feats_dict, + rpn_targets=targets, + batch_data_samples=batch_data_samples) + losses.update(bbox_loss) + return losses + + def predict(self, + points: List[Tensor], + feats_dict: Dict[str, Tensor], + batch_data_samples: List[Det3DDataSample], + suffix='_optimized', + **kwargs) -> List[InstanceData]: + """ + Args: + points (list[tensor]): Point clouds of multiple samples. + feats_dict (dict): Features from FPN or backbone.. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes meta information of data. + + Returns: + list[:obj:`InstanceData`]: List of processed predictions. Each + InstanceData contains 3d Bounding boxes and corresponding + scores and labels. + """ + + result_z = self.primitive_z(feats_dict) + feats_dict.update(result_z) + + result_xy = self.primitive_xy(feats_dict) + feats_dict.update(result_xy) + + result_line = self.primitive_line(feats_dict) + feats_dict.update(result_line) + + bbox_preds = self.bbox_head(feats_dict) + feats_dict.update(bbox_preds) + results_list = self.bbox_head.predict( + points, feats_dict, batch_data_samples, suffix=suffix) + + return results_list diff --git a/mmdet3d/models/roi_heads/mask_heads/__init__.py b/mmdet3d/models/roi_heads/mask_heads/__init__.py new file mode 100755 index 0000000..68e754b --- /dev/null +++ b/mmdet3d/models/roi_heads/mask_heads/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .foreground_segmentation_head import ForegroundSegmentationHead +from .pointwise_semantic_head import PointwiseSemanticHead +from .primitive_head import PrimitiveHead + +__all__ = [ + 'PointwiseSemanticHead', 'PrimitiveHead', 'ForegroundSegmentationHead' +] diff --git a/mmdet3d/models/roi_heads/mask_heads/foreground_segmentation_head.py b/mmdet3d/models/roi_heads/mask_heads/foreground_segmentation_head.py new file mode 100755 index 0000000..6505fef --- /dev/null +++ b/mmdet3d/models/roi_heads/mask_heads/foreground_segmentation_head.py @@ -0,0 +1,174 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Optional, Tuple + +import torch +from mmcv.cnn.bricks import build_norm_layer +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import nn as nn + +from mmdet3d.registry import MODELS +from mmdet3d.utils import InstanceList + + +@MODELS.register_module() +class ForegroundSegmentationHead(BaseModule): + """Foreground segmentation head. + + Args: + in_channels (int): The number of input channel. + mlp_channels (tuple[int]): Specify of mlp channels. Defaults + to (256, 256). + extra_width (float): Boxes enlarge width. Default used 0.1. + norm_cfg (dict): Type of normalization method. Defaults to + dict(type='BN1d', eps=1e-5, momentum=0.1). + init_cfg (dict, optional): Initialize config of + model. Defaults to None. + loss_seg (dict): Config of segmentation loss. Defaults to + dict(type='mmdet.FocalLoss') + """ + + def __init__( + self, + in_channels: int, + mlp_channels: Tuple[int] = (256, 256), + extra_width: float = 0.1, + norm_cfg: dict = dict(type='BN1d', eps=1e-5, momentum=0.1), + init_cfg: Optional[dict] = None, + loss_seg: dict = dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + activated=True, + loss_weight=1.0) + ) -> None: + super(ForegroundSegmentationHead, self).__init__(init_cfg=init_cfg) + self.extra_width = extra_width + self.num_classes = 1 + + self.in_channels = in_channels + self.use_sigmoid_cls = loss_seg.get('use_sigmoid', False) + + out_channels = 1 + if self.use_sigmoid_cls: + self.out_channels = out_channels + else: + self.out_channels = out_channels + 1 + + mlps_layers = [] + cin = in_channels + for mlp in mlp_channels: + mlps_layers.extend([ + nn.Linear(cin, mlp, bias=False), + build_norm_layer(norm_cfg, mlp)[1], + nn.ReLU() + ]) + cin = mlp + mlps_layers.append(nn.Linear(cin, self.out_channels, bias=True)) + + self.seg_cls_layer = nn.Sequential(*mlps_layers) + + self.loss_seg = MODELS.build(loss_seg) + + def forward(self, feats: torch.Tensor) -> dict: + """Forward head. + + Args: + feats (torch.Tensor): Point-wise features. + + Returns: + dict: Segment predictions. + """ + seg_preds = self.seg_cls_layer(feats) + return dict(seg_preds=seg_preds) + + def _get_targets_single(self, point_xyz: torch.Tensor, + gt_bboxes_3d: InstanceData, + gt_labels_3d: torch.Tensor) -> torch.Tensor: + """generate segmentation targets for a single sample. + + Args: + point_xyz (torch.Tensor): Coordinate of points. + gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth boxes in + shape (box_num, 7). + gt_labels_3d (torch.Tensor): Class labels of ground truths in + shape (box_num). + + Returns: + torch.Tensor: Points class labels. 
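+
+        The foreground / ignore labelling can be sketched with an
+        axis-aligned containment test standing in for
+        ``points_in_boxes_part`` (illustrative only; a single unit box)::
+
+            import torch
+
+            points = torch.tensor([[0.5, 0.5, 0.5],
+                                   [1.05, 0.5, 0.5],
+                                   [3.0, 3.0, 3.0]])
+            box_min = torch.tensor([0.0, 0.0, 0.0])
+            box_max = torch.tensor([1.0, 1.0, 1.0])
+            extra = 0.1  # mirrors `extra_width`
+
+            in_box = ((points >= box_min) & (points <= box_max)).all(dim=1)
+            in_enlarged = ((points >= box_min - extra) &
+                           (points <= box_max + extra)).all(dim=1)
+
+            labels = points.new_zeros(points.shape[0]).long()
+            labels[in_box] = 1                  # foreground
+            labels[in_enlarged & ~in_box] = -1  # ignore band around the box
+            # labels -> tensor([ 1, -1,  0])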
+ """ + point_cls_labels_single = point_xyz.new_zeros( + point_xyz.shape[0]).long() + enlarged_gt_boxes = gt_bboxes_3d.enlarged_box(self.extra_width) + + box_idxs_of_pts = gt_bboxes_3d.points_in_boxes_part(point_xyz).long() + extend_box_idxs_of_pts = enlarged_gt_boxes.points_in_boxes_part( + point_xyz).long() + box_fg_flag = box_idxs_of_pts >= 0 + fg_flag = box_fg_flag.clone() + ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0) + point_cls_labels_single[ignore_flag] = -1 + gt_box_of_fg_points = gt_labels_3d[box_idxs_of_pts[fg_flag]] + point_cls_labels_single[ + fg_flag] = 1 if self.num_classes == 1 else\ + gt_box_of_fg_points.long() + return point_cls_labels_single, + + def get_targets(self, points_bxyz: torch.Tensor, + batch_gt_instances_3d: InstanceList) -> dict: + """Generate segmentation targets. + + Args: + points_bxyz (torch.Tensor): The coordinates of point in shape + (B, num_points, 3). + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + + Returns: + dict: Prediction targets + - seg_targets (torch.Tensor): Segmentation targets. + """ + batch_size = len(batch_gt_instances_3d) + points_xyz_list = [] + gt_bboxes_3d = [] + gt_labels_3d = [] + for idx in range(batch_size): + coords_idx = points_bxyz[:, 0] == idx + points_xyz_list.append(points_bxyz[coords_idx][..., 1:]) + gt_bboxes_3d.append(batch_gt_instances_3d[idx].bboxes_3d) + gt_labels_3d.append(batch_gt_instances_3d[idx].labels_3d) + seg_targets, = multi_apply(self._get_targets_single, points_xyz_list, + gt_bboxes_3d, gt_labels_3d) + seg_targets = torch.cat(seg_targets, dim=0) + return dict(seg_targets=seg_targets) + + def loss(self, semantic_results: dict, + semantic_targets: dict) -> Dict[str, torch.Tensor]: + """Calculate point-wise segmentation losses. + + Args: + semantic_results (dict): Results from semantic head. + semantic_targets (dict): Targets of semantic results. + + Returns: + dict: Loss of segmentation. + + - loss_semantic (torch.Tensor): Segmentation prediction loss. + """ + seg_preds = semantic_results['seg_preds'] + seg_targets = semantic_targets['seg_targets'] + + positives = (seg_targets > 0) + + negative_cls_weights = (seg_targets == 0).float() + seg_weights = (negative_cls_weights + 1.0 * positives).float() + pos_normalizer = positives.sum(dim=0).float() + seg_weights /= torch.clamp(pos_normalizer, min=1.0) + + seg_preds = torch.sigmoid(seg_preds) + loss_seg = self.loss_seg(seg_preds, (~positives).long(), seg_weights) + return dict(loss_semantic=loss_seg) diff --git a/mmdet3d/models/roi_heads/mask_heads/pointwise_semantic_head.py b/mmdet3d/models/roi_heads/mask_heads/pointwise_semantic_head.py new file mode 100755 index 0000000..340c6bf --- /dev/null +++ b/mmdet3d/models/roi_heads/mask_heads/pointwise_semantic_head.py @@ -0,0 +1,211 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Optional, Tuple + +import torch +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.structures.bbox_3d import BaseInstance3DBoxes, rotation_3d_in_axis +from mmdet3d.utils import InstanceList + + +@MODELS.register_module() +class PointwiseSemanticHead(BaseModule): + """Semantic segmentation head for point-wise segmentation. + + Predict point-wise segmentation and part regression results for PartA2. + See `paper `_ for more details. 
+ + Args: + in_channels (int): The number of input channel. + num_classes (int): The number of class. + extra_width (float): Boxes enlarge width. + loss_seg (dict): Config of segmentation loss. + loss_part (dict): Config of part prediction loss. + """ + + def __init__( + self, + in_channels: int, + num_classes: int = 3, + extra_width: float = 0.2, + seg_score_thr: float = 0.3, + init_cfg: Optional[dict] = None, + loss_seg: dict = dict( + type='FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_part: dict = dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0) + ) -> None: + super(PointwiseSemanticHead, self).__init__(init_cfg=init_cfg) + self.extra_width = extra_width + self.num_classes = num_classes + self.seg_score_thr = seg_score_thr + self.seg_cls_layer = nn.Linear(in_channels, 1, bias=True) + self.seg_reg_layer = nn.Linear(in_channels, 3, bias=True) + + self.loss_seg = MODELS.build(loss_seg) + self.loss_part = MODELS.build(loss_part) + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + """Forward pass. + + Args: + x (torch.Tensor): Features from the first stage. + + Returns: + dict: Part features, segmentation and part predictions. + + - seg_preds (torch.Tensor): Segment predictions. + - part_preds (torch.Tensor): Part predictions. + - part_feats (torch.Tensor): Feature predictions. + """ + seg_preds = self.seg_cls_layer(x) # (N, 1) + part_preds = self.seg_reg_layer(x) # (N, 3) + + seg_scores = torch.sigmoid(seg_preds).detach() + seg_mask = (seg_scores > self.seg_score_thr) + + part_offsets = torch.sigmoid(part_preds).clone().detach() + part_offsets[seg_mask.view(-1) == 0] = 0 + part_feats = torch.cat((part_offsets, seg_scores), + dim=-1) # shape (npoints, 4) + return dict( + seg_preds=seg_preds, part_preds=part_preds, part_feats=part_feats) + + def get_targets_single(self, voxel_centers: Tensor, + gt_bboxes_3d: BaseInstance3DBoxes, + gt_labels_3d: Tensor) -> Tuple[Tensor]: + """generate segmentation and part prediction targets for a single + sample. + + Args: + voxel_centers (torch.Tensor): The center of voxels in shape + (voxel_num, 3). + gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth boxes in + shape (box_num, 7). + gt_labels_3d (torch.Tensor): Class labels of ground truths in + shape (box_num). 
+ + Returns: + tuple[torch.Tensor]: Segmentation targets with shape [voxel_num] + part prediction targets with shape [voxel_num, 3] + """ + gt_bboxes_3d = gt_bboxes_3d.to(voxel_centers.device) + enlarged_gt_boxes = gt_bboxes_3d.enlarged_box(self.extra_width) + + part_targets = voxel_centers.new_zeros((voxel_centers.shape[0], 3), + dtype=torch.float32) + box_idx = gt_bboxes_3d.points_in_boxes_part(voxel_centers) + enlarge_box_idx = enlarged_gt_boxes.points_in_boxes_part( + voxel_centers).long() + + gt_labels_pad = F.pad( + gt_labels_3d, (1, 0), mode='constant', value=self.num_classes) + seg_targets = gt_labels_pad[(box_idx.long() + 1)] + fg_pt_flag = box_idx > -1 + ignore_flag = fg_pt_flag ^ (enlarge_box_idx > -1) + seg_targets[ignore_flag] = -1 + + for k in range(len(gt_bboxes_3d)): + k_box_flag = box_idx == k + # no point in current box (caused by velodyne reduce) + if not k_box_flag.any(): + continue + fg_voxels = voxel_centers[k_box_flag] + transformed_voxels = fg_voxels - gt_bboxes_3d.bottom_center[k] + transformed_voxels = rotation_3d_in_axis( + transformed_voxels.unsqueeze(0), + -gt_bboxes_3d.yaw[k].view(1), + axis=2) + part_targets[k_box_flag] = transformed_voxels / gt_bboxes_3d.dims[ + k] + voxel_centers.new_tensor([0.5, 0.5, 0]) + + part_targets = torch.clamp(part_targets, min=0) + return seg_targets, part_targets + + def get_targets(self, voxel_dict: dict, + batch_gt_instances_3d: InstanceList) -> dict: + """generate segmentation and part prediction targets. + + Args: + voxel_dict (dict): Contains information of voxels. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + + Returns: + dict: Prediction targets + + - seg_targets (torch.Tensor): Segmentation targets + with shape [voxel_num]. + - part_targets (torch.Tensor): Part prediction targets + with shape [voxel_num, 3]. + """ + batch_size = len(batch_gt_instances_3d) + voxel_center_list = [] + gt_bboxes_3d = [] + gt_labels_3d = [] + for idx in range(batch_size): + coords_idx = voxel_dict['coors'][:, 0] == idx + voxel_center_list.append(voxel_dict['voxel_centers'][coords_idx]) + gt_bboxes_3d.append(batch_gt_instances_3d[idx].bboxes_3d) + gt_labels_3d.append(batch_gt_instances_3d[idx].labels_3d) + seg_targets, part_targets = multi_apply(self.get_targets_single, + voxel_center_list, + gt_bboxes_3d, gt_labels_3d) + seg_targets = torch.cat(seg_targets, dim=0) + part_targets = torch.cat(part_targets, dim=0) + return dict(seg_targets=seg_targets, part_targets=part_targets) + + def loss(self, semantic_results: dict, + semantic_targets: dict) -> Dict[str, Tensor]: + """Calculate point-wise segmentation and part prediction losses. + + Args: + semantic_results (dict): Results from semantic head. + + - seg_preds: Segmentation predictions. + - part_preds: Part predictions. + + semantic_targets (dict): Targets of semantic results. + + - seg_preds: Segmentation targets. + - part_preds: Part targets. + + Returns: + dict: Loss of segmentation and part prediction. + + - loss_seg (torch.Tensor): Segmentation prediction loss. + - loss_part (torch.Tensor): Part prediction loss. 
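+
+        How the per-point segmentation weights are balanced (minimal sketch
+        with made-up targets and ``num_classes = 3``)::
+
+            import torch
+
+            num_classes = 3
+            # fg classes are 0..2, background is 3, ignored points are -1
+            seg_targets = torch.tensor([0, 2, 3, 3, -1])
+
+            pos = ((seg_targets > -1) & (seg_targets < num_classes)).float()
+            neg = (seg_targets == num_classes).float()
+            seg_weights = (pos + neg) / torch.clamp(pos.sum(), min=1.0)
+            # seg_weights -> tensor([0.5, 0.5, 0.5, 0.5, 0.0]);
+            # ignored points contribute nothing to the loss.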
+ """ + seg_preds = semantic_results['seg_preds'] + part_preds = semantic_results['part_preds'] + seg_targets = semantic_targets['seg_targets'] + part_targets = semantic_targets['part_targets'] + + pos_mask = (seg_targets > -1) & (seg_targets < self.num_classes) + binary_seg_target = pos_mask.long() + pos = pos_mask.float() + neg = (seg_targets == self.num_classes).float() + seg_weights = pos + neg + pos_normalizer = pos.sum() + seg_weights = seg_weights / torch.clamp(pos_normalizer, min=1.0) + loss_seg = self.loss_seg(seg_preds, binary_seg_target, seg_weights) + + if pos_normalizer > 0: + loss_part = self.loss_part(part_preds[pos_mask], + part_targets[pos_mask]) + else: + # fake a part loss + loss_part = loss_seg.new_tensor(0) + + return dict(loss_seg=loss_seg, loss_part=loss_part) diff --git a/mmdet3d/models/roi_heads/mask_heads/primitive_head.py b/mmdet3d/models/roi_heads/mask_heads/primitive_head.py new file mode 100755 index 0000000..f6b5e1b --- /dev/null +++ b/mmdet3d/models/roi_heads/mask_heads/primitive_head.py @@ -0,0 +1,1053 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import torch +from mmcv.cnn import ConvModule +from mmcv.ops import furthest_point_sample +from mmdet.models.utils import multi_apply +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.models.layers import VoteModule, build_sa_module +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from mmdet3d.structures.bbox_3d import BaseInstance3DBoxes + + +@MODELS.register_module() +class PrimitiveHead(BaseModule): + r"""Primitive head of `H3DNet `_. + + Args: + num_dims (int): The dimension of primitive semantic information. + num_classes (int): The number of class. + primitive_mode (str): The mode of primitive module, + available mode ['z', 'xy', 'line']. + bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for encoding and + decoding boxes. + train_cfg (dict, optional): Config for training. + test_cfg (dict, optional): Config for testing. + vote_module_cfg (dict, optional): Config of VoteModule for point-wise + votes. + vote_aggregation_cfg (dict, optional): Config of vote aggregation + layer. + feat_channels (tuple[int]): Convolution channels of + prediction layer. + upper_thresh (float): Threshold for line matching. + surface_thresh (float): Threshold for surface matching. + conv_cfg (dict, optional): Config of convolution in prediction layer. + norm_cfg (dict, optional): Config of BN in prediction layer. + objectness_loss (dict, optional): Config of objectness loss. + center_loss (dict, optional): Config of center loss. + semantic_loss (dict, optional): Config of point-wise semantic + segmentation loss. 
+ """ + + def __init__(self, + num_dims: int, + num_classes: int, + primitive_mode: str, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + vote_module_cfg: Optional[dict] = None, + vote_aggregation_cfg: Optional[dict] = None, + feat_channels: tuple = (128, 128), + upper_thresh: float = 100.0, + surface_thresh: float = 0.5, + conv_cfg: dict = dict(type='Conv1d'), + norm_cfg: dict = dict(type='BN1d'), + objectness_loss: Optional[dict] = None, + center_loss: Optional[dict] = None, + semantic_reg_loss: Optional[dict] = None, + semantic_cls_loss: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super(PrimitiveHead, self).__init__(init_cfg=init_cfg) + # bounding boxes centers, face centers and edge centers + assert primitive_mode in ['z', 'xy', 'line'] + # The dimension of primitive semantic information. + self.num_dims = num_dims + self.num_classes = num_classes + self.primitive_mode = primitive_mode + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.gt_per_seed = vote_module_cfg['gt_per_seed'] + self.num_proposal = vote_aggregation_cfg['num_point'] + self.upper_thresh = upper_thresh + self.surface_thresh = surface_thresh + + self.loss_objectness = MODELS.build(objectness_loss) + self.loss_center = MODELS.build(center_loss) + self.loss_semantic_reg = MODELS.build(semantic_reg_loss) + self.loss_semantic_cls = MODELS.build(semantic_cls_loss) + + assert vote_aggregation_cfg['mlp_channels'][0] == vote_module_cfg[ + 'in_channels'] + + # Primitive existence flag prediction + self.flag_conv = ConvModule( + vote_module_cfg['conv_channels'][-1], + vote_module_cfg['conv_channels'][-1] // 2, + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True, + inplace=True) + self.flag_pred = torch.nn.Conv1d( + vote_module_cfg['conv_channels'][-1] // 2, 2, 1) + + self.vote_module = VoteModule(**vote_module_cfg) + self.vote_aggregation = build_sa_module(vote_aggregation_cfg) + + prev_channel = vote_aggregation_cfg['mlp_channels'][-1] + conv_pred_list = list() + for k in range(len(feat_channels)): + conv_pred_list.append( + ConvModule( + prev_channel, + feat_channels[k], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True, + inplace=True)) + prev_channel = feat_channels[k] + self.conv_pred = nn.Sequential(*conv_pred_list) + + conv_out_channel = 3 + num_dims + num_classes + self.conv_pred.add_module('conv_out', + nn.Conv1d(prev_channel, conv_out_channel, 1)) + + @property + def sample_mode(self): + if self.training: + sample_mode = self.train_cfg.sample_mode + else: + sample_mode = self.test_cfg.sample_mode + assert sample_mode in ['vote', 'seed', 'random'] + return sample_mode + + def forward(self, feats_dict: dict) -> dict: + """Forward pass. + + Args: + feats_dict (dict): Feature dict from backbone. + + + Returns: + dict: Predictions of primitive head. + """ + sample_mode = self.sample_mode + + seed_points = feats_dict['fp_xyz_net0'][-1] + seed_features = feats_dict['hd_feature'] + results = {} + + primitive_flag = self.flag_conv(seed_features) + primitive_flag = self.flag_pred(primitive_flag) + + results['pred_flag_' + self.primitive_mode] = primitive_flag + + # 1. generate vote_points from seed_points + vote_points, vote_features, _ = self.vote_module( + seed_points, seed_features) + results['vote_' + self.primitive_mode] = vote_points + results['vote_features_' + self.primitive_mode] = vote_features + + # 2. 
aggregate vote_points + if sample_mode == 'vote': + # use fps in vote_aggregation + sample_indices = None + elif sample_mode == 'seed': + # FPS on seed and choose the votes corresponding to the seeds + sample_indices = furthest_point_sample(seed_points, + self.num_proposal) + elif sample_mode == 'random': + # Random sampling from the votes + batch_size, num_seed = seed_points.shape[:2] + sample_indices = torch.randint( + 0, + num_seed, (batch_size, self.num_proposal), + dtype=torch.int32, + device=seed_points.device) + else: + raise NotImplementedError('Unsupported sample mod!') + + vote_aggregation_ret = self.vote_aggregation(vote_points, + vote_features, + sample_indices) + aggregated_points, features, aggregated_indices = vote_aggregation_ret + results['aggregated_points_' + self.primitive_mode] = aggregated_points + results['aggregated_features_' + self.primitive_mode] = features + results['aggregated_indices_' + + self.primitive_mode] = aggregated_indices + + # 3. predict primitive offsets and semantic information + predictions = self.conv_pred(features) + + # 4. decode predictions + decode_ret = self.primitive_decode_scores(predictions, + aggregated_points) + results.update(decode_ret) + + center, pred_ind = self.get_primitive_center( + primitive_flag, decode_ret['center_' + self.primitive_mode]) + + results['pred_' + self.primitive_mode + '_ind'] = pred_ind + results['pred_' + self.primitive_mode + '_center'] = center + return results + + def loss(self, points: List[torch.Tensor], feats_dict: Dict[str, + torch.Tensor], + batch_data_samples: List[Det3DDataSample], **kwargs) -> dict: + """ + Args: + points (list[tensor]): Points cloud of multiple samples. + feats_dict (dict): Predictions from backbone or FPN. + batch_data_samples (list[:obj:`Det3DDataSample`]): Each item + contains the meta information of each sample and + corresponding annotations. + + Returns: + dict: A dictionary of loss components. + """ + preds = self(feats_dict) + feats_dict.update(preds) + + batch_gt_instance_3d = [] + batch_gt_instances_ignore = [] + batch_input_metas = [] + batch_pts_semantic_mask = [] + batch_pts_instance_mask = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + batch_gt_instance_3d.append(data_sample.gt_instances_3d) + batch_gt_instances_ignore.append( + data_sample.get('ignored_instances', None)) + batch_pts_semantic_mask.append( + data_sample.gt_pts_seg.get('pts_semantic_mask', None)) + batch_pts_instance_mask.append( + data_sample.gt_pts_seg.get('pts_instance_mask', None)) + + loss_inputs = (points, feats_dict, batch_gt_instance_3d) + losses = self.loss_by_feat( + *loss_inputs, + batch_pts_semantic_mask=batch_pts_semantic_mask, + batch_pts_instance_mask=batch_pts_instance_mask, + batch_gt_instances_ignore=batch_gt_instances_ignore, + ) + return losses + + def loss_by_feat( + self, + points: List[torch.Tensor], + feats_dict: dict, + batch_gt_instances_3d: List[InstanceData], + batch_pts_semantic_mask: Optional[List[torch.Tensor]] = None, + batch_pts_instance_mask: Optional[List[torch.Tensor]] = None, + **kwargs): + """Compute loss. + + Args: + points (list[torch.Tensor]): Input points. + feats_dict (dict): Predictions of previous modules. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_pts_semantic_mask (list[tensor]): Semantic mask + of points cloud. Defaults to None. + batch_pts_instance_mask (list[tensor]): Instance mask + of points cloud. 
Defaults to None. + + Returns: + dict: Losses of Primitive Head. + """ + + targets = self.get_targets(points, feats_dict, batch_gt_instances_3d, + batch_pts_semantic_mask, + batch_pts_instance_mask) + + (point_mask, point_offset, gt_primitive_center, gt_primitive_semantic, + gt_sem_cls_label, gt_primitive_mask) = targets + + losses = {} + # Compute the loss of primitive existence flag + pred_flag = feats_dict['pred_flag_' + self.primitive_mode] + flag_loss = self.loss_objectness(pred_flag, gt_primitive_mask.long()) + losses['flag_loss_' + self.primitive_mode] = flag_loss + + # calculate vote loss + vote_loss = self.vote_module.get_loss( + feats_dict['seed_points'], + feats_dict['vote_' + self.primitive_mode], + feats_dict['seed_indices'], point_mask, point_offset) + losses['vote_loss_' + self.primitive_mode] = vote_loss + + num_proposal = feats_dict['aggregated_points_' + + self.primitive_mode].shape[1] + primitive_center = feats_dict['center_' + self.primitive_mode] + if self.primitive_mode != 'line': + primitive_semantic = feats_dict['size_residuals_' + + self.primitive_mode].contiguous() + else: + primitive_semantic = None + semancitc_scores = feats_dict['sem_cls_scores_' + + self.primitive_mode].transpose(2, 1) + + gt_primitive_mask = gt_primitive_mask / \ + (gt_primitive_mask.sum() + 1e-6) + center_loss, size_loss, sem_cls_loss = self.compute_primitive_loss( + primitive_center, primitive_semantic, semancitc_scores, + num_proposal, gt_primitive_center, gt_primitive_semantic, + gt_sem_cls_label, gt_primitive_mask) + losses['center_loss_' + self.primitive_mode] = center_loss + losses['size_loss_' + self.primitive_mode] = size_loss + losses['sem_loss_' + self.primitive_mode] = sem_cls_loss + + return losses + + def get_targets( + self, + points, + bbox_preds: Optional[dict] = None, + batch_gt_instances_3d: List[InstanceData] = None, + batch_pts_semantic_mask: List[torch.Tensor] = None, + batch_pts_instance_mask: List[torch.Tensor] = None, + ): + """Generate targets of primitive head. + + Args: + points (list[torch.Tensor]): Points of each batch. + bbox_preds (torch.Tensor): Bounding box predictions of + primitive head. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_pts_semantic_mask (list[tensor]): Semantic gt mask for + multiple images. + batch_pts_instance_mask (list[tensor]): Instance gt mask for + multiple images. + + Returns: + tuple[torch.Tensor]: Targets of primitive head. 
+ """ + batch_gt_labels_3d = [ + gt_instances_3d.labels_3d + for gt_instances_3d in batch_gt_instances_3d + ] + batch_gt_bboxes_3d = [ + gt_instances_3d.bboxes_3d + for gt_instances_3d in batch_gt_instances_3d + ] + for index in range(len(batch_gt_labels_3d)): + if len(batch_gt_labels_3d[index]) == 0: + fake_box = batch_gt_bboxes_3d[index].tensor.new_zeros( + 1, batch_gt_bboxes_3d[index].tensor.shape[-1]) + batch_gt_bboxes_3d[index] = batch_gt_bboxes_3d[index].new_box( + fake_box) + batch_gt_labels_3d[index] = batch_gt_labels_3d[ + index].new_zeros(1) + + if batch_pts_semantic_mask is None: + batch_pts_semantic_mask = [ + None for _ in range(len(batch_gt_labels_3d)) + ] + batch_pts_instance_mask = [ + None for _ in range(len(batch_gt_labels_3d)) + ] + + (point_mask, point_sem, + point_offset) = multi_apply(self.get_targets_single, points, + batch_gt_bboxes_3d, batch_gt_labels_3d, + batch_pts_semantic_mask, + batch_pts_instance_mask) + + point_mask = torch.stack(point_mask) + point_sem = torch.stack(point_sem) + point_offset = torch.stack(point_offset) + + batch_size = point_mask.shape[0] + num_proposal = bbox_preds['aggregated_points_' + + self.primitive_mode].shape[1] + num_seed = bbox_preds['seed_points'].shape[1] + seed_inds = bbox_preds['seed_indices'].long() + seed_inds_expand = seed_inds.view(batch_size, num_seed, + 1).repeat(1, 1, 3) + seed_gt_votes = torch.gather(point_offset, 1, seed_inds_expand) + seed_gt_votes += bbox_preds['seed_points'] + gt_primitive_center = seed_gt_votes.view(batch_size * num_proposal, 1, + 3) + + seed_inds_expand_sem = seed_inds.view(batch_size, num_seed, 1).repeat( + 1, 1, 4 + self.num_dims) + seed_gt_sem = torch.gather(point_sem, 1, seed_inds_expand_sem) + gt_primitive_semantic = seed_gt_sem[:, :, 3:3 + self.num_dims].view( + batch_size * num_proposal, 1, self.num_dims).contiguous() + + gt_sem_cls_label = seed_gt_sem[:, :, -1].long() + + gt_votes_mask = torch.gather(point_mask, 1, seed_inds) + + return (point_mask, point_offset, gt_primitive_center, + gt_primitive_semantic, gt_sem_cls_label, gt_votes_mask) + + def get_targets_single( + self, + points: torch.Tensor, + gt_bboxes_3d: BaseInstance3DBoxes, + gt_labels_3d: torch.Tensor, + pts_semantic_mask: torch.Tensor = None, + pts_instance_mask: torch.Tensor = None) -> Tuple[torch.Tensor]: + """Generate targets of primitive head for single batch. + + Args: + points (torch.Tensor): Points of each batch. + gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth + boxes of each batch. + gt_labels_3d (torch.Tensor): Labels of each batch. + pts_semantic_mask (torch.Tensor): Point-wise semantic + label of each batch. + pts_instance_mask (torch.Tensor): Point-wise instance + label of each batch. + + Returns: + tuple[torch.Tensor]: Targets of primitive head. 
+ """ + gt_bboxes_3d = gt_bboxes_3d.to(points.device) + num_points = points.shape[0] + + point_mask = points.new_zeros(num_points) + # Offset to the primitive center + point_offset = points.new_zeros([num_points, 3]) + # Semantic information of primitive center + point_sem = points.new_zeros([num_points, 3 + self.num_dims + 1]) + + # Generate pts_semantic_mask and pts_instance_mask when they are None + if pts_semantic_mask is None or pts_instance_mask is None: + points2box_mask = gt_bboxes_3d.points_in_boxes_all(points) + assignment = points2box_mask.argmax(1) + background_mask = points2box_mask.max(1)[0] == 0 + + if pts_semantic_mask is None: + pts_semantic_mask = gt_labels_3d[assignment] + pts_semantic_mask[background_mask] = self.num_classes + + if pts_instance_mask is None: + pts_instance_mask = assignment + pts_instance_mask[background_mask] = gt_labels_3d.shape[0] + + instance_flag = torch.nonzero( + pts_semantic_mask != self.num_classes, as_tuple=False).squeeze(1) + instance_labels = pts_instance_mask[instance_flag].unique() + + with_yaw = gt_bboxes_3d.with_yaw + for i, i_instance in enumerate(instance_labels): + indices = instance_flag[pts_instance_mask[instance_flag] == + i_instance] + coords = points[indices, :3] + cur_cls_label = pts_semantic_mask[indices][0] + + # Bbox Corners + cur_corners = gt_bboxes_3d.corners[i] + + plane_lower_temp = points.new_tensor( + [0, 0, 1, -cur_corners[7, -1]]) + upper_points = cur_corners[[1, 2, 5, 6]] + refined_distance = (upper_points * plane_lower_temp[:3]).sum(dim=1) + + if self.check_horizon(upper_points) and \ + plane_lower_temp[0] + plane_lower_temp[1] < \ + self.train_cfg['lower_thresh']: + plane_lower = points.new_tensor( + [0, 0, 1, plane_lower_temp[-1]]) + plane_upper = points.new_tensor( + [0, 0, 1, -torch.mean(refined_distance)]) + else: + raise NotImplementedError('Only horizontal plane is support!') + + if self.check_dist(plane_upper, upper_points) is False: + raise NotImplementedError( + 'Mean distance to plane should be lower than thresh!') + + # Get the boundary points here + point2plane_dist, selected = self.match_point2plane( + plane_lower, coords) + + # Get bottom four lines + if self.primitive_mode == 'line': + point2line_matching = self.match_point2line( + coords[selected], cur_corners, with_yaw, mode='bottom') + + point_mask, point_offset, point_sem = \ + self._assign_primitive_line_targets(point_mask, + point_offset, + point_sem, + coords[selected], + indices[selected], + cur_cls_label, + point2line_matching, + cur_corners, + [1, 1, 0, 0], + with_yaw, + mode='bottom') + + # Set the surface labels here + if self.primitive_mode == 'z' and \ + selected.sum() > self.train_cfg['num_point'] and \ + point2plane_dist[selected].var() < \ + self.train_cfg['var_thresh']: + + point_mask, point_offset, point_sem = \ + self._assign_primitive_surface_targets(point_mask, + point_offset, + point_sem, + coords[selected], + indices[selected], + cur_cls_label, + cur_corners, + with_yaw, + mode='bottom') + + # Get the boundary points here + point2plane_dist, selected = self.match_point2plane( + plane_upper, coords) + + # Get top four lines + if self.primitive_mode == 'line': + point2line_matching = self.match_point2line( + coords[selected], cur_corners, with_yaw, mode='top') + + point_mask, point_offset, point_sem = \ + self._assign_primitive_line_targets(point_mask, + point_offset, + point_sem, + coords[selected], + indices[selected], + cur_cls_label, + point2line_matching, + cur_corners, + [1, 1, 0, 0], + with_yaw, + mode='top') + + if 
self.primitive_mode == 'z' and \ + selected.sum() > self.train_cfg['num_point'] and \ + point2plane_dist[selected].var() < \ + self.train_cfg['var_thresh']: + + point_mask, point_offset, point_sem = \ + self._assign_primitive_surface_targets(point_mask, + point_offset, + point_sem, + coords[selected], + indices[selected], + cur_cls_label, + cur_corners, + with_yaw, + mode='top') + + # Get left two lines + plane_left_temp = self._get_plane_fomulation( + cur_corners[2] - cur_corners[3], + cur_corners[3] - cur_corners[0], cur_corners[0]) + + right_points = cur_corners[[4, 5, 7, 6]] + plane_left_temp /= torch.norm(plane_left_temp[:3]) + refined_distance = (right_points * plane_left_temp[:3]).sum(dim=1) + + if plane_left_temp[2] < self.train_cfg['lower_thresh']: + plane_left = plane_left_temp + plane_right = points.new_tensor([ + plane_left_temp[0], plane_left_temp[1], plane_left_temp[2], + -refined_distance.mean() + ]) + else: + raise NotImplementedError( + 'Normal vector of the plane should be horizontal!') + + # Get the boundary points here + point2plane_dist, selected = self.match_point2plane( + plane_left, coords) + + # Get left four lines + if self.primitive_mode == 'line': + point2line_matching = self.match_point2line( + coords[selected], cur_corners, with_yaw, mode='left') + point_mask, point_offset, point_sem = \ + self._assign_primitive_line_targets( + point_mask, point_offset, point_sem, + coords[selected], indices[selected], cur_cls_label, + point2line_matching[2:], cur_corners, [2, 2], + with_yaw, mode='left') + + if self.primitive_mode == 'xy' and \ + selected.sum() > self.train_cfg['num_point'] and \ + point2plane_dist[selected].var() < \ + self.train_cfg['var_thresh']: + + point_mask, point_offset, point_sem = \ + self._assign_primitive_surface_targets( + point_mask, point_offset, point_sem, + coords[selected], indices[selected], cur_cls_label, + cur_corners, with_yaw, mode='left') + + # Get the boundary points here + point2plane_dist, selected = self.match_point2plane( + plane_right, coords) + + # Get right four lines + if self.primitive_mode == 'line': + point2line_matching = self.match_point2line( + coords[selected], cur_corners, with_yaw, mode='right') + + point_mask, point_offset, point_sem = \ + self._assign_primitive_line_targets( + point_mask, point_offset, point_sem, + coords[selected], indices[selected], cur_cls_label, + point2line_matching[2:], cur_corners, [2, 2], + with_yaw, mode='right') + + if self.primitive_mode == 'xy' and \ + selected.sum() > self.train_cfg['num_point'] and \ + point2plane_dist[selected].var() < \ + self.train_cfg['var_thresh']: + + point_mask, point_offset, point_sem = \ + self._assign_primitive_surface_targets( + point_mask, point_offset, point_sem, + coords[selected], indices[selected], cur_cls_label, + cur_corners, with_yaw, mode='right') + + plane_front_temp = self._get_plane_fomulation( + cur_corners[0] - cur_corners[4], + cur_corners[4] - cur_corners[5], cur_corners[5]) + + back_points = cur_corners[[3, 2, 7, 6]] + plane_front_temp /= torch.norm(plane_front_temp[:3]) + refined_distance = (back_points * plane_front_temp[:3]).sum(dim=1) + + if plane_front_temp[2] < self.train_cfg['lower_thresh']: + plane_front = plane_front_temp + plane_back = points.new_tensor([ + plane_front_temp[0], plane_front_temp[1], + plane_front_temp[2], -torch.mean(refined_distance) + ]) + else: + raise NotImplementedError( + 'Normal vector of the plane should be horizontal!') + + # Get the boundary points here + point2plane_dist, selected = 
self.match_point2plane( + plane_front, coords) + + if self.primitive_mode == 'xy' and \ + selected.sum() > self.train_cfg['num_point'] and \ + (point2plane_dist[selected]).var() < \ + self.train_cfg['var_thresh']: + + point_mask, point_offset, point_sem = \ + self._assign_primitive_surface_targets( + point_mask, point_offset, point_sem, + coords[selected], indices[selected], cur_cls_label, + cur_corners, with_yaw, mode='front') + + # Get the boundary points here + point2plane_dist, selected = self.match_point2plane( + plane_back, coords) + + if self.primitive_mode == 'xy' and \ + selected.sum() > self.train_cfg['num_point'] and \ + point2plane_dist[selected].var() < \ + self.train_cfg['var_thresh']: + + point_mask, point_offset, point_sem = \ + self._assign_primitive_surface_targets( + point_mask, point_offset, point_sem, + coords[selected], indices[selected], cur_cls_label, + cur_corners, with_yaw, mode='back') + + return (point_mask, point_sem, point_offset) + + def primitive_decode_scores(self, predictions: torch.Tensor, + aggregated_points: torch.Tensor) -> dict: + """Decode predicted parts to primitive head. + + Args: + predictions (torch.Tensor): primitive pridictions of each batch. + aggregated_points (torch.Tensor): The aggregated points + of vote stage. + + Returns: + Dict: Predictions of primitive head, including center, + semantic size and semantic scores. + """ + + ret_dict = {} + pred_transposed = predictions.transpose(2, 1) + + center = aggregated_points + pred_transposed[:, :, 0:3] + ret_dict['center_' + self.primitive_mode] = center + + if self.primitive_mode in ['z', 'xy']: + ret_dict['size_residuals_' + self.primitive_mode] = \ + pred_transposed[:, :, 3:3 + self.num_dims] + + ret_dict['sem_cls_scores_' + self.primitive_mode] = \ + pred_transposed[:, :, 3 + self.num_dims:] + + return ret_dict + + def check_horizon(self, points: torch.Tensor) -> bool: + """Check whether is a horizontal plane. + + Args: + points (torch.Tensor): Points of input. + + Returns: + Bool: Flag of result. + """ + return (points[0][-1] == points[1][-1]) and \ + (points[1][-1] == points[2][-1]) and \ + (points[2][-1] == points[3][-1]) + + def check_dist(self, plane_equ: torch.Tensor, + points: torch.Tensor) -> tuple: + """Whether the mean of points to plane distance is lower than thresh. + + Args: + plane_equ (torch.Tensor): Plane to be checked. + points (torch.Tensor): Points to be checked. + + Returns: + Tuple: Flag of result. + """ + return (points[:, 2] + + plane_equ[-1]).sum() / 4.0 < self.train_cfg['lower_thresh'] + + def point2line_dist(self, points: torch.Tensor, pts_a: torch.Tensor, + pts_b: torch.Tensor) -> torch.Tensor: + """Calculate the distance from point to line. + + Args: + points (torch.Tensor): Points of input. + pts_a (torch.Tensor): Point on the specific line. + pts_b (torch.Tensor): Point on the specific line. + + Returns: + torch.Tensor: Distance between each point to line. + """ + line_a2b = pts_b - pts_a + line_a2pts = points - pts_a + length = (line_a2pts * line_a2b.view(1, 3)).sum(1) / \ + line_a2b.norm() + dist = (line_a2pts.norm(dim=1)**2 - length**2).sqrt() + + return dist + + def match_point2line(self, + points: torch.Tensor, + corners: torch.Tensor, + with_yaw: bool, + mode: str = 'bottom') -> tuple: + """Match points to corresponding line. + + Args: + points (torch.Tensor): Points of input. + corners (torch.Tensor): Eight corners of a bounding box. + with_yaw (Bool): Whether the boundind box is with rotation. 
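`point2line_dist` above is the classic projection formula: the perpendicular distance is sqrt(|AP|^2 - ((AP.AB)/|AB|)^2). A plain-torch restatement of that helper with hand-picked points so the result is easy to verify; the function below is a standalone copy for illustration, not the method itself.

import torch

def point_to_line_dist(points, pts_a, pts_b):
    # Perpendicular distance from each point to the line through pts_a and pts_b.
    line_a2b = pts_b - pts_a
    line_a2pts = points - pts_a
    length = (line_a2pts * line_a2b.view(1, 3)).sum(1) / line_a2b.norm()  # projection length
    return (line_a2pts.norm(dim=1) ** 2 - length ** 2).sqrt()

pts = torch.tensor([[0.0, 3.0, 0.0], [2.0, 4.0, 0.0]])
print(point_to_line_dist(pts, torch.zeros(3), torch.tensor([1.0, 0.0, 0.0])))
# tensor([3., 4.]) -> distance to the x-axis is just |y| here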
+ mode (str, optional): Specify which line should be matched, + available mode are ('bottom', 'top', 'left', 'right'). + Defaults to 'bottom'. + + Returns: + Tuple: Flag of matching correspondence. + """ + if with_yaw: + corners_pair = { + 'bottom': [[0, 3], [4, 7], [0, 4], [3, 7]], + 'top': [[1, 2], [5, 6], [1, 5], [2, 6]], + 'left': [[0, 1], [3, 2], [0, 1], [3, 2]], + 'right': [[4, 5], [7, 6], [4, 5], [7, 6]] + } + selected_list = [] + for pair_index in corners_pair[mode]: + selected = self.point2line_dist( + points, corners[pair_index[0]], corners[pair_index[1]]) \ + < self.train_cfg['line_thresh'] + selected_list.append(selected) + else: + xmin, ymin, _ = corners.min(0)[0] + xmax, ymax, _ = corners.max(0)[0] + sel1 = torch.abs(points[:, 0] - + xmin) < self.train_cfg['line_thresh'] + sel2 = torch.abs(points[:, 0] - + xmax) < self.train_cfg['line_thresh'] + sel3 = torch.abs(points[:, 1] - + ymin) < self.train_cfg['line_thresh'] + sel4 = torch.abs(points[:, 1] - + ymax) < self.train_cfg['line_thresh'] + selected_list = [sel1, sel2, sel3, sel4] + return selected_list + + def match_point2plane(self, plane: torch.Tensor, + points: torch.Tensor) -> tuple: + """Match points to plane. + + Args: + plane (torch.Tensor): Equation of the plane. + points (torch.Tensor): Points of input. + + Returns: + Tuple: Distance of each point to the plane and + flag of matching correspondence. + """ + point2plane_dist = torch.abs((points * plane[:3]).sum(dim=1) + + plane[-1]) + min_dist = point2plane_dist.min() + selected = torch.abs(point2plane_dist - + min_dist) < self.train_cfg['dist_thresh'] + return point2plane_dist, selected + + def compute_primitive_loss(self, primitive_center: torch.Tensor, + primitive_semantic: torch.Tensor, + semantic_scores: torch.Tensor, + num_proposal: torch.Tensor, + gt_primitive_center: torch.Tensor, + gt_primitive_semantic: torch.Tensor, + gt_sem_cls_label: torch.Tensor, + gt_primitive_mask: torch.Tensor) -> Tuple: + """Compute loss of primitive module. + + Args: + primitive_center (torch.Tensor): Pridictions of primitive center. + primitive_semantic (torch.Tensor): Pridictions of primitive + semantic. + semantic_scores (torch.Tensor): Pridictions of primitive + semantic scores. + num_proposal (int): The number of primitive proposal. + gt_primitive_center (torch.Tensor): Ground truth of + primitive center. + gt_votes_sem (torch.Tensor): Ground truth of primitive semantic. + gt_sem_cls_label (torch.Tensor): Ground truth of primitive + semantic class. + gt_primitive_mask (torch.Tensor): Ground truth of primitive mask. + + Returns: + Tuple: Loss of primitive module. 
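`match_point2plane` keeps the points whose absolute plane distance is within `dist_thresh` of the closest point, i.e. the thin boundary layer of a face. A plain-torch sketch using the z = 0 plane and an assumed threshold value.

import torch

plane = torch.tensor([0.0, 0.0, 1.0, 0.0])    # n = (0, 0, 1), d = 0 -> the z = 0 plane
points = torch.tensor([[0.0, 0.0, 0.05],
                       [1.0, 2.0, 0.10],
                       [0.5, 0.5, 0.90]])
dist_thresh = 0.1                              # assumed value of train_cfg['dist_thresh']

point2plane_dist = torch.abs((points * plane[:3]).sum(dim=1) + plane[-1])
selected = torch.abs(point2plane_dist - point2plane_dist.min()) < dist_thresh
print(point2plane_dist.tolist())   # [0.05, 0.10, 0.90]
print(selected.tolist())           # [True, True, False]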
+ """ + batch_size = primitive_center.shape[0] + vote_xyz_reshape = primitive_center.view(batch_size * num_proposal, -1, + 3) + + center_loss = self.loss_center( + vote_xyz_reshape, + gt_primitive_center, + dst_weight=gt_primitive_mask.view(batch_size * num_proposal, 1))[1] + + if self.primitive_mode != 'line': + size_xyz_reshape = primitive_semantic.view( + batch_size * num_proposal, -1, self.num_dims).contiguous() + size_loss = self.loss_semantic_reg( + size_xyz_reshape, + gt_primitive_semantic, + dst_weight=gt_primitive_mask.view(batch_size * num_proposal, + 1))[1] + else: + size_loss = center_loss.new_tensor(0.0) + + # Semantic cls loss + sem_cls_loss = self.loss_semantic_cls( + semantic_scores, gt_sem_cls_label, weight=gt_primitive_mask) + + return center_loss, size_loss, sem_cls_loss + + def get_primitive_center(self, pred_flag: torch.Tensor, + center: torch.Tensor) -> Tuple: + """Generate primitive center from predictions. + + Args: + pred_flag (torch.Tensor): Scores of primitive center. + center (torch.Tensor): Pridictions of primitive center. + + Returns: + Tuple: Primitive center and the prediction indices. + """ + ind_normal = F.softmax(pred_flag, dim=1) + pred_indices = (ind_normal[:, 1, :] > + self.surface_thresh).detach().float() + selected = (ind_normal[:, 1, :] <= + self.surface_thresh).detach().float() + offset = torch.ones_like(center) * self.upper_thresh + center = center + offset * selected.unsqueeze(-1) + return center, pred_indices + + def _assign_primitive_line_targets(self, + point_mask: torch.Tensor, + point_offset: torch.Tensor, + point_sem: torch.Tensor, + coords: torch.Tensor, + indices: torch.Tensor, + cls_label: int, + point2line_matching: torch.Tensor, + corners: torch.Tensor, + center_axises: torch.Tensor, + with_yaw: bool, + mode: str = 'bottom') -> Tuple: + """Generate targets of line primitive. + + Args: + point_mask (torch.Tensor): Tensor to store the ground + truth of mask. + point_offset (torch.Tensor): Tensor to store the ground + truth of offset. + point_sem (torch.Tensor): Tensor to store the ground + truth of semantic. + coords (torch.Tensor): The selected points. + indices (torch.Tensor): Indices of the selected points. + cls_label (int): Class label of the ground truth bounding box. + point2line_matching (torch.Tensor): Flag indicate that + matching line of each point. + corners (torch.Tensor): Corners of the ground truth bounding box. + center_axises (list[int]): Indicate in which axis the line center + should be refined. + with_yaw (Bool): Whether the boundind box is with rotation. + mode (str, optional): Specify which line should be matched, + available mode are ('bottom', 'top', 'left', 'right'). + Defaults to 'bottom'. + + Returns: + Tuple: Targets of the line primitive. 
+ """ + corners_pair = { + 'bottom': [[0, 3], [4, 7], [0, 4], [3, 7]], + 'top': [[1, 2], [5, 6], [1, 5], [2, 6]], + 'left': [[0, 1], [3, 2]], + 'right': [[4, 5], [7, 6]] + } + corners_pair = corners_pair[mode] + assert len(corners_pair) == len(point2line_matching) == len( + center_axises) + for line_select, center_axis, pair_index in zip( + point2line_matching, center_axises, corners_pair): + if line_select.sum() > self.train_cfg['num_point_line']: + point_mask[indices[line_select]] = 1.0 + + if with_yaw: + line_center = (corners[pair_index[0]] + + corners[pair_index[1]]) / 2 + else: + line_center = coords[line_select].mean(dim=0) + line_center[center_axis] = corners[:, center_axis].mean() + + point_offset[indices[line_select]] = \ + line_center - coords[line_select] + point_sem[indices[line_select]] = \ + point_sem.new_tensor([line_center[0], line_center[1], + line_center[2], cls_label]) + return point_mask, point_offset, point_sem + + def _assign_primitive_surface_targets(self, + point_mask: torch.Tensor, + point_offset: torch.Tensor, + point_sem: torch.Tensor, + coords: torch.Tensor, + indices: torch.Tensor, + cls_label: int, + corners: torch.Tensor, + with_yaw: bool, + mode: str = 'bottom') -> Tuple: + """Generate targets for primitive z and primitive xy. + + Args: + point_mask (torch.Tensor): Tensor to store the ground + truth of mask. + point_offset (torch.Tensor): Tensor to store the ground + truth of offset. + point_sem (torch.Tensor): Tensor to store the ground + truth of semantic. + coords (torch.Tensor): The selected points. + indices (torch.Tensor): Indices of the selected points. + cls_label (int): Class label of the ground truth bounding box. + corners (torch.Tensor): Corners of the ground truth bounding box. + with_yaw (Bool): Whether the boundind box is with rotation. + mode (str, optional): Specify which line should be matched, + available mode are ('bottom', 'top', 'left', 'right', + 'front', 'back'). + Defaults to 'bottom'. + + Returns: + Tuple: Targets of the center primitive. 
+ """ + point_mask[indices] = 1.0 + corners_pair = { + 'bottom': [0, 7], + 'top': [1, 6], + 'left': [0, 1], + 'right': [4, 5], + 'front': [0, 1], + 'back': [3, 2] + } + pair_index = corners_pair[mode] + if self.primitive_mode == 'z': + if with_yaw: + center = (corners[pair_index[0]] + + corners[pair_index[1]]) / 2.0 + center[2] = coords[:, 2].mean() + point_sem[indices] = point_sem.new_tensor([ + center[0], center[1], + center[2], (corners[4] - corners[0]).norm(), + (corners[3] - corners[0]).norm(), cls_label + ]) + else: + center = point_mask.new_tensor([ + corners[:, 0].mean(), corners[:, 1].mean(), + coords[:, 2].mean() + ]) + point_sem[indices] = point_sem.new_tensor([ + center[0], center[1], center[2], + corners[:, 0].max() - corners[:, 0].min(), + corners[:, 1].max() - corners[:, 1].min(), cls_label + ]) + elif self.primitive_mode == 'xy': + if with_yaw: + center = coords.mean(0) + center[2] = (corners[pair_index[0], 2] + + corners[pair_index[1], 2]) / 2.0 + point_sem[indices] = point_sem.new_tensor([ + center[0], center[1], center[2], + corners[pair_index[1], 2] - corners[pair_index[0], 2], + cls_label + ]) + else: + center = point_mask.new_tensor([ + coords[:, 0].mean(), coords[:, 1].mean(), + corners[:, 2].mean() + ]) + point_sem[indices] = point_sem.new_tensor([ + center[0], center[1], center[2], + corners[:, 2].max() - corners[:, 2].min(), cls_label + ]) + point_offset[indices] = center - coords + return point_mask, point_offset, point_sem + + def _get_plane_fomulation(self, vector1: torch.Tensor, + vector2: torch.Tensor, + point: torch.Tensor) -> torch.Tensor: + """Compute the equation of the plane. + + Args: + vector1 (torch.Tensor): Parallel vector of the plane. + vector2 (torch.Tensor): Parallel vector of the plane. + point (torch.Tensor): Point on the plane. + + Returns: + torch.Tensor: Equation of the plane. + """ + surface_norm = torch.cross(vector1, vector2) + surface_dis = -torch.dot(surface_norm, point) + plane = point.new_tensor( + [surface_norm[0], surface_norm[1], surface_norm[2], surface_dis]) + return plane diff --git a/mmdet3d/models/roi_heads/part_aggregation_roi_head.py b/mmdet3d/models/roi_heads/part_aggregation_roi_head.py new file mode 100755 index 0000000..82816b3 --- /dev/null +++ b/mmdet3d/models/roi_heads/part_aggregation_roi_head.py @@ -0,0 +1,379 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Tuple + +from mmdet.models.task_modules import AssignResult, SamplingResult +from mmengine import ConfigDict +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.structures import bbox3d2roi +from mmdet3d.utils import InstanceList +from ...structures.det3d_data_sample import SampleList +from .base_3droi_head import Base3DRoIHead + + +@MODELS.register_module() +class PartAggregationROIHead(Base3DRoIHead): + """Part aggregation roi head for PartA2. + + Args: + semantic_head (ConfigDict): Config of semantic head. + num_classes (int): The number of classes. + seg_roi_extractor (ConfigDict): Config of seg_roi_extractor. + bbox_roi_extractor (ConfigDict): Config of part_roi_extractor. + bbox_head (ConfigDict): Config of bbox_head. + train_cfg (ConfigDict): Training config. + test_cfg (ConfigDict): Testing config. 
+ """ + + def __init__(self, + semantic_head: dict, + num_classes: int = 3, + seg_roi_extractor: dict = None, + bbox_head: dict = None, + bbox_roi_extractor: dict = None, + train_cfg: dict = None, + test_cfg: dict = None, + init_cfg: dict = None) -> None: + super(PartAggregationROIHead, self).__init__( + bbox_head=bbox_head, + bbox_roi_extractor=bbox_roi_extractor, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + self.num_classes = num_classes + assert semantic_head is not None + self.init_seg_head(seg_roi_extractor, semantic_head) + + def init_seg_head(self, seg_roi_extractor: dict, + semantic_head: dict) -> None: + """Initialize semantic head and seg roi extractor. + + Args: + seg_roi_extractor (dict): Config of seg + roi extractor. + semantic_head (dict): Config of semantic head. + """ + self.semantic_head = MODELS.build(semantic_head) + self.seg_roi_extractor = MODELS.build(seg_roi_extractor) + + @property + def with_semantic(self): + """bool: whether the head has semantic branch""" + return hasattr(self, + 'semantic_head') and self.semantic_head is not None + + def _bbox_forward_train(self, feats_dict: Dict, voxels_dict: Dict, + sampling_results: List[SamplingResult]) -> Dict: + """Forward training function of roi_extractor and bbox_head. + + Args: + feats_dict (dict): Contains features from the first stage. + voxels_dict (dict): Contains information of voxels. + sampling_results (:obj:`SamplingResult`): Sampled results used + for training. + + Returns: + dict: Forward results including losses and predictions. + """ + rois = bbox3d2roi([res.bboxes for res in sampling_results]) + bbox_results = self._bbox_forward(feats_dict, voxels_dict, rois) + + bbox_targets = self.bbox_head.get_targets(sampling_results, + self.train_cfg) + loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], + bbox_results['bbox_pred'], rois, + *bbox_targets) + + bbox_results.update(loss_bbox=loss_bbox) + return bbox_results + + def _assign_and_sample( + self, rpn_results_list: InstanceList, + batch_gt_instances_3d: InstanceList, + batch_gt_instances_ignore: InstanceList) -> List[SamplingResult]: + """Assign and sample proposals for training. + + Args: + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_gt_instances_ignore (list): Ignore instances of gt bboxes. + + Returns: + list[:obj:`SamplingResult`]: Sampled results of each training + sample. 
+ """ + sampling_results = [] + # bbox assign + for batch_idx in range(len(rpn_results_list)): + cur_proposal_list = rpn_results_list[batch_idx] + cur_boxes = cur_proposal_list['bboxes_3d'] + cur_labels_3d = cur_proposal_list['labels_3d'] + cur_gt_instances_3d = batch_gt_instances_3d[batch_idx] + cur_gt_instances_ignore = batch_gt_instances_ignore[batch_idx] + cur_gt_instances_3d.bboxes_3d = cur_gt_instances_3d.\ + bboxes_3d.tensor + cur_gt_bboxes = cur_gt_instances_3d.bboxes_3d.to(cur_boxes.device) + cur_gt_labels = cur_gt_instances_3d.labels_3d + + batch_num_gts = 0 + # 0 is bg + batch_gt_indis = cur_gt_labels.new_full((len(cur_boxes), ), 0) + batch_max_overlaps = cur_boxes.tensor.new_zeros(len(cur_boxes)) + # -1 is bg + batch_gt_labels = cur_gt_labels.new_full((len(cur_boxes), ), -1) + + # each class may have its own assigner + if isinstance(self.bbox_assigner, list): + for i, assigner in enumerate(self.bbox_assigner): + gt_per_cls = (cur_gt_labels == i) + pred_per_cls = (cur_labels_3d == i) + cur_assign_res = assigner.assign( + cur_proposal_list[pred_per_cls], + cur_gt_instances_3d[gt_per_cls], + cur_gt_instances_ignore) + # gather assign_results in different class into one result + batch_num_gts += cur_assign_res.num_gts + # gt inds (1-based) + gt_inds_arange_pad = gt_per_cls.nonzero( + as_tuple=False).view(-1) + 1 + # pad 0 for indice unassigned + gt_inds_arange_pad = F.pad( + gt_inds_arange_pad, (1, 0), mode='constant', value=0) + # pad -1 for indice ignore + gt_inds_arange_pad = F.pad( + gt_inds_arange_pad, (1, 0), mode='constant', value=-1) + # convert to 0~gt_num+2 for indices + gt_inds_arange_pad += 1 + # now 0 is bg, >1 is fg in batch_gt_indis + batch_gt_indis[pred_per_cls] = gt_inds_arange_pad[ + cur_assign_res.gt_inds + 1] - 1 + batch_max_overlaps[ + pred_per_cls] = cur_assign_res.max_overlaps + batch_gt_labels[pred_per_cls] = cur_assign_res.labels + + assign_result = AssignResult(batch_num_gts, batch_gt_indis, + batch_max_overlaps, + batch_gt_labels) + else: # for single class + assign_result = self.bbox_assigner.assign( + cur_proposal_list, cur_gt_instances_3d, + cur_gt_instances_ignore) + # sample boxes + sampling_result = self.bbox_sampler.sample(assign_result, + cur_boxes.tensor, + cur_gt_bboxes, + cur_gt_labels) + sampling_results.append(sampling_result) + return sampling_results + + def _semantic_forward_train(self, feats_dict: dict, voxel_dict: dict, + batch_gt_instances_3d: InstanceList) -> Dict: + """Train semantic head. + + Args: + feats_dict (dict): Contains features from the first stage. + voxel_dict (dict): Contains information of voxels. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + + Returns: + dict: Segmentation results including losses + """ + semantic_results = self.semantic_head(feats_dict['seg_features']) + semantic_targets = self.semantic_head.get_targets( + voxel_dict, batch_gt_instances_3d) + loss_semantic = self.semantic_head.loss(semantic_results, + semantic_targets) + semantic_results.update(loss_semantic=loss_semantic) + return semantic_results + + def predict(self, + feats_dict: Dict, + rpn_results_list: InstanceList, + batch_data_samples: SampleList, + rescale: bool = False, + **kwargs) -> InstanceList: + """Perform forward propagation of the roi head and predict detection + results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. 
+ rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + assert self.with_bbox, 'Bbox head must be implemented in PartA2.' + assert self.with_semantic, 'Semantic head must be implemented' \ + ' in PartA2.' + + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + voxels_dict = feats_dict.pop('voxels_dict') + # TODO: Split predict semantic and bbox + results_list = self.predict_bbox(feats_dict, voxels_dict, + batch_input_metas, rpn_results_list, + self.test_cfg) + return results_list + + def predict_bbox(self, feats_dict: Dict, voxel_dict: Dict, + batch_input_metas: List[dict], + rpn_results_list: InstanceList, + test_cfg: ConfigDict) -> InstanceList: + """Perform forward propagation of the bbox head and predict detection + results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + voxel_dict (dict): Contains information of voxels. + batch_input_metas (list[dict], Optional): Batch image meta info. + Defaults to None. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + test_cfg (Config): Test config. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + semantic_results = self.semantic_head(feats_dict['seg_features']) + feats_dict.update(semantic_results) + rois = bbox3d2roi( + [res['bboxes_3d'].tensor for res in rpn_results_list]) + labels_3d = [res['labels_3d'] for res in rpn_results_list] + cls_preds = [res['cls_preds'] for res in rpn_results_list] + bbox_results = self._bbox_forward(feats_dict, voxel_dict, rois) + + bbox_list = self.bbox_head.get_results(rois, bbox_results['cls_score'], + bbox_results['bbox_pred'], + labels_3d, cls_preds, + batch_input_metas, test_cfg) + return bbox_list + + def _bbox_forward(self, feats_dict: Dict, voxel_dict: Dict, + rois: Tensor) -> Dict: + """Forward function of roi_extractor and bbox_head used in both + training and testing. + + Args: + feats_dict (dict): Contains features from the first stage. + voxel_dict (dict): Contains information of voxels. + rois (Tensor): Roi boxes. + + Returns: + dict: Contains predictions of bbox_head and + features of roi_extractor. 
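Both `predict_bbox` above and the training path funnel per-sample proposals through `bbox3d2roi`, which concatenates the box tensors and prepends the sample index as a leading column. A quick usage sketch with made-up 7-DoF boxes.

import torch
from mmdet3d.structures import bbox3d2roi

# Two samples with 2 and 1 proposals respectively (x, y, z, dx, dy, dz, yaw).
boxes_sample0 = torch.rand(2, 7)
boxes_sample1 = torch.rand(1, 7)

rois = bbox3d2roi([boxes_sample0, boxes_sample1])
print(rois.shape)    # torch.Size([3, 8])
print(rois[:, 0])    # tensor([0., 0., 1.]) -> leading column is the sample index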
+ """ + pooled_seg_feats = self.seg_roi_extractor(feats_dict['seg_features'], + voxel_dict['voxel_centers'], + voxel_dict['coors'][..., + 0], rois) + pooled_part_feats = self.bbox_roi_extractor( + feats_dict['part_feats'], voxel_dict['voxel_centers'], + voxel_dict['coors'][..., 0], rois) + cls_score, bbox_pred = self.bbox_head(pooled_seg_feats, + pooled_part_feats) + + bbox_results = dict( + cls_score=cls_score, + bbox_pred=bbox_pred, + pooled_seg_feats=pooled_seg_feats, + pooled_part_feats=pooled_part_feats) + return bbox_results + + def loss(self, feats_dict: Dict, rpn_results_list: InstanceList, + batch_data_samples: SampleList, **kwargs) -> dict: + """Perform forward propagation and loss calculation of the detection + roi on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + dict[str, Tensor]: A dictionary of loss components + """ + assert len(rpn_results_list) == len(batch_data_samples) + losses = dict() + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + voxels_dict = feats_dict.pop('voxels_dict') + for data_sample in batch_data_samples: + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + if 'ignored_instances' in data_sample: + batch_gt_instances_ignore.append(data_sample.ignored_instances) + else: + batch_gt_instances_ignore.append(None) + if self.with_semantic: + semantic_results = self._semantic_forward_train( + feats_dict, voxels_dict, batch_gt_instances_3d) + losses.update(semantic_results.pop('loss_semantic')) + + sample_results = self._assign_and_sample(rpn_results_list, + batch_gt_instances_3d, + batch_gt_instances_ignore) + if self.with_bbox: + feats_dict.update(semantic_results) + bbox_results = self._bbox_forward_train(feats_dict, voxels_dict, + sample_results) + losses.update(bbox_results['loss_bbox']) + + return losses + + def _forward(self, feats_dict: dict, + rpn_results_list: InstanceList) -> Tuple: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + + Returns: + tuple: A tuple of results from roi head. + """ + voxel_dict = feats_dict.pop('voxel_dict') + semantic_results = self.semantic_head(feats_dict['seg_features']) + feats_dict.update(semantic_results) + rois = bbox3d2roi([res['bbox_3d'].tensor for res in rpn_results_list]) + bbox_results = self._bbox_forward(feats_dict, voxel_dict, rois) + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + return cls_score, bbox_pred diff --git a/mmdet3d/models/roi_heads/point_rcnn_roi_head.py b/mmdet3d/models/roi_heads/point_rcnn_roi_head.py new file mode 100755 index 0000000..454e2f3 --- /dev/null +++ b/mmdet3d/models/roi_heads/point_rcnn_roi_head.py @@ -0,0 +1,309 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Dict, Optional + +import torch +from mmdet.models.task_modules import AssignResult +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.registry import MODELS, TASK_UTILS +from mmdet3d.structures import bbox3d2roi +from mmdet3d.utils.typing_utils import InstanceList, SampleList +from .base_3droi_head import Base3DRoIHead + + +@MODELS.register_module() +class PointRCNNRoIHead(Base3DRoIHead): + """RoI head for PointRCNN. + + Args: + bbox_head (dict): Config of bbox_head. + bbox_roi_extractor (dict): Config of RoI extractor. + train_cfg (dict): Train configs. + test_cfg (dict): Test configs. + depth_normalizer (float): Normalize depth feature. + Defaults to 70.0. + init_cfg (dict, optional): Config of initialization. Defaults to None. + """ + + def __init__(self, + bbox_head: dict, + bbox_roi_extractor: dict, + train_cfg: dict, + test_cfg: dict, + depth_normalizer: dict = 70.0, + init_cfg: Optional[dict] = None) -> None: + super(PointRCNNRoIHead, self).__init__( + bbox_head=bbox_head, + bbox_roi_extractor=bbox_roi_extractor, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + self.depth_normalizer = depth_normalizer + + self.init_assigner_sampler() + + def init_mask_head(self): + """Initialize maek head.""" + pass + + def init_assigner_sampler(self): + """Initialize assigner and sampler.""" + self.bbox_assigner = None + self.bbox_sampler = None + if self.train_cfg: + if isinstance(self.train_cfg.assigner, dict): + self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner) + elif isinstance(self.train_cfg.assigner, list): + self.bbox_assigner = [ + TASK_UTILS.build(res) for res in self.train_cfg.assigner + ] + self.bbox_sampler = TASK_UTILS.build(self.train_cfg.sampler) + + def loss(self, feats_dict: Dict, rpn_results_list: InstanceList, + batch_data_samples: SampleList, **kwargs) -> dict: + """Perform forward propagation and loss calculation of the detection + roi on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. 
+ + Returns: + dict[str, Tensor]: A dictionary of loss components + """ + features = feats_dict['fp_features'] + fp_points = feats_dict['fp_points'] + point_cls_preds = feats_dict['points_cls_preds'] + sem_scores = point_cls_preds.sigmoid() + point_scores = sem_scores.max(-1)[0] + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + for data_sample in batch_data_samples: + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + if 'ignored_instances' in data_sample: + batch_gt_instances_ignore.append(data_sample.ignored_instances) + else: + batch_gt_instances_ignore.append(None) + sample_results = self._assign_and_sample(rpn_results_list, + batch_gt_instances_3d, + batch_gt_instances_ignore) + + # concat the depth, semantic features and backbone features + features = features.transpose(1, 2).contiguous() + point_depths = fp_points.norm(dim=2) / self.depth_normalizer - 0.5 + features_list = [ + point_scores.unsqueeze(2), + point_depths.unsqueeze(2), features + ] + features = torch.cat(features_list, dim=2) + + bbox_results = self._bbox_forward_train(features, fp_points, + sample_results) + losses = dict() + losses.update(bbox_results['loss_bbox']) + + return losses + + def predict(self, + feats_dict: Dict, + rpn_results_list: InstanceList, + batch_data_samples: SampleList, + rescale: bool = False, + **kwargs) -> InstanceList: + """Perform forward propagation of the roi head and predict detection + results on the features of the upstream network. + + Args: + feats_dict (dict): Contains features from the first stage. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. + """ + rois = bbox3d2roi( + [res['bboxes_3d'].tensor for res in rpn_results_list]) + labels_3d = [res['labels_3d'] for res in rpn_results_list] + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + fp_features = feats_dict['fp_features'] + fp_points = feats_dict['fp_points'] + point_cls_preds = feats_dict['points_cls_preds'] + sem_scores = point_cls_preds.sigmoid() + point_scores = sem_scores.max(-1)[0] + + features = fp_features.transpose(1, 2).contiguous() + point_depths = fp_points.norm(dim=2) / self.depth_normalizer - 0.5 + features_list = [ + point_scores.unsqueeze(2), + point_depths.unsqueeze(2), features + ] + + features = torch.cat(features_list, dim=2) + batch_size = features.shape[0] + bbox_results = self._bbox_forward(features, fp_points, batch_size, + rois) + object_score = bbox_results['cls_score'].sigmoid() + bbox_list = self.bbox_head.get_results( + rois, + object_score, + bbox_results['bbox_pred'], + labels_3d, + batch_input_metas, + cfg=self.test_cfg) + + return bbox_list + + def _bbox_forward_train(self, features: Tensor, points: Tensor, + sampling_results: SampleList) -> dict: + """Forward training function of roi_extractor and bbox_head. 
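The second-stage feature built in `loss`/`predict` above augments the backbone features with two extra scalar channels per point: the max semantic score and a depth normalised by `depth_normalizer`. A standalone sketch with toy shapes (B = 2, N = 4 points, C = 8 channels assumed) and the default normaliser of 70.0.

import torch

B, N, C = 2, 4, 8
depth_normalizer = 70.0

fp_points = torch.rand(B, N, 3) * 70       # xyz coordinates from the point backbone
fp_features = torch.rand(B, C, N)          # backbone features, channels first
point_cls_preds = torch.randn(B, N, 3)     # per-point class logits

point_scores = point_cls_preds.sigmoid().max(-1)[0]             # (B, N)
point_depths = fp_points.norm(dim=2) / depth_normalizer - 0.5   # roughly in [-0.5, 0.5]

features = fp_features.transpose(1, 2).contiguous()             # (B, N, C)
features = torch.cat(
    [point_scores.unsqueeze(2), point_depths.unsqueeze(2), features], dim=2)
print(features.shape)   # torch.Size([2, 4, 10]) -> C + 2 channels per point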
+ + Args: + features (torch.Tensor): Backbone features with depth and \ + semantic features. + points (torch.Tensor): Point cloud. + sampling_results (:obj:`SamplingResult`): Sampled results used + for training. + + Returns: + dict: Forward results including losses and predictions. + """ + rois = bbox3d2roi([res.bboxes for res in sampling_results]) + batch_size = features.shape[0] + bbox_results = self._bbox_forward(features, points, batch_size, rois) + bbox_targets = self.bbox_head.get_targets(sampling_results, + self.train_cfg) + + loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], + bbox_results['bbox_pred'], rois, + *bbox_targets) + + bbox_results.update(loss_bbox=loss_bbox) + return bbox_results + + def _bbox_forward(self, features: Tensor, points: Tensor, batch_size: int, + rois: Tensor) -> dict: + """Forward function of roi_extractor and bbox_head used in both + training and testing. + + Args: + features (torch.Tensor): Backbone features with depth and + semantic features. + points (torch.Tensor): Point cloud. + batch_size (int): Batch size. + rois (torch.Tensor): RoI boxes. + + Returns: + dict: Contains predictions of bbox_head and + features of roi_extractor. + """ + pooled_point_feats = self.bbox_roi_extractor(features, points, + batch_size, rois) + + cls_score, bbox_pred = self.bbox_head(pooled_point_feats) + bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred) + return bbox_results + + def _assign_and_sample( + self, rpn_results_list: InstanceList, + batch_gt_instances_3d: InstanceList, + batch_gt_instances_ignore: InstanceList) -> SampleList: + """Assign and sample proposals for training. + + Args: + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + batch_gt_instances_ignore (list[:obj:`InstanceData`]): Ignore + instances of gt bboxes. + + Returns: + list[:obj:`SamplingResult`]: Sampled results of each training + sample. 
+ """ + sampling_results = [] + # bbox assign + for batch_idx in range(len(rpn_results_list)): + cur_proposal_list = rpn_results_list[batch_idx] + cur_boxes = cur_proposal_list['bboxes_3d'] + cur_labels_3d = cur_proposal_list['labels_3d'] + cur_gt_instances_3d = batch_gt_instances_3d[batch_idx] + cur_gt_instances_3d.bboxes_3d = cur_gt_instances_3d.\ + bboxes_3d.tensor + cur_gt_instances_ignore = batch_gt_instances_ignore[batch_idx] + cur_gt_bboxes = cur_gt_instances_3d.bboxes_3d.to(cur_boxes.device) + cur_gt_labels = cur_gt_instances_3d.labels_3d + batch_num_gts = 0 + # 0 is bg + batch_gt_indis = cur_gt_labels.new_full((len(cur_boxes), ), 0) + batch_max_overlaps = cur_boxes.tensor.new_zeros(len(cur_boxes)) + # -1 is bg + batch_gt_labels = cur_gt_labels.new_full((len(cur_boxes), ), -1) + + # each class may have its own assigner + if isinstance(self.bbox_assigner, list): + for i, assigner in enumerate(self.bbox_assigner): + gt_per_cls = (cur_gt_labels == i) + pred_per_cls = (cur_labels_3d == i) + cur_assign_res = assigner.assign( + cur_proposal_list[pred_per_cls], + cur_gt_instances_3d[gt_per_cls], + cur_gt_instances_ignore) + # gather assign_results in different class into one result + batch_num_gts += cur_assign_res.num_gts + # gt inds (1-based) + gt_inds_arange_pad = gt_per_cls.nonzero( + as_tuple=False).view(-1) + 1 + # pad 0 for indice unassigned + gt_inds_arange_pad = F.pad( + gt_inds_arange_pad, (1, 0), mode='constant', value=0) + # pad -1 for indice ignore + gt_inds_arange_pad = F.pad( + gt_inds_arange_pad, (1, 0), mode='constant', value=-1) + # convert to 0~gt_num+2 for indices + gt_inds_arange_pad += 1 + # now 0 is bg, >1 is fg in batch_gt_indis + batch_gt_indis[pred_per_cls] = gt_inds_arange_pad[ + cur_assign_res.gt_inds + 1] - 1 + batch_max_overlaps[ + pred_per_cls] = cur_assign_res.max_overlaps + batch_gt_labels[pred_per_cls] = cur_assign_res.labels + + assign_result = AssignResult(batch_num_gts, batch_gt_indis, + batch_max_overlaps, + batch_gt_labels) + else: # for single class + assign_result = self.bbox_assigner.assign( + cur_proposal_list, cur_gt_instances_3d, + cur_gt_instances_ignore) + + # sample boxes + sampling_result = self.bbox_sampler.sample(assign_result, + cur_boxes.tensor, + cur_gt_bboxes, + cur_gt_labels) + sampling_results.append(sampling_result) + return sampling_results diff --git a/mmdet3d/models/roi_heads/pv_rcnn_roi_head.py b/mmdet3d/models/roi_heads/pv_rcnn_roi_head.py new file mode 100755 index 0000000..1c60111 --- /dev/null +++ b/mmdet3d/models/roi_heads/pv_rcnn_roi_head.py @@ -0,0 +1,312 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +import torch +from mmdet.models.task_modules import AssignResult +from mmdet.models.task_modules.samplers import SamplingResult +from torch.nn import functional as F + +from mmdet3d.models.roi_heads.base_3droi_head import Base3DRoIHead +from mmdet3d.registry import MODELS +from mmdet3d.structures import bbox3d2roi +from mmdet3d.structures.det3d_data_sample import SampleList +from mmdet3d.utils import InstanceList + + +@MODELS.register_module() +class PVRCNNRoiHead(Base3DRoIHead): + """RoI head for PV-RCNN. + + Args: + num_classes (int): The number of classes. Defaults to 3. + semantic_head (dict, optional): Config of semantic head. + Defaults to None. + bbox_roi_extractor (dict, optional): Config of roi_extractor. + Defaults to None. + bbox_head (dict, optional): Config of bbox_head. Defaults to None. + train_cfg (dict, optional): Train config of model. + Defaults to None. 
+ test_cfg (dict, optional): Train config of model. + Defaults to None. + init_cfg (dict, optional): Initialize config of + model. Defaults to None. + """ + + def __init__(self, + num_classes: int = 3, + semantic_head: Optional[dict] = None, + bbox_roi_extractor: Optional[dict] = None, + bbox_head: Optional[dict] = None, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super(PVRCNNRoiHead, self).__init__( + bbox_head=bbox_head, + bbox_roi_extractor=bbox_roi_extractor, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + self.num_classes = num_classes + self.semantic_head = MODELS.build(semantic_head) + + self.init_assigner_sampler() + + @property + def with_semantic(self): + """bool: whether the head has semantic branch""" + return hasattr(self, + 'semantic_head') and self.semantic_head is not None + + def loss(self, feats_dict: dict, rpn_results_list: InstanceList, + batch_data_samples: SampleList, **kwargs) -> dict: + """Training forward function of PVRCNNROIHead. + + Args: + feats_dict (dict): Contains point-wise features. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + dict: losses from each head. + + - loss_semantic (torch.Tensor): loss of semantic head. + - loss_bbox (torch.Tensor): loss of bboxes. + - loss_cls (torch.Tensor): loss of object classification. + - loss_corner (torch.Tensor): loss of bboxes corners. + """ + losses = dict() + batch_gt_instances_3d = [] + batch_gt_instances_ignore = [] + for data_sample in batch_data_samples: + batch_gt_instances_3d.append(data_sample.gt_instances_3d) + if 'ignored_instances' in data_sample: + batch_gt_instances_ignore.append(data_sample.ignored_instances) + else: + batch_gt_instances_ignore.append(None) + if self.with_semantic: + semantic_results = self._semantic_forward_train( + feats_dict['keypoint_features'], feats_dict['keypoints'], + batch_gt_instances_3d) + losses['loss_semantic'] = semantic_results['loss_semantic'] + + sample_results = self._assign_and_sample(rpn_results_list, + batch_gt_instances_3d) + if self.with_bbox: + bbox_results = self._bbox_forward_train( + semantic_results['seg_preds'], + feats_dict['fusion_keypoint_features'], + feats_dict['keypoints'], sample_results) + losses.update(bbox_results['loss_bbox']) + + return losses + + def predict(self, feats_dict: dict, rpn_results_list: InstanceList, + batch_data_samples: SampleList, **kwargs) -> SampleList: + """Perform forward propagation of the roi head and predict detection + results on the features of the upstream network. + + Args: + feats_dict (dict): Contains point-wise features. + rpn_results_list (List[:obj:`InstanceData`]): Detection results + of rpn head. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + samples. It usually includes information such as + `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`. + + Returns: + list[:obj:`InstanceData`]: Detection results of each sample + after the post process. + Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (BaseInstance3DBoxes): Prediction of bboxes, + contains a tensor with shape (num_instances, C), where + C >= 7. 
+ """ + assert self.with_bbox, 'Bbox head must be implemented.' + assert self.with_semantic, 'Semantic head must be implemented.' + + batch_input_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + + semantic_results = self.semantic_head(feats_dict['keypoint_features']) + point_features = feats_dict[ + 'fusion_keypoint_features'] * semantic_results[ + 'seg_preds'].sigmoid().max( + dim=-1, keepdim=True).values + rois = bbox3d2roi( + [res['bboxes_3d'].tensor for res in rpn_results_list]) + labels_3d = [res['labels_3d'] for res in rpn_results_list] + bbox_results = self._bbox_forward(point_features, + feats_dict['keypoints'], rois) + + results_list = self.bbox_head.get_results(rois, + bbox_results['bbox_scores'], + bbox_results['bbox_reg'], + labels_3d, batch_input_metas, + self.test_cfg) + return results_list + + def _bbox_forward_train(self, seg_preds: torch.Tensor, + keypoint_features: torch.Tensor, + keypoints: torch.Tensor, + sampling_results: SamplingResult) -> dict: + """Forward training function of roi_extractor and bbox_head. + + Args: + seg_preds (torch.Tensor): Point-wise semantic features. + keypoint_features (torch.Tensor): key points features + from points encoder. + keypoints (torch.Tensor): Coordinate of key points. + sampling_results (:obj:`SamplingResult`): Sampled results used + for training. + + Returns: + dict: Forward results including losses and predictions. + """ + rois = bbox3d2roi([res.bboxes for res in sampling_results]) + keypoint_features = keypoint_features * seg_preds.sigmoid().max( + dim=-1, keepdim=True).values + bbox_results = self._bbox_forward(keypoint_features, keypoints, rois) + + bbox_targets = self.bbox_head.get_targets(sampling_results, + self.train_cfg) + loss_bbox = self.bbox_head.loss(bbox_results['bbox_scores'], + bbox_results['bbox_reg'], rois, + *bbox_targets) + + bbox_results.update(loss_bbox=loss_bbox) + return bbox_results + + def _bbox_forward(self, keypoint_features: torch.Tensor, + keypoints: torch.Tensor, rois: torch.Tensor) -> dict: + """Forward function of roi_extractor and bbox_head used in both + training and testing. + + Args: + rois (Tensor): Roi boxes. + keypoint_features (torch.Tensor): key points features + from points encoder. + keypoints (torch.Tensor): Coordinate of key points. + rois (Tensor): Roi boxes. + + Returns: + dict: Contains predictions of bbox_head and + features of roi_extractor. + """ + pooled_keypoint_features = self.bbox_roi_extractor( + keypoint_features, keypoints[..., 1:], keypoints[..., 0].int(), + rois) + bbox_score, bbox_reg = self.bbox_head(pooled_keypoint_features) + + bbox_results = dict(bbox_scores=bbox_score, bbox_reg=bbox_reg) + return bbox_results + + def _assign_and_sample( + self, proposal_list: InstanceList, + batch_gt_instances_3d: InstanceList) -> List[SamplingResult]: + """Assign and sample proposals for training. + + Args: + proposal_list (list[:obj:`InstancesData`]): Proposals produced by + rpn head. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + + Returns: + list[:obj:`SamplingResult`]: Sampled results of each training + sample. 
+ """ + sampling_results = [] + # bbox assign + for batch_idx in range(len(proposal_list)): + cur_proposal_list = proposal_list[batch_idx] + cur_boxes = cur_proposal_list['bboxes_3d'] + cur_labels_3d = cur_proposal_list['labels_3d'] + cur_gt_instances_3d = batch_gt_instances_3d[batch_idx] + cur_gt_instances_3d.bboxes_3d = cur_gt_instances_3d.\ + bboxes_3d.tensor + cur_gt_bboxes = batch_gt_instances_3d[batch_idx].bboxes_3d.to( + cur_boxes.device) + cur_gt_labels = batch_gt_instances_3d[batch_idx].labels_3d + + batch_num_gts = 0 + # 0 is bg + batch_gt_indis = cur_gt_labels.new_full((len(cur_boxes), ), 0) + batch_max_overlaps = cur_boxes.tensor.new_zeros(len(cur_boxes)) + # -1 is bg + batch_gt_labels = cur_gt_labels.new_full((len(cur_boxes), ), -1) + + # each class may have its own assigner + if isinstance(self.bbox_assigner, list): + for i, assigner in enumerate(self.bbox_assigner): + gt_per_cls = (cur_gt_labels == i) + pred_per_cls = (cur_labels_3d == i) + cur_assign_res = assigner.assign( + cur_proposal_list[pred_per_cls], + cur_gt_instances_3d[gt_per_cls]) + # gather assign_results in different class into one result + batch_num_gts += cur_assign_res.num_gts + # gt inds (1-based) + gt_inds_arange_pad = gt_per_cls.nonzero( + as_tuple=False).view(-1) + 1 + # pad 0 for indice unassigned + gt_inds_arange_pad = F.pad( + gt_inds_arange_pad, (1, 0), mode='constant', value=0) + # pad -1 for indice ignore + gt_inds_arange_pad = F.pad( + gt_inds_arange_pad, (1, 0), mode='constant', value=-1) + # convert to 0~gt_num+2 for indices + gt_inds_arange_pad += 1 + # now 0 is bg, >1 is fg in batch_gt_indis + batch_gt_indis[pred_per_cls] = gt_inds_arange_pad[ + cur_assign_res.gt_inds + 1] - 1 + batch_max_overlaps[ + pred_per_cls] = cur_assign_res.max_overlaps + batch_gt_labels[pred_per_cls] = cur_assign_res.labels + + assign_result = AssignResult(batch_num_gts, batch_gt_indis, + batch_max_overlaps, + batch_gt_labels) + else: # for single class + assign_result = self.bbox_assigner.assign( + cur_proposal_list, cur_gt_instances_3d) + # sample boxes + sampling_result = self.bbox_sampler.sample(assign_result, + cur_boxes.tensor, + cur_gt_bboxes, + cur_gt_labels) + sampling_results.append(sampling_result) + return sampling_results + + def _semantic_forward_train(self, keypoint_features: torch.Tensor, + keypoints: torch.Tensor, + batch_gt_instances_3d: InstanceList) -> dict: + """Train semantic head. + + Args: + keypoint_features (torch.Tensor): key points features + from points encoder. + keypoints (torch.Tensor): Coordinate of key points. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + + Returns: + dict: Segmentation results including losses + """ + semantic_results = self.semantic_head(keypoint_features) + semantic_targets = self.semantic_head.get_targets( + keypoints, batch_gt_instances_3d) + loss_semantic = self.semantic_head.loss(semantic_results, + semantic_targets) + semantic_results.update(loss_semantic) + return semantic_results diff --git a/mmdet3d/models/roi_heads/roi_extractors/__init__.py b/mmdet3d/models/roi_heads/roi_extractors/__init__.py new file mode 100755 index 0000000..f10e717 --- /dev/null +++ b/mmdet3d/models/roi_heads/roi_extractors/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmdet.models.roi_heads.roi_extractors import SingleRoIExtractor + +from .batch_roigridpoint_extractor import Batch3DRoIGridExtractor +from .single_roiaware_extractor import Single3DRoIAwareExtractor +from .single_roipoint_extractor import Single3DRoIPointExtractor + +__all__ = [ + 'SingleRoIExtractor', 'Single3DRoIAwareExtractor', + 'Single3DRoIPointExtractor', 'Batch3DRoIGridExtractor' +] diff --git a/mmdet3d/models/roi_heads/roi_extractors/batch_roigridpoint_extractor.py b/mmdet3d/models/roi_heads/roi_extractors/batch_roigridpoint_extractor.py new file mode 100755 index 0000000..6d4825f --- /dev/null +++ b/mmdet3d/models/roi_heads/roi_extractors/batch_roigridpoint_extractor.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS +from mmdet3d.structures.bbox_3d import rotation_3d_in_axis + + +@MODELS.register_module() +class Batch3DRoIGridExtractor(BaseModule): + """Grid point wise roi-aware Extractor. + + Args: + grid_size (int): The number of grid points in a roi bbox. + Defaults to 6. + roi_layer (dict, optional): Config of sa module to get + grid points features. Defaults to None. + init_cfg (dict, optional): Initialize config of + model. Defaults to None. + """ + + def __init__(self, + grid_size: int = 6, + roi_layer: dict = None, + init_cfg: dict = None) -> None: + super(Batch3DRoIGridExtractor, self).__init__(init_cfg=init_cfg) + self.roi_grid_pool_layer = MODELS.build(roi_layer) + self.grid_size = grid_size + + def forward(self, feats: torch.Tensor, coordinate: torch.Tensor, + batch_inds: torch.Tensor, rois: torch.Tensor) -> torch.Tensor: + """Forward roi extractor to extract grid points feature. + + Args: + feats (torch.Tensor): Key points features. + coordinate (torch.Tensor): Key points coordinates. + batch_inds (torch.Tensor): Input batch indexes. + rois (torch.Tensor): Detection results of rpn head. + + Returns: + torch.Tensor: Grid points features. + """ + batch_size = int(batch_inds.max()) + 1 + + xyz = coordinate + xyz_batch_cnt = xyz.new_zeros(batch_size).int() + for k in range(batch_size): + xyz_batch_cnt[k] = (batch_inds == k).sum() + + rois_batch_inds = rois[:, 0].int() + # (N1+N2+..., 6x6x6, 3) + roi_grid = self.get_dense_grid_points(rois[:, 1:]) + + new_xyz = roi_grid.view(-1, 3) + new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int() + for k in range(batch_size): + new_xyz_batch_cnt[k] = ((rois_batch_inds == k).sum() * + roi_grid.size(1)) + pooled_points, pooled_features = self.roi_grid_pool_layer( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz.contiguous(), + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=feats.contiguous()) # (M1 + M2 ..., C) + + pooled_features = pooled_features.view(-1, self.grid_size, + self.grid_size, self.grid_size, + pooled_features.shape[-1]) + # (BxN, 6, 6, 6, C) + return pooled_features + + def get_dense_grid_points(self, rois: torch.Tensor) -> torch.Tensor: + """Get dense grid points from rois. + + Args: + rois (torch.Tensor): Detection results of rpn head. + + Returns: + torch.Tensor: Grid points coordinates. 
+ """ + rois_bbox = rois.clone() + rois_bbox[:, 2] += rois_bbox[:, 5] / 2 + faked_features = rois_bbox.new_ones( + (self.grid_size, self.grid_size, self.grid_size)) + dense_idx = faked_features.nonzero() + dense_idx = dense_idx.repeat(rois_bbox.size(0), 1, 1).float() + dense_idx = ((dense_idx + 0.5) / self.grid_size) + dense_idx[..., :3] -= 0.5 + + roi_ctr = rois_bbox[:, :3] + roi_dim = rois_bbox[:, 3:6] + roi_grid_points = dense_idx * roi_dim.view(-1, 1, 3) + roi_grid_points = rotation_3d_in_axis( + roi_grid_points, rois_bbox[:, 6], axis=2) + roi_grid_points += roi_ctr.view(-1, 1, 3) + + return roi_grid_points diff --git a/mmdet3d/models/roi_heads/roi_extractors/single_roiaware_extractor.py b/mmdet3d/models/roi_heads/roi_extractors/single_roiaware_extractor.py new file mode 100755 index 0000000..00756cc --- /dev/null +++ b/mmdet3d/models/roi_heads/roi_extractors/single_roiaware_extractor.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn as nn +from mmcv import ops +from mmengine.model import BaseModule +from torch import Tensor + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class Single3DRoIAwareExtractor(BaseModule): + """Point-wise roi-aware Extractor. + + Extract Point-wise roi features. + + Args: + roi_layer (dict, optional): The config of roi layer. + """ + + def __init__(self, + roi_layer: Optional[dict] = None, + init_cfg: Optional[dict] = None) -> None: + super(Single3DRoIAwareExtractor, self).__init__(init_cfg=init_cfg) + self.roi_layer = self.build_roi_layers(roi_layer) + + def build_roi_layers(self, layer_cfg: dict) -> nn.Module: + """Build roi layers using `layer_cfg`""" + cfg = layer_cfg.copy() + layer_type = cfg.pop('type') + assert hasattr(ops, layer_type) + layer_cls = getattr(ops, layer_type) + roi_layers = layer_cls(**cfg) + return roi_layers + + def forward(self, feats: Tensor, coordinate: Tensor, batch_inds: Tensor, + rois: Tensor) -> Tensor: + """Extract point-wise roi features. + + Args: + feats (torch.FloatTensor): Point-wise features with + shape (batch, npoints, channels) for pooling. + coordinate (torch.FloatTensor): Coordinate of each point. + batch_inds (torch.LongTensor): Indicate the batch of each point. + rois (torch.FloatTensor): Roi boxes with batch indices. + + Returns: + torch.FloatTensor: Pooled features + """ + pooled_roi_feats = [] + for batch_idx in range(int(batch_inds.max()) + 1): + roi_inds = (rois[..., 0].int() == batch_idx) + coors_inds = (batch_inds.int() == batch_idx) + pooled_roi_feat = self.roi_layer(rois[..., 1:][roi_inds], + coordinate[coors_inds], + feats[coors_inds]) + pooled_roi_feats.append(pooled_roi_feat) + pooled_roi_feats = torch.cat(pooled_roi_feats, 0) + return pooled_roi_feats diff --git a/mmdet3d/models/roi_heads/roi_extractors/single_roipoint_extractor.py b/mmdet3d/models/roi_heads/roi_extractors/single_roipoint_extractor.py new file mode 100755 index 0000000..2697d25 --- /dev/null +++ b/mmdet3d/models/roi_heads/roi_extractors/single_roipoint_extractor.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn as nn +from mmcv import ops +from torch import Tensor + +from mmdet3d.registry import MODELS +from mmdet3d.structures.bbox_3d import rotation_3d_in_axis + + +@MODELS.register_module() +class Single3DRoIPointExtractor(nn.Module): + """Point-wise roi-aware Extractor. + + Extract Point-wise roi features. 
+ + Args: + roi_layer (dict, optional): The config of roi layer. + """ + + def __init__(self, roi_layer: Optional[dict] = None) -> None: + super(Single3DRoIPointExtractor, self).__init__() + self.roi_layer = self.build_roi_layers(roi_layer) + + def build_roi_layers(self, layer_cfg: dict) -> nn.Module: + """Build roi layers using `layer_cfg`""" + cfg = layer_cfg.copy() + layer_type = cfg.pop('type') + assert hasattr(ops, layer_type) + layer_cls = getattr(ops, layer_type) + roi_layers = layer_cls(**cfg) + return roi_layers + + def forward(self, feats: Tensor, coordinate: Tensor, batch_inds: Tensor, + rois: Tensor) -> Tensor: + """Extract point-wise roi features. + + Args: + feats (torch.FloatTensor): Point-wise features with + shape (batch, npoints, channels) for pooling. + coordinate (torch.FloatTensor): Coordinate of each point. + batch_inds (torch.LongTensor): Indicate the batch of each point. + rois (torch.FloatTensor): Roi boxes with batch indices. + + Returns: + torch.FloatTensor: Pooled features + """ + rois = rois[..., 1:] + rois = rois.view(batch_inds, -1, rois.shape[-1]) + with torch.no_grad(): + pooled_roi_feat, pooled_empty_flag = self.roi_layer( + coordinate, feats, rois) + + # canonical transformation + roi_center = rois[:, :, 0:3] + pooled_roi_feat[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2) + pooled_roi_feat = pooled_roi_feat.view(-1, + pooled_roi_feat.shape[-2], + pooled_roi_feat.shape[-1]) + pooled_roi_feat[:, :, 0:3] = rotation_3d_in_axis( + pooled_roi_feat[:, :, 0:3], + -(rois.view(-1, rois.shape[-1])[:, 6]), + axis=2) + pooled_roi_feat[pooled_empty_flag.view(-1) > 0] = 0 + + return pooled_roi_feat diff --git a/mmdet3d/models/segmentors/__init__.py b/mmdet3d/models/segmentors/__init__.py new file mode 100755 index 0000000..5e43985 --- /dev/null +++ b/mmdet3d/models/segmentors/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import Base3DSegmentor +from .cylinder3d import Cylinder3D +from .encoder_decoder import EncoderDecoder3D +from .minkunet import MinkUNet + +__all__ = ['Base3DSegmentor', 'EncoderDecoder3D', 'Cylinder3D', 'MinkUNet'] diff --git a/mmdet3d/models/segmentors/base.py b/mmdet3d/models/segmentors/base.py new file mode 100755 index 0000000..e881c2a --- /dev/null +++ b/mmdet3d/models/segmentors/base.py @@ -0,0 +1,163 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Dict, List, Union + +from mmengine.model import BaseModel +from torch import Tensor + +from mmdet3d.structures import PointData +from mmdet3d.structures.det3d_data_sample import (ForwardResults, + OptSampleList, SampleList) +from mmdet3d.utils import OptConfigType, OptMultiConfig + + +class Base3DSegmentor(BaseModel, metaclass=ABCMeta): + """Base class for 3D segmentors. + + Args: + data_preprocessor (dict or ConfigDict, optional): Model preprocessing + config for processing the input data. it usually includes + ``to_rgb``, ``pad_size_divisor``, ``pad_val``, ``mean`` and + ``std``. Defaults to None. + init_cfg (dict or ConfigDict, optional): The config to control the + initialization. Defaults to None. 
+ """ + + def __init__(self, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super(Base3DSegmentor, self).__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + @property + def with_neck(self) -> bool: + """bool: Whether the segmentor has neck.""" + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_auxiliary_head(self) -> bool: + """bool: Whether the segmentor has auxiliary head.""" + return hasattr(self, + 'auxiliary_head') and self.auxiliary_head is not None + + @property + def with_decode_head(self) -> bool: + """bool: Whether the segmentor has decode head.""" + return hasattr(self, 'decode_head') and self.decode_head is not None + + @property + def with_regularization_loss(self) -> bool: + """bool: Whether the segmentor has regularization loss for weight.""" + return hasattr(self, 'loss_regularization') and \ + self.loss_regularization is not None + + @abstractmethod + def extract_feat(self, batch_inputs: Tensor) -> dict: + """Placeholder for extract features from images.""" + pass + + @abstractmethod + def encode_decode(self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> Tensor: + """Placeholder for encode images with backbone and decode into a + semantic segmentation map of the same size as input.""" + pass + + def forward(self, + inputs: Union[dict, List[dict]], + data_samples: OptSampleList = None, + mode: str = 'tensor') -> ForwardResults: + """The unified entry for a forward process in both training and test. + + The method should accept three modes: "tensor", "predict" and "loss": + + - "tensor": Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`SegDataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + inputs (dict or List[dict]): Input sample dict which includes + 'points' and 'imgs' keys. + + - points (List[Tensor]): Point cloud of each sample. + - imgs (Tensor): Image tensor has shape (B, C, H, W). + data_samples (List[:obj:`Det3DDataSample`], optional): + The annotation data of every samples. Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="predict"``, return a list of :obj:`Det3DDataSample`. + - If ``mode="loss"``, return a dict of tensor. + """ + if mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + elif mode == 'tensor': + return self._forward(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}". 
'
+                               'Only supports loss, predict and tensor mode')
+
+    @abstractmethod
+    def loss(self, batch_inputs: dict,
+             batch_data_samples: SampleList) -> Dict[str, Tensor]:
+        """Calculate losses from a batch of inputs and data samples."""
+        pass
+
+    @abstractmethod
+    def predict(self, batch_inputs: dict,
+                batch_data_samples: SampleList) -> SampleList:
+        """Predict results from a batch of inputs and data samples with post-
+        processing."""
+        pass
+
+    @abstractmethod
+    def _forward(self,
+                 batch_inputs: dict,
+                 batch_data_samples: OptSampleList = None) -> Tensor:
+        """Network forward process.
+
+        Usually includes backbone, neck and head forward without any post-
+        processing.
+        """
+        pass
+
+    @abstractmethod
+    def aug_test(self, batch_inputs, batch_data_samples):
+        """Placeholder for augmentation test."""
+        pass
+
+    def postprocess_result(self, seg_pred_list: List[Tensor],
+                           batch_data_samples: SampleList) -> SampleList:
+        """Convert results list to `Det3DDataSample`.
+
+        Args:
+            seg_pred_list (List[Tensor]): List of per-point segmentation
+                predictions, one for each input point cloud sample.
+            batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data
+                samples. It usually includes information such as `metainfo` and
+                `gt_pts_seg`.
+
+        Returns:
+            List[:obj:`Det3DDataSample`]: Segmentation results of the input
+            points. Each Det3DDataSample usually contains:
+
+            - ``pred_pts_seg`` (PixelData): Prediction of 3D semantic
+              segmentation.
+        """
+
+        for i in range(len(seg_pred_list)):
+            seg_pred = seg_pred_list[i]
+            batch_data_samples[i].set_data(
+                {'pred_pts_seg': PointData(**{'pts_semantic_mask': seg_pred})})
+        return batch_data_samples
diff --git a/mmdet3d/models/segmentors/cylinder3d.py b/mmdet3d/models/segmentors/cylinder3d.py
new file mode 100755
index 0000000..b126607
--- /dev/null
+++ b/mmdet3d/models/segmentors/cylinder3d.py
@@ -0,0 +1,142 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Dict
+
+from torch import Tensor
+
+from mmdet3d.registry import MODELS
+from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig
+from ...structures.det3d_data_sample import SampleList
+from .encoder_decoder import EncoderDecoder3D
+
+
+@MODELS.register_module()
+class Cylinder3D(EncoderDecoder3D):
+    """`Cylindrical and Asymmetrical 3D Convolution Networks for LiDAR
+    Segmentation <https://arxiv.org/abs/2011.10033>`_.
+
+    Args:
+        voxel_encoder (dict or :obj:`ConfigDict`): The config for the
+            points2voxel encoder of segmentor.
+        backbone (dict or :obj:`ConfigDict`): The config for the backbone of
+            segmentor.
+        decode_head (dict or :obj:`ConfigDict`): The config for the decode
+            head of segmentor.
+        neck (dict or :obj:`ConfigDict`, optional): The config for the neck of
+            segmentor. Defaults to None.
+        auxiliary_head (dict or :obj:`ConfigDict` or List[dict or
+            :obj:`ConfigDict`], optional): The config for the auxiliary head of
+            segmentor. Defaults to None.
+        loss_regularization (dict or :obj:`ConfigDict` or List[dict or
+            :obj:`ConfigDict`], optional): The config for the regularization
+            loss. Defaults to None.
+        train_cfg (dict or :obj:`ConfigDict`, optional): The config for
+            training. Defaults to None.
+        test_cfg (dict or :obj:`ConfigDict`, optional): The config for testing.
+            Defaults to None.
+        data_preprocessor (dict or :obj:`ConfigDict`, optional): The
+            pre-process config of :class:`BaseDataPreprocessor`.
+            Defaults to None.
+        init_cfg (dict or :obj:`ConfigDict` or List[dict or :obj:`ConfigDict`],
+            optional): The weight initialized config for :class:`BaseModule`.
+            Defaults to None.
+ """ + + def __init__(self, + voxel_encoder: ConfigType, + backbone: ConfigType, + decode_head: ConfigType, + neck: OptConfigType = None, + auxiliary_head: OptConfigType = None, + loss_regularization: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super(Cylinder3D, self).__init__( + backbone=backbone, + decode_head=decode_head, + neck=neck, + auxiliary_head=auxiliary_head, + loss_regularization=loss_regularization, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + self.voxel_encoder = MODELS.build(voxel_encoder) + + def extract_feat(self, batch_inputs: dict) -> Tensor: + """Extract features from points.""" + encoded_feats = self.voxel_encoder(batch_inputs['voxels']['voxels'], + batch_inputs['voxels']['coors']) + batch_inputs['voxels']['voxel_coors'] = encoded_feats[1] + x = self.backbone(encoded_feats[0], encoded_feats[1], + len(batch_inputs['points'])) + if self.with_neck: + x = self.neck(x) + return x + + def loss(self, batch_inputs_dict: dict, + batch_data_samples: SampleList) -> Dict[str, Tensor]: + """Calculate losses from a batch of inputs and data samples. + + Args: + batch_inputs_dict (dict): Input sample dict which + includes 'points' and 'imgs' keys. + + - points (List[Tensor]): Point cloud of each sample. + - imgs (Tensor, optional): Image tensor has shape (B, C, H, W). + batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + + Returns: + Dict[str, Tensor]: A dictionary of loss components. + """ + + # extract features using backbone + x = self.extract_feat(batch_inputs_dict) + losses = dict() + loss_decode = self._decode_head_forward_train(x, batch_data_samples) + losses.update(loss_decode) + + return losses + + def predict(self, + batch_inputs_dict: dict, + batch_data_samples: SampleList, + rescale: bool = True) -> SampleList: + """Simple test with single scene. + + Args: + batch_inputs_dict (dict): Input sample dict which includes 'points' + and 'imgs' keys. + + - points (List[Tensor]): Point cloud of each sample. + - imgs (Tensor, optional): Image tensor has shape (B, C, H, W). + batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + rescale (bool): Whether transform to original number of points. + Will be used for voxelization based segmentors. + Defaults to True. + + Returns: + List[:obj:`Det3DDataSample`]: Segmentation results of the input + points. Each Det3DDataSample usually contains: + + - ``pred_pts_seg`` (PixelData): Prediction of 3D semantic + segmentation. 
+ """ + # 3D segmentation requires per-point prediction, so it's impossible + # to use down-sampling to get a batch of scenes with same num_points + # therefore, we only support testing one scene every time + x = self.extract_feat(batch_inputs_dict) + seg_pred_list = self.decode_head.predict(x, batch_inputs_dict, + batch_data_samples) + for i in range(len(seg_pred_list)): + seg_pred_list[i] = seg_pred_list[i].argmax(1).cpu() + + return self.postprocess_result(seg_pred_list, batch_data_samples) diff --git a/mmdet3d/models/segmentors/encoder_decoder.py b/mmdet3d/models/segmentors/encoder_decoder.py new file mode 100755 index 0000000..8554dc8 --- /dev/null +++ b/mmdet3d/models/segmentors/encoder_decoder.py @@ -0,0 +1,552 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Tuple + +import numpy as np +import torch +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F + +from mmdet3d.registry import MODELS +from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig +from ...structures.det3d_data_sample import OptSampleList, SampleList +from ..utils import add_prefix +from .base import Base3DSegmentor + + +@MODELS.register_module() +class EncoderDecoder3D(Base3DSegmentor): + """3D Encoder Decoder segmentors. + + EncoderDecoder typically consists of backbone, decode_head, auxiliary_head. + Note that auxiliary_head is only used for deep supervision during training, + which could be dumped during inference. + + 1. The ``loss`` method is used to calculate the loss of model, + which includes two steps: (1) Extracts features to obtain the feature maps + (2) Call the decode head loss function to forward decode head model and + calculate losses. + + .. code:: text + + loss(): extract_feat() -> _decode_head_forward_train() -> _auxiliary_head_forward_train (optional) + _decode_head_forward_train(): decode_head.loss() + _auxiliary_head_forward_train(): auxiliary_head.loss (optional) + + 2. The ``predict`` method is used to predict segmentation results, + which includes two steps: (1) Run inference function to obtain the list of + seg_logits (2) Call post-processing function to obtain list of + ``Det3DDataSample`` including ``pred_pts_seg``. + + .. code:: text + + predict(): inference() -> postprocess_result() + inference(): whole_inference()/slide_inference() + whole_inference()/slide_inference(): encoder_decoder() + encoder_decoder(): extract_feat() -> decode_head.predict() + + 4 The ``_forward`` method is used to output the tensor by running the model, + which includes two steps: (1) Extracts features to obtain the feature maps + (2) Call the decode head forward function to forward decode head model. + + .. code:: text + + _forward(): extract_feat() -> _decode_head.forward() + + Args: + backbone (dict or :obj:`ConfigDict`): The config for the backnone of + segmentor. + decode_head (dict or :obj:`ConfigDict`): The config for the decode + head of segmentor. + neck (dict or :obj:`ConfigDict`, optional): The config for the neck of + segmentor. Defaults to None. + auxiliary_head (dict or :obj:`ConfigDict` or List[dict or + :obj:`ConfigDict`], optional): The config for the auxiliary head of + segmentor. Defaults to None. + loss_regularization (dict or :obj:`ConfigDict` or List[dict or + :obj:`ConfigDict`], optional): The config for the regularization + loass. Defaults to None. + train_cfg (dict or :obj:`ConfigDict`, optional): The config for + training. Defaults to None. 
+ test_cfg (dict or :obj:`ConfigDict`, optional): The config for testing. + Defaults to None. + data_preprocessor (dict or :obj:`ConfigDict`, optional): The + pre-process config of :class:`BaseDataPreprocessor`. + Defaults to None. + init_cfg (dict or :obj:`ConfigDict` or List[dict or :obj:`ConfigDict`], + optional): The weight initialized config for :class:`BaseModule`. + Defaults to None. + """ # noqa: E501 + + def __init__(self, + backbone: ConfigType, + decode_head: ConfigType, + neck: OptConfigType = None, + auxiliary_head: OptMultiConfig = None, + loss_regularization: OptMultiConfig = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super(EncoderDecoder3D, self).__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.backbone = MODELS.build(backbone) + if neck is not None: + self.neck = MODELS.build(neck) + self._init_decode_head(decode_head) + self._init_auxiliary_head(auxiliary_head) + self._init_loss_regularization(loss_regularization) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + assert self.with_decode_head, \ + '3D EncoderDecoder Segmentor should have a decode_head' + + def _init_decode_head(self, decode_head: ConfigType) -> None: + """Initialize ``decode_head``.""" + self.decode_head = MODELS.build(decode_head) + self.num_classes = self.decode_head.num_classes + + def _init_auxiliary_head(self, + auxiliary_head: OptMultiConfig = None) -> None: + """Initialize ``auxiliary_head``.""" + if auxiliary_head is not None: + if isinstance(auxiliary_head, list): + self.auxiliary_head = nn.ModuleList() + for head_cfg in auxiliary_head: + self.auxiliary_head.append(MODELS.build(head_cfg)) + else: + self.auxiliary_head = MODELS.build(auxiliary_head) + + def _init_loss_regularization(self, + loss_regularization: OptMultiConfig = None + ) -> None: + """Initialize ``loss_regularization``.""" + if loss_regularization is not None: + if isinstance(loss_regularization, list): + self.loss_regularization = nn.ModuleList() + for loss_cfg in loss_regularization: + self.loss_regularization.append(MODELS.build(loss_cfg)) + else: + self.loss_regularization = MODELS.build(loss_regularization) + + def extract_feat(self, batch_inputs: Tensor) -> dict: + """Extract features from points.""" + x = self.backbone(batch_inputs) + if self.with_neck: + x = self.neck(x) + return x + + def encode_decode(self, batch_inputs: Tensor, + batch_input_metas: List[dict]) -> Tensor: + """Encode points with backbone and decode into a semantic segmentation + map of the same size as input. + + Args: + batch_input (Tensor): Input point cloud sample + batch_input_metas (List[dict]): Meta information of a batch of + samples. + + Returns: + Tensor: Segmentation logits of shape [B, num_classes, N]. + """ + x = self.extract_feat(batch_inputs) + seg_logits = self.decode_head.predict(x, batch_input_metas, + self.test_cfg) + return seg_logits + + def _decode_head_forward_train( + self, batch_inputs_dict: dict, + batch_data_samples: SampleList) -> Dict[str, Tensor]: + """Run forward function and calculate loss for decode head in training. + + Args: + batch_input (Tensor): Input point cloud sample + batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + + Returns: + Dict[str, Tensor]: A dictionary of loss components for decode head. 
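The decode-head and auxiliary-head losses are kept apart in the merged loss dict by namespacing their keys with `add_prefix` (imported above). The helper below is an illustrative stand-in, assuming it simply joins prefix and key with a dot:

```python
import torch


def add_prefix_sketch(inputs: dict, prefix: str) -> dict:
    """Illustrative stand-in for the imported ``add_prefix`` helper,
    assuming it namespaces every loss key with ``'<prefix>.'``."""
    return {f'{prefix}.{name}': value for name, value in inputs.items()}


loss_decode = {'loss_ce': torch.tensor(0.7), 'acc_seg': torch.tensor(83.0)}
loss_aux = {'loss_ce': torch.tensor(0.4)}

losses = {}
losses.update(add_prefix_sketch(loss_decode, 'decode'))
losses.update(add_prefix_sketch(loss_aux, 'aux_0'))
print(sorted(losses))  # ['aux_0.loss_ce', 'decode.acc_seg', 'decode.loss_ce']
```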
+ """ + losses = dict() + loss_decode = self.decode_head.loss(batch_inputs_dict, + batch_data_samples, self.train_cfg) + + losses.update(add_prefix(loss_decode, 'decode')) + return losses + + def _auxiliary_head_forward_train( + self, + batch_inputs_dict: dict, + batch_data_samples: SampleList, + ) -> Dict[str, Tensor]: + """Run forward function and calculate loss for auxiliary head in + training. + + Args: + batch_input (Tensor): Input point cloud sample + batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + + Returns: + Dict[str, Tensor]: A dictionary of loss components for auxiliary + head. + """ + losses = dict() + if isinstance(self.auxiliary_head, nn.ModuleList): + for idx, aux_head in enumerate(self.auxiliary_head): + loss_aux = aux_head.loss(batch_inputs_dict, batch_data_samples, + self.train_cfg) + losses.update(add_prefix(loss_aux, f'aux_{idx}')) + else: + loss_aux = self.auxiliary_head.loss(batch_inputs_dict, + batch_data_samples, + self.train_cfg) + losses.update(add_prefix(loss_aux, 'aux')) + + return losses + + def _loss_regularization_forward_train(self) -> Dict[str, Tensor]: + """Calculate regularization loss for model weight in training.""" + losses = dict() + if isinstance(self.loss_regularization, nn.ModuleList): + for idx, regularize_loss in enumerate(self.loss_regularization): + loss_regularize = dict( + loss_regularize=regularize_loss(self.modules())) + losses.update(add_prefix(loss_regularize, f'regularize_{idx}')) + else: + loss_regularize = dict( + loss_regularize=self.loss_regularization(self.modules())) + losses.update(add_prefix(loss_regularize, 'regularize')) + + return losses + + def loss(self, batch_inputs_dict: dict, + batch_data_samples: SampleList) -> Dict[str, Tensor]: + """Calculate losses from a batch of inputs and data samples. + + Args: + batch_inputs_dict (dict): Input sample dict which + includes 'points' and 'imgs' keys. + + - points (List[Tensor]): Point cloud of each sample. + - imgs (Tensor, optional): Image tensor has shape (B, C, H, W). + batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + + Returns: + Dict[str, Tensor]: A dictionary of loss components. + """ + + # extract features using backbone + points = torch.stack(batch_inputs_dict['points']) + x = self.extract_feat(points) + + losses = dict() + + loss_decode = self._decode_head_forward_train(x, batch_data_samples) + losses.update(loss_decode) + + if self.with_auxiliary_head: + loss_aux = self._auxiliary_head_forward_train( + x, batch_data_samples) + losses.update(loss_aux) + + if self.with_regularization_loss: + loss_regularize = self._loss_regularization_forward_train() + losses.update(loss_regularize) + + return losses + + @staticmethod + def _input_generation(coords, + patch_center: Tensor, + coord_max: Tensor, + feats: Tensor, + use_normalized_coord: bool = False) -> Tensor: + """Generating model input. + + Generate input by subtracting patch center and adding additional + features. Currently support colors and normalized xyz as features. + + Args: + coords (Tensor): Sampled 3D point coordinate of shape [S, 3]. + patch_center (Tensor): Center coordinate of the patch. + coord_max (Tensor): Max coordinate of all 3D points. + feats (Tensor): Features of sampled points of shape [S, C]. + use_normalized_coord (bool): Whether to use normalized xyz as + additional features. Defaults to False. 
+ + Returns: + Tensor: The generated input data of shape [S, 3+C']. + """ + # subtract patch center, the z dimension is not centered + centered_coords = coords.clone() + centered_coords[:, 0] -= patch_center[0] + centered_coords[:, 1] -= patch_center[1] + + # normalized coordinates as extra features + if use_normalized_coord: + normalized_coord = coords / coord_max + feats = torch.cat([feats, normalized_coord], dim=1) + + points = torch.cat([centered_coords, feats], dim=1) + + return points + + def _sliding_patch_generation(self, + points: Tensor, + num_points: int, + block_size: float, + sample_rate: float = 0.5, + use_normalized_coord: bool = False, + eps: float = 1e-3) -> Tuple[Tensor, Tensor]: + """Sampling points in a sliding window fashion. + + First sample patches to cover all the input points. + Then sample points in each patch to batch points of a certain number. + + Args: + points (Tensor): Input points of shape [N, 3+C]. + num_points (int): Number of points to be sampled in each patch. + block_size (float): Size of a patch to sample. + sample_rate (float): Stride used in sliding patch. Defaults to 0.5. + use_normalized_coord (bool): Whether to use normalized xyz as + additional features. Defaults to False. + eps (float): A value added to patch boundary to guarantee points + coverage. Defaults to 1e-3. + + Returns: + Tuple[Tensor, Tensor]: + + - patch_points (Tensor): Points of different patches of shape + [K, N, 3+C]. + - patch_idxs (Tensor): Index of each point in `patch_points` of + shape [K, N]. + """ + device = points.device + # we assume the first three dims are points' 3D coordinates + # and the rest dims are their per-point features + coords = points[:, :3] + feats = points[:, 3:] + + coord_max = coords.max(0)[0] + coord_min = coords.min(0)[0] + stride = block_size * sample_rate + num_grid_x = int( + torch.ceil((coord_max[0] - coord_min[0] - block_size) / + stride).item() + 1) + num_grid_y = int( + torch.ceil((coord_max[1] - coord_min[1] - block_size) / + stride).item() + 1) + + patch_points, patch_idxs = [], [] + for idx_y in range(num_grid_y): + s_y = coord_min[1] + idx_y * stride + e_y = torch.min(s_y + block_size, coord_max[1]) + s_y = e_y - block_size + for idx_x in range(num_grid_x): + s_x = coord_min[0] + idx_x * stride + e_x = torch.min(s_x + block_size, coord_max[0]) + s_x = e_x - block_size + + # extract points within this patch + cur_min = torch.tensor([s_x, s_y, coord_min[2]]).to(device) + cur_max = torch.tensor([e_x, e_y, coord_max[2]]).to(device) + cur_choice = ((coords >= cur_min - eps) & + (coords <= cur_max + eps)).all(dim=1) + + if not cur_choice.any(): # no points in this patch + continue + + # sample points in this patch to multiple batches + cur_center = cur_min + block_size / 2.0 + point_idxs = torch.nonzero(cur_choice, as_tuple=True)[0] + num_batch = int(np.ceil(point_idxs.shape[0] / num_points)) + point_size = int(num_batch * num_points) + replace = point_size > 2 * point_idxs.shape[0] + num_repeat = point_size - point_idxs.shape[0] + if replace: # duplicate + point_idxs_repeat = point_idxs[torch.randint( + 0, point_idxs.shape[0], + size=(num_repeat, )).to(device)] + else: + point_idxs_repeat = point_idxs[torch.randperm( + point_idxs.shape[0])[:num_repeat]] + + choices = torch.cat([point_idxs, point_idxs_repeat], dim=0) + choices = choices[torch.randperm(choices.shape[0])] + + # construct model input + point_batches = self._input_generation( + coords[choices], + cur_center, + coord_max, + feats[choices], + use_normalized_coord=use_normalized_coord) 
+ + patch_points.append(point_batches) + patch_idxs.append(choices) + + patch_points = torch.cat(patch_points, dim=0) + patch_idxs = torch.cat(patch_idxs, dim=0) + + # make sure all points are sampled at least once + assert torch.unique(patch_idxs).shape[0] == points.shape[0], \ + 'some points are not sampled in sliding inference' + + return patch_points, patch_idxs + + def slide_inference(self, point: Tensor, input_meta: dict, + rescale: bool) -> Tensor: + """Inference by sliding-window with overlap. + + Args: + point (Tensor): Input points of shape [N, 3+C]. + input_meta (dict): Meta information of input sample. + rescale (bool): Whether transform to original number of points. + Will be used for voxelization based segmentors. + + Returns: + Tensor: The output segmentation map of shape [num_classes, N]. + """ + num_points = self.test_cfg.num_points + block_size = self.test_cfg.block_size + sample_rate = self.test_cfg.sample_rate + use_normalized_coord = self.test_cfg.use_normalized_coord + batch_size = self.test_cfg.batch_size * num_points + + # patch_points is of shape [K*N, 3+C], patch_idxs is of shape [K*N] + patch_points, patch_idxs = self._sliding_patch_generation( + point, num_points, block_size, sample_rate, use_normalized_coord) + feats_dim = patch_points.shape[1] + seg_logits = [] # save patch predictions + + for batch_idx in range(0, patch_points.shape[0], batch_size): + batch_points = patch_points[batch_idx:batch_idx + batch_size] + batch_points = batch_points.view(-1, num_points, feats_dim) + # batch_seg_logit is of shape [B, num_classes, N] + batch_seg_logit = self.encode_decode(batch_points, + [input_meta] * batch_size) + batch_seg_logit = batch_seg_logit.transpose(1, 2).contiguous() + seg_logits.append(batch_seg_logit.view(-1, self.num_classes)) + + # aggregate per-point logits by indexing sum and dividing count + seg_logits = torch.cat(seg_logits, dim=0) # [K*N, num_classes] + expand_patch_idxs = patch_idxs.unsqueeze(1).repeat(1, self.num_classes) + preds = point.new_zeros((point.shape[0], self.num_classes)).\ + scatter_add_(dim=0, index=expand_patch_idxs, src=seg_logits) + count_mat = torch.bincount(patch_idxs) + preds = preds / count_mat[:, None] + + # TODO: if rescale and voxelization segmentor + + return preds.transpose(0, 1) # to [num_classes, K*N] + + def whole_inference(self, points: Tensor, batch_input_metas: List[dict], + rescale: bool) -> Tensor: + """Inference with full scene (one forward pass without sliding).""" + seg_logit = self.encode_decode(points, batch_input_metas) + # TODO: if rescale and voxelization segmentor + return seg_logit + + def inference(self, points: Tensor, batch_input_metas: List[dict], + rescale: bool) -> Tensor: + """Inference with slide/whole style. + + Args: + points (Tensor): Input points of shape [B, N, 3+C]. + batch_input_metas (List[dict]): Meta information of a batch of + samples. + rescale (bool): Whether transform to original number of points. + Will be used for voxelization based segmentors. + + Returns: + Tensor: The output segmentation map. 
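Because the sliding patches overlap, `slide_inference` above can collect several logit vectors for the same point; it sums them with `scatter_add_` and divides by how often each point was visited (`torch.bincount`). A toy sketch of that aggregation:

```python
import torch

num_points, num_classes = 5, 3
# three overlapping patches covering points [0, 1, 2], [2, 3, 4] and [4, 0, 1]
patch_idxs = torch.tensor([0, 1, 2, 2, 3, 4, 4, 0, 1])
seg_logits = torch.rand(patch_idxs.numel(), num_classes)   # one logit row per visit

expand_idxs = patch_idxs.unsqueeze(1).repeat(1, num_classes)
preds = seg_logits.new_zeros((num_points, num_classes)).scatter_add_(
    dim=0, index=expand_idxs, src=seg_logits)     # summed logits per point
count_mat = torch.bincount(patch_idxs)            # how often each point was visited
preds = preds / count_mat[:, None]                # average over overlapping patches
print(preds.shape)                                # torch.Size([5, 3])
```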
+ """ + assert self.test_cfg.mode in ['slide', 'whole'] + if self.test_cfg.mode == 'slide': + seg_logit = torch.stack([ + self.slide_inference(point, input_meta, rescale) + for point, input_meta in zip(points, batch_input_metas) + ], 0) + else: + seg_logit = self.whole_inference(points, batch_input_metas, + rescale) + output = F.softmax(seg_logit, dim=1) + return output + + def predict(self, + batch_inputs_dict: dict, + batch_data_samples: SampleList, + rescale: bool = True) -> SampleList: + """Simple test with single scene. + + Args: + batch_inputs_dict (dict): Input sample dict which includes 'points' + and 'imgs' keys. + + - points (List[Tensor]): Point cloud of each sample. + - imgs (Tensor, optional): Image tensor has shape (B, C, H, W). + batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + rescale (bool): Whether transform to original number of points. + Will be used for voxelization based segmentors. + Defaults to True. + + Returns: + List[:obj:`Det3DDataSample`]: Segmentation results of the input + points. Each Det3DDataSample usually contains: + + - ``pred_pts_seg`` (PixelData): Prediction of 3D semantic + segmentation. + """ + # 3D segmentation requires per-point prediction, so it's impossible + # to use down-sampling to get a batch of scenes with same num_points + # therefore, we only support testing one scene every time + seg_pred_list = [] + batch_input_metas = [] + for data_sample in batch_data_samples: + batch_input_metas.append(data_sample.metainfo) + + points = batch_inputs_dict['points'] + for point, input_meta in zip(points, batch_input_metas): + seg_prob = self.inference( + point.unsqueeze(0), [input_meta], rescale)[0] + seg_map = seg_prob.argmax(0) # [N] + # to cpu tensor for consistency with det3d + seg_map = seg_map.cpu() + seg_pred_list.append(seg_map) + + return self.postprocess_result(seg_pred_list, batch_data_samples) + + def _forward(self, + batch_inputs_dict: dict, + batch_data_samples: OptSampleList = None) -> Tensor: + """Network forward process. + + Args: + batch_inputs_dict (dict): Input sample dict which includes 'points' + and 'imgs' keys. + + - points (List[Tensor]): Point cloud of each sample. + - imgs (Tensor, optional): Image tensor has shape (B, C, H, W). + batch_data_samples (List[:obj:`Det3DDataSample`]): The det3d data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + + Returns: + Tensor: Forward output of model without any post-processes. + """ + points = torch.stack(batch_inputs_dict['points']) + x = self.extract_feat(points) + return self.decode_head.forward(x) + + def aug_test(self, batch_inputs, batch_img_metas): + """Placeholder for augmentation test.""" + pass diff --git a/mmdet3d/models/segmentors/minkunet.py b/mmdet3d/models/segmentors/minkunet.py new file mode 100755 index 0000000..fcf8c22 --- /dev/null +++ b/mmdet3d/models/segmentors/minkunet.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from torch import Tensor + +from mmdet3d.models.layers.torchsparse import IS_TORCHSPARSE_AVAILABLE +from mmdet3d.registry import MODELS +from mmdet3d.structures.det3d_data_sample import OptSampleList, SampleList +from .encoder_decoder import EncoderDecoder3D + +if IS_TORCHSPARSE_AVAILABLE: + from torchsparse import SparseTensor +else: + SparseTensor = None + + +@MODELS.register_module() +class MinkUNet(EncoderDecoder3D): + r"""MinkUNet is the implementation of `4D Spatio-Temporal ConvNets. 
+ `_ with TorchSparse backend. + + Refer to `implementation code `_. + + Args: + kwargs (dict): Arguments are the same as those in + :class:`EncoderDecoder3D`. + """ + + def __init__(self, **kwargs) -> None: + if not IS_TORCHSPARSE_AVAILABLE: + raise ImportError( + 'Please follow `get_started.md` to install Torchsparse.`') + super().__init__(**kwargs) + + def loss(self, inputs: dict, data_samples: SampleList): + """Calculate losses from a batch of inputs and data samples. + + Args: + batch_inputs_dict (dict): Input sample dict which + includes 'points' and 'voxels' keys. + + - points (List[Tensor]): Point cloud of each sample. + - voxels (dict): Voxel feature and coords after voxelization. + batch_data_samples (List[:obj:`Det3DDataSample`]): The seg data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + + Returns: + Dict[str, Tensor]: A dictionary of loss components. + """ + x = self.extract_feat(inputs) + losses = self.decode_head.loss(x, data_samples, self.train_cfg) + return losses + + def predict(self, inputs: dict, data_samples: SampleList) -> SampleList: + """Simple test with single scene. + + Args: + batch_inputs_dict (dict): Input sample dict which + includes 'points' and 'voxels' keys. + + - points (List[Tensor]): Point cloud of each sample. + - voxels (dict): Voxel feature and coords after voxelization. + batch_data_samples (List[:obj:`Det3DDataSample`]): The seg data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. + + Returns: + List[:obj:`Det3DDataSample`]: Segmentation results of the input + points. Each Det3DDataSample usually contains: + + - ``pred_pts_seg`` (PixelData): Prediction of 3D semantic + segmentation. + """ + x = self.extract_feat(inputs) + seg_logits = self.decode_head.predict(x, data_samples) + seg_preds = [seg_logit.argmax(dim=1) for seg_logit in seg_logits] + + return self.postprocess_result(seg_preds, data_samples) + + def _forward(self, + batch_inputs_dict: dict, + batch_data_samples: OptSampleList = None) -> Tensor: + """Network forward process. + + Args: + batch_inputs_dict (dict): Input sample dict which + includes 'points' and 'voxels' keys. + + - points (List[Tensor]): Point cloud of each sample. + - voxels (dict): Voxel feature and coords after voxelization. + batch_data_samples (List[:obj:`Det3DDataSample`]): The seg data + samples. It usually includes information such as `metainfo` and + `gt_pts_seg`. Defaults to None. + + Returns: + Tensor: Forward output of model without any post-processes. + """ + x = self.extract_feat(batch_inputs_dict) + return self.decode_head.forward(x) + + def extract_feat(self, batch_inputs_dict: dict) -> SparseTensor: + """Extract features from voxels. + + Args: + batch_inputs_dict (dict): Input sample dict which + includes 'points' and 'voxels' keys. + + - points (List[Tensor]): Point cloud of each sample. + - voxels (dict): Voxel feature and coords after voxelization. + + Returns: + SparseTensor: voxels with features. + """ + voxel_dict = batch_inputs_dict['voxels'] + x = self.backbone(voxel_dict['voxels'], voxel_dict['coors']) + if self.with_neck: + x = self.neck(x) + return x diff --git a/mmdet3d/models/task_modules/__init__.py b/mmdet3d/models/task_modules/__init__.py new file mode 100755 index 0000000..2a0c818 --- /dev/null +++ b/mmdet3d/models/task_modules/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
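`MinkUNet.predict` above takes the per-sample logits from the decode head, picks the best class for every point and hands the result to the shared `postprocess_result`, which stores it on each `Det3DDataSample` as a `PointData`. A sketch with illustrative point counts and label set:

```python
import torch

from mmdet3d.structures import PointData

# per-sample class logits as returned by decode_head.predict
# (point counts and the 20-class label set are illustrative)
seg_logits = [torch.rand(12000, 20), torch.rand(9000, 20)]

# mirrors MinkUNet.predict: pick the highest-scoring class for every point
seg_preds = [logits.argmax(dim=1) for logits in seg_logits]

# postprocess_result then attaches each prediction to its Det3DDataSample
# as `pred_pts_seg`, wrapped in a PointData container
pred = PointData(pts_semantic_mask=seg_preds[0])
print(pred.pts_semantic_mask.shape)   # torch.Size([12000])
```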
+from mmdet.models.task_modules import AssignResult, BaseAssigner + +from .anchor import (ANCHOR_GENERATORS, PRIOR_GENERATORS, + AlignedAnchor3DRangeGenerator, + AlignedAnchor3DRangeGeneratorPerCls, + Anchor3DRangeGenerator, build_anchor_generator, + build_prior_generator) +from .assigners import Max3DIoUAssigner +from .coders import (AnchorFreeBBoxCoder, CenterPointBBoxCoder, + DeltaXYZWLHRBBoxCoder, FCOS3DBBoxCoder, + GroupFree3DBBoxCoder, MonoFlexCoder, + PartialBinBasedBBoxCoder, PGDBBoxCoder, + PointXYZWHLRBBoxCoder, SMOKECoder) +from .samplers import (BaseSampler, CombinedSampler, + InstanceBalancedPosSampler, IoUBalancedNegSampler, + IoUNegPiecewiseSampler, OHEMSampler, PseudoSampler, + RandomSampler, SamplingResult) +from .voxel import VoxelGenerator + +__all__ = [ + 'BaseAssigner', 'Max3DIoUAssigner', 'AssignResult', 'BaseSampler', + 'PseudoSampler', 'RandomSampler', 'InstanceBalancedPosSampler', + 'IoUBalancedNegSampler', 'CombinedSampler', 'OHEMSampler', + 'SamplingResult', 'IoUNegPiecewiseSampler', 'DeltaXYZWLHRBBoxCoder', + 'PartialBinBasedBBoxCoder', 'CenterPointBBoxCoder', 'AnchorFreeBBoxCoder', + 'GroupFree3DBBoxCoder', 'PointXYZWHLRBBoxCoder', 'FCOS3DBBoxCoder', + 'PGDBBoxCoder', 'SMOKECoder', 'MonoFlexCoder', 'VoxelGenerator', + 'AlignedAnchor3DRangeGenerator', 'Anchor3DRangeGenerator', + 'build_prior_generator', 'AlignedAnchor3DRangeGeneratorPerCls', + 'build_anchor_generator', 'ANCHOR_GENERATORS', 'PRIOR_GENERATORS' +] diff --git a/mmdet3d/models/task_modules/anchor/__init__.py b/mmdet3d/models/task_modules/anchor/__init__.py new file mode 100755 index 0000000..fc14ac1 --- /dev/null +++ b/mmdet3d/models/task_modules/anchor/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .anchor_3d_generator import (AlignedAnchor3DRangeGenerator, + AlignedAnchor3DRangeGeneratorPerCls, + Anchor3DRangeGenerator) +from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS, + build_anchor_generator, build_prior_generator) + +__all__ = [ + 'AlignedAnchor3DRangeGenerator', 'Anchor3DRangeGenerator', + 'build_prior_generator', 'AlignedAnchor3DRangeGeneratorPerCls', + 'build_anchor_generator', 'ANCHOR_GENERATORS', 'PRIOR_GENERATORS' +] diff --git a/mmdet3d/models/task_modules/anchor/anchor_3d_generator.py b/mmdet3d/models/task_modules/anchor/anchor_3d_generator.py new file mode 100755 index 0000000..9e2511a --- /dev/null +++ b/mmdet3d/models/task_modules/anchor/anchor_3d_generator.py @@ -0,0 +1,419 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmengine +import torch + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class Anchor3DRangeGenerator(object): + """3D Anchor Generator by range. + + This anchor generator generates anchors by the given range in different + feature levels. + Due the convention in 3D detection, different anchor sizes are related to + different ranges for different categories. However we find this setting + does not effect the performance much in some datasets, e.g., nuScenes. + + Args: + ranges (list[list[float]]): Ranges of different anchors. + The ranges are the same across different feature levels. But may + vary for different anchor sizes if size_per_range is True. + sizes (list[list[float]], optional): 3D sizes of anchors. + Defaults to [[3.9, 1.6, 1.56]]. + scales (list[int], optional): Scales of anchors in different feature + levels. Defaults to [1]. + rotations (list[float], optional): Rotations of anchors in a feature + grid. Defaults to [0, 1.5707963]. 
+ custom_values (tuple[float], optional): Customized values of that + anchor. For example, in nuScenes the anchors have velocities. + Defaults to (). + reshape_out (bool, optional): Whether to reshape the output into + (N x 4). Defaults to True. + size_per_range (bool, optional): Whether to use separate ranges for + different sizes. If size_per_range is True, the ranges should have + the same length as the sizes, if not, it will be duplicated. + Defaults to True. + """ + + def __init__(self, + ranges, + sizes=[[3.9, 1.6, 1.56]], + scales=[1], + rotations=[0, 1.5707963], + custom_values=(), + reshape_out=True, + size_per_range=True): + assert mmengine.is_list_of(ranges, list) + if size_per_range: + if len(sizes) != len(ranges): + assert len(ranges) == 1 + ranges = ranges * len(sizes) + assert len(ranges) == len(sizes) + else: + assert len(ranges) == 1 + assert mmengine.is_list_of(sizes, list) + assert isinstance(scales, list) + + self.sizes = sizes + self.scales = scales + self.ranges = ranges + self.rotations = rotations + self.custom_values = custom_values + self.cached_anchors = None + self.reshape_out = reshape_out + self.size_per_range = size_per_range + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += f'anchor_range={self.ranges},\n' + s += f'scales={self.scales},\n' + s += f'sizes={self.sizes},\n' + s += f'rotations={self.rotations},\n' + s += f'reshape_out={self.reshape_out},\n' + s += f'size_per_range={self.size_per_range})' + return s + + @property + def num_base_anchors(self): + """list[int]: Total number of base anchors in a feature grid.""" + num_rot = len(self.rotations) + num_size = torch.tensor(self.sizes).reshape(-1, 3).size(0) + return num_rot * num_size + + @property + def num_levels(self): + """int: Number of feature levels that the generator is applied to.""" + return len(self.scales) + + def grid_anchors(self, featmap_sizes, device='cuda'): + """Generate grid anchors in multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels. + device (str, optional): Device where the anchors will be put on. + Defaults to 'cuda'. + + Returns: + list[torch.Tensor]: Anchors in multiple feature levels. + The sizes of each tensor should be [N, 4], where + N = width * height * num_base_anchors, width and height + are the sizes of the corresponding feature level, + num_base_anchors is the number of anchors for that level. + """ + assert self.num_levels == len(featmap_sizes) + multi_level_anchors = [] + for i in range(self.num_levels): + anchors = self.single_level_grid_anchors( + featmap_sizes[i], self.scales[i], device=device) + if self.reshape_out: + anchors = anchors.reshape(-1, anchors.size(-1)) + multi_level_anchors.append(anchors) + return multi_level_anchors + + def single_level_grid_anchors(self, featmap_size, scale, device='cuda'): + """Generate grid anchors of a single level feature map. + + This function is usually called by method ``self.grid_anchors``. + + Args: + featmap_size (tuple[int]): Size of the feature map. + scale (float): Scale factor of the anchors in the current level. + device (str, optional): Device the tensor will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors in the overall feature map. 
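+
+        Example (an illustrative doctest added for clarity; the anchor range
+            and feature map size below are arbitrary KITTI-like values, not
+            defaults taken from a config):
+            >>> self = Anchor3DRangeGenerator(
+            ...     ranges=[[0, -40.0, -1.78, 70.4, 40.0, -1.78]])
+            >>> anchors = self.single_level_grid_anchors(
+            ...     (200, 176), scale=1, device='cpu')
+            >>> anchors.shape
+            torch.Size([1, 200, 176, 1, 2, 7])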
+ """ + # We reimplement the anchor generator using torch in cuda + # torch: 0.6975 s for 1000 times + # numpy: 4.3345 s for 1000 times + # which is ~5 times faster than the numpy implementation + if not self.size_per_range: + return self.anchors_single_range( + featmap_size, + self.ranges[0], + scale, + self.sizes, + self.rotations, + device=device) + + mr_anchors = [] + for anchor_range, anchor_size in zip(self.ranges, self.sizes): + mr_anchors.append( + self.anchors_single_range( + featmap_size, + anchor_range, + scale, + anchor_size, + self.rotations, + device=device)) + mr_anchors = torch.cat(mr_anchors, dim=-3) + return mr_anchors + + def anchors_single_range(self, + feature_size, + anchor_range, + scale=1, + sizes=[[3.9, 1.6, 1.56]], + rotations=[0, 1.5707963], + device='cuda'): + """Generate anchors in a single range. + + Args: + feature_size (list[float] | tuple[float]): Feature map size. It is + either a list of a tuple of [D, H, W](in order of z, y, and x). + anchor_range (torch.Tensor | list[float]): Range of anchors with + shape [6]. The order is consistent with that of anchors, i.e., + (x_min, y_min, z_min, x_max, y_max, z_max). + scale (float | int, optional): The scale factor of anchors. + Defaults to 1. + sizes (list[list] | np.ndarray | torch.Tensor, optional): + Anchor size with shape [N, 3], in order of x, y, z. + Defaults to [[3.9, 1.6, 1.56]]. + rotations (list[float] | np.ndarray | torch.Tensor, optional): + Rotations of anchors in a single feature grid. + Defaults to [0, 1.5707963]. + device (str): Devices that the anchors will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors with shape + [*feature_size, num_sizes, num_rots, 7]. + """ + if len(feature_size) == 2: + feature_size = [1, feature_size[0], feature_size[1]] + anchor_range = torch.tensor(anchor_range, device=device) + z_centers = torch.linspace( + anchor_range[2], anchor_range[5], feature_size[0], device=device) + y_centers = torch.linspace( + anchor_range[1], anchor_range[4], feature_size[1], device=device) + x_centers = torch.linspace( + anchor_range[0], anchor_range[3], feature_size[2], device=device) + sizes = torch.tensor(sizes, device=device).reshape(-1, 3) * scale + rotations = torch.tensor(rotations, device=device) + + # torch.meshgrid default behavior is 'id', np's default is 'xy' + rets = torch.meshgrid(x_centers, y_centers, z_centers, rotations) + # torch.meshgrid returns a tuple rather than list + rets = list(rets) + tile_shape = [1] * 5 + tile_shape[-2] = int(sizes.shape[0]) + for i in range(len(rets)): + rets[i] = rets[i].unsqueeze(-2).repeat(tile_shape).unsqueeze(-1) + + sizes = sizes.reshape([1, 1, 1, -1, 1, 3]) + tile_size_shape = list(rets[0].shape) + tile_size_shape[3] = 1 + sizes = sizes.repeat(tile_size_shape) + rets.insert(3, sizes) + + ret = torch.cat(rets, dim=-1).permute([2, 1, 0, 3, 4, 5]) + # [1, 200, 176, N, 2, 7] for kitti after permute + + if len(self.custom_values) > 0: + custom_ndim = len(self.custom_values) + custom = ret.new_zeros([*ret.shape[:-1], custom_ndim]) + # custom[:] = self.custom_values + ret = torch.cat([ret, custom], dim=-1) + # [1, 200, 176, N, 2, 9] for nus dataset after permute + return ret + + +@TASK_UTILS.register_module() +class AlignedAnchor3DRangeGenerator(Anchor3DRangeGenerator): + """Aligned 3D Anchor Generator by range. + + This anchor generator uses a different manner to generate the positions + of anchors' centers from :class:`Anchor3DRangeGenerator`. 
+
+    Note:
+        The `align` means that the anchor's center is aligned with the voxel
+        grid, which is also the feature grid. The previous implementation of
+        :class:`Anchor3DRangeGenerator` does not generate the anchors' centers
+        according to the voxel grid. Rather, it generates the centers by
+        uniformly distributing the anchors inside the minimum and maximum
+        anchor ranges according to the feature map sizes, so the anchor
+        centers do not match the feature grid. The
+        :class:`AlignedAnchor3DRangeGenerator` instead adds 1 to the feature
+        map sizes to obtain the corners of the voxel grid, shifts the
+        coordinates by half a voxel (unless ``align_corner=True``) and keeps
+        the first ``feature_size`` coordinates along each axis, so that one
+        anchor center is placed per voxel.
+
+    Args:
+        align_corner (bool, optional): Whether to align the anchor centers
+            with the corners of the voxel grid instead of the voxel centers.
+            By default it is False and each anchor center coincides with the
+            center of the corresponding voxel, which is also the center of
+            the corresponding feature grid cell. Defaults to False.
+    """
+
+    def __init__(self, align_corner=False, **kwargs):
+        super(AlignedAnchor3DRangeGenerator, self).__init__(**kwargs)
+        self.align_corner = align_corner
+
+    def anchors_single_range(self,
+                             feature_size,
+                             anchor_range,
+                             scale,
+                             sizes=[[3.9, 1.6, 1.56]],
+                             rotations=[0, 1.5707963],
+                             device='cuda'):
+        """Generate anchors in a single range.
+
+        Args:
+            feature_size (list[float] | tuple[float]): Feature map size. It is
+                either a list or a tuple of [D, H, W] (in order of z, y and x).
+            anchor_range (torch.Tensor | list[float]): Range of anchors with
+                shape [6]. The order is consistent with that of anchors, i.e.,
+                (x_min, y_min, z_min, x_max, y_max, z_max).
+            scale (float | int): The scale factor of anchors.
+            sizes (list[list] | np.ndarray | torch.Tensor, optional):
+                Anchor size with shape [N, 3], in order of x, y, z.
+                Defaults to [[3.9, 1.6, 1.56]].
+            rotations (list[float] | np.ndarray | torch.Tensor, optional):
+                Rotations of anchors in a single feature grid.
+                Defaults to [0, 1.5707963].
+            device (str, optional): Devices that the anchors will be put on.
+                Defaults to 'cuda'.
+
+        Returns:
+            torch.Tensor: Anchors with shape
+                [*feature_size, num_sizes, num_rots, 7].
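+
+        Example (illustrative numbers added for clarity; they are not
+            defaults): with ``anchor_range[0] = 0``, ``anchor_range[3] = 70.4``
+            and ``feature_size[2] = 176``, ``x_centers`` is
+            ``torch.linspace(0, 70.4, 177)``, i.e. the voxel edges with step
+            0.4. With ``align_corner=False`` every coordinate is then shifted
+            by half a voxel (0.2), so the anchor centers along x are
+            0.2, 0.6, ..., 70.2, one per voxel center.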
+ """ + if len(feature_size) == 2: + feature_size = [1, feature_size[0], feature_size[1]] + anchor_range = torch.tensor(anchor_range, device=device) + z_centers = torch.linspace( + anchor_range[2], + anchor_range[5], + feature_size[0] + 1, + device=device) + y_centers = torch.linspace( + anchor_range[1], + anchor_range[4], + feature_size[1] + 1, + device=device) + x_centers = torch.linspace( + anchor_range[0], + anchor_range[3], + feature_size[2] + 1, + device=device) + sizes = torch.tensor(sizes, device=device).reshape(-1, 3) * scale + rotations = torch.tensor(rotations, device=device) + + # shift the anchor center + if not self.align_corner: + z_shift = (z_centers[1] - z_centers[0]) / 2 + y_shift = (y_centers[1] - y_centers[0]) / 2 + x_shift = (x_centers[1] - x_centers[0]) / 2 + z_centers += z_shift + y_centers += y_shift + x_centers += x_shift + + # torch.meshgrid default behavior is 'id', np's default is 'xy' + rets = torch.meshgrid(x_centers[:feature_size[2]], + y_centers[:feature_size[1]], + z_centers[:feature_size[0]], rotations) + + # torch.meshgrid returns a tuple rather than list + rets = list(rets) + tile_shape = [1] * 5 + tile_shape[-2] = int(sizes.shape[0]) + for i in range(len(rets)): + rets[i] = rets[i].unsqueeze(-2).repeat(tile_shape).unsqueeze(-1) + + sizes = sizes.reshape([1, 1, 1, -1, 1, 3]) + tile_size_shape = list(rets[0].shape) + tile_size_shape[3] = 1 + sizes = sizes.repeat(tile_size_shape) + rets.insert(3, sizes) + + ret = torch.cat(rets, dim=-1).permute([2, 1, 0, 3, 4, 5]) + + if len(self.custom_values) > 0: + custom_ndim = len(self.custom_values) + custom = ret.new_zeros([*ret.shape[:-1], custom_ndim]) + # TODO: check the support of custom values + # custom[:] = self.custom_values + ret = torch.cat([ret, custom], dim=-1) + return ret + + +@TASK_UTILS.register_module() +class AlignedAnchor3DRangeGeneratorPerCls(AlignedAnchor3DRangeGenerator): + """3D Anchor Generator by range for per class. + + This anchor generator generates anchors by the given range for per class. + Note that feature maps of different classes may be different. + + Args: + kwargs (dict): Arguments are the same as those in + :class:`AlignedAnchor3DRangeGenerator`. + """ + + def __init__(self, **kwargs): + super(AlignedAnchor3DRangeGeneratorPerCls, self).__init__(**kwargs) + assert len(self.scales) == 1, 'Multi-scale feature map levels are' + \ + ' not supported currently in this kind of anchor generator.' + + def grid_anchors(self, featmap_sizes, device='cuda'): + """Generate grid anchors in multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes for + different classes in a single feature level. + device (str, optional): Device where the anchors will be put on. + Defaults to 'cuda'. + + Returns: + list[list[torch.Tensor]]: Anchors in multiple feature levels. + Note that in this anchor generator, we currently only + support single feature level. The sizes of each tensor + should be [num_sizes/ranges*num_rots*featmap_size, + box_code_size]. + """ + multi_level_anchors = [] + anchors = self.multi_cls_grid_anchors( + featmap_sizes, self.scales[0], device=device) + multi_level_anchors.append(anchors) + return multi_level_anchors + + def multi_cls_grid_anchors(self, featmap_sizes, scale, device='cuda'): + """Generate grid anchors of a single level feature map for multi-class + with different feature map sizes. + + This function is usually called by method ``self.grid_anchors``. 
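+
+        Each class has its own feature map size, anchor range and anchor
+        size, so the anchors are generated per class and returned as one
+        flattened ``(num_sizes/ranges * num_rots * featmap_size,
+        box_code_size)`` tensor per class.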
+ + Args: + featmap_sizes (list[tuple]): List of feature map sizes for + different classes in a single feature level. + scale (float): Scale factor of the anchors in the current level. + device (str, optional): Device the tensor will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors in the overall feature map. + """ + assert len(featmap_sizes) == len(self.sizes) == len(self.ranges), \ + 'The number of different feature map sizes anchor sizes and ' + \ + 'ranges should be the same.' + + multi_cls_anchors = [] + for i in range(len(featmap_sizes)): + anchors = self.anchors_single_range( + featmap_sizes[i], + self.ranges[i], + scale, + self.sizes[i], + self.rotations, + device=device) + # [*featmap_size, num_sizes/ranges, num_rots, box_code_size] + ndim = len(featmap_sizes[i]) + anchors = anchors.view(*featmap_sizes[i], -1, anchors.size(-1)) + # [*featmap_size, num_sizes/ranges*num_rots, box_code_size] + anchors = anchors.permute(ndim, *range(0, ndim), ndim + 1) + # [num_sizes/ranges*num_rots, *featmap_size, box_code_size] + multi_cls_anchors.append(anchors.reshape(-1, anchors.size(-1))) + # [num_sizes/ranges*num_rots*featmap_size, box_code_size] + return multi_cls_anchors diff --git a/mmdet3d/models/task_modules/anchor/builder.py b/mmdet3d/models/task_modules/anchor/builder.py new file mode 100755 index 0000000..5d75b12 --- /dev/null +++ b/mmdet3d/models/task_modules/anchor/builder.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +from mmdet3d.registry import TASK_UTILS + +PRIOR_GENERATORS = TASK_UTILS + +ANCHOR_GENERATORS = TASK_UTILS + + +def build_prior_generator(cfg, default_args=None): + warnings.warn( + '``build_prior_generator`` would be deprecated soon, please use ' + '``mmdet3d.registry.TASK_UTILS.build()`` ') + return TASK_UTILS.build(cfg, default_args=default_args) + + +def build_anchor_generator(cfg, default_args=None): + warnings.warn( + '``build_anchor_generator`` would be deprecated soon, please use ' + '``mmdet3d.registry.TASK_UTILS.build()`` ') + return TASK_UTILS.build(cfg, default_args=default_args) diff --git a/mmdet3d/models/task_modules/assigners/__init__.py b/mmdet3d/models/task_modules/assigners/__init__.py new file mode 100755 index 0000000..5c2b0a9 --- /dev/null +++ b/mmdet3d/models/task_modules/assigners/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .max_3d_iou_assigner import Max3DIoUAssigner + +__all__ = ['Max3DIoUAssigner'] diff --git a/mmdet3d/models/task_modules/assigners/max_3d_iou_assigner.py b/mmdet3d/models/task_modules/assigners/max_3d_iou_assigner.py new file mode 100755 index 0000000..6664fdf --- /dev/null +++ b/mmdet3d/models/task_modules/assigners/max_3d_iou_assigner.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Union + +from mmdet.models.task_modules import AssignResult, MaxIoUAssigner +from mmengine.structures import InstanceData + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class Max3DIoUAssigner(MaxIoUAssigner): + # TODO: This is a temporary box assigner. + """Assign a corresponding gt bbox or background to each bbox. + + Each proposals will be assigned with `-1`, or a semi-positive integer + indicating the ground truth index. + + - -1: negative sample, no assigned gt + - semi-positive integer: positive sample, index (0-based) of assigned gt + + Args: + pos_iou_thr (float): IoU threshold for positive bboxes. 
+ neg_iou_thr (float or tuple): IoU threshold for negative bboxes. + min_pos_iou (float): Minimum iou for a bbox to be considered as a + positive bbox. Positive samples can have smaller IoU than + pos_iou_thr due to the 4th step (assign max IoU sample to each gt). + `min_pos_iou` is set to avoid assigning bboxes that have extremely + small iou with GT as positive samples. + gt_max_assign_all (bool): Whether to assign all bboxes with the same + highest overlap with some gt to that gt. + ignore_iof_thr (float): IoF threshold for ignoring bboxes (if + `gt_bboxes_ignore` is specified). Negative values mean not + ignoring any bboxes. + ignore_wrt_candidates (bool): Whether to compute the iof between + `bboxes` and `gt_bboxes_ignore`, or the contrary. + match_low_quality (bool): Whether to allow low quality matches. This is + usually allowed for RPN and single stage detectors, but not allowed + in the second stage. Details are demonstrated in Step 4. + gpu_assign_thr (int): The upper bound of the number of GT for GPU + assign. When the number of gt is above this threshold, will assign + on CPU device. Negative values mean not assign on CPU. + iou_calculator (dict): Config of overlaps Calculator. + """ + + def __init__(self, + pos_iou_thr: float, + neg_iou_thr: Union[float, tuple], + min_pos_iou: float = .0, + gt_max_assign_all: bool = True, + ignore_iof_thr: float = -1, + ignore_wrt_candidates: bool = True, + match_low_quality: bool = True, + gpu_assign_thr: float = -1, + iou_calculator: dict = dict(type='BboxOverlaps2D')): + self.pos_iou_thr = pos_iou_thr + self.neg_iou_thr = neg_iou_thr + self.min_pos_iou = min_pos_iou + self.gt_max_assign_all = gt_max_assign_all + self.ignore_iof_thr = ignore_iof_thr + self.ignore_wrt_candidates = ignore_wrt_candidates + self.gpu_assign_thr = gpu_assign_thr + self.match_low_quality = match_low_quality + self.iou_calculator = TASK_UTILS.build(iou_calculator) + + def assign(self, + pred_instances: InstanceData, + gt_instances: InstanceData, + gt_instances_ignore: Optional[InstanceData] = None, + **kwargs) -> AssignResult: + """Assign gt to bboxes. + + This method assign a gt bbox to every bbox (proposal/anchor), each bbox + will be assigned with -1, or a semi-positive number. -1 means negative + sample, semi-positive number is the index (0-based) of assigned gt. + The assignment is done in following steps, the order matters. + + 1. assign every bbox to the background + 2. assign proposals whose iou with all gts < neg_iou_thr to 0 + 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, + assign it to that bbox + 4. for each gt bbox, assign its nearest proposals (may be more than + one) to itself + + Args: + pred_instances (:obj:`InstanceData`): Instances of model + predictions. It includes ``priors``, and the priors can + be anchors or points, or the bboxes predicted by the + previous stage, has shape (n, 4). The bboxes predicted by + the current model or stage will be named ``bboxes``, + ``labels``, and ``scores``, the same as the ``InstanceData`` + in other places. + gt_instances (:obj:`InstanceData`): Ground truth of instance + annotations. It usually includes ``bboxes``, with shape (k, 4), + and ``labels``, with shape (k, ). + gt_instances_ignore (:obj:`InstanceData`, optional): Instances + to be ignored during training. It includes ``bboxes`` + attribute data that is ignored during training and testing. + Defaults to None. + + Returns: + :obj:`AssignResult`: The assign result. 
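+
+        Note:
+            The ``Example`` below is inherited from the 2D
+            ``MaxIoUAssigner`` docstring. For ``Max3DIoUAssigner`` the
+            instances are expected to carry 3D fields instead, i.e.
+            ``gt_instances.bboxes_3d`` / ``gt_instances.labels_3d`` and
+            ``pred_instances.priors`` or ``pred_instances.bboxes_3d``, as
+            read in the implementation below.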
+ + Example: + >>> from mmengine.structures import InstanceData + >>> self = MaxIoUAssigner(0.5, 0.5) + >>> pred_instances = InstanceData() + >>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10], + ... [10, 10, 20, 20]]) + >>> gt_instances = InstanceData() + >>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 9]]) + >>> gt_instances.labels = torch.Tensor([0]) + >>> assign_result = self.assign(pred_instances, gt_instances) + >>> expected_gt_inds = torch.LongTensor([1, 0]) + >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) + """ + gt_bboxes = gt_instances.bboxes_3d + if 'priors' in pred_instances: + priors = pred_instances.priors + else: + priors = pred_instances.bboxes_3d.tensor + gt_labels = gt_instances.labels_3d + if gt_instances_ignore is not None: + gt_bboxes_ignore = gt_instances_ignore.bboxes_3d + else: + gt_bboxes_ignore = None + + assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( + gt_bboxes.shape[0] > self.gpu_assign_thr) else False + # compute overlap and assign gt on CPU when number of GT is large + if assign_on_cpu: + device = priors.device + priors = priors.cpu() + gt_bboxes = gt_bboxes.cpu() + gt_labels = gt_labels.cpu() + if gt_bboxes_ignore is not None: + gt_bboxes_ignore = gt_bboxes_ignore.cpu() + + overlaps = self.iou_calculator(gt_bboxes, priors) + + if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None + and gt_bboxes_ignore.numel() > 0 and priors.numel() > 0): + if self.ignore_wrt_candidates: + ignore_overlaps = self.iou_calculator( + priors, gt_bboxes_ignore, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) + else: + ignore_overlaps = self.iou_calculator( + gt_bboxes_ignore, priors, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) + overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 + + assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) + if assign_on_cpu: + assign_result.gt_inds = assign_result.gt_inds.to(device) + assign_result.max_overlaps = assign_result.max_overlaps.to(device) + if assign_result.labels is not None: + assign_result.labels = assign_result.labels.to(device) + return assign_result diff --git a/mmdet3d/models/task_modules/builder.py b/mmdet3d/models/task_modules/builder.py new file mode 100755 index 0000000..4be54e9 --- /dev/null +++ b/mmdet3d/models/task_modules/builder.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
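+"""Deprecated builder wrappers kept for backward compatibility.
+
+Each helper below simply emits a deprecation warning and forwards to
+``mmdet3d.registry.TASK_UTILS.build``.
+
+Example (illustrative; ``DeltaXYZWLHRBBoxCoder`` is built with its default
+``code_size``):
+    >>> coder = build_bbox_coder(dict(type='DeltaXYZWLHRBBoxCoder'))
+"""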
+import warnings + +from mmdet3d.registry import TASK_UTILS + +BBOX_ASSIGNERS = TASK_UTILS +BBOX_SAMPLERS = TASK_UTILS +BBOX_CODERS = TASK_UTILS + + +def build_assigner(cfg, **default_args): + """Builder of box assigner.""" + warnings.warn('``build_assigner`` would be deprecated soon, please use ' + '``mmdet3d.registry.TASK_UTILS.build()`` ') + return TASK_UTILS.build(cfg, default_args=default_args) + + +def build_sampler(cfg, **default_args): + """Builder of box sampler.""" + warnings.warn('``build_sampler`` would be deprecated soon, please use ' + '``mmdet3d.registry.TASK_UTILS.build()`` ') + return TASK_UTILS.build(cfg, default_args=default_args) + + +def build_bbox_coder(cfg, **default_args): + """Builder of box coder.""" + warnings.warn('``build_bbox_coder`` would be deprecated soon, please use ' + '``mmdet3d.registry.TASK_UTILS.build()`` ') + return TASK_UTILS.build(cfg, default_args=default_args) diff --git a/mmdet3d/models/task_modules/coders/__init__.py b/mmdet3d/models/task_modules/coders/__init__.py new file mode 100755 index 0000000..b22e725 --- /dev/null +++ b/mmdet3d/models/task_modules/coders/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .anchor_free_bbox_coder import AnchorFreeBBoxCoder +from .centerpoint_bbox_coders import CenterPointBBoxCoder +from .delta_xyzwhlr_bbox_coder import DeltaXYZWLHRBBoxCoder +from .fcos3d_bbox_coder import FCOS3DBBoxCoder +from .groupfree3d_bbox_coder import GroupFree3DBBoxCoder +from .monoflex_bbox_coder import MonoFlexCoder +from .partial_bin_based_bbox_coder import PartialBinBasedBBoxCoder +from .pgd_bbox_coder import PGDBBoxCoder +from .point_xyzwhlr_bbox_coder import PointXYZWHLRBBoxCoder +from .smoke_bbox_coder import SMOKECoder + +__all__ = [ + 'DeltaXYZWLHRBBoxCoder', 'PartialBinBasedBBoxCoder', + 'CenterPointBBoxCoder', 'AnchorFreeBBoxCoder', 'GroupFree3DBBoxCoder', + 'PointXYZWHLRBBoxCoder', 'FCOS3DBBoxCoder', 'PGDBBoxCoder', 'SMOKECoder', + 'MonoFlexCoder' +] diff --git a/mmdet3d/models/task_modules/coders/anchor_free_bbox_coder.py b/mmdet3d/models/task_modules/coders/anchor_free_bbox_coder.py new file mode 100755 index 0000000..037b74f --- /dev/null +++ b/mmdet3d/models/task_modules/coders/anchor_free_bbox_coder.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet3d.registry import TASK_UTILS +from .partial_bin_based_bbox_coder import PartialBinBasedBBoxCoder + + +@TASK_UTILS.register_module() +class AnchorFreeBBoxCoder(PartialBinBasedBBoxCoder): + """Anchor free bbox coder for 3D boxes. + + Args: + num_dir_bins (int): Number of bins to encode direction angle. + with_rot (bool): Whether the bbox is with rotation. + """ + + def __init__(self, num_dir_bins, with_rot=True): + super(AnchorFreeBBoxCoder, self).__init__( + num_dir_bins, 0, [], with_rot=with_rot) + self.num_dir_bins = num_dir_bins + self.with_rot = with_rot + + def encode(self, gt_bboxes_3d, gt_labels_3d): + """Encode ground truth to prediction targets. + + Args: + gt_bboxes_3d (BaseInstance3DBoxes): Ground truth bboxes + with shape (n, 7). + gt_labels_3d (torch.Tensor): Ground truth classes. + + Returns: + tuple: Targets of center, size and direction. 
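+
+        Note:
+            The direction residual is normalised by the bin width: with, for
+            example, ``num_dir_bins=12`` each bin spans ``2 * pi / 12`` rad
+            and ``dir_res_target`` is divided by that width, keeping the
+            regression target roughly within ``[-0.5, 0.5]``.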
+ """ + # generate center target + center_target = gt_bboxes_3d.gravity_center + + # generate bbox size target + size_res_target = gt_bboxes_3d.dims / 2 + + # generate dir target + box_num = gt_labels_3d.shape[0] + if self.with_rot: + (dir_class_target, + dir_res_target) = self.angle2class(gt_bboxes_3d.yaw) + dir_res_target /= (2 * np.pi / self.num_dir_bins) + else: + dir_class_target = gt_labels_3d.new_zeros(box_num) + dir_res_target = gt_bboxes_3d.tensor.new_zeros(box_num) + + return (center_target, size_res_target, dir_class_target, + dir_res_target) + + def decode(self, bbox_out): + """Decode predicted parts to bbox3d. + + Args: + bbox_out (dict): Predictions from model, should contain keys below. + + - center: predicted bottom center of bboxes. + - dir_class: predicted bbox direction class. + - dir_res: predicted bbox direction residual. + - size: predicted bbox size. + + Returns: + torch.Tensor: Decoded bbox3d with shape (batch, n, 7). + """ + center = bbox_out['center'] + batch_size, num_proposal = center.shape[:2] + + # decode heading angle + if self.with_rot: + dir_class = torch.argmax(bbox_out['dir_class'], -1) + dir_res = torch.gather(bbox_out['dir_res'], 2, + dir_class.unsqueeze(-1)) + dir_res.squeeze_(2) + dir_angle = self.class2angle(dir_class, dir_res).reshape( + batch_size, num_proposal, 1) + else: + dir_angle = center.new_zeros(batch_size, num_proposal, 1) + + # decode bbox size + bbox_size = torch.clamp(bbox_out['size'] * 2, min=0.1) + + bbox3d = torch.cat([center, bbox_size, dir_angle], dim=-1) + return bbox3d + + def split_pred(self, cls_preds, reg_preds, base_xyz): + """Split predicted features to specific parts. + + Args: + cls_preds (torch.Tensor): Class predicted features to split. + reg_preds (torch.Tensor): Regression predicted features to split. + base_xyz (torch.Tensor): Coordinates of points. + + Returns: + dict[str, torch.Tensor]: Split results. + """ + results = {} + results['obj_scores'] = cls_preds + + start, end = 0, 0 + reg_preds_trans = reg_preds.transpose(2, 1) + + # decode center + end += 3 + # (batch_size, num_proposal, 3) + results['center_offset'] = reg_preds_trans[..., start:end] + results['center'] = base_xyz.detach() + reg_preds_trans[..., start:end] + start = end + + # decode center + end += 3 + # (batch_size, num_proposal, 3) + results['size'] = reg_preds_trans[..., start:end] + start = end + + # decode direction + end += self.num_dir_bins + results['dir_class'] = reg_preds_trans[..., start:end] + start = end + + end += self.num_dir_bins + dir_res_norm = reg_preds_trans[..., start:end] + start = end + + results['dir_res_norm'] = dir_res_norm + results['dir_res'] = dir_res_norm * (2 * np.pi / self.num_dir_bins) + + return results diff --git a/mmdet3d/models/task_modules/coders/centerpoint_bbox_coders.py b/mmdet3d/models/task_modules/coders/centerpoint_bbox_coders.py new file mode 100755 index 0000000..8624012 --- /dev/null +++ b/mmdet3d/models/task_modules/coders/centerpoint_bbox_coders.py @@ -0,0 +1,229 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmdet.models.task_modules import BaseBBoxCoder + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class CenterPointBBoxCoder(BaseBBoxCoder): + """Bbox coder for CenterPoint. + + Args: + pc_range (list[float]): Range of point cloud. + out_size_factor (int): Downsample factor of the model. + voxel_size (list[float]): Size of voxel. + post_center_range (list[float], optional): Limit of the center. + Default: None. 
+ max_num (int, optional): Max number to be kept. Default: 100. + score_threshold (float, optional): Threshold to filter boxes + based on score. Default: None. + code_size (int, optional): Code size of bboxes. Default: 9 + """ + + def __init__(self, + pc_range, + out_size_factor, + voxel_size, + post_center_range=None, + max_num=100, + score_threshold=None, + code_size=9): + + self.pc_range = pc_range + self.out_size_factor = out_size_factor + self.voxel_size = voxel_size + self.post_center_range = post_center_range + self.max_num = max_num + self.score_threshold = score_threshold + self.code_size = code_size + + def _gather_feat(self, feats, inds, feat_masks=None): + """Given feats and indexes, returns the gathered feats. + + Args: + feats (torch.Tensor): Features to be transposed and gathered + with the shape of [B, 2, W, H]. + inds (torch.Tensor): Indexes with the shape of [B, N]. + feat_masks (torch.Tensor, optional): Mask of the feats. + Default: None. + + Returns: + torch.Tensor: Gathered feats. + """ + dim = feats.size(2) + inds = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), dim) + feats = feats.gather(1, inds) + if feat_masks is not None: + feat_masks = feat_masks.unsqueeze(2).expand_as(feats) + feats = feats[feat_masks] + feats = feats.view(-1, dim) + return feats + + def _topk(self, scores, K=80): + """Get indexes based on scores. + + Args: + scores (torch.Tensor): scores with the shape of [B, N, W, H]. + K (int, optional): Number to be kept. Defaults to 80. + + Returns: + tuple[torch.Tensor] + torch.Tensor: Selected scores with the shape of [B, K]. + torch.Tensor: Selected indexes with the shape of [B, K]. + torch.Tensor: Selected classes with the shape of [B, K]. + torch.Tensor: Selected y coord with the shape of [B, K]. + torch.Tensor: Selected x coord with the shape of [B, K]. + """ + batch, cat, height, width = scores.size() + + topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K) + + topk_inds = topk_inds % (height * width) + topk_ys = (topk_inds.float() / + torch.tensor(width, dtype=torch.float)).int().float() + topk_xs = (topk_inds % width).int().float() + + topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K) + topk_clses = (topk_ind / torch.tensor(K, dtype=torch.float)).int() + topk_inds = self._gather_feat(topk_inds.view(batch, -1, 1), + topk_ind).view(batch, K) + topk_ys = self._gather_feat(topk_ys.view(batch, -1, 1), + topk_ind).view(batch, K) + topk_xs = self._gather_feat(topk_xs.view(batch, -1, 1), + topk_ind).view(batch, K) + + return topk_score, topk_inds, topk_clses, topk_ys, topk_xs + + def _transpose_and_gather_feat(self, feat, ind): + """Given feats and indexes, returns the transposed and gathered feats. + + Args: + feat (torch.Tensor): Features to be transposed and gathered + with the shape of [B, 2, W, H]. + ind (torch.Tensor): Indexes with the shape of [B, N]. + + Returns: + torch.Tensor: Transposed and gathered feats. + """ + feat = feat.permute(0, 2, 3, 1).contiguous() + feat = feat.view(feat.size(0), -1, feat.size(3)) + feat = self._gather_feat(feat, ind) + return feat + + def encode(self): + pass + + def decode(self, + heat, + rot_sine, + rot_cosine, + hei, + dim, + vel, + reg=None, + task_id=-1): + """Decode bboxes. + + Args: + heat (torch.Tensor): Heatmap with the shape of [B, N, W, H]. + rot_sine (torch.Tensor): Sine of rotation with the shape of + [B, 1, W, H]. + rot_cosine (torch.Tensor): Cosine of rotation with the shape of + [B, 1, W, H]. 
+ hei (torch.Tensor): Height of the boxes with the shape + of [B, 1, W, H]. + dim (torch.Tensor): Dim of the boxes with the shape of + [B, 1, W, H]. + vel (torch.Tensor): Velocity with the shape of [B, 1, W, H]. + reg (torch.Tensor, optional): Regression value of the boxes in + 2D with the shape of [B, 2, W, H]. Default: None. + task_id (int, optional): Index of task. Default: -1. + + Returns: + list[dict]: Decoded boxes. + """ + batch, cat, _, _ = heat.size() + + scores, inds, clses, ys, xs = self._topk(heat, K=self.max_num) + + if reg is not None: + reg = self._transpose_and_gather_feat(reg, inds) + reg = reg.view(batch, self.max_num, 2) + xs = xs.view(batch, self.max_num, 1) + reg[:, :, 0:1] + ys = ys.view(batch, self.max_num, 1) + reg[:, :, 1:2] + else: + xs = xs.view(batch, self.max_num, 1) + 0.5 + ys = ys.view(batch, self.max_num, 1) + 0.5 + + # rotation value and direction label + rot_sine = self._transpose_and_gather_feat(rot_sine, inds) + rot_sine = rot_sine.view(batch, self.max_num, 1) + + rot_cosine = self._transpose_and_gather_feat(rot_cosine, inds) + rot_cosine = rot_cosine.view(batch, self.max_num, 1) + rot = torch.atan2(rot_sine, rot_cosine) + + # height in the bev + hei = self._transpose_and_gather_feat(hei, inds) + hei = hei.view(batch, self.max_num, 1) + + # dim of the box + dim = self._transpose_and_gather_feat(dim, inds) + dim = dim.view(batch, self.max_num, 3) + + # class label + clses = clses.view(batch, self.max_num).float() + scores = scores.view(batch, self.max_num) + + xs = xs.view( + batch, self.max_num, + 1) * self.out_size_factor * self.voxel_size[0] + self.pc_range[0] + ys = ys.view( + batch, self.max_num, + 1) * self.out_size_factor * self.voxel_size[1] + self.pc_range[1] + + if vel is None: # KITTI FORMAT + final_box_preds = torch.cat([xs, ys, hei, dim, rot], dim=2) + else: # exist velocity, nuscene format + vel = self._transpose_and_gather_feat(vel, inds) + vel = vel.view(batch, self.max_num, 2) + final_box_preds = torch.cat([xs, ys, hei, dim, rot, vel], dim=2) + + final_scores = scores + final_preds = clses + + # use score threshold + if self.score_threshold is not None: + thresh_mask = final_scores > self.score_threshold + + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=heat.device) + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(2) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(2) + + predictions_dicts = [] + for i in range(batch): + cmask = mask[i, :] + if self.score_threshold: + cmask &= thresh_mask[i] + + boxes3d = final_box_preds[i, cmask] + scores = final_scores[i, cmask] + labels = final_preds[i, cmask] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + + predictions_dicts.append(predictions_dict) + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + + return predictions_dicts diff --git a/mmdet3d/models/task_modules/coders/delta_xyzwhlr_bbox_coder.py b/mmdet3d/models/task_modules/coders/delta_xyzwhlr_bbox_coder.py new file mode 100755 index 0000000..6098dad --- /dev/null +++ b/mmdet3d/models/task_modules/coders/delta_xyzwhlr_bbox_coder.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
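+"""Residual (delta) coder for 7-DoF 3D boxes in (x, y, z, w, l, h, r) order.
+
+Example (an illustrative sketch; the anchor and ground-truth boxes below are
+arbitrary values, not taken from any dataset):
+    >>> import torch
+    >>> coder = DeltaXYZWLHRBBoxCoder(code_size=7)
+    >>> anchor = torch.tensor([[0., 0., -1.0, 1.6, 3.9, 1.56, 0.]])
+    >>> gt = torch.tensor([[0.5, 0.2, -0.9, 1.7, 4.1, 1.50, 0.1]])
+    >>> deltas = coder.encode(anchor, gt)
+    >>> torch.allclose(coder.decode(anchor, deltas), gt, atol=1e-5)
+    True
+"""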
+import torch +from mmdet.models.task_modules import BaseBBoxCoder + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class DeltaXYZWLHRBBoxCoder(BaseBBoxCoder): + """Bbox Coder for 3D boxes. + + Args: + code_size (int): The dimension of boxes to be encoded. + """ + + def __init__(self, code_size=7): + super(DeltaXYZWLHRBBoxCoder, self).__init__() + self.code_size = code_size + + @staticmethod + def encode(src_boxes, dst_boxes): + """Get box regression transformation deltas (dx, dy, dz, dx_size, + dy_size, dz_size, dr, dv*) that can be used to transform the + `src_boxes` into the `target_boxes`. + + Args: + src_boxes (torch.Tensor): source boxes, e.g., object proposals. + dst_boxes (torch.Tensor): target of the transformation, e.g., + ground-truth boxes. + + Returns: + torch.Tensor: Box transformation deltas. + """ + box_ndim = src_boxes.shape[-1] + cas, cgs, cts = [], [], [] + if box_ndim > 7: + xa, ya, za, wa, la, ha, ra, *cas = torch.split( + src_boxes, 1, dim=-1) + xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split( + dst_boxes, 1, dim=-1) + cts = [g - a for g, a in zip(cgs, cas)] + else: + xa, ya, za, wa, la, ha, ra = torch.split(src_boxes, 1, dim=-1) + xg, yg, zg, wg, lg, hg, rg = torch.split(dst_boxes, 1, dim=-1) + za = za + ha / 2 + zg = zg + hg / 2 + diagonal = torch.sqrt(la**2 + wa**2) + xt = (xg - xa) / diagonal + yt = (yg - ya) / diagonal + zt = (zg - za) / ha + lt = torch.log(lg / la) + wt = torch.log(wg / wa) + ht = torch.log(hg / ha) + rt = rg - ra + return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1) + + @staticmethod + def decode(anchors, deltas): + """Apply transformation `deltas` (dx, dy, dz, dx_size, dy_size, + dz_size, dr, dv*) to `boxes`. + + Args: + anchors (torch.Tensor): Parameters of anchors with shape (N, 7). + deltas (torch.Tensor): Encoded boxes with shape + (N, 7+n) [x, y, z, x_size, y_size, z_size, r, velo*]. + + Returns: + torch.Tensor: Decoded boxes. + """ + cas, cts = [], [] + box_ndim = anchors.shape[-1] + if box_ndim > 7: + xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1) + xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(deltas, 1, dim=-1) + else: + xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1) + xt, yt, zt, wt, lt, ht, rt = torch.split(deltas, 1, dim=-1) + + za = za + ha / 2 + diagonal = torch.sqrt(la**2 + wa**2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + zg = zt * ha + za + + lg = torch.exp(lt) * la + wg = torch.exp(wt) * wa + hg = torch.exp(ht) * ha + rg = rt + ra + zg = zg - hg / 2 + cgs = [t + a for t, a in zip(cts, cas)] + return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1) diff --git a/mmdet3d/models/task_modules/coders/fcos3d_bbox_coder.py b/mmdet3d/models/task_modules/coders/fcos3d_bbox_coder.py new file mode 100755 index 0000000..2790d00 --- /dev/null +++ b/mmdet3d/models/task_modules/coders/fcos3d_bbox_coder.py @@ -0,0 +1,127 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmdet.models.task_modules import BaseBBoxCoder + +from mmdet3d.registry import TASK_UTILS +from mmdet3d.structures.bbox_3d import limit_period + + +@TASK_UTILS.register_module() +class FCOS3DBBoxCoder(BaseBBoxCoder): + """Bounding box coder for FCOS3D. + + Args: + base_depths (tuple[tuple[float]]): Depth references for decode box + depth. Defaults to None. + base_dims (tuple[tuple[float]]): Dimension references for decode box + dimension. Defaults to None. + code_size (int): The dimension of boxes to be encoded. Defaults to 7. 
+ norm_on_bbox (bool): Whether to apply normalization on the bounding + box 2D attributes. Defaults to True. + """ + + def __init__(self, + base_depths=None, + base_dims=None, + code_size=7, + norm_on_bbox=True): + super(FCOS3DBBoxCoder, self).__init__() + self.base_depths = base_depths + self.base_dims = base_dims + self.bbox_code_size = code_size + self.norm_on_bbox = norm_on_bbox + + def encode(self, gt_bboxes_3d, gt_labels_3d, gt_bboxes, gt_labels): + # TODO: refactor the encoder in the FCOS3D and PGD head + pass + + def decode(self, bbox, scale, stride, training, cls_score=None): + """Decode regressed results into 3D predictions. + + Note that offsets are not transformed to the projected 3D centers. + + Args: + bbox (torch.Tensor): Raw bounding box predictions in shape + [N, C, H, W]. + scale (tuple[`Scale`]): Learnable scale parameters. + stride (int): Stride for a specific feature level. + training (bool): Whether the decoding is in the training + procedure. + cls_score (torch.Tensor): Classification score map for deciding + which base depth or dim is used. Defaults to None. + + Returns: + torch.Tensor: Decoded boxes. + """ + # scale the bbox of different level + # only apply to offset, depth and size prediction + scale_offset, scale_depth, scale_size = scale[0:3] + + clone_bbox = bbox.clone() + bbox[:, :2] = scale_offset(clone_bbox[:, :2]).float() + bbox[:, 2] = scale_depth(clone_bbox[:, 2]).float() + bbox[:, 3:6] = scale_size(clone_bbox[:, 3:6]).float() + + if self.base_depths is None: + bbox[:, 2] = bbox[:, 2].exp() + elif len(self.base_depths) == 1: # only single prior + mean = self.base_depths[0][0] + std = self.base_depths[0][1] + bbox[:, 2] = mean + bbox.clone()[:, 2] * std + else: # multi-class priors + assert len(self.base_depths) == cls_score.shape[1], \ + 'The number of multi-class depth priors should be equal to ' \ + 'the number of categories.' + indices = cls_score.max(dim=1)[1] + depth_priors = cls_score.new_tensor( + self.base_depths)[indices, :].permute(0, 3, 1, 2) + mean = depth_priors[:, 0] + std = depth_priors[:, 1] + bbox[:, 2] = mean + bbox.clone()[:, 2] * std + + bbox[:, 3:6] = bbox[:, 3:6].exp() + if self.base_dims is not None: + assert len(self.base_dims) == cls_score.shape[1], \ + 'The number of anchor sizes should be equal to the number ' \ + 'of categories.' + indices = cls_score.max(dim=1)[1] + size_priors = cls_score.new_tensor( + self.base_dims)[indices, :].permute(0, 3, 1, 2) + bbox[:, 3:6] = size_priors * bbox.clone()[:, 3:6] + + assert self.norm_on_bbox is True, 'Setting norm_on_bbox to False '\ + 'has not been thoroughly tested for FCOS3D.' + if self.norm_on_bbox: + if not training: + # Note that this line is conducted only when testing + bbox[:, :2] *= stride + + return bbox + + @staticmethod + def decode_yaw(bbox, centers2d, dir_cls, dir_offset, cam2img): + """Decode yaw angle and change it from local to global.i. + + Args: + bbox (torch.Tensor): Bounding box predictions in shape + [N, C] with yaws to be decoded. + centers2d (torch.Tensor): Projected 3D-center on the image planes + corresponding to the box predictions. + dir_cls (torch.Tensor): Predicted direction classes. + dir_offset (float): Direction offset before dividing all the + directions into several classes. + cam2img (torch.Tensor): Camera intrinsic matrix in shape [4, 4]. + + Returns: + torch.Tensor: Bounding boxes with decoded yaws. 
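+
+        Note:
+            The local-to-global conversion adds the viewing angle of the
+            projected 3D center, i.e.
+            ``yaw_global = yaw_local + arctan2(u - cx, fx)`` with
+            ``u = centers2d[:, 0]``, ``cx = cam2img[0, 2]`` and
+            ``fx = cam2img[0, 0]``, as implemented below.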
+ """ + if bbox.shape[0] > 0: + dir_rot = limit_period(bbox[..., 6] - dir_offset, 0, np.pi) + bbox[..., 6] = \ + dir_rot + dir_offset + np.pi * dir_cls.to(bbox.dtype) + + bbox[:, 6] = torch.atan2(centers2d[:, 0] - cam2img[0, 2], + cam2img[0, 0]) + bbox[:, 6] + + return bbox diff --git a/mmdet3d/models/task_modules/coders/groupfree3d_bbox_coder.py b/mmdet3d/models/task_modules/coders/groupfree3d_bbox_coder.py new file mode 100755 index 0000000..aa9cec3 --- /dev/null +++ b/mmdet3d/models/task_modules/coders/groupfree3d_bbox_coder.py @@ -0,0 +1,191 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet3d.registry import TASK_UTILS +from .partial_bin_based_bbox_coder import PartialBinBasedBBoxCoder + + +@TASK_UTILS.register_module() +class GroupFree3DBBoxCoder(PartialBinBasedBBoxCoder): + """Modified partial bin based bbox coder for GroupFree3D. + + Args: + num_dir_bins (int): Number of bins to encode direction angle. + num_sizes (int): Number of size clusters. + mean_sizes (list[list[int]]): Mean size of bboxes in each class. + with_rot (bool, optional): Whether the bbox is with rotation. + Defaults to True. + size_cls_agnostic (bool, optional): Whether the predicted size is + class-agnostic. Defaults to True. + """ + + def __init__(self, + num_dir_bins, + num_sizes, + mean_sizes, + with_rot=True, + size_cls_agnostic=True): + super(GroupFree3DBBoxCoder, self).__init__( + num_dir_bins=num_dir_bins, + num_sizes=num_sizes, + mean_sizes=mean_sizes, + with_rot=with_rot) + self.size_cls_agnostic = size_cls_agnostic + + def encode(self, gt_bboxes_3d, gt_labels_3d): + """Encode ground truth to prediction targets. + + Args: + gt_bboxes_3d (BaseInstance3DBoxes): Ground truth bboxes + with shape (n, 7). + gt_labels_3d (torch.Tensor): Ground truth classes. + + Returns: + tuple: Targets of center, size and direction. + """ + # generate center target + center_target = gt_bboxes_3d.gravity_center + + # generate bbox size target + size_target = gt_bboxes_3d.dims + size_class_target = gt_labels_3d + size_res_target = gt_bboxes_3d.dims - gt_bboxes_3d.tensor.new_tensor( + self.mean_sizes)[size_class_target] + + # generate dir target + box_num = gt_labels_3d.shape[0] + if self.with_rot: + (dir_class_target, + dir_res_target) = self.angle2class(gt_bboxes_3d.yaw) + else: + dir_class_target = gt_labels_3d.new_zeros(box_num) + dir_res_target = gt_bboxes_3d.tensor.new_zeros(box_num) + + return (center_target, size_target, size_class_target, size_res_target, + dir_class_target, dir_res_target) + + def decode(self, bbox_out, prefix=''): + """Decode predicted parts to bbox3d. + + Args: + bbox_out (dict): Predictions from model, should contain keys below. + + - center: predicted bottom center of bboxes. + - dir_class: predicted bbox direction class. + - dir_res: predicted bbox direction residual. + - size_class: predicted bbox size class. + - size_res: predicted bbox size residual. + - size: predicted class-agnostic bbox size + prefix (str, optional): Decode predictions with specific prefix. + Defaults to ''. + + Returns: + torch.Tensor: Decoded bbox3d with shape (batch, n, 7). 
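+
+        Note:
+            With the default ``prefix=''`` the expected keys are ``center``,
+            ``dir_class``, ``dir_res`` and either ``size`` (when
+            ``size_cls_agnostic=True``) or ``size_class`` / ``size_res``,
+            mirroring the keys produced by ``split_pred``. The decoded box is
+            the concatenation of center (3), size (3) and yaw (1).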
+ """ + center = bbox_out[f'{prefix}center'] + batch_size, num_proposal = center.shape[:2] + + # decode heading angle + if self.with_rot: + dir_class = torch.argmax(bbox_out[f'{prefix}dir_class'], -1) + dir_res = torch.gather(bbox_out[f'{prefix}dir_res'], 2, + dir_class.unsqueeze(-1)) + dir_res.squeeze_(2) + dir_angle = self.class2angle(dir_class, dir_res).reshape( + batch_size, num_proposal, 1) + else: + dir_angle = center.new_zeros(batch_size, num_proposal, 1) + + # decode bbox size + if self.size_cls_agnostic: + bbox_size = bbox_out[f'{prefix}size'].reshape( + batch_size, num_proposal, 3) + else: + size_class = torch.argmax( + bbox_out[f'{prefix}size_class'], -1, keepdim=True) + size_res = torch.gather( + bbox_out[f'{prefix}size_res'], 2, + size_class.unsqueeze(-1).repeat(1, 1, 1, 3)) + mean_sizes = center.new_tensor(self.mean_sizes) + size_base = torch.index_select(mean_sizes, 0, + size_class.reshape(-1)) + bbox_size = size_base.reshape(batch_size, num_proposal, + -1) + size_res.squeeze(2) + + bbox3d = torch.cat([center, bbox_size, dir_angle], dim=-1) + return bbox3d + + def split_pred(self, cls_preds, reg_preds, base_xyz, prefix=''): + """Split predicted features to specific parts. + + Args: + cls_preds (torch.Tensor): Class predicted features to split. + reg_preds (torch.Tensor): Regression predicted features to split. + base_xyz (torch.Tensor): Coordinates of points. + prefix (str, optional): Decode predictions with specific prefix. + Defaults to ''. + + Returns: + dict[str, torch.Tensor]: Split results. + """ + results = {} + start, end = 0, 0 + + cls_preds_trans = cls_preds.transpose(2, 1) + reg_preds_trans = reg_preds.transpose(2, 1) + + # decode center + end += 3 + # (batch_size, num_proposal, 3) + results[f'{prefix}center_residual'] = \ + reg_preds_trans[..., start:end].contiguous() + results[f'{prefix}center'] = base_xyz + \ + reg_preds_trans[..., start:end].contiguous() + start = end + + # decode direction + end += self.num_dir_bins + results[f'{prefix}dir_class'] = \ + reg_preds_trans[..., start:end].contiguous() + start = end + + end += self.num_dir_bins + dir_res_norm = reg_preds_trans[..., start:end].contiguous() + start = end + + results[f'{prefix}dir_res_norm'] = dir_res_norm + results[f'{prefix}dir_res'] = dir_res_norm * ( + np.pi / self.num_dir_bins) + + # decode size + if self.size_cls_agnostic: + end += 3 + results[f'{prefix}size'] = \ + reg_preds_trans[..., start:end].contiguous() + else: + end += self.num_sizes + results[f'{prefix}size_class'] = reg_preds_trans[ + ..., start:end].contiguous() + start = end + + end += self.num_sizes * 3 + size_res_norm = reg_preds_trans[..., start:end] + batch_size, num_proposal = reg_preds_trans.shape[:2] + size_res_norm = size_res_norm.view( + [batch_size, num_proposal, self.num_sizes, 3]) + start = end + + results[f'{prefix}size_res_norm'] = size_res_norm.contiguous() + mean_sizes = reg_preds.new_tensor(self.mean_sizes) + results[f'{prefix}size_res'] = ( + size_res_norm * mean_sizes.unsqueeze(0).unsqueeze(0)) + + # decode objectness score + # Group-Free-3D objectness output shape (batch, proposal, 1) + results[f'{prefix}obj_scores'] = cls_preds_trans[..., :1].contiguous() + + # decode semantic score + results[f'{prefix}sem_scores'] = cls_preds_trans[..., 1:].contiguous() + + return results diff --git a/mmdet3d/models/task_modules/coders/monoflex_bbox_coder.py b/mmdet3d/models/task_modules/coders/monoflex_bbox_coder.py new file mode 100755 index 0000000..9f4259c --- /dev/null +++ 
b/mmdet3d/models/task_modules/coders/monoflex_bbox_coder.py @@ -0,0 +1,515 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmdet.models.task_modules import BaseBBoxCoder +from torch.nn import functional as F + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class MonoFlexCoder(BaseBBoxCoder): + """Bbox Coder for MonoFlex. + + Args: + depth_mode (str): The mode for depth calculation. + Available options are "linear", "inv_sigmoid", and "exp". + base_depth (tuple[float]): References for decoding box depth. + depth_range (list): Depth range of predicted depth. + combine_depth (bool): Whether to use combined depth (direct depth + and depth from keypoints) or use direct depth only. + uncertainty_range (list): Uncertainty range of predicted depth. + base_dims (tuple[tuple[float]]): Dimensions mean and std of decode bbox + dimensions [l, h, w] for each category. + dims_mode (str): The mode for dimension calculation. + Available options are "linear" and "exp". + multibin (bool): Whether to use multibin representation. + num_dir_bins (int): Number of Number of bins to encode + direction angle. + bin_centers (list[float]): Local yaw centers while using multibin + representations. + bin_margin (float): Margin of multibin representations. + code_size (int): The dimension of boxes to be encoded. + eps (float, optional): A value added to the denominator for numerical + stability. Default 1e-3. + """ + + def __init__(self, + depth_mode, + base_depth, + depth_range, + combine_depth, + uncertainty_range, + base_dims, + dims_mode, + multibin, + num_dir_bins, + bin_centers, + bin_margin, + code_size, + eps=1e-3): + super(MonoFlexCoder, self).__init__() + + # depth related + self.depth_mode = depth_mode + self.base_depth = base_depth + self.depth_range = depth_range + self.combine_depth = combine_depth + self.uncertainty_range = uncertainty_range + + # dimensions related + self.base_dims = base_dims + self.dims_mode = dims_mode + + # orientation related + self.multibin = multibin + self.num_dir_bins = num_dir_bins + self.bin_centers = bin_centers + self.bin_margin = bin_margin + + # output related + self.bbox_code_size = code_size + self.eps = eps + + def encode(self, gt_bboxes_3d): + """Encode ground truth to prediction targets. + + Args: + gt_bboxes_3d (`BaseInstance3DBoxes`): Ground truth 3D bboxes. + shape: (N, 7). + + Returns: + torch.Tensor: Targets of orientations. + """ + local_yaw = gt_bboxes_3d.local_yaw + # encode local yaw (-pi ~ pi) to multibin format + encode_local_yaw = local_yaw.new_zeros( + [local_yaw.shape[0], self.num_dir_bins * 2]) + bin_size = 2 * np.pi / self.num_dir_bins + margin_size = bin_size * self.bin_margin + + bin_centers = local_yaw.new_tensor(self.bin_centers) + range_size = bin_size / 2 + margin_size + + offsets = local_yaw.unsqueeze(1) - bin_centers.unsqueeze(0) + offsets[offsets > np.pi] = offsets[offsets > np.pi] - 2 * np.pi + offsets[offsets < -np.pi] = offsets[offsets < -np.pi] + 2 * np.pi + + for i in range(self.num_dir_bins): + offset = offsets[:, i] + inds = abs(offset) < range_size + encode_local_yaw[inds, i] = 1 + encode_local_yaw[inds, i + self.num_dir_bins] = offset[inds] + + orientation_target = encode_local_yaw + + return orientation_target + + def decode(self, bbox, base_centers2d, labels, downsample_ratio, cam2imgs): + """Decode bounding box regression into 3D predictions. + + Args: + bbox (Tensor): Raw bounding box predictions for each + predict center2d point. 
+ shape: (N, C) + base_centers2d (torch.Tensor): Base centers2d for 3D bboxes. + shape: (N, 2). + labels (Tensor): Batch predict class label for each predict + center2d point. + shape: (N, ) + downsample_ratio (int): The stride of feature map. + cam2imgs (Tensor): Batch images' camera intrinsic matrix. + shape: kitti (N, 4, 4) nuscenes (N, 3, 3) + + Return: + dict: The 3D prediction dict decoded from regression map. + the dict has components below: + - bboxes2d (torch.Tensor): Decoded [x1, y1, x2, y2] format + 2D bboxes. + - dimensions (torch.Tensor): Decoded dimensions for each + object. + - offsets2d (torch.Tenosr): Offsets between base centers2d + and real centers2d. + - direct_depth (torch.Tensor): Decoded directly regressed + depth. + - keypoints2d (torch.Tensor): Keypoints of each projected + 3D box on image. + - keypoints_depth (torch.Tensor): Decoded depth from keypoints. + - combined_depth (torch.Tensor): Combined depth using direct + depth and keypoints depth with depth uncertainty. + - orientations (torch.Tensor): Multibin format orientations + (local yaw) for each objects. + """ + + # 4 dimensions for FCOS style regression + pred_bboxes2d = bbox[:, 0:4] + + # change FCOS style to [x1, y1, x2, y2] format for IOU Loss + pred_bboxes2d = self.decode_bboxes2d(pred_bboxes2d, base_centers2d) + + # 2 dimensions for projected centers2d offsets + pred_offsets2d = bbox[:, 4:6] + + # 3 dimensions for 3D bbox dimensions offsets + pred_dimensions_offsets3d = bbox[:, 29:32] + + # the first 8 dimensions are for orientation bin classification + # and the second 8 dimensions are for orientation offsets. + pred_orientations = torch.cat((bbox[:, 32:40], bbox[:, 40:48]), dim=1) + + # 3 dimensions for the uncertainties of the solved depths from + # groups of keypoints + pred_keypoints_depth_uncertainty = bbox[:, 26:29] + + # 1 dimension for the uncertainty of directly regressed depth + pred_direct_depth_uncertainty = bbox[:, 49:50].squeeze(-1) + + # 2 dimension of offsets x keypoints (8 corners + top/bottom center) + pred_keypoints2d = bbox[:, 6:26].reshape(-1, 10, 2) + + # 1 dimension for depth offsets + pred_direct_depth_offsets = bbox[:, 48:49].squeeze(-1) + + # decode the pred residual dimensions to real dimensions + pred_dimensions = self.decode_dims(labels, pred_dimensions_offsets3d) + pred_direct_depth = self.decode_direct_depth(pred_direct_depth_offsets) + pred_keypoints_depth = self.keypoints2depth(pred_keypoints2d, + pred_dimensions, cam2imgs, + downsample_ratio) + + pred_direct_depth_uncertainty = torch.clamp( + pred_direct_depth_uncertainty, self.uncertainty_range[0], + self.uncertainty_range[1]) + pred_keypoints_depth_uncertainty = torch.clamp( + pred_keypoints_depth_uncertainty, self.uncertainty_range[0], + self.uncertainty_range[1]) + + if self.combine_depth: + pred_depth_uncertainty = torch.cat( + (pred_direct_depth_uncertainty.unsqueeze(-1), + pred_keypoints_depth_uncertainty), + dim=1).exp() + pred_depth = torch.cat( + (pred_direct_depth.unsqueeze(-1), pred_keypoints_depth), dim=1) + pred_combined_depth = \ + self.combine_depths(pred_depth, pred_depth_uncertainty) + else: + pred_combined_depth = None + + preds = dict( + bboxes2d=pred_bboxes2d, + dimensions=pred_dimensions, + offsets2d=pred_offsets2d, + keypoints2d=pred_keypoints2d, + orientations=pred_orientations, + direct_depth=pred_direct_depth, + keypoints_depth=pred_keypoints_depth, + combined_depth=pred_combined_depth, + direct_depth_uncertainty=pred_direct_depth_uncertainty, + 
keypoints_depth_uncertainty=pred_keypoints_depth_uncertainty, + ) + + return preds + + def decode_direct_depth(self, depth_offsets): + """Transform depth offset to directly regressed depth. + + Args: + depth_offsets (torch.Tensor): Predicted depth offsets. + shape: (N, ) + + Return: + torch.Tensor: Directly regressed depth. + shape: (N, ) + """ + if self.depth_mode == 'exp': + direct_depth = depth_offsets.exp() + elif self.depth_mode == 'linear': + base_depth = depth_offsets.new_tensor(self.base_depth) + direct_depth = depth_offsets * base_depth[1] + base_depth[0] + elif self.depth_mode == 'inv_sigmoid': + direct_depth = 1 / torch.sigmoid(depth_offsets) - 1 + else: + raise ValueError + + if self.depth_range is not None: + direct_depth = torch.clamp( + direct_depth, min=self.depth_range[0], max=self.depth_range[1]) + + return direct_depth + + def decode_location(self, + base_centers2d, + offsets2d, + depths, + cam2imgs, + downsample_ratio, + pad_mode='default'): + """Retrieve object location. + + Args: + base_centers2d (torch.Tensor): predicted base centers2d. + shape: (N, 2) + offsets2d (torch.Tensor): The offsets between real centers2d + and base centers2d. + shape: (N , 2) + depths (torch.Tensor): Depths of objects. + shape: (N, ) + cam2imgs (torch.Tensor): Batch images' camera intrinsic matrix. + shape: kitti (N, 4, 4) nuscenes (N, 3, 3) + downsample_ratio (int): The stride of feature map. + pad_mode (str, optional): Padding mode used in + training data augmentation. + + Return: + tuple(torch.Tensor): Centers of 3D boxes. + shape: (N, 3) + """ + N = cam2imgs.shape[0] + # (N, 4, 4) + cam2imgs_inv = cam2imgs.inverse() + if pad_mode == 'default': + centers2d_img = (base_centers2d + offsets2d) * downsample_ratio + else: + raise NotImplementedError + # (N, 3) + centers2d_img = \ + torch.cat((centers2d_img, depths.unsqueeze(-1)), dim=1) + # (N, 4, 1) + centers2d_extend = \ + torch.cat((centers2d_img, centers2d_img.new_ones(N, 1)), + dim=1).unsqueeze(-1) + locations = torch.matmul(cam2imgs_inv, centers2d_extend).squeeze(-1) + + return locations[:, :3] + + def keypoints2depth(self, + keypoints2d, + dimensions, + cam2imgs, + downsample_ratio=4, + group0_index=[(7, 3), (0, 4)], + group1_index=[(2, 6), (1, 5)]): + """Decode depth form three groups of keypoints and geometry projection + model. 2D keypoints inlucding 8 coreners and top/bottom centers will be + divided into three groups which will be used to calculate three depths + of object. + + .. code-block:: none + + Group center keypoints: + + + --------------- + + /| top center /| + / | . / | + / | | / | + + ---------|----- + + + | / | | / + | / . | / + |/ bottom center |/ + + --------------- + + + Group 0 keypoints: + + 0 + + -------------- + + /| /| + / | / | + / | 5/ | + + -------------- + + + | /3 | / + | / | / + |/ |/ + + -------------- + 6 + + Group 1 keypoints: + + 4 + + -------------- + + /| /| + / | / | + / | / | + 1 + -------------- + + 7 + | / | / + | / | / + |/ |/ + 2 + -------------- + + + + Args: + keypoints2d (torch.Tensor): Keypoints of objects. + 8 vertices + top/bottom center. + shape: (N, 10, 2) + dimensions (torch.Tensor): Dimensions of objetcts. + shape: (N, 3) + cam2imgs (torch.Tensor): Batch images' camera intrinsic matrix. + shape: kitti (N, 4, 4) nuscenes (N, 3, 3) + downsample_ratio (int, opitonal): The stride of feature map. + Defaults: 4. + group0_index(list[tuple[int]], optional): Keypoints group 0 + of index to calculate the depth. + Defaults: [0, 3, 4, 7]. 
+ group1_index(list[tuple[int]], optional): Keypoints group 1 + of index to calculate the depth. + Defaults: [1, 2, 5, 6] + + Return: + tuple(torch.Tensor): Depth computed from three groups of + keypoints (top/bottom, group0, group1) + shape: (N, 3) + """ + + pred_height_3d = dimensions[:, 1].clone() + f_u = cam2imgs[:, 0, 0] + center_height = keypoints2d[:, -2, 1] - keypoints2d[:, -1, 1] + corner_group0_height = keypoints2d[:, group0_index[0], 1] \ + - keypoints2d[:, group0_index[1], 1] + corner_group1_height = keypoints2d[:, group1_index[0], 1] \ + - keypoints2d[:, group1_index[1], 1] + center_depth = f_u * pred_height_3d / ( + F.relu(center_height) * downsample_ratio + self.eps) + corner_group0_depth = (f_u * pred_height_3d).unsqueeze(-1) / ( + F.relu(corner_group0_height) * downsample_ratio + self.eps) + corner_group1_depth = (f_u * pred_height_3d).unsqueeze(-1) / ( + F.relu(corner_group1_height) * downsample_ratio + self.eps) + + corner_group0_depth = corner_group0_depth.mean(dim=1) + corner_group1_depth = corner_group1_depth.mean(dim=1) + + keypoints_depth = torch.stack( + (center_depth, corner_group0_depth, corner_group1_depth), dim=1) + keypoints_depth = torch.clamp( + keypoints_depth, min=self.depth_range[0], max=self.depth_range[1]) + + return keypoints_depth + + def decode_dims(self, labels, dims_offset): + """Retrieve object dimensions. + + Args: + labels (torch.Tensor): Each points' category id. + shape: (N, K) + dims_offset (torch.Tensor): Dimension offsets. + shape: (N, 3) + + Returns: + torch.Tensor: Shape (N, 3) + """ + + if self.dims_mode == 'exp': + dims_offset = dims_offset.exp() + elif self.dims_mode == 'linear': + labels = labels.long() + base_dims = dims_offset.new_tensor(self.base_dims) + dims_mean = base_dims[:, :3] + dims_std = base_dims[:, 3:6] + cls_dimension_mean = dims_mean[labels, :] + cls_dimension_std = dims_std[labels, :] + dimensions = dims_offset * cls_dimension_mean + cls_dimension_std + else: + raise ValueError + + return dimensions + + def decode_orientation(self, ori_vector, locations): + """Retrieve object orientation. + + Args: + ori_vector (torch.Tensor): Local orientation vector + in [axis_cls, head_cls, sin, cos] format. + shape: (N, num_dir_bins * 4) + locations (torch.Tensor): Object location. + shape: (N, 3) + + Returns: + tuple[torch.Tensor]: yaws and local yaws of 3d bboxes. 
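As a side note for readers of the multibin encoding in MonoFlexCoder.encode above, the orientation target can be reproduced with a small standalone sketch. The bin count, margin and centers below are illustrative values rather than settings taken from any particular config:

import numpy as np
import torch

# Hypothetical multibin settings: 4 overlapping bins centered at 0, pi/2, pi, -pi/2.
num_dir_bins = 4
bin_margin = 1.0 / 3
bin_centers = torch.tensor([0.0, np.pi / 2, np.pi, -np.pi / 2])

local_yaw = torch.tensor([0.3, -2.8])                  # (N,), values in [-pi, pi]
bin_size = 2 * np.pi / num_dir_bins
range_size = bin_size / 2 + bin_size * bin_margin

# Signed offset of every yaw to every bin center, wrapped back into [-pi, pi].
offsets = local_yaw.unsqueeze(1) - bin_centers.unsqueeze(0)
offsets = (offsets + np.pi) % (2 * np.pi) - np.pi

target = local_yaw.new_zeros(local_yaw.shape[0], num_dir_bins * 2)
for i in range(num_dir_bins):
    inside = offsets[:, i].abs() < range_size
    target[inside, i] = 1                                   # bin classification part
    target[inside, num_dir_bins + i] = offsets[inside, i]   # residual part

Because the bins overlap, a yaw usually activates more than one bin; decode_orientation later picks the most confident bin and adds its residual back onto that bin center.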
+ """ + if self.multibin: + pred_bin_cls = ori_vector[:, :self.num_dir_bins * 2].view( + -1, self.num_dir_bins, 2) + pred_bin_cls = pred_bin_cls.softmax(dim=2)[..., 1] + orientations = ori_vector.new_zeros(ori_vector.shape[0]) + for i in range(self.num_dir_bins): + mask_i = (pred_bin_cls.argmax(dim=1) == i) + start_bin = self.num_dir_bins * 2 + i * 2 + end_bin = start_bin + 2 + pred_bin_offset = ori_vector[mask_i, start_bin:end_bin] + orientations[mask_i] = pred_bin_offset[:, 0].atan2( + pred_bin_offset[:, 1]) + self.bin_centers[i] + else: + axis_cls = ori_vector[:, :2].softmax(dim=1) + axis_cls = axis_cls[:, 0] < axis_cls[:, 1] + head_cls = ori_vector[:, 2:4].softmax(dim=1) + head_cls = head_cls[:, 0] < head_cls[:, 1] + # cls axis + orientations = self.bin_centers[axis_cls + head_cls * 2] + sin_cos_offset = F.normalize(ori_vector[:, 4:]) + orientations += sin_cos_offset[:, 0].atan(sin_cos_offset[:, 1]) + + locations = locations.view(-1, 3) + rays = locations[:, 0].atan2(locations[:, 2]) + local_yaws = orientations + yaws = local_yaws + rays + + larger_idx = (yaws > np.pi).nonzero(as_tuple=False) + small_idx = (yaws < -np.pi).nonzero(as_tuple=False) + if len(larger_idx) != 0: + yaws[larger_idx] -= 2 * np.pi + if len(small_idx) != 0: + yaws[small_idx] += 2 * np.pi + + larger_idx = (local_yaws > np.pi).nonzero(as_tuple=False) + small_idx = (local_yaws < -np.pi).nonzero(as_tuple=False) + if len(larger_idx) != 0: + local_yaws[larger_idx] -= 2 * np.pi + if len(small_idx) != 0: + local_yaws[small_idx] += 2 * np.pi + + return yaws, local_yaws + + def decode_bboxes2d(self, reg_bboxes2d, base_centers2d): + """Retrieve [x1, y1, x2, y2] format 2D bboxes. + + Args: + reg_bboxes2d (torch.Tensor): Predicted FCOS style + 2D bboxes. + shape: (N, 4) + base_centers2d (torch.Tensor): predicted base centers2d. + shape: (N, 2) + + Returns: + torch.Tenosr: [x1, y1, x2, y2] format 2D bboxes. + """ + centers_x = base_centers2d[:, 0] + centers_y = base_centers2d[:, 1] + + xs_min = centers_x - reg_bboxes2d[..., 0] + ys_min = centers_y - reg_bboxes2d[..., 1] + xs_max = centers_x + reg_bboxes2d[..., 2] + ys_max = centers_y + reg_bboxes2d[..., 3] + + bboxes2d = torch.stack([xs_min, ys_min, xs_max, ys_max], dim=-1) + + return bboxes2d + + def combine_depths(self, depth, depth_uncertainty): + """Combine all the prediced depths with depth uncertainty. + + Args: + depth (torch.Tensor): Predicted depths of each object. + 2D bboxes. + shape: (N, 4) + depth_uncertainty (torch.Tensor): Depth uncertainty for + each depth of each object. + shape: (N, 4) + + Returns: + torch.Tenosr: combined depth. + """ + uncertainty_weights = 1 / depth_uncertainty + uncertainty_weights = \ + uncertainty_weights / \ + uncertainty_weights.sum(dim=1, keepdim=True) + combined_depth = torch.sum(depth * uncertainty_weights, dim=1) + + return combined_depth diff --git a/mmdet3d/models/task_modules/coders/partial_bin_based_bbox_coder.py b/mmdet3d/models/task_modules/coders/partial_bin_based_bbox_coder.py new file mode 100755 index 0000000..db25daa --- /dev/null +++ b/mmdet3d/models/task_modules/coders/partial_bin_based_bbox_coder.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmdet.models.task_modules import BaseBBoxCoder + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class PartialBinBasedBBoxCoder(BaseBBoxCoder): + """Partial bin based bbox coder. + + Args: + num_dir_bins (int): Number of bins to encode direction angle. 
+ num_sizes (int): Number of size clusters. + mean_sizes (list[list[int]]): Mean size of bboxes in each class. + with_rot (bool): Whether the bbox is with rotation. + """ + + def __init__(self, num_dir_bins, num_sizes, mean_sizes, with_rot=True): + super(PartialBinBasedBBoxCoder, self).__init__() + assert len(mean_sizes) == num_sizes + self.num_dir_bins = num_dir_bins + self.num_sizes = num_sizes + self.mean_sizes = mean_sizes + self.with_rot = with_rot + + def encode(self, gt_bboxes_3d, gt_labels_3d): + """Encode ground truth to prediction targets. + + Args: + gt_bboxes_3d (BaseInstance3DBoxes): Ground truth bboxes + with shape (n, 7). + gt_labels_3d (torch.Tensor): Ground truth classes. + + Returns: + tuple: Targets of center, size and direction. + """ + # generate center target + center_target = gt_bboxes_3d.gravity_center + + # generate bbox size target + size_class_target = gt_labels_3d + size_res_target = gt_bboxes_3d.dims - gt_bboxes_3d.tensor.new_tensor( + self.mean_sizes)[size_class_target] + + # generate dir target + box_num = gt_labels_3d.shape[0] + if self.with_rot: + (dir_class_target, + dir_res_target) = self.angle2class(gt_bboxes_3d.yaw) + else: + dir_class_target = gt_labels_3d.new_zeros(box_num) + dir_res_target = gt_bboxes_3d.tensor.new_zeros(box_num) + + return (center_target, size_class_target, size_res_target, + dir_class_target, dir_res_target) + + def decode(self, bbox_out, suffix=''): + """Decode predicted parts to bbox3d. + + Args: + bbox_out (dict): Predictions from model, should contain keys below. + + - center: predicted bottom center of bboxes. + - dir_class: predicted bbox direction class. + - dir_res: predicted bbox direction residual. + - size_class: predicted bbox size class. + - size_res: predicted bbox size residual. + suffix (str): Decode predictions with specific suffix. + + Returns: + torch.Tensor: Decoded bbox3d with shape (batch, n, 7). + """ + center = bbox_out['center' + suffix] + batch_size, num_proposal = center.shape[:2] + + # decode heading angle + if self.with_rot: + dir_class = torch.argmax(bbox_out['dir_class' + suffix], -1) + dir_res = torch.gather(bbox_out['dir_res' + suffix], 2, + dir_class.unsqueeze(-1)) + dir_res.squeeze_(2) + dir_angle = self.class2angle(dir_class, dir_res).reshape( + batch_size, num_proposal, 1) + else: + dir_angle = center.new_zeros(batch_size, num_proposal, 1) + + # decode bbox size + size_class = torch.argmax( + bbox_out['size_class' + suffix], -1, keepdim=True) + size_res = torch.gather(bbox_out['size_res' + suffix], 2, + size_class.unsqueeze(-1).repeat(1, 1, 1, 3)) + mean_sizes = center.new_tensor(self.mean_sizes) + size_base = torch.index_select(mean_sizes, 0, size_class.reshape(-1)) + bbox_size = size_base.reshape(batch_size, num_proposal, + -1) + size_res.squeeze(2) + + bbox3d = torch.cat([center, bbox_size, dir_angle], dim=-1) + return bbox3d + + def decode_corners(self, center, size_res, size_class): + """Decode center, size residuals and class to corners. Only useful for + axis-aligned bounding boxes, so angle isn't considered. 
+ + Args: + center (torch.Tensor): Shape [B, N, 3] + size_res (torch.Tensor): Shape [B, N, 3] or [B, N, C, 3] + size_class (torch.Tensor): Shape: [B, N] or [B, N, 1] + or [B, N, C, 3] + + Returns: + torch.Tensor: Corners with shape [B, N, 6] + """ + if len(size_class.shape) == 2 or size_class.shape[-1] == 1: + batch_size, proposal_num = size_class.shape[:2] + one_hot_size_class = size_res.new_zeros( + (batch_size, proposal_num, self.num_sizes)) + if len(size_class.shape) == 2: + size_class = size_class.unsqueeze(-1) + one_hot_size_class.scatter_(2, size_class, 1) + one_hot_size_class_expand = one_hot_size_class.unsqueeze( + -1).repeat(1, 1, 1, 3).contiguous() + else: + one_hot_size_class_expand = size_class + + if len(size_res.shape) == 4: + size_res = torch.sum(size_res * one_hot_size_class_expand, 2) + + mean_sizes = size_res.new_tensor(self.mean_sizes) + mean_sizes = torch.sum(mean_sizes * one_hot_size_class_expand, 2) + size_full = (size_res + 1) * mean_sizes + size_full = torch.clamp(size_full, 0) + half_size_full = size_full / 2 + corner1 = center - half_size_full + corner2 = center + half_size_full + corners = torch.cat([corner1, corner2], dim=-1) + return corners + + def split_pred(self, cls_preds, reg_preds, base_xyz): + """Split predicted features to specific parts. + + Args: + cls_preds (torch.Tensor): Class predicted features to split. + reg_preds (torch.Tensor): Regression predicted features to split. + base_xyz (torch.Tensor): Coordinates of points. + + Returns: + dict[str, torch.Tensor]: Split results. + """ + results = {} + start, end = 0, 0 + + cls_preds_trans = cls_preds.transpose(2, 1) + reg_preds_trans = reg_preds.transpose(2, 1) + + # decode center + end += 3 + # (batch_size, num_proposal, 3) + results['center'] = base_xyz + \ + reg_preds_trans[..., start:end].contiguous() + start = end + + # decode direction + end += self.num_dir_bins + results['dir_class'] = reg_preds_trans[..., start:end].contiguous() + start = end + + end += self.num_dir_bins + dir_res_norm = reg_preds_trans[..., start:end].contiguous() + start = end + + results['dir_res_norm'] = dir_res_norm + results['dir_res'] = dir_res_norm * (np.pi / self.num_dir_bins) + + # decode size + end += self.num_sizes + results['size_class'] = reg_preds_trans[..., start:end].contiguous() + start = end + + end += self.num_sizes * 3 + size_res_norm = reg_preds_trans[..., start:end] + batch_size, num_proposal = reg_preds_trans.shape[:2] + size_res_norm = size_res_norm.view( + [batch_size, num_proposal, self.num_sizes, 3]) + start = end + + results['size_res_norm'] = size_res_norm.contiguous() + mean_sizes = reg_preds.new_tensor(self.mean_sizes) + results['size_res'] = ( + size_res_norm * mean_sizes.unsqueeze(0).unsqueeze(0)) + + # decode objectness score + start = 0 + end = 2 + results['obj_scores'] = cls_preds_trans[..., start:end].contiguous() + start = end + + # decode semantic score + results['sem_scores'] = cls_preds_trans[..., start:].contiguous() + + return results + + def angle2class(self, angle): + """Convert continuous angle to a discrete class and a residual. + + Convert continuous angle to a discrete class and a small + regression number from class center angle to current angle. + + Args: + angle (torch.Tensor): Angle is from 0-2pi (or -pi~pi), + class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N). + + Returns: + tuple: Encoded discrete class and residual. 
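Since angle2class and its inverse class2angle (defined just below) undo each other up to the [-pi, pi] wrap, a quick roundtrip check makes the encoding concrete. The bin count here is just a common choice, not one read from a config:

import numpy as np
import torch

num_dir_bins = 12
angle_per_class = 2 * np.pi / num_dir_bins

yaw = torch.tensor([0.1, -1.3, 2.9])

# angle2class: class index plus residual w.r.t. the class center.
angle = yaw % (2 * np.pi)
shifted = (angle + angle_per_class / 2) % (2 * np.pi)
angle_cls = (shifted // angle_per_class).long()
angle_res = shifted - (angle_cls * angle_per_class + angle_per_class / 2)

# class2angle: back to a continuous angle, limited to [-pi, pi].
decoded = angle_cls.float() * angle_per_class + angle_res
decoded[decoded > np.pi] -= 2 * np.pi
assert torch.allclose(decoded, yaw, atol=1e-5)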
+ """ + angle = angle % (2 * np.pi) + angle_per_class = 2 * np.pi / float(self.num_dir_bins) + shifted_angle = (angle + angle_per_class / 2) % (2 * np.pi) + angle_cls = shifted_angle // angle_per_class + angle_res = shifted_angle - ( + angle_cls * angle_per_class + angle_per_class / 2) + return angle_cls.long(), angle_res + + def class2angle(self, angle_cls, angle_res, limit_period=True): + """Inverse function to angle2class. + + Args: + angle_cls (torch.Tensor): Angle class to decode. + angle_res (torch.Tensor): Angle residual to decode. + limit_period (bool): Whether to limit angle to [-pi, pi]. + + Returns: + torch.Tensor: Angle decoded from angle_cls and angle_res. + """ + angle_per_class = 2 * np.pi / float(self.num_dir_bins) + angle_center = angle_cls.float() * angle_per_class + angle = angle_center + angle_res + if limit_period: + angle[angle > np.pi] -= 2 * np.pi + return angle diff --git a/mmdet3d/models/task_modules/coders/pgd_bbox_coder.py b/mmdet3d/models/task_modules/coders/pgd_bbox_coder.py new file mode 100755 index 0000000..f6361a4 --- /dev/null +++ b/mmdet3d/models/task_modules/coders/pgd_bbox_coder.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from torch.nn import functional as F + +from mmdet3d.registry import TASK_UTILS +from .fcos3d_bbox_coder import FCOS3DBBoxCoder + + +@TASK_UTILS.register_module() +class PGDBBoxCoder(FCOS3DBBoxCoder): + """Bounding box coder for PGD.""" + + def encode(self, gt_bboxes_3d, gt_labels_3d, gt_bboxes, gt_labels): + # TODO: refactor the encoder codes in the FCOS3D and PGD head + pass + + def decode_2d(self, + bbox, + scale, + stride, + max_regress_range, + training, + pred_keypoints=False, + pred_bbox2d=True): + """Decode regressed 2D attributes. + + Args: + bbox (torch.Tensor): Raw bounding box predictions in shape + [N, C, H, W]. + scale (tuple[`Scale`]): Learnable scale parameters. + stride (int): Stride for a specific feature level. + max_regress_range (int): Maximum regression range for a specific + feature level. + training (bool): Whether the decoding is in the training + procedure. + pred_keypoints (bool, optional): Whether to predict keypoints. + Defaults to False. + pred_bbox2d (bool, optional): Whether to predict 2D bounding + boxes. Defaults to False. + + Returns: + torch.Tensor: Decoded boxes. + """ + clone_bbox = bbox.clone() + if pred_keypoints: + scale_kpts = scale[3] + # 2 dimension of offsets x 8 corners of a 3D bbox + bbox[:, self.bbox_code_size:self.bbox_code_size + 16] = \ + torch.tanh(scale_kpts(clone_bbox[ + :, self.bbox_code_size:self.bbox_code_size + 16]).float()) + + if pred_bbox2d: + scale_bbox2d = scale[-1] + # The last four dimensions are offsets to four sides of a 2D bbox + bbox[:, -4:] = scale_bbox2d(clone_bbox[:, -4:]).float() + + if self.norm_on_bbox: + if pred_bbox2d: + bbox[:, -4:] = F.relu(bbox.clone()[:, -4:]) + if not training: + if pred_keypoints: + bbox[ + :, self.bbox_code_size:self.bbox_code_size + 16] *= \ + max_regress_range + if pred_bbox2d: + bbox[:, -4:] *= stride + else: + if pred_bbox2d: + bbox[:, -4:] = bbox.clone()[:, -4:].exp() + return bbox + + def decode_prob_depth(self, depth_cls_preds, depth_range, depth_unit, + division, num_depth_cls): + """Decode probabilistic depth map. + + Args: + depth_cls_preds (torch.Tensor): Depth probabilistic map in shape + [..., self.num_depth_cls] (raw output before softmax). + depth_range (tuple[float]): Range of depth estimation. + depth_unit (int): Unit of depth range division. 
+ division (str): Depth division method. Options include 'uniform', + 'linear', 'log', 'loguniform'. + num_depth_cls (int): Number of depth classes. + + Returns: + torch.Tensor: Decoded probabilistic depth estimation. + """ + if division == 'uniform': + depth_multiplier = depth_unit * \ + depth_cls_preds.new_tensor( + list(range(num_depth_cls))).reshape([1, -1]) + prob_depth_preds = (F.softmax(depth_cls_preds.clone(), dim=-1) * + depth_multiplier).sum(dim=-1) + return prob_depth_preds + elif division == 'linear': + split_pts = depth_cls_preds.new_tensor(list( + range(num_depth_cls))).reshape([1, -1]) + depth_multiplier = depth_range[0] + ( + depth_range[1] - depth_range[0]) / \ + (num_depth_cls * (num_depth_cls - 1)) * \ + (split_pts * (split_pts+1)) + prob_depth_preds = (F.softmax(depth_cls_preds.clone(), dim=-1) * + depth_multiplier).sum(dim=-1) + return prob_depth_preds + elif division == 'log': + split_pts = depth_cls_preds.new_tensor(list( + range(num_depth_cls))).reshape([1, -1]) + start = max(depth_range[0], 1) + end = depth_range[1] + depth_multiplier = (np.log(start) + + split_pts * np.log(end / start) / + (num_depth_cls - 1)).exp() + prob_depth_preds = (F.softmax(depth_cls_preds.clone(), dim=-1) * + depth_multiplier).sum(dim=-1) + return prob_depth_preds + elif division == 'loguniform': + split_pts = depth_cls_preds.new_tensor(list( + range(num_depth_cls))).reshape([1, -1]) + start = max(depth_range[0], 1) + end = depth_range[1] + log_multiplier = np.log(start) + \ + split_pts * np.log(end / start) / (num_depth_cls - 1) + prob_depth_preds = (F.softmax(depth_cls_preds.clone(), dim=-1) * + log_multiplier).sum(dim=-1).exp() + return prob_depth_preds + else: + raise NotImplementedError diff --git a/mmdet3d/models/task_modules/coders/point_xyzwhlr_bbox_coder.py b/mmdet3d/models/task_modules/coders/point_xyzwhlr_bbox_coder.py new file mode 100755 index 0000000..5e401e9 --- /dev/null +++ b/mmdet3d/models/task_modules/coders/point_xyzwhlr_bbox_coder.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmdet.models.task_modules import BaseBBoxCoder + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class PointXYZWHLRBBoxCoder(BaseBBoxCoder): + """Point based bbox coder for 3D boxes. + + Args: + code_size (int): The dimension of boxes to be encoded. + use_mean_size (bool, optional): Whether using anchors based on class. + Defaults to True. + mean_size (list[list[float]], optional): Mean size of bboxes in + each class. Defaults to None. + """ + + def __init__(self, code_size=7, use_mean_size=True, mean_size=None): + super(PointXYZWHLRBBoxCoder, self).__init__() + self.code_size = code_size + self.use_mean_size = use_mean_size + if self.use_mean_size: + self.mean_size = torch.from_numpy(np.array(mean_size)).float() + assert self.mean_size.min() > 0, \ + f'The min of mean_size should > 0, however currently it is '\ + f'{self.mean_size.min()}, please check it in your config.' + + def encode(self, gt_bboxes_3d, points, gt_labels_3d=None): + """Encode ground truth to prediction targets. + + Args: + gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth bboxes + with shape (N, 7 + C). + points (torch.Tensor): Point cloud with shape (N, 3). + gt_labels_3d (torch.Tensor, optional): Ground truth classes. + Defaults to None. + + Returns: + torch.Tensor: Encoded boxes with shape (N, 8 + C). 
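For the decode_prob_depth branches above, the 'uniform' case is simply the expected value of evenly spaced bin centers under the softmax distribution. A minimal sketch with made-up bin settings:

import torch
import torch.nn.functional as F

num_depth_cls = 8        # illustrative number of depth bins
depth_unit = 10          # illustrative bin spacing in meters

depth_cls_preds = torch.randn(2, num_depth_cls)    # raw logits, shape (N, num_depth_cls)
bin_centers = depth_unit * torch.arange(num_depth_cls, dtype=torch.float32).view(1, -1)

# 'uniform' division: softmax-weighted average of the bin centers.
prob_depth = (F.softmax(depth_cls_preds, dim=-1) * bin_centers).sum(dim=-1)

The 'linear' and 'log' branches only change how the bin centers are spaced across depth_range, while 'loguniform' takes the expectation in log-depth space and exponentiates the result.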
+ """ + gt_bboxes_3d[:, 3:6] = torch.clamp_min(gt_bboxes_3d[:, 3:6], min=1e-5) + + xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split( + gt_bboxes_3d, 1, dim=-1) + xa, ya, za = torch.split(points, 1, dim=-1) + + if self.use_mean_size: + assert gt_labels_3d.max() <= self.mean_size.shape[0] - 1, \ + f'the max gt label {gt_labels_3d.max()} is bigger than' \ + f'anchor types {self.mean_size.shape[0] - 1}.' + self.mean_size = self.mean_size.to(gt_labels_3d.device) + point_anchor_size = self.mean_size[gt_labels_3d] + dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1) + diagonal = torch.sqrt(dxa**2 + dya**2) + xt = (xg - xa) / diagonal + yt = (yg - ya) / diagonal + zt = (zg - za) / dza + dxt = torch.log(dxg / dxa) + dyt = torch.log(dyg / dya) + dzt = torch.log(dzg / dza) + else: + xt = (xg - xa) + yt = (yg - ya) + zt = (zg - za) + dxt = torch.log(dxg) + dyt = torch.log(dyg) + dzt = torch.log(dzg) + + return torch.cat( + [xt, yt, zt, dxt, dyt, dzt, + torch.cos(rg), + torch.sin(rg), *cgs], + dim=-1) + + def decode(self, box_encodings, points, pred_labels_3d=None): + """Decode predicted parts and points to bbox3d. + + Args: + box_encodings (torch.Tensor): Encoded boxes with shape (N, 8 + C). + points (torch.Tensor): Point cloud with shape (N, 3). + pred_labels_3d (torch.Tensor): Bbox predicted labels (N, M). + + Returns: + torch.Tensor: Decoded boxes with shape (N, 7 + C) + """ + xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split( + box_encodings, 1, dim=-1) + xa, ya, za = torch.split(points, 1, dim=-1) + + if self.use_mean_size: + assert pred_labels_3d.max() <= self.mean_size.shape[0] - 1, \ + f'The max pred label {pred_labels_3d.max()} is bigger than' \ + f'anchor types {self.mean_size.shape[0] - 1}.' + self.mean_size = self.mean_size.to(pred_labels_3d.device) + point_anchor_size = self.mean_size[pred_labels_3d] + dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1) + diagonal = torch.sqrt(dxa**2 + dya**2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + zg = zt * dza + za + + dxg = torch.exp(dxt) * dxa + dyg = torch.exp(dyt) * dya + dzg = torch.exp(dzt) * dza + else: + xg = xt + xa + yg = yt + ya + zg = zt + za + dxg, dyg, dzg = torch.split( + torch.exp(box_encodings[..., 3:6]), 1, dim=-1) + + rg = torch.atan2(sint, cost) + + return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cts], dim=-1) diff --git a/mmdet3d/models/task_modules/coders/smoke_bbox_coder.py b/mmdet3d/models/task_modules/coders/smoke_bbox_coder.py new file mode 100755 index 0000000..c1a65a7 --- /dev/null +++ b/mmdet3d/models/task_modules/coders/smoke_bbox_coder.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmdet.models.task_modules import BaseBBoxCoder + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class SMOKECoder(BaseBBoxCoder): + """Bbox Coder for SMOKE. + + Args: + base_depth (tuple[float]): Depth references for decode box depth. + base_dims (tuple[tuple[float]]): Dimension references [l, h, w] + for decode box dimension for each category. + code_size (int): The dimension of boxes to be encoded. + """ + + def __init__(self, base_depth, base_dims, code_size): + super(SMOKECoder, self).__init__() + self.base_depth = base_depth + self.base_dims = base_dims + self.bbox_code_size = code_size + + def encode(self, locations, dimensions, orientations, input_metas): + """Encode CameraInstance3DBoxes by locations, dimensions, orientations. + + Args: + locations (Tensor): Center location for 3D boxes. 
+ (N, 3) + dimensions (Tensor): Dimensions for 3D boxes. + shape (N, 3) + orientations (Tensor): Orientations for 3D boxes. + shape (N, 1) + input_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + + Return: + :obj:`CameraInstance3DBoxes`: 3D bboxes of batch images, + shape (N, bbox_code_size). + """ + + bboxes = torch.cat((locations, dimensions, orientations), dim=1) + assert bboxes.shape[1] == self.bbox_code_size, 'bboxes shape dose not'\ + 'match the bbox_code_size.' + batch_bboxes = input_metas[0]['box_type_3d']( + bboxes, box_dim=self.bbox_code_size) + + return batch_bboxes + + def decode(self, + reg, + points, + labels, + cam2imgs, + trans_mats, + locations=None): + """Decode regression into locations, dimensions, orientations. + + Args: + reg (Tensor): Batch regression for each predict center2d point. + shape: (batch * K (max_objs), C) + points(Tensor): Batch projected bbox centers on image plane. + shape: (batch * K (max_objs) , 2) + labels (Tensor): Batch predict class label for each predict + center2d point. + shape: (batch, K (max_objs)) + cam2imgs (Tensor): Batch images' camera intrinsic matrix. + shape: kitti (batch, 4, 4) nuscenes (batch, 3, 3) + trans_mats (Tensor): transformation matrix from original image + to feature map. + shape: (batch, 3, 3) + locations (None | Tensor): if locations is None, this function + is used to decode while inference, otherwise, it's used while + training using the ground truth 3d bbox locations. + shape: (batch * K (max_objs), 3) + + Return: + tuple(Tensor): The tuple has components below: + - locations (Tensor): Centers of 3D boxes. + shape: (batch * K (max_objs), 3) + - dimensions (Tensor): Dimensions of 3D boxes. + shape: (batch * K (max_objs), 3) + - orientations (Tensor): Orientations of 3D + boxes. + shape: (batch * K (max_objs), 1) + """ + depth_offsets = reg[:, 0] + centers2d_offsets = reg[:, 1:3] + dimensions_offsets = reg[:, 3:6] + orientations = reg[:, 6:8] + depths = self._decode_depth(depth_offsets) + # get the 3D Bounding box's center location. + pred_locations = self._decode_location(points, centers2d_offsets, + depths, cam2imgs, trans_mats) + pred_dimensions = self._decode_dimension(labels, dimensions_offsets) + if locations is None: + pred_orientations = self._decode_orientation( + orientations, pred_locations) + else: + pred_orientations = self._decode_orientation( + orientations, locations) + + return pred_locations, pred_dimensions, pred_orientations + + def _decode_depth(self, depth_offsets): + """Transform depth offset to depth.""" + base_depth = depth_offsets.new_tensor(self.base_depth) + depths = depth_offsets * base_depth[1] + base_depth[0] + + return depths + + def _decode_location(self, points, centers2d_offsets, depths, cam2imgs, + trans_mats): + """Retrieve objects location in camera coordinate based on projected + points. + + Args: + points (Tensor): Projected points on feature map in (x, y) + shape: (batch * K, 2) + centers2d_offset (Tensor): Project points offset in + (delta_x, delta_y). shape: (batch * K, 2) + depths (Tensor): Object depth z. + shape: (batch * K) + cam2imgs (Tensor): Batch camera intrinsics matrix. + shape: kitti (batch, 4, 4) nuscenes (batch, 3, 3) + trans_mats (Tensor): transformation matrix from original image + to feature map. 
+ shape: (batch, 3, 3) + """ + # number of points + N = centers2d_offsets.shape[0] + # batch_size + N_batch = cam2imgs.shape[0] + batch_id = torch.arange(N_batch).unsqueeze(1) + obj_id = batch_id.repeat(1, N // N_batch).flatten() + trans_mats_inv = trans_mats.inverse()[obj_id] + cam2imgs_inv = cam2imgs.inverse()[obj_id] + centers2d = points + centers2d_offsets + centers2d_extend = torch.cat((centers2d, centers2d.new_ones(N, 1)), + dim=1) + # expand project points as [N, 3, 1] + centers2d_extend = centers2d_extend.unsqueeze(-1) + # transform project points back on original image + centers2d_img = torch.matmul(trans_mats_inv, centers2d_extend) + centers2d_img = centers2d_img * depths.view(N, -1, 1) + if cam2imgs.shape[1] == 4: + centers2d_img = torch.cat( + (centers2d_img, centers2d.new_ones(N, 1, 1)), dim=1) + locations = torch.matmul(cam2imgs_inv, centers2d_img).squeeze(2) + + return locations[:, :3] + + def _decode_dimension(self, labels, dims_offset): + """Transform dimension offsets to dimension according to its category. + + Args: + labels (Tensor): Each points' category id. + shape: (N, K) + dims_offset (Tensor): Dimension offsets. + shape: (N, 3) + """ + labels = labels.flatten().long() + base_dims = dims_offset.new_tensor(self.base_dims) + dims_select = base_dims[labels, :] + dimensions = dims_offset.exp() * dims_select + + return dimensions + + def _decode_orientation(self, ori_vector, locations): + """Retrieve object orientation. + + Args: + ori_vector (Tensor): Local orientation in [sin, cos] format. + shape: (N, 2) + locations (Tensor): Object location. + shape: (N, 3) + + Return: + Tensor: yaw(Orientation). Notice that the yaw's + range is [-np.pi, np.pi]. + shape:(N, 1) + """ + assert len(ori_vector) == len(locations) + locations = locations.view(-1, 3) + rays = torch.atan(locations[:, 0] / (locations[:, 2] + 1e-7)) + alphas = torch.atan(ori_vector[:, 0] / (ori_vector[:, 1] + 1e-7)) + + # get cosine value positive and negative index. + cos_pos_inds = (ori_vector[:, 1] >= 0).nonzero(as_tuple=False) + cos_neg_inds = (ori_vector[:, 1] < 0).nonzero(as_tuple=False) + + alphas[cos_pos_inds] -= np.pi / 2 + alphas[cos_neg_inds] += np.pi / 2 + # retrieve object rotation y angle. + yaws = alphas + rays + + larger_inds = (yaws > np.pi).nonzero(as_tuple=False) + small_inds = (yaws < -np.pi).nonzero(as_tuple=False) + + if len(larger_inds) != 0: + yaws[larger_inds] -= 2 * np.pi + if len(small_inds) != 0: + yaws[small_inds] += 2 * np.pi + + yaws = yaws.unsqueeze(-1) + return yaws diff --git a/mmdet3d/models/task_modules/samplers/__init__.py b/mmdet3d/models/task_modules/samplers/__init__.py new file mode 100755 index 0000000..0e7be46 --- /dev/null +++ b/mmdet3d/models/task_modules/samplers/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
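_decode_orientation above recovers the global yaw from the local orientation (alpha) and the viewing ray toward the object center. Stripped of the [sin, cos] head and the plus/minus pi/2 branch handling, the core relation can be sketched with toy numbers:

import numpy as np
import torch

# Toy local orientations (alpha) and camera-frame centers (x, y, z).
alphas = torch.tensor([0.4, -2.0])
locations = torch.tensor([[5.0, 1.5, 20.0],
                          [-8.0, 1.6, 15.0]])

rays = torch.atan2(locations[:, 0], locations[:, 2])  # viewing-ray angle
yaws = alphas + rays
yaws = (yaws + np.pi) % (2 * np.pi) - np.pi           # wrap into [-pi, pi]

The implementation uses torch.atan with a small epsilon and index-based wrapping; atan2 plus a modulo is the shortcut used here for the usual case of objects in front of the camera (z > 0).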
+from mmdet.models.task_modules.samplers import (BaseSampler, CombinedSampler, + InstanceBalancedPosSampler, + IoUBalancedNegSampler, + OHEMSampler, RandomSampler, + SamplingResult) + +from .iou_neg_piecewise_sampler import IoUNegPiecewiseSampler +from .pseudosample import PseudoSampler + +__all__ = [ + 'BaseSampler', 'PseudoSampler', 'RandomSampler', + 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', + 'OHEMSampler', 'SamplingResult', 'IoUNegPiecewiseSampler' +] diff --git a/mmdet3d/models/task_modules/samplers/iou_neg_piecewise_sampler.py b/mmdet3d/models/task_modules/samplers/iou_neg_piecewise_sampler.py new file mode 100755 index 0000000..c249471 --- /dev/null +++ b/mmdet3d/models/task_modules/samplers/iou_neg_piecewise_sampler.py @@ -0,0 +1,187 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch + +from mmdet3d.registry import TASK_UTILS +from . import RandomSampler, SamplingResult + + +@TASK_UTILS.register_module() +class IoUNegPiecewiseSampler(RandomSampler): + """IoU Piece-wise Sampling. + + Sampling negative proposals according to a list of IoU thresholds. + The negative proposals are divided into several pieces according + to `neg_iou_piece_thrs`. And the ratio of each piece is indicated + by `neg_piece_fractions`. + + Args: + num (int): Number of proposals. + pos_fraction (float): The fraction of positive proposals. + neg_piece_fractions (list): A list contains fractions that indicates + the ratio of each piece of total negative samplers. + neg_iou_piece_thrs (list): A list contains IoU thresholds that + indicate the upper bound of this piece. + neg_pos_ub (float): The total ratio to limit the upper bound + number of negative samples. + add_gt_as_proposals (bool): Whether to add gt as proposals. + """ + + def __init__(self, + num, + pos_fraction=None, + neg_piece_fractions=None, + neg_iou_piece_thrs=None, + neg_pos_ub=-1, + add_gt_as_proposals=False, + return_iou=False): + super(IoUNegPiecewiseSampler, + self).__init__(num, pos_fraction, neg_pos_ub, + add_gt_as_proposals) + assert isinstance(neg_piece_fractions, list) + assert len(neg_piece_fractions) == len(neg_iou_piece_thrs) + self.neg_piece_fractions = neg_piece_fractions + self.neg_iou_thr = neg_iou_piece_thrs + self.return_iou = return_iou + self.neg_piece_num = len(self.neg_piece_fractions) + + def _sample_pos(self, assign_result, num_expected, **kwargs): + """Randomly sample some positive samples.""" + pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + return self.random_choice(pos_inds, num_expected) + + def _sample_neg(self, assign_result, num_expected, **kwargs): + """Randomly sample some negative samples.""" + neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) + if neg_inds.numel() != 0: + neg_inds = neg_inds.squeeze(1) + if len(neg_inds) <= 0: + return neg_inds.squeeze(1) + else: + neg_inds_choice = neg_inds.new_zeros([0]) + extend_num = 0 + max_overlaps = assign_result.max_overlaps[neg_inds] + + for piece_inds in range(self.neg_piece_num): + if piece_inds == self.neg_piece_num - 1: # for the last piece + piece_expected_num = num_expected - len(neg_inds_choice) + min_iou_thr = 0 + else: + # if the numbers of negative samplers in previous + # pieces are less than the expected number, extend + # the same number in the current piece. 
+ piece_expected_num = min( + num_expected, + math.ceil(num_expected * + self.neg_piece_fractions[piece_inds]) + + extend_num) + min_iou_thr = self.neg_iou_thr[piece_inds + 1] + max_iou_thr = self.neg_iou_thr[piece_inds] + piece_neg_inds = torch.nonzero( + (max_overlaps >= min_iou_thr) + & (max_overlaps < max_iou_thr), + as_tuple=False).view(-1) + + if len(piece_neg_inds) < piece_expected_num: + neg_inds_choice = torch.cat( + [neg_inds_choice, neg_inds[piece_neg_inds]], dim=0) + extend_num += piece_expected_num - len(piece_neg_inds) + + # for the last piece + if piece_inds == self.neg_piece_num - 1: + extend_neg_num = num_expected - len(neg_inds_choice) + # if the numbers of nagetive samples > 0, we will + # randomly select num_expected samples in last piece + if piece_neg_inds.numel() > 0: + rand_idx = torch.randint( + low=0, + high=piece_neg_inds.numel(), + size=(extend_neg_num, )).long() + neg_inds_choice = torch.cat( + [neg_inds_choice, piece_neg_inds[rand_idx]], + dim=0) + # if the numbers of nagetive samples == 0, we will + # randomly select num_expected samples in all + # previous pieces + else: + rand_idx = torch.randint( + low=0, + high=neg_inds_choice.numel(), + size=(extend_neg_num, )).long() + neg_inds_choice = torch.cat( + [neg_inds_choice, neg_inds_choice[rand_idx]], + dim=0) + else: + piece_choice = self.random_choice(piece_neg_inds, + piece_expected_num) + neg_inds_choice = torch.cat( + [neg_inds_choice, neg_inds[piece_choice]], dim=0) + extend_num = 0 + assert len(neg_inds_choice) == num_expected + return neg_inds_choice + + def sample(self, + assign_result, + bboxes, + gt_bboxes, + gt_labels=None, + **kwargs): + """Sample positive and negative bboxes. + + This is a simple implementation of bbox sampling given candidates, + assigning results and ground truth bboxes. + + Args: + assign_result (:obj:`AssignResult`): Bbox assigning results. + bboxes (torch.Tensor): Boxes to be sampled from. + gt_bboxes (torch.Tensor): Ground truth bboxes. + gt_labels (torch.Tensor, optional): Class labels of ground truth + bboxes. + + Returns: + :obj:`SamplingResult`: Sampling result. + """ + if len(bboxes.shape) < 2: + bboxes = bboxes[None, :] + + gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.bool) + if self.add_gt_as_proposals and len(gt_bboxes) > 0: + if gt_labels is None: + raise ValueError( + 'gt_labels must be given when add_gt_as_proposals is True') + bboxes = torch.cat([gt_bboxes, bboxes], dim=0) + assign_result.add_gt_(gt_labels) + gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.bool) + gt_flags = torch.cat([gt_ones, gt_flags]) + + num_expected_pos = int(self.num * self.pos_fraction) + pos_inds = self.pos_sampler._sample_pos( + assign_result, num_expected_pos, bboxes=bboxes, **kwargs) + # We found that sampled indices have duplicated items occasionally. + # (may be a bug of PyTorch) + pos_inds = pos_inds.unique() + num_sampled_pos = pos_inds.numel() + num_expected_neg = self.num - num_sampled_pos + if self.neg_pos_ub >= 0: + _pos = max(1, num_sampled_pos) + neg_upper_bound = int(self.neg_pos_ub * _pos) + if num_expected_neg > neg_upper_bound: + num_expected_neg = neg_upper_bound + neg_inds = self.neg_sampler._sample_neg( + assign_result, num_expected_neg, bboxes=bboxes, **kwargs) + + sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags) + if self.return_iou: + # PartA2 needs iou score to regression. 
+ sampling_result.iou = assign_result.max_overlaps[torch.cat( + [pos_inds, neg_inds])] + sampling_result.iou.detach_() + + return sampling_result diff --git a/mmdet3d/models/task_modules/samplers/pseudosample.py b/mmdet3d/models/task_modules/samplers/pseudosample.py new file mode 100755 index 0000000..5ae99e7 --- /dev/null +++ b/mmdet3d/models/task_modules/samplers/pseudosample.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmdet.models.task_modules import AssignResult +from mmengine.structures import InstanceData + +from mmdet3d.registry import TASK_UTILS +from ..samplers import BaseSampler, SamplingResult + + +@TASK_UTILS.register_module() +class PseudoSampler(BaseSampler): + """A pseudo sampler that does not do sampling actually.""" + + # TODO: This is a temporary pseudo sampler. + + def __init__(self, **kwargs): + pass + + def _sample_pos(self, **kwargs): + """Sample positive samples.""" + raise NotImplementedError + + def _sample_neg(self, **kwargs): + """Sample negative samples.""" + raise NotImplementedError + + def sample(self, assign_result: AssignResult, pred_instances: InstanceData, + gt_instances: InstanceData, *args, **kwargs): + """Directly returns the positive and negative indices of samples. + + Args: + assign_result (:obj:`AssignResult`): Bbox assigning results. + pred_instances (:obj:`InstaceData`): Instances of model + predictions. It includes ``priors``, and the priors can + be anchors, points, or bboxes predicted by the model, + shape(n, 4). + gt_instances (:obj:`InstaceData`): Ground truth of instance + annotations. It usually includes ``bboxes`` and ``labels`` + attributes. + + Returns: + :obj:`SamplingResult`: sampler results + """ + gt_bboxes = gt_instances.bboxes_3d + priors = pred_instances.priors + + pos_inds = torch.nonzero( + assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero( + assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() + + gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8) + sampling_result = SamplingResult( + pos_inds=pos_inds, + neg_inds=neg_inds, + priors=priors, + gt_bboxes=gt_bboxes, + assign_result=assign_result, + gt_flags=gt_flags, + avg_factor_with_neg=False) + return sampling_result diff --git a/mmdet3d/models/task_modules/voxel/__init__.py b/mmdet3d/models/task_modules/voxel/__init__.py new file mode 100755 index 0000000..273dc5b --- /dev/null +++ b/mmdet3d/models/task_modules/voxel/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .voxel_generator import VoxelGenerator + +__all__ = ['VoxelGenerator'] diff --git a/mmdet3d/models/task_modules/voxel/voxel_generator.py b/mmdet3d/models/task_modules/voxel/voxel_generator.py new file mode 100755 index 0000000..9db8b3b --- /dev/null +++ b/mmdet3d/models/task_modules/voxel/voxel_generator.py @@ -0,0 +1,283 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numba +import numpy as np + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class VoxelGenerator(object): + """Voxel generator in numpy implementation. + + Args: + voxel_size (list[float]): Size of a single voxel + point_cloud_range (list[float]): Range of points + max_num_points (int): Maximum number of points in a single voxel + max_voxels (int, optional): Maximum number of voxels. + Defaults to 20000. 
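To make the piecewise scheme in IoUNegPiecewiseSampler._sample_neg above more concrete, here is a small sketch that buckets negatives by IoU. The thresholds and overlaps are made up; in the real sampler each bucket contributes roughly its neg_piece_fractions share of the negative budget, and any shortfall rolls over to the following bucket:

import torch

neg_iou_piece_thrs = [0.55, 0.1]     # descending upper bounds, one per piece
max_overlaps = torch.rand(100)       # best IoU of each negative with any GT box

# Piece 0: "hard" negatives with IoU in [0.1, 0.55).
hard_negs = torch.nonzero(
    (max_overlaps >= neg_iou_piece_thrs[1])
    & (max_overlaps < neg_iou_piece_thrs[0]),
    as_tuple=False).view(-1)

# Last piece: "easy" negatives with IoU in [0, 0.1).
easy_negs = torch.nonzero(
    max_overlaps < neg_iou_piece_thrs[1], as_tuple=False).view(-1)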
+ """ + + def __init__(self, + voxel_size, + point_cloud_range, + max_num_points, + max_voxels=20000): + + point_cloud_range = np.array(point_cloud_range, dtype=np.float32) + # [0, -40, -3, 70.4, 40, 1] + voxel_size = np.array(voxel_size, dtype=np.float32) + grid_size = (point_cloud_range[3:] - + point_cloud_range[:3]) / voxel_size + grid_size = np.round(grid_size).astype(np.int64) + + self._voxel_size = voxel_size + self._point_cloud_range = point_cloud_range + self._max_num_points = max_num_points + self._max_voxels = max_voxels + self._grid_size = grid_size + + def generate(self, points): + """Generate voxels given points.""" + return points_to_voxel(points, self._voxel_size, + self._point_cloud_range, self._max_num_points, + True, self._max_voxels) + + @property + def voxel_size(self): + """list[float]: Size of a single voxel.""" + return self._voxel_size + + @property + def max_num_points_per_voxel(self): + """int: Maximum number of points per voxel.""" + return self._max_num_points + + @property + def point_cloud_range(self): + """list[float]: Range of point cloud.""" + return self._point_cloud_range + + @property + def grid_size(self): + """np.ndarray: The size of grids.""" + return self._grid_size + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + indent = ' ' * (len(repr_str) + 1) + repr_str += f'(voxel_size={self._voxel_size},\n' + repr_str += indent + 'point_cloud_range=' + repr_str += f'{self._point_cloud_range.tolist()},\n' + repr_str += indent + f'max_num_points={self._max_num_points},\n' + repr_str += indent + f'max_voxels={self._max_voxels},\n' + repr_str += indent + f'grid_size={self._grid_size.tolist()}' + repr_str += ')' + return repr_str + + +def points_to_voxel(points, + voxel_size, + coors_range, + max_points=35, + reverse_index=True, + max_voxels=20000): + """convert kitti points(N, >=3) to voxels. + + Args: + points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and + points[:, 3:] contain other information such as reflectivity. + voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size + coors_range (list[float | tuple[float] | ndarray]): Voxel range. + format: xyzxyz, minmax + max_points (int): Indicate maximum points contained in a voxel. + reverse_index (bool): Whether return reversed coordinates. + if points has xyz format and reverse_index is True, output + coordinates will be zyx format, but points in features always + xyz format. + max_voxels (int): Maximum number of voxels this function creates. + For second, 20000 is a good choice. Points should be shuffled for + randomness before this function because max_voxels drops points. + + Returns: + tuple[np.ndarray]: + voxels: [M, max_points, ndim] float tensor. only contain points. + coordinates: [M, 3] int32 tensor. + num_points_per_voxel: [M] int32 tensor. + """ + if not isinstance(voxel_size, np.ndarray): + voxel_size = np.array(voxel_size, dtype=points.dtype) + if not isinstance(coors_range, np.ndarray): + coors_range = np.array(coors_range, dtype=points.dtype) + voxelmap_shape = (coors_range[3:] - coors_range[:3]) / voxel_size + voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist()) + if reverse_index: + voxelmap_shape = voxelmap_shape[::-1] + # don't create large array in jit(nopython=True) code. 
+ num_points_per_voxel = np.zeros(shape=(max_voxels, ), dtype=np.int32) + coor_to_voxelidx = -np.ones(shape=voxelmap_shape, dtype=np.int32) + voxels = np.zeros( + shape=(max_voxels, max_points, points.shape[-1]), dtype=points.dtype) + coors = np.zeros(shape=(max_voxels, 3), dtype=np.int32) + if reverse_index: + voxel_num = _points_to_voxel_reverse_kernel( + points, voxel_size, coors_range, num_points_per_voxel, + coor_to_voxelidx, voxels, coors, max_points, max_voxels) + + else: + voxel_num = _points_to_voxel_kernel(points, voxel_size, coors_range, + num_points_per_voxel, + coor_to_voxelidx, voxels, coors, + max_points, max_voxels) + + coors = coors[:voxel_num] + voxels = voxels[:voxel_num] + num_points_per_voxel = num_points_per_voxel[:voxel_num] + + return voxels, coors, num_points_per_voxel + + +@numba.jit(nopython=True) +def _points_to_voxel_reverse_kernel(points, + voxel_size, + coors_range, + num_points_per_voxel, + coor_to_voxelidx, + voxels, + coors, + max_points=35, + max_voxels=20000): + """convert kitti points(N, >=3) to voxels. + + Args: + points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and + points[:, 3:] contain other information such as reflectivity. + voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size + coors_range (list[float | tuple[float] | ndarray]): Range of voxels. + format: xyzxyz, minmax + num_points_per_voxel (int): Number of points per voxel. + coor_to_voxel_idx (np.ndarray): A voxel grid of shape (D, H, W), + which has the same shape as the complete voxel map. It indicates + the index of each corresponding voxel. + voxels (np.ndarray): Created empty voxels. + coors (np.ndarray): Created coordinates of each voxel. + max_points (int): Indicate maximum points contained in a voxel. + max_voxels (int): Maximum number of voxels this function create. + for second, 20000 is a good choice. Points should be shuffled for + randomness before this function because max_voxels drops points. + + Returns: + tuple[np.ndarray]: + voxels: Shape [M, max_points, ndim], only contain points. + coordinates: Shape [M, 3]. + num_points_per_voxel: Shape [M]. + """ + # put all computations to one loop. + # we shouldn't create large array in main jit code, otherwise + # reduce performance + N = points.shape[0] + # ndim = points.shape[1] - 1 + ndim = 3 + ndim_minus_1 = ndim - 1 + grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size + # np.round(grid_size) + # grid_size = np.round(grid_size).astype(np.int64)(np.int32) + grid_size = np.round(grid_size, 0, grid_size).astype(np.int32) + coor = np.zeros(shape=(3, ), dtype=np.int32) + voxel_num = 0 + failed = False + for i in range(N): + failed = False + for j in range(ndim): + c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j]) + if c < 0 or c >= grid_size[j]: + failed = True + break + coor[ndim_minus_1 - j] = c + if failed: + continue + voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]] + if voxelidx == -1: + voxelidx = voxel_num + if voxel_num >= max_voxels: + continue + voxel_num += 1 + coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx + coors[voxelidx] = coor + num = num_points_per_voxel[voxelidx] + if num < max_points: + voxels[voxelidx, num] = points[i] + num_points_per_voxel[voxelidx] += 1 + return voxel_num + + +@numba.jit(nopython=True) +def _points_to_voxel_kernel(points, + voxel_size, + coors_range, + num_points_per_voxel, + coor_to_voxelidx, + voxels, + coors, + max_points=35, + max_voxels=20000): + """convert kitti points(N, >=3) to voxels. 
+ + Args: + points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and + points[:, 3:] contain other information such as reflectivity. + voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size. + coors_range (list[float | tuple[float] | ndarray]): Range of voxels. + format: xyzxyz, minmax + num_points_per_voxel (int): Number of points per voxel. + coor_to_voxel_idx (np.ndarray): A voxel grid of shape (D, H, W), + which has the same shape as the complete voxel map. It indicates + the index of each corresponding voxel. + voxels (np.ndarray): Created empty voxels. + coors (np.ndarray): Created coordinates of each voxel. + max_points (int): Indicate maximum points contained in a voxel. + max_voxels (int): Maximum number of voxels this function create. + for second, 20000 is a good choice. Points should be shuffled for + randomness before this function because max_voxels drops points. + + Returns: + tuple[np.ndarray]: + voxels: Shape [M, max_points, ndim], only contain points. + coordinates: Shape [M, 3]. + num_points_per_voxel: Shape [M]. + """ + N = points.shape[0] + # ndim = points.shape[1] - 1 + ndim = 3 + grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size + # grid_size = np.round(grid_size).astype(np.int64)(np.int32) + grid_size = np.round(grid_size, 0, grid_size).astype(np.int32) + + # lower_bound = coors_range[:3] + # upper_bound = coors_range[3:] + coor = np.zeros(shape=(3, ), dtype=np.int32) + voxel_num = 0 + failed = False + for i in range(N): + failed = False + for j in range(ndim): + c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j]) + if c < 0 or c >= grid_size[j]: + failed = True + break + coor[j] = c + if failed: + continue + voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]] + if voxelidx == -1: + voxelidx = voxel_num + if voxel_num >= max_voxels: + continue + voxel_num += 1 + coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx + coors[voxelidx] = coor + num = num_points_per_voxel[voxelidx] + if num < max_points: + voxels[voxelidx, num] = points[i] + num_points_per_voxel[voxelidx] += 1 + return voxel_num diff --git a/mmdet3d/models/test_time_augs/__init__.py b/mmdet3d/models/test_time_augs/__init__.py new file mode 100755 index 0000000..288f4d3 --- /dev/null +++ b/mmdet3d/models/test_time_augs/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .merge_augs import merge_aug_bboxes_3d + +__all__ = ['merge_aug_bboxes_3d'] diff --git a/mmdet3d/models/test_time_augs/merge_augs.py b/mmdet3d/models/test_time_augs/merge_augs.py new file mode 100755 index 0000000..b78f326 --- /dev/null +++ b/mmdet3d/models/test_time_augs/merge_augs.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch + +from mmdet3d.structures import bbox3d2result, bbox3d_mapping_back, xywhr2xyxyr +from mmdet3d.utils import ConfigType +from ..layers import nms_bev, nms_normal_bev + + +def merge_aug_bboxes_3d(aug_results: List[dict], + aug_batch_input_metas: List[dict], + test_cfg: ConfigType) -> dict: + """Merge augmented detection 3D bboxes and scores. + + Args: + aug_results (List[dict]): The dict of detection results. + The dict contains the following keys + + - bbox_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. + - scores_3d (Tensor): Detection scores. + - labels_3d (Tensor): Predicted box labels. + aug_batch_input_metas (List[dict]): Meta information of each sample. + test_cfg (dict or :obj:`ConfigDict`): Test config. 
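Both numba kernels above reduce to the same per-point index computation; the sketch below shows it in plain NumPy with an illustrative KITTI-style range and voxel size (the kernels additionally cap the number of voxels and the points kept per voxel):

import numpy as np

point_cloud_range = np.array([0, -40, -3, 70.4, 40, 1], dtype=np.float32)
voxel_size = np.array([0.05, 0.05, 0.1], dtype=np.float32)
points = np.array([[10.0, 2.0, -1.0, 0.3]], dtype=np.float32)  # x, y, z, reflectance

grid_size = np.round(
    (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size).astype(np.int32)
coor = np.floor(
    (points[:, :3] - point_cloud_range[:3]) / voxel_size).astype(np.int32)
valid = ((coor >= 0) & (coor < grid_size)).all(axis=1)  # drop out-of-range points

Each in-range point gets an integer (x, y, z) voxel coordinate on a 1408 x 1600 x 40 grid here, which the reverse kernel stores in zyx order when reverse_index=True.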
+ + Returns: + dict: Bounding boxes results in cpu mode, containing merged results. + + - bbox_3d (:obj:`BaseInstance3DBoxes`): Merged detection bbox. + - scores_3d (torch.Tensor): Merged detection scores. + - labels_3d (torch.Tensor): Merged predicted box labels. + """ + + assert len(aug_results) == len(aug_batch_input_metas), \ + '"aug_results" should have the same length as ' \ + f'"aug_batch_input_metas", got len(aug_results)={len(aug_results)} ' \ + f'and len(aug_batch_input_metas)={len(aug_batch_input_metas)}' + + recovered_bboxes = [] + recovered_scores = [] + recovered_labels = [] + + for bboxes, input_info in zip(aug_results, aug_batch_input_metas): + scale_factor = input_info['pcd_scale_factor'] + pcd_horizontal_flip = input_info['pcd_horizontal_flip'] + pcd_vertical_flip = input_info['pcd_vertical_flip'] + recovered_scores.append(bboxes['scores_3d']) + recovered_labels.append(bboxes['labels_3d']) + bboxes = bbox3d_mapping_back(bboxes['bbox_3d'], scale_factor, + pcd_horizontal_flip, pcd_vertical_flip) + recovered_bboxes.append(bboxes) + + aug_bboxes = recovered_bboxes[0].cat(recovered_bboxes) + aug_bboxes_for_nms = xywhr2xyxyr(aug_bboxes.bev) + aug_scores = torch.cat(recovered_scores, dim=0) + aug_labels = torch.cat(recovered_labels, dim=0) + + # TODO: use a more elegent way to deal with nms + if test_cfg.get('use_rotate_nms', False): + nms_func = nms_bev + else: + nms_func = nms_normal_bev + + merged_bboxes = [] + merged_scores = [] + merged_labels = [] + + # Apply multi-class nms when merge bboxes + if len(aug_labels) == 0: + return bbox3d2result(aug_bboxes, aug_scores, aug_labels) + + for class_id in range(torch.max(aug_labels).item() + 1): + class_inds = (aug_labels == class_id) + bboxes_i = aug_bboxes[class_inds] + bboxes_nms_i = aug_bboxes_for_nms[class_inds, :] + scores_i = aug_scores[class_inds] + labels_i = aug_labels[class_inds] + if len(bboxes_nms_i) == 0: + continue + selected = nms_func(bboxes_nms_i, scores_i, test_cfg.nms_thr) + + merged_bboxes.append(bboxes_i[selected, :]) + merged_scores.append(scores_i[selected]) + merged_labels.append(labels_i[selected]) + + merged_bboxes = merged_bboxes[0].cat(merged_bboxes) + merged_scores = torch.cat(merged_scores, dim=0) + merged_labels = torch.cat(merged_labels, dim=0) + + _, order = merged_scores.sort(0, descending=True) + num = min(test_cfg.get('max_num', 500), len(aug_bboxes)) + order = order[:num] + + merged_bboxes = merged_bboxes[order] + merged_scores = merged_scores[order] + merged_labels = merged_labels[order] + + return bbox3d2result(merged_bboxes, merged_scores, merged_labels) diff --git a/mmdet3d/models/utils/__init__.py b/mmdet3d/models/utils/__init__.py new file mode 100755 index 0000000..98e0fbd --- /dev/null +++ b/mmdet3d/models/utils/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
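The per-class merge in merge_aug_bboxes_3d above follows a generic pattern: run NMS within each class, then keep the top-scoring boxes overall. A framework-agnostic sketch of that pattern, with nms_func standing in as a placeholder for nms_bev or nms_normal_bev:

import torch

def merge_by_class(boxes_for_nms, scores, labels, nms_func, nms_thr, max_num=500):
    # Run NMS independently per class and collect the surviving indices.
    keep = []
    for cls in range(int(labels.max().item()) + 1):
        inds = torch.nonzero(labels == cls, as_tuple=False).view(-1)
        if inds.numel() == 0:
            continue
        selected = nms_func(boxes_for_nms[inds], scores[inds], nms_thr)
        keep.append(inds[selected])
    keep = torch.cat(keep)
    # Keep at most `max_num` detections overall, ordered by score.
    order = scores[keep].sort(descending=True).indices[:max_num]
    return keep[order]

The real function additionally maps every augmented result back to the original point cloud frame (undoing flips and scaling) before this merge step.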
+from .add_prefix import add_prefix +from .clip_sigmoid import clip_sigmoid +from .edge_indices import get_edge_indices +from .gaussian import (draw_heatmap_gaussian, ellip_gaussian2D, gaussian_2d, + gaussian_radius, get_ellip_gaussian_2D) +from .gen_keypoints import get_keypoints +from .handle_objs import filter_outside_objs, handle_proj_objs + +__all__ = [ + 'clip_sigmoid', 'get_edge_indices', 'filter_outside_objs', + 'handle_proj_objs', 'get_keypoints', 'gaussian_2d', + 'draw_heatmap_gaussian', 'gaussian_radius', 'get_ellip_gaussian_2D', + 'ellip_gaussian2D', 'add_prefix' +] diff --git a/mmdet3d/models/utils/add_prefix.py b/mmdet3d/models/utils/add_prefix.py new file mode 100755 index 0000000..46a7b60 --- /dev/null +++ b/mmdet3d/models/utils/add_prefix.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def add_prefix(inputs: dict, prefix: str) -> dict: + """Add prefix for dict. + + Args: + inputs (dict): The input dict with str keys. + prefix (str): The prefix to add. + + Returns: + + dict: The dict with keys updated with ``prefix``. + """ + + outputs = dict() + for name, value in inputs.items(): + outputs[f'{prefix}.{name}'] = value + + return outputs diff --git a/mmdet3d/models/utils/clip_sigmoid.py b/mmdet3d/models/utils/clip_sigmoid.py new file mode 100755 index 0000000..7be8301 --- /dev/null +++ b/mmdet3d/models/utils/clip_sigmoid.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import Tensor + + +def clip_sigmoid(x: Tensor, eps: float = 1e-4) -> Tensor: + """Sigmoid function for input feature. + + Args: + x (Tensor): Input feature map with the shape of [B, N, H, W]. + eps (float): Lower bound of the range to be clamped to. + Defaults to 1e-4. + + Returns: + Tensor: Feature map after sigmoid. + """ + y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) + return y diff --git a/mmdet3d/models/utils/edge_indices.py b/mmdet3d/models/utils/edge_indices.py new file mode 100755 index 0000000..33190cd --- /dev/null +++ b/mmdet3d/models/utils/edge_indices.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import numpy as np +import torch +from torch import Tensor + + +def get_edge_indices(img_metas: List[dict], + downsample_ratio: int, + step: int = 1, + pad_mode: str = 'default', + dtype: type = np.float32, + device: str = 'cpu') -> List[Tensor]: + """Function to filter the objects label outside the image. + The edge_indices are generated using numpy on cpu rather + than on CUDA due to the latency issue. When batch size = 8, + this function with numpy array is ~8 times faster than that + with CUDA tensor (0.09s and 0.72s in 100 runs). + + Args: + img_metas (List[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + downsample_ratio (int): Downsample ratio of output feature, + step (int): Step size used for generateing + edge indices. Defaults to 1. + pad_mode (str): Padding mode during data pipeline. + Defaults to 'default'. + dtype (type): Dtype of edge indices tensor. + Defaults to np.float32. + device (str): Device of edge indices tensor. + Defaults to 'cpu'. + + Returns: + List[Tensor]: Edge indices for each image in batch data. 
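A usage sketch for the get_edge_indices helper defined here, with a made-up KITTI-sized image meta; in the default padding mode only img_shape and pad_shape are consulted:

from mmdet3d.models.utils import get_edge_indices

img_metas = [dict(img_shape=(375, 1242, 3), pad_shape=(384, 1248))]
edge_indices = get_edge_indices(img_metas, downsample_ratio=4, step=1)
# edge_indices[0] is a LongTensor of (x, y) positions tracing the image
# border (left, bottom, right, top) on the 4x-downsampled feature map.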
+ """ + edge_indices_list = [] + for i in range(len(img_metas)): + img_shape = img_metas[i]['img_shape'] + pad_shape = img_metas[i]['pad_shape'] + h, w = img_shape[:2] + pad_h, pad_w = pad_shape + edge_indices = [] + + if pad_mode == 'default': + x_min = 0 + y_min = 0 + x_max = (w - 1) // downsample_ratio + y_max = (h - 1) // downsample_ratio + elif pad_mode == 'center': + x_min = np.ceil((pad_w - w) / 2 * downsample_ratio) + y_min = np.ceil((pad_h - h) / 2 * downsample_ratio) + x_max = x_min + w // downsample_ratio + y_max = y_min + h // downsample_ratio + else: + raise NotImplementedError + + # left + y = np.arange(y_min, y_max, step, dtype=dtype) + x = np.ones(len(y)) * x_min + + edge_indices_edge = np.stack((x, y), axis=1) + edge_indices.append(edge_indices_edge) + + # bottom + x = np.arange(x_min, x_max, step, dtype=dtype) + y = np.ones(len(x)) * y_max + + edge_indices_edge = np.stack((x, y), axis=1) + edge_indices.append(edge_indices_edge) + + # right + y = np.arange(y_max, y_min, -step, dtype=dtype) + x = np.ones(len(y)) * x_max + + edge_indices_edge = np.stack((x, y), axis=1) + edge_indices.append(edge_indices_edge) + + # top + x = np.arange(x_max, x_min, -step, dtype=dtype) + y = np.ones(len(x)) * y_min + + edge_indices_edge = np.stack((x, y), axis=1) + edge_indices.append(edge_indices_edge) + + edge_indices = \ + np.concatenate([index for index in edge_indices], axis=0) + edge_indices = torch.from_numpy(edge_indices).to(device).long() + edge_indices_list.append(edge_indices) + + return edge_indices_list diff --git a/mmdet3d/models/utils/gaussian.py b/mmdet3d/models/utils/gaussian.py new file mode 100755 index 0000000..3d094dc --- /dev/null +++ b/mmdet3d/models/utils/gaussian.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +import numpy as np +import torch +from torch import Tensor + + +def gaussian_2d(shape: Tuple[int, int], sigma: float = 1) -> np.ndarray: + """Generate gaussian map. + + Args: + shape (Tuple[int]): Shape of the map. + sigma (float): Sigma to generate gaussian map. + Defaults to 1. + + Returns: + np.ndarray: Generated gaussian map. + """ + m, n = [(ss - 1.) / 2. for ss in shape] + y, x = np.ogrid[-m:m + 1, -n:n + 1] + + h = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) + h[h < np.finfo(h.dtype).eps * h.max()] = 0 + return h + + +def draw_heatmap_gaussian(heatmap: Tensor, + center: Tensor, + radius: int, + k: int = 1) -> Tensor: + """Get gaussian masked heatmap. + + Args: + heatmap (Tensor): Heatmap to be masked. + center (Tensor): Center coord of the heatmap. + radius (int): Radius of gaussian. + k (int): Multiple of masked_gaussian. Defaults to 1. + + Returns: + Tensor: Masked heatmap. + """ + diameter = 2 * radius + 1 + gaussian = gaussian_2d((diameter, diameter), sigma=diameter / 6) + + x, y = int(center[0]), int(center[1]) + + height, width = heatmap.shape[0:2] + + left, right = min(x, radius), min(width - x, radius + 1) + top, bottom = min(y, radius), min(height - y, radius + 1) + + masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] + masked_gaussian = torch.from_numpy( + gaussian[radius - top:radius + bottom, + radius - left:radius + right]).to(heatmap.device, + torch.float32) + if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: + torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap) + return heatmap + + +def gaussian_radius(det_size: Tuple[Tensor, Tensor], + min_overlap: float = 0.5) -> Tensor: + """Get radius of gaussian. 
+ + Args: + det_size (Tuple[Tensor]): Size of the detection result. + min_overlap (float): Gaussian_overlap. Defaults to 0.5. + + Returns: + Tensor: Computed radius. + """ + height, width = det_size + + a1 = 1 + b1 = (height + width) + c1 = width * height * (1 - min_overlap) / (1 + min_overlap) + sq1 = torch.sqrt(b1**2 - 4 * a1 * c1) + r1 = (b1 + sq1) / 2 + + a2 = 4 + b2 = 2 * (height + width) + c2 = (1 - min_overlap) * width * height + sq2 = torch.sqrt(b2**2 - 4 * a2 * c2) + r2 = (b2 + sq2) / 2 + + a3 = 4 * min_overlap + b3 = -2 * min_overlap * (height + width) + c3 = (min_overlap - 1) * width * height + sq3 = torch.sqrt(b3**2 - 4 * a3 * c3) + r3 = (b3 + sq3) / 2 + return min(r1, r2, r3) + + +def get_ellip_gaussian_2D(heatmap: Tensor, + center: List[int], + radius_x: int, + radius_y: int, + k: int = 1) -> Tensor: + """Generate 2D ellipse gaussian heatmap. + + Args: + heatmap (Tensor): Input heatmap, the gaussian kernel will cover on + it and maintain the max value. + center (List[int]): Coord of gaussian kernel's center. + radius_x (int): X-axis radius of gaussian kernel. + radius_y (int): Y-axis radius of gaussian kernel. + k (int): Coefficient of gaussian kernel. Defaults to 1. + + Returns: + out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. + """ + diameter_x, diameter_y = 2 * radius_x + 1, 2 * radius_y + 1 + gaussian_kernel = ellip_gaussian2D((radius_x, radius_y), + sigma_x=diameter_x // 6, + sigma_y=diameter_y // 6, + dtype=heatmap.dtype, + device=heatmap.device) + + x, y = int(center[0]), int(center[1]) + height, width = heatmap.shape[0:2] + + left, right = min(x, radius_x), min(width - x, radius_x + 1) + top, bottom = min(y, radius_y), min(height - y, radius_y + 1) + + masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] + masked_gaussian = gaussian_kernel[radius_y - top:radius_y + bottom, + radius_x - left:radius_x + right] + out_heatmap = heatmap + torch.max( + masked_heatmap, + masked_gaussian * k, + out=out_heatmap[y - top:y + bottom, x - left:x + right]) + + return out_heatmap + + +def ellip_gaussian2D(radius: Tuple[int, int], + sigma_x: int, + sigma_y: int, + dtype: torch.dtype = torch.float32, + device: str = 'cpu') -> Tensor: + """Generate 2D ellipse gaussian kernel. + + Args: + radius (Tuple[int]): Ellipse radius (radius_x, radius_y) of gaussian + kernel. + sigma_x (int): X-axis sigma of gaussian function. + sigma_y (int): Y-axis sigma of gaussian function. + dtype (torch.dtype): Dtype of gaussian tensor. + Defaults to torch.float32. + device (str): Device of gaussian tensor. + Defaults to 'cpu'. + + Returns: + h (Tensor): Gaussian kernel with a + ``(2 * radius_y + 1) * (2 * radius_x + 1)`` shape. + """ + x = torch.arange( + -radius[0], radius[0] + 1, dtype=dtype, device=device).view(1, -1) + y = torch.arange( + -radius[1], radius[1] + 1, dtype=dtype, device=device).view(-1, 1) + + h = (-(x * x) / (2 * sigma_x * sigma_x) - (y * y) / + (2 * sigma_y * sigma_y)).exp() + h[h < torch.finfo(h.dtype).eps * h.max()] = 0 + + return h diff --git a/mmdet3d/models/utils/gen_keypoints.py b/mmdet3d/models/utils/gen_keypoints.py new file mode 100755 index 0000000..848b0f0 --- /dev/null +++ b/mmdet3d/models/utils/gen_keypoints.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
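+# A brief sketch of the keypoint layout produced by get_keypoints below:
+# indices 0-7 are the eight projected corners of each box (in the order of
+# CameraInstance3DBoxes.corners), index 8 is the projected top-face center,
+# index 9 is the projected bottom-face center, and the last channel of every
+# keypoint stores its visibility flag.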
+from typing import List, Tuple + +import torch +from torch import Tensor + +from mmdet3d.structures import CameraInstance3DBoxes, points_cam2img + + +def get_keypoints( + gt_bboxes_3d_list: List[CameraInstance3DBoxes], + centers2d_list: List[Tensor], + img_metas: List[dict], + use_local_coords: bool = True) -> Tuple[List[Tensor], List[Tensor]]: + """Function to get keypoints of projected 3D bboxes and their depth visibility masks. + + Args: + gt_bboxes_3d_list (List[:obj:`CameraInstance3DBoxes`]): Ground truth + bboxes of each image. + centers2d_list (List[Tensor]): Projected 3D centers onto 2D image, + shape (num_gt, 2). + img_metas (List[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + use_local_coords (bool): Whether to use local coordinates + for keypoints. Defaults to True. + + Returns: + Tuple[List[Tensor], List[Tensor]]: It contains two elements. + The first is the keypoints for each projected 2D bbox in batch data. + The second is the visible mask of depth calculated by keypoints. + """ + + assert len(gt_bboxes_3d_list) == len(centers2d_list) + bs = len(gt_bboxes_3d_list) + keypoints2d_list = [] + keypoints_depth_mask_list = [] + + for i in range(bs): + gt_bboxes_3d = gt_bboxes_3d_list[i] + centers2d = centers2d_list[i] + img_shape = img_metas[i]['img_shape'] + cam2img = img_metas[i]['cam2img'] + h, w = img_shape[:2] + # (N, 8, 3) + corners3d = gt_bboxes_3d.corners + top_centers3d = torch.mean(corners3d[:, [0, 1, 4, 5], :], dim=1) + bot_centers3d = torch.mean(corners3d[:, [2, 3, 6, 7], :], dim=1) + # (N, 2, 3) + top_bot_centers3d = torch.stack((top_centers3d, bot_centers3d), dim=1) + keypoints3d = torch.cat((corners3d, top_bot_centers3d), dim=1) + # (N, 10, 2) + keypoints2d = points_cam2img(keypoints3d, cam2img) + + # keypoints mask: keypoints must be inside + # the image and in front of the camera + keypoints_x_visible = (keypoints2d[..., 0] >= 0) & ( + keypoints2d[..., 0] <= w - 1) + keypoints_y_visible = (keypoints2d[..., 1] >= 0) & ( + keypoints2d[..., 1] <= h - 1) + keypoints_z_visible = (keypoints3d[..., -1] > 0) + + # (N, 10) + keypoints_visible = \ + keypoints_x_visible & keypoints_y_visible & keypoints_z_visible + # center, diag-02, diag-13 + keypoints_depth_valid = torch.stack( + (keypoints_visible[:, [8, 9]].all(dim=1), + keypoints_visible[:, [0, 3, 5, 6]].all(dim=1), + keypoints_visible[:, [1, 2, 4, 7]].all(dim=1)), + dim=1) + keypoints_visible = keypoints_visible.float() + + if use_local_coords: + keypoints2d = torch.cat((keypoints2d - centers2d.unsqueeze(1), + keypoints_visible.unsqueeze(-1)), + dim=2) + else: + keypoints2d = torch.cat( + (keypoints2d, keypoints_visible.unsqueeze(-1)), dim=2) + + keypoints2d_list.append(keypoints2d) + keypoints_depth_mask_list.append(keypoints_depth_valid) + + return (keypoints2d_list, keypoints_depth_mask_list) diff --git a/mmdet3d/models/utils/handle_objs.py b/mmdet3d/models/utils/handle_objs.py new file mode 100755 index 0000000..d05afb1 --- /dev/null +++ b/mmdet3d/models/utils/handle_objs.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +import torch +from torch import Tensor + +from mmdet3d.structures import CameraInstance3DBoxes + + +def filter_outside_objs(gt_bboxes_list: List[Tensor], + gt_labels_list: List[Tensor], + gt_bboxes_3d_list: List[CameraInstance3DBoxes], + gt_labels_3d_list: List[Tensor], + centers2d_list: List[Tensor], + img_metas: List[dict]) -> None: + """Function to filter out the objects whose projected 2D centers lie outside the image.
+ + Args: + gt_bboxes_list (List[Tensor]): Ground truth bboxes of each image, + each has shape (num_gt, 4). + gt_labels_list (List[Tensor]): Ground truth labels of each box, + each has shape (num_gt,). + gt_bboxes_3d_list (List[:obj:`CameraInstance3DBoxes`]): 3D Ground + truth bboxes of each image, each has shape + (num_gt, bbox_code_size). + gt_labels_3d_list (List[Tensor]): 3D Ground truth labels of each + box, each has shape (num_gt,). + centers2d_list (List[Tensor]): Projected 3D centers onto 2D image, + each has shape (num_gt, 2). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + """ + bs = len(centers2d_list) + + for i in range(bs): + centers2d = centers2d_list[i].clone() + img_shape = img_metas[i]['img_shape'] + keep_inds = (centers2d[:, 0] > 0) & \ + (centers2d[:, 0] < img_shape[1]) & \ + (centers2d[:, 1] > 0) & \ + (centers2d[:, 1] < img_shape[0]) + centers2d_list[i] = centers2d[keep_inds] + gt_labels_list[i] = gt_labels_list[i][keep_inds] + gt_bboxes_list[i] = gt_bboxes_list[i][keep_inds] + gt_bboxes_3d_list[i].tensor = gt_bboxes_3d_list[i].tensor[keep_inds] + gt_labels_3d_list[i] = gt_labels_3d_list[i][keep_inds] + + +def get_centers2d_target(centers2d: Tensor, centers: Tensor, + img_shape: tuple) -> Tensor: + """Function to get target centers2d. + + Args: + centers2d (Tensor): Projected 3D centers onto 2D images. + centers (Tensor): Centers of 2d gt bboxes. + img_shape (tuple): Resized image shape. + + Returns: + torch.Tensor: Projected 3D centers (centers2D) target. + """ + N = centers2d.shape[0] + h, w = img_shape[:2] + valid_intersects = centers2d.new_zeros((N, 2)) + a = (centers[:, 1] - centers2d[:, 1]) / (centers[:, 0] - centers2d[:, 0]) + b = centers[:, 1] - a * centers[:, 0] + left_y = b + right_y = (w - 1) * a + b + top_x = -b / a + bottom_x = (h - 1 - b) / a + + left_coors = torch.stack((left_y.new_zeros(N, ), left_y), dim=1) + right_coors = torch.stack((right_y.new_full((N, ), w - 1), right_y), dim=1) + top_coors = torch.stack((top_x, top_x.new_zeros(N, )), dim=1) + bottom_coors = torch.stack((bottom_x, bottom_x.new_full((N, ), h - 1)), + dim=1) + + intersects = torch.stack( + [left_coors, right_coors, top_coors, bottom_coors], dim=1) + intersects_x = intersects[:, :, 0] + intersects_y = intersects[:, :, 1] + inds = (intersects_x >= 0) & (intersects_x <= + w - 1) & (intersects_y >= 0) & ( + intersects_y <= h - 1) + valid_intersects = intersects[inds].reshape(N, 2, 2) + dist = torch.norm(valid_intersects - centers2d.unsqueeze(1), dim=2) + min_idx = torch.argmin(dist, dim=1) + + min_idx = min_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, 2) + centers2d_target = valid_intersects.gather(dim=1, index=min_idx).squeeze(1) + + return centers2d_target + + +def handle_proj_objs( + centers2d_list: List[Tensor], gt_bboxes_list: List[Tensor], + img_metas: List[dict] +) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]: + """Function to handle projected object centers2d, generate target + centers2d. + + Args: + gt_bboxes_list (List[Tensor]): Ground truth bboxes of each image, + shape (num_gt, 4). + centers2d_list (List[Tensor]): Projected 3D centers onto 2D image, + shape (num_gt, 2). + img_metas (List[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + + Returns: + Tuple[List[Tensor], List[Tensor], List[Tensor]]: It contains three + elements. The first is the target centers2d after handling the + truncated objects. 
The second is the offsets between target centers2d + and round int dtype centers2d,and the last is the truncation mask + for each object in batch data. + """ + bs = len(centers2d_list) + centers2d_target_list = [] + trunc_mask_list = [] + offsets2d_list = [] + # for now, only pad mode that img is padded by right and + # bottom side is supported. + for i in range(bs): + centers2d = centers2d_list[i] + gt_bbox = gt_bboxes_list[i] + img_shape = img_metas[i]['img_shape'] + centers2d_target = centers2d.clone() + inside_inds = (centers2d[:, 0] > 0) & \ + (centers2d[:, 0] < img_shape[1]) & \ + (centers2d[:, 1] > 0) & \ + (centers2d[:, 1] < img_shape[0]) + outside_inds = ~inside_inds + + # if there are outside objects + if outside_inds.any(): + centers = (gt_bbox[:, :2] + gt_bbox[:, 2:]) / 2 + outside_centers2d = centers2d[outside_inds] + match_centers = centers[outside_inds] + target_outside_centers2d = get_centers2d_target( + outside_centers2d, match_centers, img_shape) + centers2d_target[outside_inds] = target_outside_centers2d + + offsets2d = centers2d - centers2d_target.round().int() + trunc_mask = outside_inds + + centers2d_target_list.append(centers2d_target) + trunc_mask_list.append(trunc_mask) + offsets2d_list.append(offsets2d) + + return (centers2d_target_list, offsets2d_list, trunc_mask_list) diff --git a/mmdet3d/models/voxel_encoders/__init__.py b/mmdet3d/models/voxel_encoders/__init__.py new file mode 100755 index 0000000..9e14e87 --- /dev/null +++ b/mmdet3d/models/voxel_encoders/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .pillar_encoder import DynamicPillarFeatureNet, PillarFeatureNet +from .voxel_encoder import (DynamicSimpleVFE, DynamicVFE, HardSimpleVFE, + HardVFE, SegVFE) + +__all__ = [ + 'PillarFeatureNet', 'DynamicPillarFeatureNet', 'HardVFE', 'DynamicVFE', + 'HardSimpleVFE', 'DynamicSimpleVFE', 'SegVFE' +] diff --git a/mmdet3d/models/voxel_encoders/pillar_encoder.py b/mmdet3d/models/voxel_encoders/pillar_encoder.py new file mode 100755 index 0000000..305e526 --- /dev/null +++ b/mmdet3d/models/voxel_encoders/pillar_encoder.py @@ -0,0 +1,320 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import build_norm_layer +from mmcv.ops import DynamicScatter +from torch import nn + +from mmdet3d.registry import MODELS +from .utils import PFNLayer, get_paddings_indicator + + +@MODELS.register_module() +class PillarFeatureNet(nn.Module): + """Pillar Feature Net. + + The network prepares the pillar features and performs forward pass + through PFNLayers. + + Args: + in_channels (int, optional): Number of input features, + either x, y, z or x, y, z, r. Defaults to 4. + feat_channels (tuple, optional): Number of features in each of the + N PFNLayers. Defaults to (64, ). + with_distance (bool, optional): Whether to include Euclidean distance + to points. Defaults to False. + with_cluster_center (bool, optional): [description]. Defaults to True. + with_voxel_center (bool, optional): [description]. Defaults to True. + voxel_size (tuple[float], optional): Size of voxels, only utilize x + and y size. Defaults to (0.2, 0.2, 4). + point_cloud_range (tuple[float], optional): Point cloud range, only + utilizes x and y min. Defaults to (0, -40, -3, 70.4, 40, 1). + norm_cfg ([type], optional): [description]. + Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01). + mode (str, optional): The mode to gather point features. Options are + 'max' or 'avg'. Defaults to 'max'. 
+ legacy (bool, optional): Whether to use the new behavior or + the original behavior. Defaults to True. + """ + + def __init__(self, + in_channels=4, + feat_channels=(64, ), + with_distance=False, + with_cluster_center=True, + with_voxel_center=True, + voxel_size=(0.2, 0.2, 4), + point_cloud_range=(0, -40, -3, 70.4, 40, 1), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + mode='max', + legacy=True): + super(PillarFeatureNet, self).__init__() + assert len(feat_channels) > 0 + self.legacy = legacy + if with_cluster_center: + in_channels += 3 + if with_voxel_center: + in_channels += 3 + if with_distance: + in_channels += 1 + self._with_distance = with_distance + self._with_cluster_center = with_cluster_center + self._with_voxel_center = with_voxel_center + self.fp16_enabled = False + # Create PillarFeatureNet layers + self.in_channels = in_channels + feat_channels = [in_channels] + list(feat_channels) + pfn_layers = [] + for i in range(len(feat_channels) - 1): + in_filters = feat_channels[i] + out_filters = feat_channels[i + 1] + if i < len(feat_channels) - 2: + last_layer = False + else: + last_layer = True + pfn_layers.append( + PFNLayer( + in_filters, + out_filters, + norm_cfg=norm_cfg, + last_layer=last_layer, + mode=mode)) + self.pfn_layers = nn.ModuleList(pfn_layers) + + # Need pillar (voxel) size and x/y offset in order to calculate offset + self.vx = voxel_size[0] + self.vy = voxel_size[1] + self.vz = voxel_size[2] + self.x_offset = self.vx / 2 + point_cloud_range[0] + self.y_offset = self.vy / 2 + point_cloud_range[1] + self.z_offset = self.vz / 2 + point_cloud_range[2] + self.point_cloud_range = point_cloud_range + + def forward(self, features, num_points, coors, *args, **kwargs): + """Forward function. + + Args: + features (torch.Tensor): Point features or raw points in shape + (N, M, C). + num_points (torch.Tensor): Number of points in each pillar. + coors (torch.Tensor): Coordinates of each voxel. + + Returns: + torch.Tensor: Features of pillars. + """ + features_ls = [features] + # Find distance of x, y, and z from cluster center + if self._with_cluster_center: + points_mean = features[:, :, :3].sum( + dim=1, keepdim=True) / num_points.type_as(features).view( + -1, 1, 1) + f_cluster = features[:, :, :3] - points_mean + features_ls.append(f_cluster) + + # Find distance of x, y, and z from pillar center + dtype = features.dtype + if self._with_voxel_center: + if not self.legacy: + f_center = torch.zeros_like(features[:, :, :3]) + f_center[:, :, 0] = features[:, :, 0] - ( + coors[:, 3].to(dtype).unsqueeze(1) * self.vx + + self.x_offset) + f_center[:, :, 1] = features[:, :, 1] - ( + coors[:, 2].to(dtype).unsqueeze(1) * self.vy + + self.y_offset) + f_center[:, :, 2] = features[:, :, 2] - ( + coors[:, 1].to(dtype).unsqueeze(1) * self.vz + + self.z_offset) + else: + f_center = features[:, :, :3] + f_center[:, :, 0] = f_center[:, :, 0] - ( + coors[:, 3].type_as(features).unsqueeze(1) * self.vx + + self.x_offset) + f_center[:, :, 1] = f_center[:, :, 1] - ( + coors[:, 2].type_as(features).unsqueeze(1) * self.vy + + self.y_offset) + f_center[:, :, 2] = f_center[:, :, 2] - ( + coors[:, 1].type_as(features).unsqueeze(1) * self.vz + + self.z_offset) + features_ls.append(f_center) + + if self._with_distance: + points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True) + features_ls.append(points_dist) + + # Combine together feature decorations + features = torch.cat(features_ls, dim=-1) + # The feature decorations were calculated without regard to whether + # pillar was empty. 
Need to ensure that + # empty pillars remain set to zeros. + voxel_count = features.shape[1] + mask = get_paddings_indicator(num_points, voxel_count, axis=0) + mask = torch.unsqueeze(mask, -1).type_as(features) + features *= mask + + for pfn in self.pfn_layers: + features = pfn(features, num_points) + + return features.squeeze(1) + + +@MODELS.register_module() +class DynamicPillarFeatureNet(PillarFeatureNet): + """Pillar Feature Net using dynamic voxelization. + + The network prepares the pillar features and performs forward pass + through PFNLayers. The main difference is that it is used for + dynamic voxels, which contains different number of points inside a voxel + without limits. + + Args: + in_channels (int, optional): Number of input features, + either x, y, z or x, y, z, r. Defaults to 4. + feat_channels (tuple, optional): Number of features in each of the + N PFNLayers. Defaults to (64, ). + with_distance (bool, optional): Whether to include Euclidean distance + to points. Defaults to False. + with_cluster_center (bool, optional): [description]. Defaults to True. + with_voxel_center (bool, optional): [description]. Defaults to True. + voxel_size (tuple[float], optional): Size of voxels, only utilize x + and y size. Defaults to (0.2, 0.2, 4). + point_cloud_range (tuple[float], optional): Point cloud range, only + utilizes x and y min. Defaults to (0, -40, -3, 70.4, 40, 1). + norm_cfg ([type], optional): [description]. + Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01). + mode (str, optional): The mode to gather point features. Options are + 'max' or 'avg'. Defaults to 'max'. + legacy (bool, optional): Whether to use the new behavior or + the original behavior. Defaults to True. + """ + + def __init__(self, + in_channels=4, + feat_channels=(64, ), + with_distance=False, + with_cluster_center=True, + with_voxel_center=True, + voxel_size=(0.2, 0.2, 4), + point_cloud_range=(0, -40, -3, 70.4, 40, 1), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + mode='max', + legacy=True): + super(DynamicPillarFeatureNet, self).__init__( + in_channels, + feat_channels, + with_distance, + with_cluster_center=with_cluster_center, + with_voxel_center=with_voxel_center, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + norm_cfg=norm_cfg, + mode=mode, + legacy=legacy) + self.fp16_enabled = False + feat_channels = [self.in_channels] + list(feat_channels) + pfn_layers = [] + # TODO: currently only support one PFNLayer + + for i in range(len(feat_channels) - 1): + in_filters = feat_channels[i] + out_filters = feat_channels[i + 1] + if i > 0: + in_filters *= 2 + norm_name, norm_layer = build_norm_layer(norm_cfg, out_filters) + pfn_layers.append( + nn.Sequential( + nn.Linear(in_filters, out_filters, bias=False), norm_layer, + nn.ReLU(inplace=True))) + self.num_pfn = len(pfn_layers) + self.pfn_layers = nn.ModuleList(pfn_layers) + self.pfn_scatter = DynamicScatter(voxel_size, point_cloud_range, + (mode != 'max')) + self.cluster_scatter = DynamicScatter( + voxel_size, point_cloud_range, average_points=True) + + def map_voxel_center_to_point(self, pts_coors, voxel_mean, voxel_coors): + """Map the centers of voxels to its corresponding points. + + Args: + pts_coors (torch.Tensor): The coordinates of each points, shape + (M, 3), where M is the number of points. + voxel_mean (torch.Tensor): The mean or aggregated features of a + voxel, shape (N, C), where N is the number of voxels. + voxel_coors (torch.Tensor): The coordinates of each voxel. 
+ + Returns: + torch.Tensor: Corresponding voxel centers of each points, shape + (M, C), where M is the number of points. + """ + # Step 1: scatter voxel into canvas + # Calculate necessary things for canvas creation + canvas_y = int( + (self.point_cloud_range[4] - self.point_cloud_range[1]) / self.vy) + canvas_x = int( + (self.point_cloud_range[3] - self.point_cloud_range[0]) / self.vx) + canvas_channel = voxel_mean.size(1) + batch_size = pts_coors[-1, 0] + 1 + canvas_len = canvas_y * canvas_x * batch_size + # Create the canvas for this sample + canvas = voxel_mean.new_zeros(canvas_channel, canvas_len) + # Only include non-empty pillars + indices = ( + voxel_coors[:, 0] * canvas_y * canvas_x + + voxel_coors[:, 2] * canvas_x + voxel_coors[:, 3]) + # Scatter the blob back to the canvas + canvas[:, indices.long()] = voxel_mean.t() + + # Step 2: get voxel mean for each point + voxel_index = ( + pts_coors[:, 0] * canvas_y * canvas_x + + pts_coors[:, 2] * canvas_x + pts_coors[:, 3]) + center_per_point = canvas[:, voxel_index.long()].t() + return center_per_point + + def forward(self, features, coors): + """Forward function. + + Args: + features (torch.Tensor): Point features or raw points in shape + (N, M, C). + coors (torch.Tensor): Coordinates of each voxel + + Returns: + torch.Tensor: Features of pillars. + """ + features_ls = [features] + # Find distance of x, y, and z from cluster center + if self._with_cluster_center: + voxel_mean, mean_coors = self.cluster_scatter(features, coors) + points_mean = self.map_voxel_center_to_point( + coors, voxel_mean, mean_coors) + # TODO: maybe also do cluster for reflectivity + f_cluster = features[:, :3] - points_mean[:, :3] + features_ls.append(f_cluster) + + # Find distance of x, y, and z from pillar center + if self._with_voxel_center: + f_center = features.new_zeros(size=(features.size(0), 3)) + f_center[:, 0] = features[:, 0] - ( + coors[:, 3].type_as(features) * self.vx + self.x_offset) + f_center[:, 1] = features[:, 1] - ( + coors[:, 2].type_as(features) * self.vy + self.y_offset) + f_center[:, 2] = features[:, 2] - ( + coors[:, 1].type_as(features) * self.vz + self.z_offset) + features_ls.append(f_center) + + if self._with_distance: + points_dist = torch.norm(features[:, :3], 2, 1, keepdim=True) + features_ls.append(points_dist) + + # Combine together feature decorations + features = torch.cat(features_ls, dim=-1) + for i, pfn in enumerate(self.pfn_layers): + point_feats = pfn(features) + voxel_feats, voxel_coors = self.pfn_scatter(point_feats, coors) + if i != len(self.pfn_layers) - 1: + # need to concat voxel feats if it is not the last pfn + feat_per_point = self.map_voxel_center_to_point( + coors, voxel_feats, voxel_coors) + features = torch.cat([point_feats, feat_per_point], dim=1) + + return voxel_feats, voxel_coors diff --git a/mmdet3d/models/voxel_encoders/utils.py b/mmdet3d/models/voxel_encoders/utils.py new file mode 100755 index 0000000..9b9e7af --- /dev/null +++ b/mmdet3d/models/voxel_encoders/utils.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import build_norm_layer +from torch import nn +from torch.nn import functional as F + + +def get_paddings_indicator(actual_num, max_num, axis=0): + """Create boolean mask by actually number of a padded tensor. + + Args: + actual_num (torch.Tensor): Actual number of points in each voxel. + max_num (int): Max number of points in each voxel + + Returns: + torch.Tensor: Mask indicates which points are valid inside a voxel. 
+ """ + actual_num = torch.unsqueeze(actual_num, axis + 1) + # tiled_actual_num: [N, M, 1] + max_num_shape = [1] * len(actual_num.shape) + max_num_shape[axis + 1] = -1 + max_num = torch.arange( + max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape) + # tiled_actual_num: [[3,3,3,3,3], [4,4,4,4,4], [2,2,2,2,2]] + # tiled_max_num: [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]] + paddings_indicator = actual_num.int() > max_num + # paddings_indicator shape: [batch_size, max_num] + return paddings_indicator + + +class VFELayer(nn.Module): + """Voxel Feature Encoder layer. + + The voxel encoder is composed of a series of these layers. + This module do not support average pooling and only support to use + max pooling to gather features inside a VFE. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + norm_cfg (dict): Config dict of normalization layers + max_out (bool): Whether aggregate the features of points inside + each voxel and only return voxel features. + cat_max (bool): Whether concatenate the aggregated features + and pointwise features. + """ + + def __init__(self, + in_channels, + out_channels, + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + max_out=True, + cat_max=True): + super(VFELayer, self).__init__() + self.fp16_enabled = False + self.cat_max = cat_max + self.max_out = max_out + # self.units = int(out_channels / 2) + + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + self.linear = nn.Linear(in_channels, out_channels, bias=False) + + def forward(self, inputs): + """Forward function. + + Args: + inputs (torch.Tensor): Voxels features of shape (N, M, C). + N is the number of voxels, M is the number of points in + voxels, C is the number of channels of point features. + + Returns: + torch.Tensor: Voxel features. There are three mode under which the + features have different meaning. + - `max_out=False`: Return point-wise features in + shape (N, M, C). + - `max_out=True` and `cat_max=False`: Return aggregated + voxel features in shape (N, C) + - `max_out=True` and `cat_max=True`: Return concatenated + point-wise features in shape (N, M, C). + """ + # [K, T, 7] tensordot [7, units] = [K, T, units] + voxel_count = inputs.shape[1] + + x = self.linear(inputs) + x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2, + 1).contiguous() + pointwise = F.relu(x) + # [K, T, units] + if self.max_out: + aggregated = torch.max(pointwise, dim=1, keepdim=True)[0] + else: + # this is for fusion layer + return pointwise + + if not self.cat_max: + return aggregated.squeeze(1) + else: + # [K, 1, units] + repeated = aggregated.repeat(1, voxel_count, 1) + concatenated = torch.cat([pointwise, repeated], dim=2) + # [K, T, 2 * units] + return concatenated + + +class PFNLayer(nn.Module): + """Pillar Feature Net Layer. + + The Pillar Feature Net is composed of a series of these layers, but the + PointPillars paper results only used a single PFNLayer. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + norm_cfg (dict, optional): Config dict of normalization layers. + Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01). + last_layer (bool, optional): If last_layer, there is no + concatenation of features. Defaults to False. + mode (str, optional): Pooling model to gather features inside voxels. + Defaults to 'max'. 
+ """ + + def __init__(self, + in_channels, + out_channels, + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + last_layer=False, + mode='max'): + + super().__init__() + self.fp16_enabled = False + self.name = 'PFNLayer' + self.last_vfe = last_layer + if not self.last_vfe: + out_channels = out_channels // 2 + self.units = out_channels + + self.norm = build_norm_layer(norm_cfg, self.units)[1] + self.linear = nn.Linear(in_channels, self.units, bias=False) + + assert mode in ['max', 'avg'] + self.mode = mode + + def forward(self, inputs, num_voxels=None, aligned_distance=None): + """Forward function. + + Args: + inputs (torch.Tensor): Pillar/Voxel inputs with shape (N, M, C). + N is the number of voxels, M is the number of points in + voxels, C is the number of channels of point features. + num_voxels (torch.Tensor, optional): Number of points in each + voxel. Defaults to None. + aligned_distance (torch.Tensor, optional): The distance of + each points to the voxel center. Defaults to None. + + Returns: + torch.Tensor: Features of Pillars. + """ + x = self.linear(inputs) + x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2, + 1).contiguous() + x = F.relu(x) + + if self.mode == 'max': + if aligned_distance is not None: + x = x.mul(aligned_distance.unsqueeze(-1)) + x_max = torch.max(x, dim=1, keepdim=True)[0] + elif self.mode == 'avg': + if aligned_distance is not None: + x = x.mul(aligned_distance.unsqueeze(-1)) + x_max = x.sum( + dim=1, keepdim=True) / num_voxels.type_as(inputs).view( + -1, 1, 1) + + if self.last_vfe: + return x_max + else: + x_repeat = x_max.repeat(1, inputs.shape[1], 1) + x_concatenated = torch.cat([x, x_repeat], dim=2) + return x_concatenated diff --git a/mmdet3d/models/voxel_encoders/voxel_encoder.py b/mmdet3d/models/voxel_encoders/voxel_encoder.py new file mode 100755 index 0000000..550198e --- /dev/null +++ b/mmdet3d/models/voxel_encoders/voxel_encoder.py @@ -0,0 +1,640 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple + +import torch +from mmcv.cnn import build_norm_layer +from mmcv.ops import DynamicScatter +from torch import Tensor, nn + +from mmdet3d.registry import MODELS +from .utils import VFELayer, get_paddings_indicator + + +@MODELS.register_module() +class HardSimpleVFE(nn.Module): + """Simple voxel feature encoder used in SECOND. + + It simply averages the values of points in a voxel. + + Args: + num_features (int, optional): Number of features to use. Default: 4. + """ + + def __init__(self, num_features: int = 4) -> None: + super(HardSimpleVFE, self).__init__() + self.num_features = num_features + self.fp16_enabled = False + + def forward(self, features: Tensor, num_points: Tensor, coors: Tensor, + *args, **kwargs) -> Tensor: + """Forward function. + + Args: + features (torch.Tensor): Point features in shape + (N, M, 3(4)). N is the number of voxels and M is the maximum + number of points inside a single voxel. + num_points (torch.Tensor): Number of points in each voxel, + shape (N, ). + coors (torch.Tensor): Coordinates of voxels. + + Returns: + torch.Tensor: Mean of points inside each voxel in shape (N, 3(4)) + """ + points_mean = features[:, :, :self.num_features].sum( + dim=1, keepdim=False) / num_points.type_as(features).view(-1, 1) + return points_mean.contiguous() + + +@MODELS.register_module() +class DynamicSimpleVFE(nn.Module): + """Simple dynamic voxel feature encoder used in DV-SECOND. + + It simply averages the values of points in a voxel. 
+ But the number of points in a voxel is dynamic and varies. + + Args: + voxel_size (tupe[float]): Size of a single voxel + point_cloud_range (tuple[float]): Range of the point cloud and voxels + """ + + def __init__(self, + voxel_size=(0.2, 0.2, 4), + point_cloud_range=(0, -40, -3, 70.4, 40, 1)): + super(DynamicSimpleVFE, self).__init__() + self.scatter = DynamicScatter(voxel_size, point_cloud_range, True) + self.fp16_enabled = False + + @torch.no_grad() + def forward(self, features, coors, *args, **kwargs): + """Forward function. + + Args: + features (torch.Tensor): Point features in shape + (N, 3(4)). N is the number of points. + coors (torch.Tensor): Coordinates of voxels. + + Returns: + torch.Tensor: Mean of points inside each voxel in shape (M, 3(4)). + M is the number of voxels. + """ + # This function is used from the start of the voxelnet + # num_points: [concated_num_points] + features, features_coors = self.scatter(features, coors) + return features, features_coors + + +@MODELS.register_module() +class DynamicVFE(nn.Module): + """Dynamic Voxel feature encoder used in DV-SECOND. + + It encodes features of voxels and their points. It could also fuse + image feature into voxel features in a point-wise manner. + The number of points inside the voxel varies. + + Args: + in_channels (int, optional): Input channels of VFE. Defaults to 4. + feat_channels (list(int), optional): Channels of features in VFE. + with_distance (bool, optional): Whether to use the L2 distance of + points to the origin point. Defaults to False. + with_cluster_center (bool, optional): Whether to use the distance + to cluster center of points inside a voxel. Defaults to False. + with_voxel_center (bool, optional): Whether to use the distance + to center of voxel for each points inside a voxel. + Defaults to False. + voxel_size (tuple[float], optional): Size of a single voxel. + Defaults to (0.2, 0.2, 4). + point_cloud_range (tuple[float], optional): The range of points + or voxels. Defaults to (0, -40, -3, 70.4, 40, 1). + norm_cfg (dict, optional): Config dict of normalization layers. + mode (str, optional): The mode when pooling features of points + inside a voxel. Available options include 'max' and 'avg'. + Defaults to 'max'. + fusion_layer (dict, optional): The config dict of fusion + layer used in multi-modal detectors. Defaults to None. + return_point_feats (bool, optional): Whether to return the features + of each points. Defaults to False. 
+ """ + + def __init__(self, + in_channels=4, + feat_channels=[], + with_distance=False, + with_cluster_center=False, + with_voxel_center=False, + voxel_size=(0.2, 0.2, 4), + point_cloud_range=(0, -40, -3, 70.4, 40, 1), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + mode='max', + fusion_layer=None, + return_point_feats=False): + super(DynamicVFE, self).__init__() + assert mode in ['avg', 'max'] + assert len(feat_channels) > 0 + if with_cluster_center: + in_channels += 3 + if with_voxel_center: + in_channels += 3 + if with_distance: + in_channels += 1 + self.in_channels = in_channels + self._with_distance = with_distance + self._with_cluster_center = with_cluster_center + self._with_voxel_center = with_voxel_center + self.return_point_feats = return_point_feats + self.fp16_enabled = False + + # Need pillar (voxel) size and x/y offset in order to calculate offset + self.vx = voxel_size[0] + self.vy = voxel_size[1] + self.vz = voxel_size[2] + self.x_offset = self.vx / 2 + point_cloud_range[0] + self.y_offset = self.vy / 2 + point_cloud_range[1] + self.z_offset = self.vz / 2 + point_cloud_range[2] + self.point_cloud_range = point_cloud_range + + feat_channels = [self.in_channels] + list(feat_channels) + vfe_layers = [] + for i in range(len(feat_channels) - 1): + in_filters = feat_channels[i] + out_filters = feat_channels[i + 1] + if i > 0: + in_filters *= 2 + norm_name, norm_layer = build_norm_layer(norm_cfg, out_filters) + vfe_layers.append( + nn.Sequential( + nn.Linear(in_filters, out_filters, bias=False), norm_layer, + nn.ReLU(inplace=True))) + self.vfe_layers = nn.ModuleList(vfe_layers) + self.num_vfe = len(vfe_layers) + self.vfe_scatter = DynamicScatter(voxel_size, point_cloud_range, + (mode != 'max')) + self.cluster_scatter = DynamicScatter( + voxel_size, point_cloud_range, average_points=True) + self.fusion_layer = None + if fusion_layer is not None: + self.fusion_layer = MODELS.build(fusion_layer) + + def map_voxel_center_to_point(self, pts_coors, voxel_mean, voxel_coors): + """Map voxel features to its corresponding points. + + Args: + pts_coors (torch.Tensor): Voxel coordinate of each point. + voxel_mean (torch.Tensor): Voxel features to be mapped. + voxel_coors (torch.Tensor): Coordinates of valid voxels + + Returns: + torch.Tensor: Features or centers of each point. + """ + # Step 1: scatter voxel into canvas + # Calculate necessary things for canvas creation + canvas_z = int( + (self.point_cloud_range[5] - self.point_cloud_range[2]) / self.vz) + canvas_y = int( + (self.point_cloud_range[4] - self.point_cloud_range[1]) / self.vy) + canvas_x = int( + (self.point_cloud_range[3] - self.point_cloud_range[0]) / self.vx) + # canvas_channel = voxel_mean.size(1) + batch_size = pts_coors[-1, 0] + 1 + canvas_len = canvas_z * canvas_y * canvas_x * batch_size + # Create the canvas for this sample + canvas = voxel_mean.new_zeros(canvas_len, dtype=torch.long) + # Only include non-empty pillars + indices = ( + voxel_coors[:, 0] * canvas_z * canvas_y * canvas_x + + voxel_coors[:, 1] * canvas_y * canvas_x + + voxel_coors[:, 2] * canvas_x + voxel_coors[:, 3]) + # Scatter the blob back to the canvas + canvas[indices.long()] = torch.arange( + start=0, end=voxel_mean.size(0), device=voxel_mean.device) + + # Step 2: get voxel mean for each point + voxel_index = ( + pts_coors[:, 0] * canvas_z * canvas_y * canvas_x + + pts_coors[:, 1] * canvas_y * canvas_x + + pts_coors[:, 2] * canvas_x + pts_coors[:, 3]) + voxel_inds = canvas[voxel_index.long()] + center_per_point = voxel_mean[voxel_inds, ...] 
+ return center_per_point + + def forward(self, + features, + coors, + points=None, + img_feats=None, + img_metas=None, + *args, + **kwargs): + """Forward functions. + + Args: + features (torch.Tensor): Features of voxels, shape is NxC. + coors (torch.Tensor): Coordinates of voxels, shape is Nx(1+NDim). + points (list[torch.Tensor], optional): Raw points used to guide the + multi-modality fusion. Defaults to None. + img_feats (list[torch.Tensor], optional): Image features used for + multi-modality fusion. Defaults to None. + img_metas (dict, optional): [description]. Defaults to None. + + Returns: + tuple: If `return_point_feats` is False, returns voxel features and + its coordinates. If `return_point_feats` is True, returns + feature of each points inside voxels. + """ + features_ls = [features] + # Find distance of x, y, and z from cluster center + if self._with_cluster_center: + voxel_mean, mean_coors = self.cluster_scatter(features, coors) + points_mean = self.map_voxel_center_to_point( + coors, voxel_mean, mean_coors) + # TODO: maybe also do cluster for reflectivity + f_cluster = features[:, :3] - points_mean[:, :3] + features_ls.append(f_cluster) + + # Find distance of x, y, and z from pillar center + if self._with_voxel_center: + f_center = features.new_zeros(size=(features.size(0), 3)) + f_center[:, 0] = features[:, 0] - ( + coors[:, 3].type_as(features) * self.vx + self.x_offset) + f_center[:, 1] = features[:, 1] - ( + coors[:, 2].type_as(features) * self.vy + self.y_offset) + f_center[:, 2] = features[:, 2] - ( + coors[:, 1].type_as(features) * self.vz + self.z_offset) + features_ls.append(f_center) + + if self._with_distance: + points_dist = torch.norm(features[:, :3], 2, 1, keepdim=True) + features_ls.append(points_dist) + + # Combine together feature decorations + features = torch.cat(features_ls, dim=-1) + for i, vfe in enumerate(self.vfe_layers): + point_feats = vfe(features) + if (i == len(self.vfe_layers) - 1 and self.fusion_layer is not None + and img_feats is not None): + point_feats = self.fusion_layer(img_feats, points, point_feats, + img_metas) + voxel_feats, voxel_coors = self.vfe_scatter(point_feats, coors) + if i != len(self.vfe_layers) - 1: + # need to concat voxel feats if it is not the last vfe + feat_per_point = self.map_voxel_center_to_point( + coors, voxel_feats, voxel_coors) + features = torch.cat([point_feats, feat_per_point], dim=1) + + if self.return_point_feats: + return point_feats + return voxel_feats, voxel_coors + + +@MODELS.register_module() +class HardVFE(nn.Module): + """Voxel feature encoder used in DV-SECOND. + + It encodes features of voxels and their points. It could also fuse + image feature into voxel features in a point-wise manner. + + Args: + in_channels (int, optional): Input channels of VFE. Defaults to 4. + feat_channels (list(int), optional): Channels of features in VFE. + with_distance (bool, optional): Whether to use the L2 distance + of points to the origin point. Defaults to False. + with_cluster_center (bool, optional): Whether to use the distance + to cluster center of points inside a voxel. Defaults to False. + with_voxel_center (bool, optional): Whether to use the distance to + center of voxel for each points inside a voxel. Defaults to False. + voxel_size (tuple[float], optional): Size of a single voxel. + Defaults to (0.2, 0.2, 4). + point_cloud_range (tuple[float], optional): The range of points + or voxels. Defaults to (0, -40, -3, 70.4, 40, 1). + norm_cfg (dict, optional): Config dict of normalization layers. 
+ mode (str, optional): The mode when pooling features of points inside a + voxel. Available options include 'max' and 'avg'. + Defaults to 'max'. + fusion_layer (dict, optional): The config dict of fusion layer + used in multi-modal detectors. Defaults to None. + return_point_feats (bool, optional): Whether to return the + features of each points. Defaults to False. + """ + + def __init__(self, + in_channels=4, + feat_channels=[], + with_distance=False, + with_cluster_center=False, + with_voxel_center=False, + voxel_size=(0.2, 0.2, 4), + point_cloud_range=(0, -40, -3, 70.4, 40, 1), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + mode='max', + fusion_layer=None, + return_point_feats=False): + super(HardVFE, self).__init__() + assert len(feat_channels) > 0 + if with_cluster_center: + in_channels += 3 + if with_voxel_center: + in_channels += 3 + if with_distance: + in_channels += 1 + self.in_channels = in_channels + self._with_distance = with_distance + self._with_cluster_center = with_cluster_center + self._with_voxel_center = with_voxel_center + self.return_point_feats = return_point_feats + self.fp16_enabled = False + + # Need pillar (voxel) size and x/y offset to calculate pillar offset + self.vx = voxel_size[0] + self.vy = voxel_size[1] + self.vz = voxel_size[2] + self.x_offset = self.vx / 2 + point_cloud_range[0] + self.y_offset = self.vy / 2 + point_cloud_range[1] + self.z_offset = self.vz / 2 + point_cloud_range[2] + self.point_cloud_range = point_cloud_range + + feat_channels = [self.in_channels] + list(feat_channels) + vfe_layers = [] + for i in range(len(feat_channels) - 1): + in_filters = feat_channels[i] + out_filters = feat_channels[i + 1] + if i > 0: + in_filters *= 2 + # TODO: pass norm_cfg to VFE + # norm_name, norm_layer = build_norm_layer(norm_cfg, out_filters) + if i == (len(feat_channels) - 2): + cat_max = False + max_out = True + if fusion_layer: + max_out = False + else: + max_out = True + cat_max = True + vfe_layers.append( + VFELayer( + in_filters, + out_filters, + norm_cfg=norm_cfg, + max_out=max_out, + cat_max=cat_max)) + self.vfe_layers = nn.ModuleList(vfe_layers) + self.num_vfe = len(vfe_layers) + + self.fusion_layer = None + if fusion_layer is not None: + self.fusion_layer = MODELS.build(fusion_layer) + + def forward(self, + features, + num_points, + coors, + img_feats=None, + img_metas=None, + *args, + **kwargs): + """Forward functions. + + Args: + features (torch.Tensor): Features of voxels, shape is MxNxC. + num_points (torch.Tensor): Number of points in each voxel. + coors (torch.Tensor): Coordinates of voxels, shape is Mx(1+NDim). + img_feats (list[torch.Tensor], optional): Image features used for + multi-modality fusion. Defaults to None. + img_metas (dict, optional): [description]. Defaults to None. + + Returns: + tuple: If `return_point_feats` is False, returns voxel features and + its coordinates. If `return_point_feats` is True, returns + feature of each points inside voxels. 
+ """ + features_ls = [features] + # Find distance of x, y, and z from cluster center + if self._with_cluster_center: + points_mean = ( + features[:, :, :3].sum(dim=1, keepdim=True) / + num_points.type_as(features).view(-1, 1, 1)) + # TODO: maybe also do cluster for reflectivity + f_cluster = features[:, :, :3] - points_mean + features_ls.append(f_cluster) + + # Find distance of x, y, and z from pillar center + if self._with_voxel_center: + f_center = features.new_zeros( + size=(features.size(0), features.size(1), 3)) + f_center[:, :, 0] = features[:, :, 0] - ( + coors[:, 3].type_as(features).unsqueeze(1) * self.vx + + self.x_offset) + f_center[:, :, 1] = features[:, :, 1] - ( + coors[:, 2].type_as(features).unsqueeze(1) * self.vy + + self.y_offset) + f_center[:, :, 2] = features[:, :, 2] - ( + coors[:, 1].type_as(features).unsqueeze(1) * self.vz + + self.z_offset) + features_ls.append(f_center) + + if self._with_distance: + points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True) + features_ls.append(points_dist) + + # Combine together feature decorations + voxel_feats = torch.cat(features_ls, dim=-1) + # The feature decorations were calculated without regard to whether + # pillar was empty. + # Need to ensure that empty voxels remain set to zeros. + voxel_count = voxel_feats.shape[1] + mask = get_paddings_indicator(num_points, voxel_count, axis=0) + voxel_feats *= mask.unsqueeze(-1).type_as(voxel_feats) + + for i, vfe in enumerate(self.vfe_layers): + voxel_feats = vfe(voxel_feats) + + if (self.fusion_layer is not None and img_feats is not None): + voxel_feats = self.fusion_with_mask(features, mask, voxel_feats, + coors, img_feats, img_metas) + + return voxel_feats + + def fusion_with_mask(self, features, mask, voxel_feats, coors, img_feats, + img_metas): + """Fuse image and point features with mask. + + Args: + features (torch.Tensor): Features of voxel, usually it is the + values of points in voxels. + mask (torch.Tensor): Mask indicates valid features in each voxel. + voxel_feats (torch.Tensor): Features of voxels. + coors (torch.Tensor): Coordinates of each single voxel. + img_feats (list[torch.Tensor]): Multi-scale feature maps of image. + img_metas (list(dict)): Meta information of image and points. + + Returns: + torch.Tensor: Fused features of each voxel. + """ + # the features is consist of a batch of points + batch_size = coors[-1, 0] + 1 + points = [] + for i in range(batch_size): + single_mask = (coors[:, 0] == i) + points.append(features[single_mask][mask[single_mask]]) + + point_feats = voxel_feats[mask] + point_feats = self.fusion_layer(img_feats, points, point_feats, + img_metas) + + voxel_canvas = voxel_feats.new_zeros( + size=(voxel_feats.size(0), voxel_feats.size(1), + point_feats.size(-1))) + voxel_canvas[mask] = point_feats + out = torch.max(voxel_canvas, dim=1)[0] + + return out + + +@MODELS.register_module() +class SegVFE(nn.Module): + """Voxel feature encoder used in segmentation task. + + It encodes features of voxels and their points. It could also fuse + image feature into voxel features in a point-wise manner. + The number of points inside the voxel varies. + + Args: + in_channels (int): Input channels of VFE. Defaults to 6. + feat_channels (list(int)): Channels of features in VFE. + with_voxel_center (bool): Whether to use the distance + to center of voxel for each points inside a voxel. + Defaults to False. + voxel_size (tuple[float]): Size of a single voxel (rho, phi, z). + Defaults to None. 
+ grid_shape (tuple[float]): The grid shape of voxelization. + Defaults to (480, 360, 32). + point_cloud_range (tuple[float]): The range of points or voxels. + Defaults to (0, -3.14159265359, -4, 50, 3.14159265359, 2). + norm_cfg (dict): Config dict of normalization layers. + mode (str): The mode when pooling features of points + inside a voxel. Available options include 'max' and 'avg'. + Defaults to 'max'. + with_pre_norm (bool): Whether to use the norm layer before + input vfe layer. + feat_compression (int, optional): The voxel feature compression + channels, Defaults to None + return_point_feats (bool): Whether to return the features + of each points. Defaults to False. + """ + + def __init__(self, + in_channels: int = 6, + feat_channels: Sequence[int] = [], + with_voxel_center: bool = False, + voxel_size: Optional[Sequence[float]] = None, + grid_shape: Sequence[float] = (480, 360, 32), + point_cloud_range: Sequence[float] = (0, -3.14159265359, -4, + 50, 3.14159265359, 2), + norm_cfg: dict = dict(type='BN1d', eps=1e-5, momentum=0.1), + mode: bool = 'max', + with_pre_norm: bool = True, + feat_compression: Optional[int] = None, + return_point_feats: bool = False) -> None: + super(SegVFE, self).__init__() + assert mode in ['avg', 'max'] + assert len(feat_channels) > 0 + assert not (voxel_size and grid_shape), \ + 'voxel_size and grid_shape cannot be setting at the same time' + if with_voxel_center: + in_channels += 3 + self.in_channels = in_channels + self._with_voxel_center = with_voxel_center + self.return_point_feats = return_point_feats + + self.point_cloud_range = point_cloud_range + point_cloud_range = torch.tensor( + point_cloud_range, dtype=torch.float32) + if voxel_size: + self.voxel_size = voxel_size + voxel_size = torch.tensor(voxel_size, dtype=torch.float32) + grid_shape = (point_cloud_range[3:] - + point_cloud_range[:3]) / voxel_size + grid_shape = torch.round(grid_shape).long().tolist() + self.grid_shape = grid_shape + elif grid_shape: + grid_shape = torch.tensor(grid_shape, dtype=torch.float32) + voxel_size = (point_cloud_range[3:] - point_cloud_range[:3]) / ( + grid_shape - 1) + voxel_size = voxel_size.tolist() + self.voxel_size = voxel_size + else: + raise ValueError('must assign a value to voxel_size or grid_shape') + + # Need pillar (voxel) size and x/y offset in order to calculate offset + self.vx = self.voxel_size[0] + self.vy = self.voxel_size[1] + self.vz = self.voxel_size[2] + self.x_offset = self.vx / 2 + point_cloud_range[0] + self.y_offset = self.vy / 2 + point_cloud_range[1] + self.z_offset = self.vz / 2 + point_cloud_range[2] + + feat_channels = [self.in_channels] + list(feat_channels) + if with_pre_norm: + self.pre_norm = build_norm_layer(norm_cfg, self.in_channels)[1] + vfe_layers = [] + for i in range(len(feat_channels) - 1): + in_filters = feat_channels[i] + out_filters = feat_channels[i + 1] + norm_layer = build_norm_layer(norm_cfg, out_filters)[1] + if i == len(feat_channels) - 2: + vfe_layers.append(nn.Linear(in_filters, out_filters)) + else: + vfe_layers.append( + nn.Sequential( + nn.Linear(in_filters, out_filters), norm_layer, + nn.ReLU(inplace=True))) + self.vfe_layers = nn.ModuleList(vfe_layers) + self.vfe_scatter = DynamicScatter(self.voxel_size, + self.point_cloud_range, + (mode != 'max')) + self.compression_layers = None + if feat_compression is not None: + self.compression_layers = nn.Sequential( + nn.Linear(feat_channels[-1], feat_compression), nn.ReLU()) + + def forward(self, features: Tensor, coors: Tensor, *args, + **kwargs) -> 
Tuple[Tensor]: + """Forward functions. + + Args: + features (Tensor): Features of voxels, shape is NxC. + coors (Tensor): Coordinates of voxels, shape is Nx(1+NDim). + + Returns: + tuple: If `return_point_feats` is False, returns voxel features and + its coordinates. If `return_point_feats` is True, returns + feature of each points inside voxels additionally. + """ + features_ls = [features] + + # Find distance of x, y, and z from voxel center + if self._with_voxel_center: + f_center = features.new_zeros(size=(features.size(0), 3)) + f_center[:, 0] = features[:, 0] - ( + coors[:, 1].type_as(features) * self.vx + self.x_offset) + f_center[:, 1] = features[:, 1] - ( + coors[:, 2].type_as(features) * self.vy + self.y_offset) + f_center[:, 2] = features[:, 2] - ( + coors[:, 3].type_as(features) * self.vz + self.z_offset) + features_ls.append(f_center) + + # Combine together feature decorations + features = torch.cat(features_ls[::-1], dim=-1) + if self.pre_norm is not None: + features = self.pre_norm(features) + + point_feats = [] + for vfe in self.vfe_layers: + features = vfe(features) + point_feats.append(features) + voxel_feats, voxel_coors = self.vfe_scatter(features, coors) + + if self.compression_layers is not None: + voxel_feats = self.compression_layers(voxel_feats) + + if self.return_point_feats: + return voxel_feats, voxel_coors, point_feats + return voxel_feats, voxel_coors diff --git a/mmdet3d/registry.py b/mmdet3d/registry.py new file mode 100755 index 0000000..0278a76 --- /dev/null +++ b/mmdet3d/registry.py @@ -0,0 +1,141 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""MMDetection3D provides 17 registry nodes to support using modules across +projects. Each node is a child of the root registry in MMEngine. + +More details can be found at +https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. 
+""" + +from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS +from mmengine.registry import DATASETS as MMENGINE_DATASETS +from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR +from mmengine.registry import HOOKS as MMENGINE_HOOKS +from mmengine.registry import INFERENCERS as MMENGINE_INFERENCERS +from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS +from mmengine.registry import LOOPS as MMENGINE_LOOPS +from mmengine.registry import METRICS as MMENGINE_METRICS +from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS +from mmengine.registry import MODELS as MMENGINE_MODELS +from mmengine.registry import \ + OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS +from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS +from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS +from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS +from mmengine.registry import \ + RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS +from mmengine.registry import RUNNERS as MMENGINE_RUNNERS +from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS +from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS +from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS +from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS +from mmengine.registry import \ + WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS +from mmengine.registry import Registry + +# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner` +RUNNERS = Registry( + # TODO: update the location when mmdet3d has its own runner + 'runner', + parent=MMENGINE_RUNNERS, + locations=['mmdet3d.engine']) +# manage runner constructors that define how to initialize runners +RUNNER_CONSTRUCTORS = Registry( + 'runner constructor', + parent=MMENGINE_RUNNER_CONSTRUCTORS, + # TODO: update the location when mmdet3d has its own runner + locations=['mmdet3d.engine']) +# manage all kinds of loops like `EpochBasedTrainLoop` +LOOPS = Registry( + # TODO: update the location when mmdet3d has its own loop + 'loop', + parent=MMENGINE_LOOPS, + locations=['mmdet3d.engine']) +# manage all kinds of hooks like `CheckpointHook` +HOOKS = Registry( + 'hook', parent=MMENGINE_HOOKS, locations=['mmdet3d.engine.hooks']) + +# manage data-related modules +DATASETS = Registry( + 'dataset', parent=MMENGINE_DATASETS, locations=['mmdet3d.datasets']) +DATA_SAMPLERS = Registry( + 'data sampler', + parent=MMENGINE_DATA_SAMPLERS, + # TODO: update the location when mmdet3d has its own data sampler + locations=['mmdet3d.datasets']) +TRANSFORMS = Registry( + 'transform', + parent=MMENGINE_TRANSFORMS, + locations=['mmdet3d.datasets.transforms']) + +# mangage all kinds of modules inheriting `nn.Module` +MODELS = Registry( + 'model', parent=MMENGINE_MODELS, locations=['mmdet3d.models']) +# mangage all kinds of model wrappers like 'MMDistributedDataParallel' +MODEL_WRAPPERS = Registry( + 'model_wrapper', + parent=MMENGINE_MODEL_WRAPPERS, + locations=['mmdet3d.models']) +# mangage all kinds of weight initialization modules like `Uniform` +WEIGHT_INITIALIZERS = Registry( + 'weight initializer', + parent=MMENGINE_WEIGHT_INITIALIZERS, + locations=['mmdet3d.models']) + +# mangage all kinds of optimizers like `SGD` and `Adam` +OPTIMIZERS = Registry( + 'optimizer', + parent=MMENGINE_OPTIMIZERS, + # TODO: update the location when mmdet3d has its own optimizer + locations=['mmdet3d.engine']) +# manage optimizer wrapper +OPTIM_WRAPPERS = Registry( + 
'optim wrapper', + parent=MMENGINE_OPTIM_WRAPPERS, + # TODO: update the location when mmdet3d has its own optimizer + locations=['mmdet3d.engine']) +# manage constructors that customize the optimization hyperparameters. +OPTIM_WRAPPER_CONSTRUCTORS = Registry( + 'optimizer wrapper constructor', + parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS, + # TODO: update the location when mmdet3d has its own optimizer + locations=['mmdet3d.engine']) +# manage all kinds of parameter schedulers like `MultiStepLR` +PARAM_SCHEDULERS = Registry( + 'parameter scheduler', + parent=MMENGINE_PARAM_SCHEDULERS, + # TODO: update the location when mmdet3d has its own scheduler + locations=['mmdet3d.engine']) +# manage all kinds of metrics +METRICS = Registry( + 'metric', parent=MMENGINE_METRICS, locations=['mmdet3d.evaluation']) +# manage evaluator +EVALUATOR = Registry( + 'evaluator', parent=MMENGINE_EVALUATOR, locations=['mmdet3d.evaluation']) + +# manage task-specific modules like anchor generators and box coders +TASK_UTILS = Registry( + 'task util', parent=MMENGINE_TASK_UTILS, locations=['mmdet3d.models']) + +# manage visualizer +VISUALIZERS = Registry( + 'visualizer', + parent=MMENGINE_VISUALIZERS, + locations=['mmdet3d.visualization']) +# manage visualizer backend +VISBACKENDS = Registry( + 'vis_backend', + parent=MMENGINE_VISBACKENDS, + locations=['mmdet3d.visualization']) + +# manage log processor +LOG_PROCESSORS = Registry( + 'log_processor', + parent=MMENGINE_LOG_PROCESSORS, + # TODO: update the location when mmdet3d has its own log processor + locations=['mmdet3d.engine']) + +# manage inferencer +INFERENCERS = Registry( + 'inferencer', + parent=MMENGINE_INFERENCERS, + locations=['mmdet3d.apis.inferencers']) diff --git a/mmdet3d/structures/__init__.py b/mmdet3d/structures/__init__.py new file mode 100755 index 0000000..1bb1924 --- /dev/null +++ b/mmdet3d/structures/__init__.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved.
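As a usage note for the registry nodes defined above, the following is a minimal sketch (not part of the patch) of registering a custom module in the MODELS node and building it from a config dict; `DummyBackbone` and its `depth` argument are made-up names used only for illustration.

# Sketch: register a module in mmdet3d's MODELS registry and build it from
# a config dict, mirroring how `type=...` entries in the configs resolve.
import torch
import torch.nn as nn

from mmdet3d.registry import MODELS


@MODELS.register_module()
class DummyBackbone(nn.Module):
    """Hypothetical backbone used only to illustrate the registry."""

    def __init__(self, depth=1):
        super().__init__()
        self.layers = nn.Sequential(
            *[nn.Conv2d(3, 3, 3, padding=1) for _ in range(depth)])

    def forward(self, x):
        return self.layers(x)


# MODELS is a child of MMEngine's root MODELS registry, so the same
# build-from-config mechanism works across OpenMMLab projects.
backbone = MODELS.build(dict(type='DummyBackbone', depth=2))
out = backbone(torch.rand(1, 3, 32, 32))
print(out.shape)  # torch.Size([1, 3, 32, 32])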
+from .bbox_3d import (BaseInstance3DBoxes, Box3DMode, CameraInstance3DBoxes, + Coord3DMode, DepthInstance3DBoxes, LiDARInstance3DBoxes, + get_box_type, get_proj_mat_by_coord_type, limit_period, + mono_cam_box2vis, points_cam2img, points_img2cam, + rotation_3d_in_axis, xywhr2xyxyr) +from .det3d_data_sample import Det3DDataSample +# yapf: disable +from .ops import (AxisAlignedBboxOverlaps3D, BboxOverlaps3D, + BboxOverlapsNearest3D, axis_aligned_bbox_overlaps_3d, + bbox3d2result, bbox3d2roi, bbox3d_mapping_back, + bbox_overlaps_3d, bbox_overlaps_nearest_3d, + box2d_to_corner_jit, box3d_to_bbox, box_camera_to_lidar, + boxes3d_to_corners3d_lidar, camera_to_lidar, + center_to_corner_box2d, center_to_corner_box3d, + center_to_minmax_2d, corner_to_standup_nd_jit, + corner_to_surfaces_3d, corner_to_surfaces_3d_jit, corners_nd, + create_anchors_3d_range, depth_to_lidar_points, + depth_to_points, get_frustum, iou_jit, minmax_to_corner_2d, + points_in_convex_polygon_3d_jit, + points_in_convex_polygon_jit, points_in_rbbox, + projection_matrix_to_CRT_kitti, rbbox2d_to_near_bbox, + remove_outside_points, rotation_points_single_angle, + surface_equ_3d) +# yapf: enable +from .point_data import PointData +from .points import BasePoints, CameraPoints, DepthPoints, LiDARPoints + +__all__ = [ + 'BasePoints', 'CameraPoints', 'DepthPoints', 'LiDARPoints', + 'Det3DDataSample', 'PointData', 'Box3DMode', 'BaseInstance3DBoxes', + 'LiDARInstance3DBoxes', 'CameraInstance3DBoxes', 'DepthInstance3DBoxes', + 'xywhr2xyxyr', 'get_box_type', 'rotation_3d_in_axis', 'limit_period', + 'points_cam2img', 'points_img2cam', 'Coord3DMode', 'mono_cam_box2vis', + 'get_proj_mat_by_coord_type', 'box2d_to_corner_jit', 'box3d_to_bbox', + 'box_camera_to_lidar', 'boxes3d_to_corners3d_lidar', 'camera_to_lidar', + 'center_to_corner_box2d', 'center_to_corner_box3d', 'center_to_minmax_2d', + 'corner_to_standup_nd_jit', 'corner_to_surfaces_3d', + 'corner_to_surfaces_3d_jit', 'corners_nd', 'create_anchors_3d_range', + 'depth_to_lidar_points', 'depth_to_points', 'get_frustum', 'iou_jit', + 'minmax_to_corner_2d', 'points_in_convex_polygon_3d_jit', + 'points_in_convex_polygon_jit', 'points_in_rbbox', + 'projection_matrix_to_CRT_kitti', 'rbbox2d_to_near_bbox', + 'remove_outside_points', 'rotation_points_single_angle', 'surface_equ_3d', + 'BboxOverlapsNearest3D', 'BboxOverlaps3D', 'bbox_overlaps_nearest_3d', + 'bbox_overlaps_3d', 'AxisAlignedBboxOverlaps3D', + 'axis_aligned_bbox_overlaps_3d', 'bbox3d_mapping_back', 'bbox3d2roi', + 'bbox3d2result' +] diff --git a/mmdet3d/structures/bbox_3d/__init__.py b/mmdet3d/structures/bbox_3d/__init__.py new file mode 100755 index 0000000..460035a --- /dev/null +++ b/mmdet3d/structures/bbox_3d/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
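The exports above gather the point, box and data-sample structures into one namespace. A minimal sketch of how they are typically used follows (arbitrary numbers, assuming the package in this patch is importable):

# Sketch: build a small LiDARInstance3DBoxes and query a few of the
# properties defined later in this patch.
import torch

from mmdet3d.structures import LiDARInstance3DBoxes

# Each row is (x, y, z, x_size, y_size, z_size, yaw) in LiDAR coordinates.
boxes = LiDARInstance3DBoxes(
    torch.tensor([[10.0, 2.0, -1.5, 4.0, 1.8, 1.6, 0.3]]))

print(boxes.gravity_center)  # bottom center lifted by half of z_size
print(boxes.corners.shape)   # torch.Size([1, 8, 3])
print(boxes.bev)             # (x, y, x_size, y_size, yaw) in bird's-eye view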
+from .base_box3d import BaseInstance3DBoxes +from .box_3d_mode import Box3DMode +from .cam_box3d import CameraInstance3DBoxes +from .coord_3d_mode import Coord3DMode +from .depth_box3d import DepthInstance3DBoxes +from .lidar_box3d import LiDARInstance3DBoxes +from .utils import (get_box_type, get_proj_mat_by_coord_type, limit_period, + mono_cam_box2vis, points_cam2img, points_img2cam, + rotation_3d_in_axis, xywhr2xyxyr) + +__all__ = [ + 'Box3DMode', 'BaseInstance3DBoxes', 'LiDARInstance3DBoxes', + 'CameraInstance3DBoxes', 'DepthInstance3DBoxes', 'xywhr2xyxyr', + 'get_box_type', 'rotation_3d_in_axis', 'limit_period', 'points_cam2img', + 'points_img2cam', 'Coord3DMode', 'mono_cam_box2vis', + 'get_proj_mat_by_coord_type' +] diff --git a/mmdet3d/structures/bbox_3d/base_box3d.py b/mmdet3d/structures/bbox_3d/base_box3d.py new file mode 100755 index 0000000..fba20ce --- /dev/null +++ b/mmdet3d/structures/bbox_3d/base_box3d.py @@ -0,0 +1,584 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from abc import abstractmethod + +import numpy as np +import torch +from mmcv.ops import box_iou_rotated, points_in_boxes_all, points_in_boxes_part + +from .utils import limit_period + + +class BaseInstance3DBoxes(object): + """Base class for 3D Boxes. + + Note: + The box is bottom centered, i.e. the relative position of origin in + the box is (0.5, 0.5, 0). + + Args: + tensor (torch.Tensor | np.ndarray | list): a N x box_dim matrix. + box_dim (int): Number of the dimension of a box. + Each row is (x, y, z, x_size, y_size, z_size, yaw). + Defaults to 7. + with_yaw (bool): Whether the box is with yaw rotation. + If False, the value of yaw will be set to 0 as minmax boxes. + Defaults to True. + origin (tuple[float], optional): Relative position of the box origin. + Defaults to (0.5, 0.5, 0). This will guide the box be converted to + (0.5, 0.5, 0) mode. + + Attributes: + tensor (torch.Tensor): Float matrix of N x box_dim. + box_dim (int): Integer indicating the dimension of a box. + Each row is (x, y, z, x_size, y_size, z_size, yaw, ...). + with_yaw (bool): If True, the value of yaw will be set to 0 as minmax + boxes. + """ + + def __init__(self, tensor, box_dim=7, with_yaw=True, origin=(0.5, 0.5, 0)): + if isinstance(tensor, torch.Tensor): + device = tensor.device + else: + device = torch.device('cpu') + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that + # does not depend on the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, box_dim)).to( + dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == box_dim, tensor.size() + + if tensor.shape[-1] == 6: + # If the dimension of boxes is 6, we expand box_dim by padding + # 0 as a fake yaw and set with_yaw to False. 
+ assert box_dim == 6 + fake_rot = tensor.new_zeros(tensor.shape[0], 1) + tensor = torch.cat((tensor, fake_rot), dim=-1) + self.box_dim = box_dim + 1 + self.with_yaw = False + else: + self.box_dim = box_dim + self.with_yaw = with_yaw + self.tensor = tensor.clone() + + if origin != (0.5, 0.5, 0): + dst = self.tensor.new_tensor((0.5, 0.5, 0)) + src = self.tensor.new_tensor(origin) + self.tensor[:, :3] += self.tensor[:, 3:6] * (dst - src) + + @property + def volume(self): + """torch.Tensor: A vector with volume of each box.""" + return self.tensor[:, 3] * self.tensor[:, 4] * self.tensor[:, 5] + + @property + def dims(self): + """torch.Tensor: Size dimensions of each box in shape (N, 3).""" + return self.tensor[:, 3:6] + + @property + def yaw(self): + """torch.Tensor: A vector with yaw of each box in shape (N, ).""" + return self.tensor[:, 6] + + @property + def height(self): + """torch.Tensor: A vector with height of each box in shape (N, ).""" + return self.tensor[:, 5] + + @property + def top_height(self): + """torch.Tensor: + A vector with the top height of each box in shape (N, ).""" + return self.bottom_height + self.height + + @property + def bottom_height(self): + """torch.Tensor: + A vector with bottom's height of each box in shape (N, ).""" + return self.tensor[:, 2] + + @property + def center(self): + """Calculate the center of all the boxes. + + Note: + In MMDetection3D's convention, the bottom center is + usually taken as the default center. + + The relative position of the centers in different kinds of + boxes are different, e.g., the relative center of a boxes is + (0.5, 1.0, 0.5) in camera and (0.5, 0.5, 0) in lidar. + It is recommended to use ``bottom_center`` or ``gravity_center`` + for clearer usage. + + Returns: + torch.Tensor: A tensor with center of each box in shape (N, 3). + """ + return self.bottom_center + + @property + def bottom_center(self): + """torch.Tensor: A tensor with center of each box in shape (N, 3).""" + return self.tensor[:, :3] + + @property + def gravity_center(self): + """torch.Tensor: A tensor with center of each box in shape (N, 3).""" + pass + + @property + def corners(self): + """torch.Tensor: + a tensor with 8 corners of each box in shape (N, 8, 3).""" + pass + + @property + def bev(self): + """torch.Tensor: 2D BEV box of each box with rotation + in XYWHR format, in shape (N, 5).""" + return self.tensor[:, [0, 1, 3, 4, 6]] + + @property + def nearest_bev(self): + """torch.Tensor: A tensor of 2D BEV box of each box + without rotation.""" + # Obtain BEV boxes with rotation in XYWHR format + bev_rotated_boxes = self.bev + # convert the rotation to a valid range + rotations = bev_rotated_boxes[:, -1] + normed_rotations = torch.abs(limit_period(rotations, 0.5, np.pi)) + + # find the center of boxes + conditions = (normed_rotations > np.pi / 4)[..., None] + bboxes_xywh = torch.where(conditions, bev_rotated_boxes[:, + [0, 1, 3, 2]], + bev_rotated_boxes[:, :4]) + + centers = bboxes_xywh[:, :2] + dims = bboxes_xywh[:, 2:] + bev_boxes = torch.cat([centers - dims / 2, centers + dims / 2], dim=-1) + return bev_boxes + + def in_range_bev(self, box_range): + """Check whether the boxes are in the given range. + + Args: + box_range (list | torch.Tensor): the range of box + (x_min, y_min, x_max, y_max) + + Note: + The original implementation of SECOND checks whether boxes in + a range by checking whether the points are in a convex + polygon, we reduce the burden for simpler cases. + + Returns: + torch.Tensor: Whether each box is inside the reference range. 
+ """ + in_range_flags = ((self.bev[:, 0] > box_range[0]) + & (self.bev[:, 1] > box_range[1]) + & (self.bev[:, 0] < box_range[2]) + & (self.bev[:, 1] < box_range[3])) + return in_range_flags + + @abstractmethod + def rotate(self, angle, points=None): + """Rotate boxes with points (optional) with the given angle or rotation + matrix. + + Args: + angle (float | torch.Tensor | np.ndarray): + Rotation angle or rotation matrix. + points (torch.Tensor | numpy.ndarray | + :obj:`BasePoints`, optional): + Points to rotate. Defaults to None. + """ + pass + + @abstractmethod + def flip(self, bev_direction='horizontal'): + """Flip the boxes in BEV along given BEV direction. + + Args: + bev_direction (str, optional): Direction by which to flip. + Can be chosen from 'horizontal' and 'vertical'. + Defaults to 'horizontal'. + """ + pass + + def translate(self, trans_vector): + """Translate boxes with the given translation vector. + + Args: + trans_vector (torch.Tensor): Translation vector of size (1, 3). + """ + if not isinstance(trans_vector, torch.Tensor): + trans_vector = self.tensor.new_tensor(trans_vector) + self.tensor[:, :3] += trans_vector + + def in_range_3d(self, box_range): + """Check whether the boxes are in the given range. + + Args: + box_range (list | torch.Tensor): The range of box + (x_min, y_min, z_min, x_max, y_max, z_max) + + Note: + In the original implementation of SECOND, checking whether + a box in the range checks whether the points are in a convex + polygon, we try to reduce the burden for simpler cases. + + Returns: + torch.Tensor: A binary vector indicating whether each box is + inside the reference range. + """ + in_range_flags = ((self.tensor[:, 0] > box_range[0]) + & (self.tensor[:, 1] > box_range[1]) + & (self.tensor[:, 2] > box_range[2]) + & (self.tensor[:, 0] < box_range[3]) + & (self.tensor[:, 1] < box_range[4]) + & (self.tensor[:, 2] < box_range[5])) + return in_range_flags + + @abstractmethod + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`Box3DMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BaseInstance3DBoxes`: The converted box of the same type + in the `dst` mode. + """ + pass + + def scale(self, scale_factor): + """Scale the box with horizontal and vertical scaling factors. + + Args: + scale_factors (float): Scale factors to scale the boxes. + """ + self.tensor[:, :6] *= scale_factor + self.tensor[:, 7:] *= scale_factor # velocity + + def limit_yaw(self, offset=0.5, period=np.pi): + """Limit the yaw to a given period and offset. + + Args: + offset (float, optional): The offset of the yaw. Defaults to 0.5. + period (float, optional): The expected period. Defaults to np.pi. + """ + self.tensor[:, 6] = limit_period(self.tensor[:, 6], offset, period) + + def nonempty(self, threshold=0.0): + """Find boxes that are non-empty. + + A box is considered empty, + if either of its side is no larger than threshold. + + Args: + threshold (float, optional): The threshold of minimal sizes. + Defaults to 0.0. + + Returns: + torch.Tensor: A binary vector which represents whether each + box is empty (False) or non-empty (True). 
+ """ + box = self.tensor + size_x = box[..., 3] + size_y = box[..., 4] + size_z = box[..., 5] + keep = ((size_x > threshold) + & (size_y > threshold) & (size_z > threshold)) + return keep + + def __getitem__(self, item): + """ + Note: + The following usage are allowed: + 1. `new_boxes = boxes[3]`: + return a `Boxes` that contains only one box. + 2. `new_boxes = boxes[2:10]`: + return a slice of boxes. + 3. `new_boxes = boxes[vector]`: + where vector is a torch.BoolTensor with `length = len(boxes)`. + Nonzero elements in the vector will be selected. + Note that the returned Boxes might share storage with this Boxes, + subject to Pytorch's indexing semantics. + + Returns: + :obj:`BaseInstance3DBoxes`: A new object of + :class:`BaseInstance3DBoxes` after indexing. + """ + original_type = type(self) + if isinstance(item, int): + return original_type( + self.tensor[item].view(1, -1), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + b = self.tensor[item] + assert b.dim() == 2, \ + f'Indexing on Boxes with {item} failed to return a matrix!' + return original_type(b, box_dim=self.box_dim, with_yaw=self.with_yaw) + + def __len__(self): + """int: Number of boxes in the current object.""" + return self.tensor.shape[0] + + def __repr__(self): + """str: Return a strings that describes the object.""" + return self.__class__.__name__ + '(\n ' + str(self.tensor) + ')' + + @classmethod + def cat(cls, boxes_list): + """Concatenate a list of Boxes into a single Boxes. + + Args: + boxes_list (list[:obj:`BaseInstance3DBoxes`]): List of boxes. + + Returns: + :obj:`BaseInstance3DBoxes`: The concatenated Boxes. + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all(isinstance(box, cls) for box in boxes_list) + + # use torch.cat (v.s. layers.cat) + # so the returned boxes never share storage with input + cat_boxes = cls( + torch.cat([b.tensor for b in boxes_list], dim=0), + box_dim=boxes_list[0].tensor.shape[1], + with_yaw=boxes_list[0].with_yaw) + return cat_boxes + + def to(self, device, *args, **kwargs): + """Convert current boxes to a specific device. + + Args: + device (str | :obj:`torch.device`): The name of the device. + + Returns: + :obj:`BaseInstance3DBoxes`: A new boxes object on the + specific device. + """ + original_type = type(self) + return original_type( + self.tensor.to(device, *args, **kwargs), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + + def clone(self): + """Clone the Boxes. + + Returns: + :obj:`BaseInstance3DBoxes`: Box object with the same properties + as self. + """ + original_type = type(self) + return original_type( + self.tensor.clone(), box_dim=self.box_dim, with_yaw=self.with_yaw) + + @property + def device(self): + """str: The device of the boxes are on.""" + return self.tensor.device + + def __iter__(self): + """Yield a box as a Tensor of shape (4,) at a time. + + Returns: + torch.Tensor: A box of shape (4,). + """ + yield from self.tensor + + @classmethod + def height_overlaps(cls, boxes1, boxes2, mode='iou'): + """Calculate height overlaps of two boxes. + + Note: + This function calculates the height overlaps between boxes1 and + boxes2, boxes1 and boxes2 should be in the same type. + + Args: + boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes. + boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes. + mode (str, optional): Mode of IoU calculation. Defaults to 'iou'. + + Returns: + torch.Tensor: Calculated iou of boxes. 
+ """ + assert isinstance(boxes1, BaseInstance3DBoxes) + assert isinstance(boxes2, BaseInstance3DBoxes) + assert type(boxes1) == type(boxes2), '"boxes1" and "boxes2" should' \ + f'be in the same type, got {type(boxes1)} and {type(boxes2)}.' + + boxes1_top_height = boxes1.top_height.view(-1, 1) + boxes1_bottom_height = boxes1.bottom_height.view(-1, 1) + boxes2_top_height = boxes2.top_height.view(1, -1) + boxes2_bottom_height = boxes2.bottom_height.view(1, -1) + + heighest_of_bottom = torch.max(boxes1_bottom_height, + boxes2_bottom_height) + lowest_of_top = torch.min(boxes1_top_height, boxes2_top_height) + overlaps_h = torch.clamp(lowest_of_top - heighest_of_bottom, min=0) + return overlaps_h + + @classmethod + def overlaps(cls, boxes1, boxes2, mode='iou'): + """Calculate 3D overlaps of two boxes. + + Note: + This function calculates the overlaps between ``boxes1`` and + ``boxes2``, ``boxes1`` and ``boxes2`` should be in the same type. + + Args: + boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes. + boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes. + mode (str, optional): Mode of iou calculation. Defaults to 'iou'. + + Returns: + torch.Tensor: Calculated 3D overlaps of the boxes. + """ + assert isinstance(boxes1, BaseInstance3DBoxes) + assert isinstance(boxes2, BaseInstance3DBoxes) + assert type(boxes1) == type(boxes2), '"boxes1" and "boxes2" should' \ + f'be in the same type, got {type(boxes1)} and {type(boxes2)}.' + + assert mode in ['iou', 'iof'] + + rows = len(boxes1) + cols = len(boxes2) + if rows * cols == 0: + return boxes1.tensor.new(rows, cols) + + # height overlap + overlaps_h = cls.height_overlaps(boxes1, boxes2) + + # Restrict the min values of W and H to avoid memory overflow in + # ``box_iou_rotated``. + boxes1_bev, boxes2_bev = boxes1.bev, boxes2.bev + boxes1_bev[:, 2:4] = boxes1_bev[:, 2:4].clamp(min=1e-4) + boxes2_bev[:, 2:4] = boxes2.bev[:, 2:4].clamp(min=1e-4) + + # bev overlap + iou2d = box_iou_rotated(boxes1_bev, boxes2_bev) + areas1 = (boxes1_bev[:, 2] * boxes1_bev[:, 3]).unsqueeze(1).expand( + rows, cols) + areas2 = (boxes2_bev[:, 2] * boxes2_bev[:, 3]).unsqueeze(0).expand( + rows, cols) + overlaps_bev = iou2d * (areas1 + areas2) / (1 + iou2d) + + # 3d overlaps + overlaps_3d = overlaps_bev.to(boxes1.device) * overlaps_h + + volume1 = boxes1.volume.view(-1, 1) + volume2 = boxes2.volume.view(1, -1) + + if mode == 'iou': + # the clamp func is used to avoid division of 0 + iou3d = overlaps_3d / torch.clamp( + volume1 + volume2 - overlaps_3d, min=1e-8) + else: + iou3d = overlaps_3d / torch.clamp(volume1, min=1e-8) + + return iou3d + + def new_box(self, data): + """Create a new box object with data. + + The new box and its tensor has the similar properties + as self and self.tensor, respectively. + + Args: + data (torch.Tensor | numpy.array | list): Data to be copied. + + Returns: + :obj:`BaseInstance3DBoxes`: A new bbox object with ``data``, + the object's other properties are similar to ``self``. + """ + new_tensor = self.tensor.new_tensor(data) \ + if not isinstance(data, torch.Tensor) else data.to(self.device) + original_type = type(self) + return original_type( + new_tensor, box_dim=self.box_dim, with_yaw=self.with_yaw) + + def points_in_boxes_part(self, points, boxes_override=None): + """Find the box in which each point is. + + Args: + points (torch.Tensor): Points in shape (1, M, 3) or (M, 3), + 3 dimensions are (x, y, z) in LiDAR or depth coordinate. + boxes_override (torch.Tensor, optional): Boxes to override + `self.tensor`. Defaults to None. 
+ + Returns: + torch.Tensor: The index of the first box that each point + is in, in shape (M, ). Default value is -1 + (if the point is not enclosed by any box). + + Note: + If a point is enclosed by multiple boxes, the index of the + first box will be returned. + """ + if boxes_override is not None: + boxes = boxes_override + else: + boxes = self.tensor + if points.dim() == 2: + points = points.unsqueeze(0) + box_idx = points_in_boxes_part(points, + boxes.unsqueeze(0).to( + points.device)).squeeze(0) + return box_idx + + def points_in_boxes_all(self, points, boxes_override=None): + """Find all boxes in which each point is. + + Args: + points (torch.Tensor): Points in shape (1, M, 3) or (M, 3), + 3 dimensions are (x, y, z) in LiDAR or depth coordinate. + boxes_override (torch.Tensor, optional): Boxes to override + `self.tensor`. Defaults to None. + + Returns: + torch.Tensor: A tensor indicating whether a point is in a box, + in shape (M, T). T is the number of boxes. Denote this + tensor as A, if the m^th point is in the t^th box, then + `A[m, t] == 1`, elsewise `A[m, t] == 0`. + """ + if boxes_override is not None: + boxes = boxes_override + else: + boxes = self.tensor + + points_clone = points.clone()[..., :3] + if points_clone.dim() == 2: + points_clone = points_clone.unsqueeze(0) + else: + assert points_clone.dim() == 3 and points_clone.shape[0] == 1 + + boxes = boxes.to(points_clone.device).unsqueeze(0) + box_idxs_of_pts = points_in_boxes_all(points_clone, boxes) + + return box_idxs_of_pts.squeeze(0) + + def points_in_boxes(self, points, boxes_override=None): + warnings.warn('DeprecationWarning: points_in_boxes is a ' + 'deprecated method, please consider using ' + 'points_in_boxes_part.') + return self.points_in_boxes_part(points, boxes_override) + + def points_in_boxes_batch(self, points, boxes_override=None): + warnings.warn('DeprecationWarning: points_in_boxes_batch is a ' + 'deprecated method, please consider using ' + 'points_in_boxes_all.') + return self.points_in_boxes_all(points, boxes_override) diff --git a/mmdet3d/structures/bbox_3d/box_3d_mode.py b/mmdet3d/structures/bbox_3d/box_3d_mode.py new file mode 100755 index 0000000..e44335a --- /dev/null +++ b/mmdet3d/structures/bbox_3d/box_3d_mode.py @@ -0,0 +1,258 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from enum import IntEnum, unique + +import numpy as np +import torch + +from .base_box3d import BaseInstance3DBoxes +from .cam_box3d import CameraInstance3DBoxes +from .depth_box3d import DepthInstance3DBoxes +from .lidar_box3d import LiDARInstance3DBoxes +from .utils import limit_period + + +@unique +class Box3DMode(IntEnum): + r"""Enum of different ways to represent a box. + + Coordinates in LiDAR: + + .. code-block:: none + + up z + ^ x front + | / + | / + left y <------ 0 + + The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + + Coordinates in camera: + + .. code-block:: none + + z front + / + / + 0 ------> x right + | + | + v + down y + + The relative coordinate of bottom center in a CAM box is (0.5, 1.0, 0.5), + and the yaw is around the y axis, thus the rotation axis=1. + + Coordinates in Depth mode: + + .. code-block:: none + + up z + ^ y front + | / + | / + 0 ------> x right + + The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. 
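The three conventions above are what `Box3DMode.convert` translates between. A minimal sketch (arbitrary box values; with `rt_mat=None` the default axis permutation for the given pair of modes is used):

# Sketch: convert a LiDAR box to the camera convention via Box3DMode.
import torch

from mmdet3d.structures import Box3DMode, LiDARInstance3DBoxes

lidar_boxes = LiDARInstance3DBoxes(
    torch.tensor([[10.0, 2.0, -1.5, 4.0, 1.8, 1.6, 0.3]]))
cam_boxes = Box3DMode.convert(lidar_boxes, Box3DMode.LIDAR, Box3DMode.CAM)
print(type(cam_boxes).__name__)  # CameraInstance3DBoxes
print(cam_boxes.tensor)          # (x, y, z) permuted and yaw remapped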
+ """ + + LIDAR = 0 + CAM = 1 + DEPTH = 2 + + @staticmethod + def convert(box, src, dst, rt_mat=None, with_yaw=True, correct_yaw=False): + """Convert boxes from `src` mode to `dst` mode. + + Args: + box (tuple | list | np.ndarray | + torch.Tensor | :obj:`BaseInstance3DBoxes`): + Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7. + src (:obj:`Box3DMode`): The src Box mode. + dst (:obj:`Box3DMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + with_yaw (bool, optional): If `box` is an instance of + :obj:`BaseInstance3DBoxes`, whether or not it has a yaw angle. + Defaults to True. + correct_yaw (bool): If the yaw is rotated by rt_mat. + + Returns: + (tuple | list | np.ndarray | torch.Tensor | + :obj:`BaseInstance3DBoxes`): + The converted box of the same type. + """ + if src == dst: + return box + + is_numpy = isinstance(box, np.ndarray) + is_Instance3DBoxes = isinstance(box, BaseInstance3DBoxes) + single_box = isinstance(box, (list, tuple)) + if single_box: + assert len(box) >= 7, ( + 'Box3DMode.convert takes either a k-tuple/list or ' + 'an Nxk array/tensor, where k >= 7') + arr = torch.tensor(box)[None, :] + else: + # avoid modifying the input box + if is_numpy: + arr = torch.from_numpy(np.asarray(box)).clone() + elif is_Instance3DBoxes: + arr = box.tensor.clone() + else: + arr = box.clone() + + if is_Instance3DBoxes: + with_yaw = box.with_yaw + + # convert box from `src` mode to `dst` mode. + x_size, y_size, z_size = arr[..., 3:4], arr[..., 4:5], arr[..., 5:6] + if with_yaw: + yaw = arr[..., 6:7] + if src == Box3DMode.LIDAR and dst == Box3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]]) + xyz_size = torch.cat([x_size, z_size, y_size], dim=-1) + if with_yaw: + if correct_yaw: + yaw_vector = torch.cat([ + torch.cos(yaw), + torch.sin(yaw), + torch.zeros_like(yaw) + ], + dim=1) + else: + yaw = -yaw - np.pi / 2 + yaw = limit_period(yaw, period=np.pi * 2) + elif src == Box3DMode.CAM and dst == Box3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]) + xyz_size = torch.cat([x_size, z_size, y_size], dim=-1) + if with_yaw: + if correct_yaw: + yaw_vector = torch.cat([ + torch.cos(-yaw), + torch.zeros_like(yaw), + torch.sin(-yaw) + ], + dim=1) + else: + yaw = -yaw - np.pi / 2 + yaw = limit_period(yaw, period=np.pi * 2) + elif src == Box3DMode.DEPTH and dst == Box3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) + xyz_size = torch.cat([x_size, z_size, y_size], dim=-1) + if with_yaw: + if correct_yaw: + yaw_vector = torch.cat([ + torch.cos(yaw), + torch.sin(yaw), + torch.zeros_like(yaw) + ], + dim=1) + else: + yaw = -yaw + elif src == Box3DMode.CAM and dst == Box3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]]) + xyz_size = torch.cat([x_size, z_size, y_size], dim=-1) + if with_yaw: + if correct_yaw: + yaw_vector = torch.cat([ + torch.cos(-yaw), + torch.zeros_like(yaw), + torch.sin(-yaw) + ], + dim=1) + else: + yaw = -yaw + elif src == Box3DMode.LIDAR and dst == Box3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]]) + xyz_size = torch.cat([x_size, y_size, z_size], dim=-1) + if 
with_yaw: + if correct_yaw: + yaw_vector = torch.cat([ + torch.cos(yaw), + torch.sin(yaw), + torch.zeros_like(yaw) + ], + dim=1) + else: + yaw = yaw + np.pi / 2 + yaw = limit_period(yaw, period=np.pi * 2) + elif src == Box3DMode.DEPTH and dst == Box3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]) + xyz_size = torch.cat([x_size, y_size, z_size], dim=-1) + if with_yaw: + if correct_yaw: + yaw_vector = torch.cat([ + torch.cos(yaw), + torch.sin(yaw), + torch.zeros_like(yaw) + ], + dim=1) + else: + yaw = yaw - np.pi / 2 + yaw = limit_period(yaw, period=np.pi * 2) + else: + raise NotImplementedError( + f'Conversion from Box3DMode {src} to {dst} ' + 'is not supported yet') + + if not isinstance(rt_mat, torch.Tensor): + rt_mat = arr.new_tensor(rt_mat) + if rt_mat.size(1) == 4: + extended_xyz = torch.cat( + [arr[..., :3], arr.new_ones(arr.size(0), 1)], dim=-1) + xyz = extended_xyz @ rt_mat.t() + else: + xyz = arr[..., :3] @ rt_mat.t() + + # Note: we only use rotation in rt_mat + # so don't need to extend yaw_vector + if with_yaw and correct_yaw: + rot_yaw_vector = yaw_vector @ rt_mat[:3, :3].t() + if dst == Box3DMode.CAM: + yaw = torch.atan2(-rot_yaw_vector[:, [2]], rot_yaw_vector[:, + [0]]) + elif dst in [Box3DMode.LIDAR, Box3DMode.DEPTH]: + yaw = torch.atan2(rot_yaw_vector[:, [1]], rot_yaw_vector[:, + [0]]) + yaw = limit_period(yaw, period=np.pi * 2) + + if with_yaw: + remains = arr[..., 7:] + arr = torch.cat([xyz[..., :3], xyz_size, yaw, remains], dim=-1) + else: + remains = arr[..., 6:] + arr = torch.cat([xyz[..., :3], xyz_size, remains], dim=-1) + + # convert arr to the original type + original_type = type(box) + if single_box: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + elif is_Instance3DBoxes: + if dst == Box3DMode.CAM: + target_type = CameraInstance3DBoxes + elif dst == Box3DMode.LIDAR: + target_type = LiDARInstance3DBoxes + elif dst == Box3DMode.DEPTH: + target_type = DepthInstance3DBoxes + else: + raise NotImplementedError( + f'Conversion to {dst} through {original_type}' + ' is not supported yet') + return target_type(arr, box_dim=arr.size(-1), with_yaw=with_yaw) + else: + return arr diff --git a/mmdet3d/structures/bbox_3d/cam_box3d.py b/mmdet3d/structures/bbox_3d/cam_box3d.py new file mode 100755 index 0000000..b3cf085 --- /dev/null +++ b/mmdet3d/structures/bbox_3d/cam_box3d.py @@ -0,0 +1,361 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet3d.structures.points import BasePoints +from .base_box3d import BaseInstance3DBoxes +from .utils import rotation_3d_in_axis, yaw2local + + +class CameraInstance3DBoxes(BaseInstance3DBoxes): + """3D boxes of instances in CAM coordinates. + + Coordinates in camera: + + .. code-block:: none + + z front (yaw=-0.5*pi) + / + / + 0 ------> x right (yaw=0) + | + | + v + down y + + The relative coordinate of bottom center in a CAM box is (0.5, 1.0, 0.5), + and the yaw is around the y axis, thus the rotation axis=1. + The yaw is 0 at the positive direction of x axis, and decreases from + the positive direction of x to the positive direction of z. + + Attributes: + tensor (torch.Tensor): Float matrix in shape (N, box_dim). + box_dim (int): Integer indicating the dimension of a box + Each row is (x, y, z, x_size, y_size, z_size, yaw, ...). + with_yaw (bool): If True, the value of yaw will be set to 0 as + axis-aligned boxes tightly enclosing the original boxes. 
+ """ + YAW_AXIS = 1 + + def __init__(self, + tensor, + box_dim=7, + with_yaw=True, + origin=(0.5, 1.0, 0.5)): + if isinstance(tensor, torch.Tensor): + device = tensor.device + else: + device = torch.device('cpu') + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that + # does not depend on the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, box_dim)).to( + dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == box_dim, tensor.size() + + if tensor.shape[-1] == 6: + # If the dimension of boxes is 6, we expand box_dim by padding + # 0 as a fake yaw and set with_yaw to False. + assert box_dim == 6 + fake_rot = tensor.new_zeros(tensor.shape[0], 1) + tensor = torch.cat((tensor, fake_rot), dim=-1) + self.box_dim = box_dim + 1 + self.with_yaw = False + else: + self.box_dim = box_dim + self.with_yaw = with_yaw + self.tensor = tensor.clone() + + if origin != (0.5, 1.0, 0.5): + dst = self.tensor.new_tensor((0.5, 1.0, 0.5)) + src = self.tensor.new_tensor(origin) + self.tensor[:, :3] += self.tensor[:, 3:6] * (dst - src) + + @property + def height(self): + """torch.Tensor: A vector with height of each box in shape (N, ).""" + return self.tensor[:, 4] + + @property + def top_height(self): + """torch.Tensor: + A vector with the top height of each box in shape (N, ).""" + # the positive direction is down rather than up + return self.bottom_height - self.height + + @property + def bottom_height(self): + """torch.Tensor: + A vector with bottom's height of each box in shape (N, ).""" + return self.tensor[:, 1] + + @property + def local_yaw(self): + """torch.Tensor: + A vector with local yaw of each box in shape (N, ). + local_yaw equals to alpha in kitti, which is commonly + used in monocular 3D object detection task, so only + :obj:`CameraInstance3DBoxes` has the property. + """ + yaw = self.yaw + loc = self.gravity_center + local_yaw = yaw2local(yaw, loc) + + return local_yaw + + @property + def gravity_center(self): + """torch.Tensor: A tensor with center of each box in shape (N, 3).""" + bottom_center = self.bottom_center + gravity_center = torch.zeros_like(bottom_center) + gravity_center[:, [0, 2]] = bottom_center[:, [0, 2]] + gravity_center[:, 1] = bottom_center[:, 1] - self.tensor[:, 4] * 0.5 + return gravity_center + + @property + def corners(self): + """torch.Tensor: Coordinates of corners of all the boxes in + shape (N, 8, 3). + + Convert the boxes to in clockwise order, in the form of + (x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0) + + .. code-block:: none + + front z + / + / + (x0, y0, z1) + ----------- + (x1, y0, z1) + /| / | + / | / | + (x0, y0, z0) + ----------- + + (x1, y1, z1) + | / . 
| / + | / origin | / + (x0, y1, z0) + ----------- + -------> x right + | (x1, y1, z0) + | + v + down y + """ + if self.tensor.numel() == 0: + return torch.empty([0, 8, 3], device=self.tensor.device) + + dims = self.dims + corners_norm = torch.from_numpy( + np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)).to( + device=dims.device, dtype=dims.dtype) + + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + # use relative origin [0.5, 1, 0.5] + corners_norm = corners_norm - dims.new_tensor([0.5, 1, 0.5]) + corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3]) + + corners = rotation_3d_in_axis( + corners, self.tensor[:, 6], axis=self.YAW_AXIS) + corners += self.tensor[:, :3].view(-1, 1, 3) + return corners + + @property + def bev(self): + """torch.Tensor: 2D BEV box of each box with rotation + in XYWHR format, in shape (N, 5).""" + bev = self.tensor[:, [0, 2, 3, 5, 6]].clone() + # positive direction of the gravity axis + # in cam coord system points to the earth + # so the bev yaw angle needs to be reversed + bev[:, -1] = -bev[:, -1] + return bev + + def rotate(self, angle, points=None): + """Rotate boxes with points (optional) with the given angle or rotation + matrix. + + Args: + angle (float | torch.Tensor | np.ndarray): + Rotation angle or rotation matrix. + points (torch.Tensor | np.ndarray | :obj:`BasePoints`, optional): + Points to rotate. Defaults to None. + + Returns: + tuple or None: When ``points`` is None, the function returns + None, otherwise it returns the rotated points and the + rotation matrix ``rot_mat_T``. + """ + if not isinstance(angle, torch.Tensor): + angle = self.tensor.new_tensor(angle) + + assert angle.shape == torch.Size([3, 3]) or angle.numel() == 1, \ + f'invalid rotation angle shape {angle.shape}' + + if angle.numel() == 1: + self.tensor[:, 0:3], rot_mat_T = rotation_3d_in_axis( + self.tensor[:, 0:3], + angle, + axis=self.YAW_AXIS, + return_mat=True) + else: + rot_mat_T = angle + rot_sin = rot_mat_T[2, 0] + rot_cos = rot_mat_T[0, 0] + angle = np.arctan2(rot_sin, rot_cos) + self.tensor[:, 0:3] = self.tensor[:, 0:3] @ rot_mat_T + + self.tensor[:, 6] += angle + + if points is not None: + if isinstance(points, torch.Tensor): + points[:, :3] = points[:, :3] @ rot_mat_T + elif isinstance(points, np.ndarray): + rot_mat_T = rot_mat_T.cpu().numpy() + points[:, :3] = np.dot(points[:, :3], rot_mat_T) + elif isinstance(points, BasePoints): + points.rotate(rot_mat_T) + else: + raise ValueError + return points, rot_mat_T + + def flip(self, bev_direction='horizontal', points=None): + """Flip the boxes in BEV along given BEV direction. + + In CAM coordinates, it flips the x (horizontal) or z (vertical) axis. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). + points (torch.Tensor | np.ndarray | :obj:`BasePoints`, optional): + Points to flip. Defaults to None. + + Returns: + torch.Tensor, numpy.ndarray or None: Flipped points. 
+ """ + assert bev_direction in ('horizontal', 'vertical') + if bev_direction == 'horizontal': + self.tensor[:, 0::7] = -self.tensor[:, 0::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + np.pi + elif bev_direction == 'vertical': + self.tensor[:, 2::7] = -self.tensor[:, 2::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + + if points is not None: + assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints)) + if isinstance(points, (torch.Tensor, np.ndarray)): + if bev_direction == 'horizontal': + points[:, 0] = -points[:, 0] + elif bev_direction == 'vertical': + points[:, 2] = -points[:, 2] + elif isinstance(points, BasePoints): + points.flip(bev_direction) + return points + + @classmethod + def height_overlaps(cls, boxes1, boxes2, mode='iou'): + """Calculate height overlaps of two boxes. + + This function calculates the height overlaps between ``boxes1`` and + ``boxes2``, where ``boxes1`` and ``boxes2`` should be in the same type. + + Args: + boxes1 (:obj:`CameraInstance3DBoxes`): Boxes 1 contain N boxes. + boxes2 (:obj:`CameraInstance3DBoxes`): Boxes 2 contain M boxes. + mode (str, optional): Mode of iou calculation. Defaults to 'iou'. + + Returns: + torch.Tensor: Calculated iou of boxes' heights. + """ + assert isinstance(boxes1, CameraInstance3DBoxes) + assert isinstance(boxes2, CameraInstance3DBoxes) + + boxes1_top_height = boxes1.top_height.view(-1, 1) + boxes1_bottom_height = boxes1.bottom_height.view(-1, 1) + boxes2_top_height = boxes2.top_height.view(1, -1) + boxes2_bottom_height = boxes2.bottom_height.view(1, -1) + + # positive direction of the gravity axis + # in cam coord system points to the earth + heighest_of_bottom = torch.min(boxes1_bottom_height, + boxes2_bottom_height) + lowest_of_top = torch.max(boxes1_top_height, boxes2_top_height) + overlaps_h = torch.clamp(heighest_of_bottom - lowest_of_top, min=0) + return overlaps_h + + def convert_to(self, dst, rt_mat=None, correct_yaw=False): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`Box3DMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from ``src`` coordinates to ``dst`` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + correct_yaw (bool): Whether to convert the yaw angle to the target + coordinate. Defaults to False. + Returns: + :obj:`BaseInstance3DBoxes`: + The converted box of the same type in the ``dst`` mode. + """ + from .box_3d_mode import Box3DMode + + # TODO: always set correct_yaw=True + return Box3DMode.convert( + box=self, + src=Box3DMode.CAM, + dst=dst, + rt_mat=rt_mat, + correct_yaw=correct_yaw) + + def points_in_boxes_part(self, points, boxes_override=None): + """Find the box in which each point is. + + Args: + points (torch.Tensor): Points in shape (1, M, 3) or (M, 3), + 3 dimensions are (x, y, z) in LiDAR or depth coordinate. + boxes_override (torch.Tensor, optional): Boxes to override + `self.tensor `. Defaults to None. + + Returns: + torch.Tensor: The index of the box in which + each point is, in shape (M, ). Default value is -1 + (if the point is not enclosed by any box). 
+ """ + from .coord_3d_mode import Coord3DMode + + points_lidar = Coord3DMode.convert(points, Coord3DMode.CAM, + Coord3DMode.LIDAR) + if boxes_override is not None: + boxes_lidar = boxes_override + else: + boxes_lidar = Coord3DMode.convert(self.tensor, Coord3DMode.CAM, + Coord3DMode.LIDAR) + + box_idx = super().points_in_boxes_part(points_lidar, boxes_lidar) + return box_idx + + def points_in_boxes_all(self, points, boxes_override=None): + """Find all boxes in which each point is. + + Args: + points (torch.Tensor): Points in shape (1, M, 3) or (M, 3), + 3 dimensions are (x, y, z) in LiDAR or depth coordinate. + boxes_override (torch.Tensor, optional): Boxes to override + `self.tensor `. Defaults to None. + + Returns: + torch.Tensor: The index of all boxes in which each point is, + in shape (B, M, T). + """ + from .coord_3d_mode import Coord3DMode + + points_lidar = Coord3DMode.convert(points, Coord3DMode.CAM, + Coord3DMode.LIDAR) + if boxes_override is not None: + boxes_lidar = boxes_override + else: + boxes_lidar = Coord3DMode.convert(self.tensor, Coord3DMode.CAM, + Coord3DMode.LIDAR) + + box_idx = super().points_in_boxes_all(points_lidar, boxes_lidar) + return box_idx diff --git a/mmdet3d/structures/bbox_3d/coord_3d_mode.py b/mmdet3d/structures/bbox_3d/coord_3d_mode.py new file mode 100755 index 0000000..3347ddc --- /dev/null +++ b/mmdet3d/structures/bbox_3d/coord_3d_mode.py @@ -0,0 +1,235 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from enum import IntEnum, unique + +import numpy as np +import torch + +from mmdet3d.structures.points import (BasePoints, CameraPoints, DepthPoints, + LiDARPoints) +from .base_box3d import BaseInstance3DBoxes +from .box_3d_mode import Box3DMode + + +@unique +class Coord3DMode(IntEnum): + r"""Enum of different ways to represent a box + and point cloud. + + Coordinates in LiDAR: + + .. code-block:: none + + up z + ^ x front + | / + | / + left y <------ 0 + + The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + + Coordinates in camera: + + .. code-block:: none + + z front + / + / + 0 ------> x right + | + | + v + down y + + The relative coordinate of bottom center in a CAM box is (0.5, 1.0, 0.5), + and the yaw is around the y axis, thus the rotation axis=1. + + Coordinates in Depth mode: + + .. code-block:: none + + up z + ^ y front + | / + | / + 0 ------> x right + + The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + """ + + LIDAR = 0 + CAM = 1 + DEPTH = 2 + + @staticmethod + def convert(input, src, dst, rt_mat=None, with_yaw=True, is_point=True): + """Convert boxes or points from `src` mode to `dst` mode. + + Args: + input (tuple | list | np.ndarray | torch.Tensor | + :obj:`BaseInstance3DBoxes` | :obj:`BasePoints`): + Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7. + src (:obj:`Box3DMode` | :obj:`Coord3DMode`): The source mode. + dst (:obj:`Box3DMode` | :obj:`Coord3DMode`): The target mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + with_yaw (bool): If `box` is an instance of + :obj:`BaseInstance3DBoxes`, whether or not it has a yaw angle. + Defaults to True. 
+ is_point (bool): If `input` is neither an instance of + :obj:`BaseInstance3DBoxes` nor an instance of + :obj:`BasePoints`, whether or not it is point data. + Defaults to True. + + Returns: + (tuple | list | np.ndarray | torch.Tensor | + :obj:`BaseInstance3DBoxes` | :obj:`BasePoints`): + The converted box of the same type. + """ + if isinstance(input, BaseInstance3DBoxes): + return Coord3DMode.convert_box( + input, src, dst, rt_mat=rt_mat, with_yaw=with_yaw) + elif isinstance(input, BasePoints): + return Coord3DMode.convert_point(input, src, dst, rt_mat=rt_mat) + elif isinstance(input, (tuple, list, np.ndarray, torch.Tensor)): + if is_point: + return Coord3DMode.convert_point( + input, src, dst, rt_mat=rt_mat) + else: + return Coord3DMode.convert_box( + input, src, dst, rt_mat=rt_mat, with_yaw=with_yaw) + else: + raise NotImplementedError + + @staticmethod + def convert_box(box, src, dst, rt_mat=None, with_yaw=True): + """Convert boxes from `src` mode to `dst` mode. + + Args: + box (tuple | list | np.ndarray | + torch.Tensor | :obj:`BaseInstance3DBoxes`): + Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7. + src (:obj:`Box3DMode`): The src Box mode. + dst (:obj:`Box3DMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + with_yaw (bool): If `box` is an instance of + :obj:`BaseInstance3DBoxes`, whether or not it has a yaw angle. + Defaults to True. + + Returns: + (tuple | list | np.ndarray | torch.Tensor | + :obj:`BaseInstance3DBoxes`): + The converted box of the same type. + """ + return Box3DMode.convert(box, src, dst, rt_mat=rt_mat) + + @staticmethod + def convert_point(point, src, dst, rt_mat=None): + """Convert points from `src` mode to `dst` mode. + + Args: + point (tuple | list | np.ndarray | + torch.Tensor | :obj:`BasePoints`): + Can be a k-tuple, k-list or an Nxk array/tensor. + src (:obj:`CoordMode`): The src Point mode. + dst (:obj:`CoordMode`): The target Point mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + (tuple | list | np.ndarray | torch.Tensor | :obj:`BasePoints`): + The converted point of the same type. + """ + if src == dst: + return point + + is_numpy = isinstance(point, np.ndarray) + is_InstancePoints = isinstance(point, BasePoints) + single_point = isinstance(point, (list, tuple)) + if single_point: + assert len(point) >= 3, ( + 'CoordMode.convert takes either a k-tuple/list or ' + 'an Nxk array/tensor, where k >= 3') + arr = torch.tensor(point)[None, :] + else: + # avoid modifying the input point + if is_numpy: + arr = torch.from_numpy(np.asarray(point)).clone() + elif is_InstancePoints: + arr = point.tensor.clone() + else: + arr = point.clone() + + # convert point from `src` mode to `dst` mode. 
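+        # With the default matrices below, e.g. LiDAR -> CAM, a point
+        # (x, y, z) in LiDAR coordinates (x front, y left, z up) is mapped
+        # to (-y, -z, x) in camera coordinates (x right, y down, z front);
+        # the other branches permute the axes analogously.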
+ if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]]) + elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]) + elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) + elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]]) + elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]]) + elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]) + else: + raise NotImplementedError( + f'Conversion from Coord3DMode {src} to {dst} ' + 'is not supported yet') + + if not isinstance(rt_mat, torch.Tensor): + rt_mat = arr.new_tensor(rt_mat) + if rt_mat.size(1) == 4: + extended_xyz = torch.cat( + [arr[..., :3], arr.new_ones(arr.size(0), 1)], dim=-1) + xyz = extended_xyz @ rt_mat.t() + else: + xyz = arr[..., :3] @ rt_mat.t() + + remains = arr[..., 3:] + arr = torch.cat([xyz[..., :3], remains], dim=-1) + + # convert arr to the original type + original_type = type(point) + if single_point: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + elif is_InstancePoints: + if dst == Coord3DMode.CAM: + target_type = CameraPoints + elif dst == Coord3DMode.LIDAR: + target_type = LiDARPoints + elif dst == Coord3DMode.DEPTH: + target_type = DepthPoints + else: + raise NotImplementedError( + f'Conversion to {dst} through {original_type}' + ' is not supported yet') + return target_type( + arr, + points_dim=arr.size(-1), + attribute_dims=point.attribute_dims) + else: + return arr diff --git a/mmdet3d/structures/bbox_3d/depth_box3d.py b/mmdet3d/structures/bbox_3d/depth_box3d.py new file mode 100755 index 0000000..5a1c32b --- /dev/null +++ b/mmdet3d/structures/bbox_3d/depth_box3d.py @@ -0,0 +1,270 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet3d.structures.points import BasePoints +from .base_box3d import BaseInstance3DBoxes +from .utils import rotation_3d_in_axis + + +class DepthInstance3DBoxes(BaseInstance3DBoxes): + """3D boxes of instances in Depth coordinates. + + Coordinates in Depth: + + .. code-block:: none + + up z y front (yaw=0.5*pi) + ^ ^ + | / + | / + 0 ------> x right (yaw=0) + + The relative coordinate of bottom center in a Depth box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + The yaw is 0 at the positive direction of x axis, and decreases from + the positive direction of x to the positive direction of y. + Also note that rotation of DepthInstance3DBoxes is counterclockwise, + which is reverse to the definition of the yaw angle (clockwise). + + A refactor is ongoing to make the three coordinate systems + easier to understand and convert between each other. + + Attributes: + tensor (torch.Tensor): Float matrix of N x box_dim. + box_dim (int): Integer indicates the dimension of a box + Each row is (x, y, z, x_size, y_size, z_size, yaw, ...). + with_yaw (bool): If True, the value of yaw will be set to 0 as minmax + boxes. 
+ """ + YAW_AXIS = 2 + + @property + def gravity_center(self): + """torch.Tensor: A tensor with center of each box in shape (N, 3).""" + bottom_center = self.bottom_center + gravity_center = torch.zeros_like(bottom_center) + gravity_center[:, :2] = bottom_center[:, :2] + gravity_center[:, 2] = bottom_center[:, 2] + self.tensor[:, 5] * 0.5 + return gravity_center + + @property + def corners(self): + """torch.Tensor: Coordinates of corners of all the boxes + in shape (N, 8, 3). + + Convert the boxes to corners in clockwise order, in form of + ``(x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0)`` + + .. code-block:: none + + up z + front y ^ + / | + / | + (x0, y1, z1) + ----------- + (x1, y1, z1) + /| / | + / | / | + (x0, y0, z1) + ----------- + + (x1, y1, z0) + | / . | / + | / origin | / + (x0, y0, z0) + ----------- + --------> right x + (x1, y0, z0) + """ + if self.tensor.numel() == 0: + return torch.empty([0, 8, 3], device=self.tensor.device) + + dims = self.dims + corners_norm = torch.from_numpy( + np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)).to( + device=dims.device, dtype=dims.dtype) + + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + # use relative origin (0.5, 0.5, 0) + corners_norm = corners_norm - dims.new_tensor([0.5, 0.5, 0]) + corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3]) + + # rotate around z axis + corners = rotation_3d_in_axis( + corners, self.tensor[:, 6], axis=self.YAW_AXIS) + corners += self.tensor[:, :3].view(-1, 1, 3) + return corners + + def rotate(self, angle, points=None): + """Rotate boxes with points (optional) with the given angle or rotation + matrix. + + Args: + angle (float | torch.Tensor | np.ndarray): + Rotation angle or rotation matrix. + points (torch.Tensor | np.ndarray | :obj:`BasePoints`, optional): + Points to rotate. Defaults to None. + + Returns: + tuple or None: When ``points`` is None, the function returns + None, otherwise it returns the rotated points and the + rotation matrix ``rot_mat_T``. + """ + if not isinstance(angle, torch.Tensor): + angle = self.tensor.new_tensor(angle) + + assert angle.shape == torch.Size([3, 3]) or angle.numel() == 1, \ + f'invalid rotation angle shape {angle.shape}' + + if angle.numel() == 1: + self.tensor[:, 0:3], rot_mat_T = rotation_3d_in_axis( + self.tensor[:, 0:3], + angle, + axis=self.YAW_AXIS, + return_mat=True) + else: + rot_mat_T = angle + rot_sin = rot_mat_T[0, 1] + rot_cos = rot_mat_T[0, 0] + angle = np.arctan2(rot_sin, rot_cos) + self.tensor[:, 0:3] = self.tensor[:, 0:3] @ rot_mat_T + + if self.with_yaw: + self.tensor[:, 6] += angle + else: + # for axis-aligned boxes, we take the new + # enclosing axis-aligned boxes after rotation + corners_rot = self.corners @ rot_mat_T + new_x_size = corners_rot[..., 0].max( + dim=1, keepdim=True)[0] - corners_rot[..., 0].min( + dim=1, keepdim=True)[0] + new_y_size = corners_rot[..., 1].max( + dim=1, keepdim=True)[0] - corners_rot[..., 1].min( + dim=1, keepdim=True)[0] + self.tensor[:, 3:5] = torch.cat((new_x_size, new_y_size), dim=-1) + + if points is not None: + if isinstance(points, torch.Tensor): + points[:, :3] = points[:, :3] @ rot_mat_T + elif isinstance(points, np.ndarray): + rot_mat_T = rot_mat_T.cpu().numpy() + points[:, :3] = np.dot(points[:, :3], rot_mat_T) + elif isinstance(points, BasePoints): + points.rotate(rot_mat_T) + else: + raise ValueError + return points, rot_mat_T + + def flip(self, bev_direction='horizontal', points=None): + """Flip the boxes in BEV along given BEV direction. 
+ + In Depth coordinates, it flips x (horizontal) or y (vertical) axis. + + Args: + bev_direction (str, optional): Flip direction + (horizontal or vertical). Defaults to 'horizontal'. + points (torch.Tensor | np.ndarray | :obj:`BasePoints`, optional): + Points to flip. Defaults to None. + + Returns: + torch.Tensor, numpy.ndarray or None: Flipped points. + """ + assert bev_direction in ('horizontal', 'vertical') + if bev_direction == 'horizontal': + self.tensor[:, 0::7] = -self.tensor[:, 0::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + np.pi + elif bev_direction == 'vertical': + self.tensor[:, 1::7] = -self.tensor[:, 1::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + + if points is not None: + assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints)) + if isinstance(points, (torch.Tensor, np.ndarray)): + if bev_direction == 'horizontal': + points[:, 0] = -points[:, 0] + elif bev_direction == 'vertical': + points[:, 1] = -points[:, 1] + elif isinstance(points, BasePoints): + points.flip(bev_direction) + return points + + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`Box3DMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from ``src`` coordinates to ``dst`` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`DepthInstance3DBoxes`: + The converted box of the same type in the ``dst`` mode. + """ + from .box_3d_mode import Box3DMode + return Box3DMode.convert( + box=self, src=Box3DMode.DEPTH, dst=dst, rt_mat=rt_mat) + + def enlarged_box(self, extra_width): + """Enlarge the length, width and height boxes. + + Args: + extra_width (float | torch.Tensor): Extra width to enlarge the box. + + Returns: + :obj:`DepthInstance3DBoxes`: Enlarged boxes. + """ + enlarged_boxes = self.tensor.clone() + enlarged_boxes[:, 3:6] += extra_width * 2 + # bottom center z minus extra_width + enlarged_boxes[:, 2] -= extra_width + return self.new_box(enlarged_boxes) + + def get_surface_line_center(self): + """Compute surface and line center of bounding boxes. + + Returns: + torch.Tensor: Surface and line center of bounding boxes. 
+ """ + obj_size = self.dims + center = self.gravity_center.view(-1, 1, 3) + batch_size = center.shape[0] + + rot_sin = torch.sin(-self.yaw) + rot_cos = torch.cos(-self.yaw) + rot_mat_T = self.yaw.new_zeros(tuple(list(self.yaw.shape) + [3, 3])) + rot_mat_T[..., 0, 0] = rot_cos + rot_mat_T[..., 0, 1] = -rot_sin + rot_mat_T[..., 1, 0] = rot_sin + rot_mat_T[..., 1, 1] = rot_cos + rot_mat_T[..., 2, 2] = 1 + + # Get the object surface center + offset = obj_size.new_tensor([[0, 0, 1], [0, 0, -1], [0, 1, 0], + [0, -1, 0], [1, 0, 0], [-1, 0, 0]]) + offset = offset.view(1, 6, 3) / 2 + surface_3d = (offset * + obj_size.view(batch_size, 1, 3).repeat(1, 6, 1)).reshape( + -1, 3) + + # Get the object line center + offset = obj_size.new_tensor([[1, 0, 1], [-1, 0, 1], [0, 1, 1], + [0, -1, 1], [1, 0, -1], [-1, 0, -1], + [0, 1, -1], [0, -1, -1], [1, 1, 0], + [1, -1, 0], [-1, 1, 0], [-1, -1, 0]]) + offset = offset.view(1, 12, 3) / 2 + + line_3d = (offset * + obj_size.view(batch_size, 1, 3).repeat(1, 12, 1)).reshape( + -1, 3) + + surface_rot = rot_mat_T.repeat(6, 1, 1) + surface_3d = torch.matmul(surface_3d.unsqueeze(-2), + surface_rot).squeeze(-2) + surface_center = center.repeat(1, 6, 1).reshape(-1, 3) + surface_3d + + line_rot = rot_mat_T.repeat(12, 1, 1) + line_3d = torch.matmul(line_3d.unsqueeze(-2), line_rot).squeeze(-2) + line_center = center.repeat(1, 12, 1).reshape(-1, 3) + line_3d + + return surface_center, line_center diff --git a/mmdet3d/structures/bbox_3d/lidar_box3d.py b/mmdet3d/structures/bbox_3d/lidar_box3d.py new file mode 100755 index 0000000..a176edf --- /dev/null +++ b/mmdet3d/structures/bbox_3d/lidar_box3d.py @@ -0,0 +1,215 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet3d.structures.points import BasePoints +from .base_box3d import BaseInstance3DBoxes +from .utils import rotation_3d_in_axis + + +class LiDARInstance3DBoxes(BaseInstance3DBoxes): + """3D boxes of instances in LIDAR coordinates. + + Coordinates in LiDAR: + + .. code-block:: none + + up z x front (yaw=0) + ^ ^ + | / + | / + (yaw=0.5*pi) left y <------ 0 + + The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + The yaw is 0 at the positive direction of x axis, and increases from + the positive direction of x to the positive direction of y. + + A refactor is ongoing to make the three coordinate systems + easier to understand and convert between each other. + + Attributes: + tensor (torch.Tensor): Float matrix of N x box_dim. + box_dim (int): Integer indicating the dimension of a box. + Each row is (x, y, z, x_size, y_size, z_size, yaw, ...). + with_yaw (bool): If True, the value of yaw will be set to 0 as minmax + boxes. + """ + YAW_AXIS = 2 + + @property + def gravity_center(self): + """torch.Tensor: A tensor with center of each box in shape (N, 3).""" + bottom_center = self.bottom_center + gravity_center = torch.zeros_like(bottom_center) + gravity_center[:, :2] = bottom_center[:, :2] + gravity_center[:, 2] = bottom_center[:, 2] + self.tensor[:, 5] * 0.5 + return gravity_center + + @property + def corners(self): + """torch.Tensor: Coordinates of corners of all the boxes + in shape (N, 8, 3). + + Convert the boxes to corners in clockwise order, in form of + ``(x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0)`` + + .. 
code-block:: none + + up z + front x ^ + / | + / | + (x1, y0, z1) + ----------- + (x1, y1, z1) + /| / | + / | / | + (x0, y0, z1) + ----------- + + (x1, y1, z0) + | / . | / + | / origin | / + left y<-------- + ----------- + (x0, y1, z0) + (x0, y0, z0) + """ + if self.tensor.numel() == 0: + return torch.empty([0, 8, 3], device=self.tensor.device) + + dims = self.dims + corners_norm = torch.from_numpy( + np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)).to( + device=dims.device, dtype=dims.dtype) + + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + # use relative origin [0.5, 0.5, 0] + corners_norm = corners_norm - dims.new_tensor([0.5, 0.5, 0]) + corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3]) + + # rotate around z axis + corners = rotation_3d_in_axis( + corners, self.tensor[:, 6], axis=self.YAW_AXIS) + corners += self.tensor[:, :3].view(-1, 1, 3) + return corners + + def rotate(self, angle, points=None): + """Rotate boxes with points (optional) with the given angle or rotation + matrix. + + Args: + angles (float | torch.Tensor | np.ndarray): + Rotation angle or rotation matrix. + points (torch.Tensor | np.ndarray | :obj:`BasePoints`, optional): + Points to rotate. Defaults to None. + + Returns: + tuple or None: When ``points`` is None, the function returns + None, otherwise it returns the rotated points and the + rotation matrix ``rot_mat_T``. + """ + if not isinstance(angle, torch.Tensor): + angle = self.tensor.new_tensor(angle) + + assert angle.shape == torch.Size([3, 3]) or angle.numel() == 1, \ + f'invalid rotation angle shape {angle.shape}' + + if angle.numel() == 1: + self.tensor[:, 0:3], rot_mat_T = rotation_3d_in_axis( + self.tensor[:, 0:3], + angle, + axis=self.YAW_AXIS, + return_mat=True) + else: + rot_mat_T = angle + rot_sin = rot_mat_T[0, 1] + rot_cos = rot_mat_T[0, 0] + angle = np.arctan2(rot_sin, rot_cos) + self.tensor[:, 0:3] = self.tensor[:, 0:3] @ rot_mat_T + + self.tensor[:, 6] += angle + + if self.tensor.shape[1] == 9: + # rotate velo vector + self.tensor[:, 7:9] = self.tensor[:, 7:9] @ rot_mat_T[:2, :2] + + if points is not None: + if isinstance(points, torch.Tensor): + points[:, :3] = points[:, :3] @ rot_mat_T + elif isinstance(points, np.ndarray): + rot_mat_T = rot_mat_T.cpu().numpy() + points[:, :3] = np.dot(points[:, :3], rot_mat_T) + elif isinstance(points, BasePoints): + points.rotate(rot_mat_T) + else: + raise ValueError + return points, rot_mat_T + + def flip(self, bev_direction='horizontal', points=None): + """Flip the boxes in BEV along given BEV direction. + + In LIDAR coordinates, it flips the y (horizontal) or x (vertical) axis. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). + points (torch.Tensor | np.ndarray | :obj:`BasePoints`, optional): + Points to flip. Defaults to None. + + Returns: + torch.Tensor, numpy.ndarray or None: Flipped points. 
+ """ + assert bev_direction in ('horizontal', 'vertical') + if bev_direction == 'horizontal': + self.tensor[:, 1::7] = -self.tensor[:, 1::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + elif bev_direction == 'vertical': + self.tensor[:, 0::7] = -self.tensor[:, 0::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + np.pi + + if points is not None: + assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints)) + if isinstance(points, (torch.Tensor, np.ndarray)): + if bev_direction == 'horizontal': + points[:, 1] = -points[:, 1] + elif bev_direction == 'vertical': + points[:, 0] = -points[:, 0] + elif isinstance(points, BasePoints): + points.flip(bev_direction) + return points + + def convert_to(self, dst, rt_mat=None, correct_yaw=False): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`Box3DMode`): the target Box mode + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from ``src`` coordinates to ``dst`` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + correct_yaw (bool): If convert the yaw angle to the target + coordinate. Defaults to False. + Returns: + :obj:`BaseInstance3DBoxes`: + The converted box of the same type in the ``dst`` mode. + """ + from .box_3d_mode import Box3DMode + return Box3DMode.convert( + box=self, + src=Box3DMode.LIDAR, + dst=dst, + rt_mat=rt_mat, + correct_yaw=correct_yaw) + + def enlarged_box(self, extra_width): + """Enlarge the length, width and height boxes. + + Args: + extra_width (float | torch.Tensor): Extra width to enlarge the box. + + Returns: + :obj:`LiDARInstance3DBoxes`: Enlarged boxes. + """ + enlarged_boxes = self.tensor.clone() + enlarged_boxes[:, 3:6] += extra_width * 2 + # bottom center z minus extra_width + enlarged_boxes[:, 2] -= extra_width + return self.new_box(enlarged_boxes) diff --git a/mmdet3d/structures/bbox_3d/utils.py b/mmdet3d/structures/bbox_3d/utils.py new file mode 100755 index 0000000..e75db84 --- /dev/null +++ b/mmdet3d/structures/bbox_3d/utils.py @@ -0,0 +1,357 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from logging import warning + +import numpy as np +import torch + +from mmdet3d.utils.array_converter import array_converter + + +@array_converter(apply_to=('val', )) +def limit_period(val, offset=0.5, period=np.pi): + """Limit the value into a period for periodic function. + + Args: + val (torch.Tensor | np.ndarray): The value to be converted. + offset (float, optional): Offset to set the value range. + Defaults to 0.5. + period ([type], optional): Period of the value. Defaults to np.pi. + + Returns: + (torch.Tensor | np.ndarray): Value in the range of + [-offset * period, (1-offset) * period] + """ + limited_val = val - torch.floor(val / period + offset) * period + return limited_val + + +@array_converter(apply_to=('points', 'angles')) +def rotation_3d_in_axis(points, + angles, + axis=0, + return_mat=False, + clockwise=False): + """Rotate points by angles according to axis. + + Args: + points (np.ndarray | torch.Tensor | list | tuple ): + Points of shape (N, M, 3). + angles (np.ndarray | torch.Tensor | list | tuple | float): + Vector of angles in shape (N,) + axis (int, optional): The axis to be rotated. Defaults to 0. + return_mat: Whether or not return the rotation matrix (transposed). + Defaults to False. + clockwise: Whether the rotation is clockwise. Defaults to False. 
+ + Raises: + ValueError: when the axis is not in range [0, 1, 2], it will + raise value error. + + Returns: + (torch.Tensor | np.ndarray): Rotated points in shape (N, M, 3). + """ + batch_free = len(points.shape) == 2 + if batch_free: + points = points[None] + + if isinstance(angles, float) or len(angles.shape) == 0: + angles = torch.full(points.shape[:1], angles) + + assert len(points.shape) == 3 and len(angles.shape) == 1 \ + and points.shape[0] == angles.shape[0], f'Incorrect shape of points ' \ + f'angles: {points.shape}, {angles.shape}' + + assert points.shape[-1] in [2, 3], \ + f'Points size should be 2 or 3 instead of {points.shape[-1]}' + + rot_sin = torch.sin(angles) + rot_cos = torch.cos(angles) + ones = torch.ones_like(rot_cos) + zeros = torch.zeros_like(rot_cos) + + if points.shape[-1] == 3: + if axis == 1 or axis == -2: + rot_mat_T = torch.stack([ + torch.stack([rot_cos, zeros, -rot_sin]), + torch.stack([zeros, ones, zeros]), + torch.stack([rot_sin, zeros, rot_cos]) + ]) + elif axis == 2 or axis == -1: + rot_mat_T = torch.stack([ + torch.stack([rot_cos, rot_sin, zeros]), + torch.stack([-rot_sin, rot_cos, zeros]), + torch.stack([zeros, zeros, ones]) + ]) + elif axis == 0 or axis == -3: + rot_mat_T = torch.stack([ + torch.stack([ones, zeros, zeros]), + torch.stack([zeros, rot_cos, rot_sin]), + torch.stack([zeros, -rot_sin, rot_cos]) + ]) + else: + raise ValueError(f'axis should in range ' + f'[-3, -2, -1, 0, 1, 2], got {axis}') + else: + rot_mat_T = torch.stack([ + torch.stack([rot_cos, rot_sin]), + torch.stack([-rot_sin, rot_cos]) + ]) + + if clockwise: + rot_mat_T = rot_mat_T.transpose(0, 1) + + if points.shape[0] == 0: + points_new = points + else: + points_new = torch.einsum('aij,jka->aik', points, rot_mat_T) + + if batch_free: + points_new = points_new.squeeze(0) + + if return_mat: + rot_mat_T = torch.einsum('jka->ajk', rot_mat_T) + if batch_free: + rot_mat_T = rot_mat_T.squeeze(0) + return points_new, rot_mat_T + else: + return points_new + + +@array_converter(apply_to=('boxes_xywhr', )) +def xywhr2xyxyr(boxes_xywhr): + """Convert a rotated boxes in XYWHR format to XYXYR format. + + Args: + boxes_xywhr (torch.Tensor | np.ndarray): Rotated boxes in XYWHR format. + + Returns: + (torch.Tensor | np.ndarray): Converted boxes in XYXYR format. + """ + boxes = torch.zeros_like(boxes_xywhr) + half_w = boxes_xywhr[..., 2] / 2 + half_h = boxes_xywhr[..., 3] / 2 + + boxes[..., 0] = boxes_xywhr[..., 0] - half_w + boxes[..., 1] = boxes_xywhr[..., 1] - half_h + boxes[..., 2] = boxes_xywhr[..., 0] + half_w + boxes[..., 3] = boxes_xywhr[..., 1] + half_h + boxes[..., 4] = boxes_xywhr[..., 4] + return boxes + + +def get_box_type(box_type): + """Get the type and mode of box structure. + + Args: + box_type (str): The type of box structure. + The valid value are "LiDAR", "Camera", or "Depth". + + Raises: + ValueError: A ValueError is raised when `box_type` + does not belong to the three valid types. + + Returns: + tuple: Box type and box mode. 
+ """ + from .box_3d_mode import (Box3DMode, CameraInstance3DBoxes, + DepthInstance3DBoxes, LiDARInstance3DBoxes) + box_type_lower = box_type.lower() + if box_type_lower == 'lidar': + box_type_3d = LiDARInstance3DBoxes + box_mode_3d = Box3DMode.LIDAR + elif box_type_lower == 'camera': + box_type_3d = CameraInstance3DBoxes + box_mode_3d = Box3DMode.CAM + elif box_type_lower == 'depth': + box_type_3d = DepthInstance3DBoxes + box_mode_3d = Box3DMode.DEPTH + else: + raise ValueError('Only "box_type" of "camera", "lidar", "depth"' + f' are supported, got {box_type}') + + return box_type_3d, box_mode_3d + + +@array_converter(apply_to=('points_3d', 'proj_mat')) +def points_cam2img(points_3d, proj_mat, with_depth=False): + """Project points in camera coordinates to image coordinates. + + Args: + points_3d (torch.Tensor | np.ndarray): Points in shape (N, 3) + proj_mat (torch.Tensor | np.ndarray): + Transformation matrix between coordinates. + with_depth (bool, optional): Whether to keep depth in the output. + Defaults to False. + + Returns: + (torch.Tensor | np.ndarray): Points in image coordinates, + with shape [N, 2] if `with_depth=False`, else [N, 3]. + """ + points_shape = list(points_3d.shape) + points_shape[-1] = 1 + + assert len(proj_mat.shape) == 2, 'The dimension of the projection'\ + f' matrix should be 2 instead of {len(proj_mat.shape)}.' + d1, d2 = proj_mat.shape[:2] + assert (d1 == 3 and d2 == 3) or (d1 == 3 and d2 == 4) or ( + d1 == 4 and d2 == 4), 'The shape of the projection matrix'\ + f' ({d1}*{d2}) is not supported.' + if d1 == 3: + proj_mat_expanded = torch.eye( + 4, device=proj_mat.device, dtype=proj_mat.dtype) + proj_mat_expanded[:d1, :d2] = proj_mat + proj_mat = proj_mat_expanded + + # previous implementation use new_zeros, new_one yields better results + points_4 = torch.cat([points_3d, points_3d.new_ones(points_shape)], dim=-1) + + point_2d = points_4 @ proj_mat.T + point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] + + if with_depth: + point_2d_res = torch.cat([point_2d_res, point_2d[..., 2:3]], dim=-1) + + return point_2d_res + + +@array_converter(apply_to=('points', 'cam2img')) +def points_img2cam(points, cam2img): + """Project points in image coordinates to camera coordinates. + + Args: + points (torch.Tensor): 2.5D points in 2D images, [N, 3], + 3 corresponds with x, y in the image and depth. + cam2img (torch.Tensor): Camera intrinsic matrix. The shape can be + [3, 3], [3, 4] or [4, 4]. + + Returns: + torch.Tensor: points in 3D space. [N, 3], + 3 corresponds with x, y, z in 3D space. + """ + assert cam2img.shape[0] <= 4 + assert cam2img.shape[1] <= 4 + assert points.shape[1] == 3 + + xys = points[:, :2] + depths = points[:, 2].view(-1, 1) + unnormed_xys = torch.cat([xys * depths, depths], dim=1) + + pad_cam2img = torch.eye(4, dtype=xys.dtype, device=xys.device) + pad_cam2img[:cam2img.shape[0], :cam2img.shape[1]] = cam2img + inv_pad_cam2img = torch.inverse(pad_cam2img).transpose(0, 1) + + # Do operation in homogeneous coordinates. + num_points = unnormed_xys.shape[0] + homo_xys = torch.cat([unnormed_xys, xys.new_ones((num_points, 1))], dim=1) + points3D = torch.mm(homo_xys, inv_pad_cam2img)[:, :3] + + return points3D + + +def mono_cam_box2vis(cam_box): + """This is a post-processing function on the bboxes from Mono-3D task. If + we want to perform projection visualization, we need to: + + 1. rotate the box along x-axis for np.pi / 2 (roll) + 2. change orientation from local yaw to global yaw + 3. 
convert yaw by (np.pi / 2 - yaw) + + After applying this function, we can project and draw it on 2D images. + + Args: + cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate + system before conversion. Could be gt bbox loaded from dataset + or network prediction output. + + Returns: + :obj:`CameraInstance3DBoxes`: Box after conversion. + """ + warning.warn('DeprecationWarning: The hack of yaw and dimension in the ' + 'monocular 3D detection on nuScenes has been removed. The ' + 'function mono_cam_box2vis will be deprecated.') + from . import CameraInstance3DBoxes + assert isinstance(cam_box, CameraInstance3DBoxes), \ + 'input bbox should be CameraInstance3DBoxes!' + + loc = cam_box.gravity_center + dim = cam_box.dims + yaw = cam_box.yaw + feats = cam_box.tensor[:, 7:] + # rotate along x-axis for np.pi / 2 + # see also here: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L557 # noqa + dim[:, [1, 2]] = dim[:, [2, 1]] + # change local yaw to global yaw for visualization + # refer to https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L164-L166 # noqa + yaw += torch.atan2(loc[:, 0], loc[:, 2]) + # convert yaw by (-yaw - np.pi / 2) + # this is because mono 3D box class such as `NuScenesBox` has different + # definition of rotation with our `CameraInstance3DBoxes` + yaw = -yaw - np.pi / 2 + cam_box = torch.cat([loc, dim, yaw[:, None], feats], dim=1) + cam_box = CameraInstance3DBoxes( + cam_box, box_dim=cam_box.shape[-1], origin=(0.5, 0.5, 0.5)) + + return cam_box + + +def get_proj_mat_by_coord_type(img_meta, coord_type): + """Obtain image features using points. + + Args: + img_meta (dict): Meta info. + coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'. + Can be case-insensitive. + + Returns: + torch.Tensor: transformation matrix. + """ + coord_type = coord_type.upper() + mapping = {'LIDAR': 'lidar2img', 'DEPTH': 'depth2img', 'CAMERA': 'cam2img'} + assert coord_type in mapping.keys() + return img_meta[mapping[coord_type]] + + +def yaw2local(yaw, loc): + """Transform global yaw to local yaw (alpha in kitti) in camera + coordinates, ranges from -pi to pi. + + Args: + yaw (torch.Tensor): A vector with local yaw of each box. + shape: (N, ) + loc (torch.Tensor): gravity center of each box. + shape: (N, 3) + + Returns: + torch.Tensor: local yaw (alpha in kitti). + """ + local_yaw = yaw - torch.atan2(loc[:, 0], loc[:, 2]) + larger_idx = (local_yaw > np.pi).nonzero(as_tuple=False) + small_idx = (local_yaw < -np.pi).nonzero(as_tuple=False) + if len(larger_idx) != 0: + local_yaw[larger_idx] -= 2 * np.pi + if len(small_idx) != 0: + local_yaw[small_idx] += 2 * np.pi + + return local_yaw + + +def get_lidar2img(cam2img, lidar2cam): + """Get the projection matrix of lidar2img. + + Args: + cam2img (torch.Tensor): A 3x3 or 4x4 projection matrix. + lidar2cam (torch.Tensor): A 3x3 or 4x4 projection matrix. + + Returns: + torch.Tensor: transformation matrix with shape 4x4. + """ + if cam2img.shape == (3, 3): + temp = cam2img.new_zeros(4, 4) + temp[:3, :3] = cam2img + cam2img = temp + + if lidar2cam.shape == (3, 3): + temp = lidar2cam.new_zeros(4, 4) + temp[:3, :3] = lidar2cam + lidar2cam = temp + return torch.matmul(cam2img, lidar2cam) diff --git a/mmdet3d/structures/det3d_data_sample.py b/mmdet3d/structures/det3d_data_sample.py new file mode 100755 index 0000000..54228a7 --- /dev/null +++ b/mmdet3d/structures/det3d_data_sample.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
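# --- Editorial example (not part of the patch) ----------------------------
# A minimal sketch of the helpers defined in the bbox_3d utils module added
# above (limit_period, rotation_3d_in_axis, get_lidar2img). The tensor values
# are synthetic and only illustrate shapes and conventions; the import path
# follows the file location introduced by this patch.
import numpy as np
import torch

from mmdet3d.structures.bbox_3d.utils import (get_lidar2img, limit_period,
                                              rotation_3d_in_axis)

# Wrap yaw angles into [-pi, pi) by using a full 2*pi period.
yaw = torch.tensor([0.2, 3.5, -4.0])
print(limit_period(yaw, offset=0.5, period=2 * np.pi))

# Rotate two sets of 8 box corners around the z axis (axis=2).
corners = torch.rand(2, 8, 3)        # (N, M, 3)
angles = torch.tensor([0.3, -0.7])   # (N, )
rotated = rotation_3d_in_axis(corners, angles, axis=2)
print(rotated.shape)                 # torch.Size([2, 8, 3])

# Compose a lidar-to-image projection from a 3x3 intrinsic and a 4x4 extrinsic.
cam2img = torch.eye(3)
lidar2cam = torch.eye(4)
print(get_lidar2img(cam2img, lidar2cam).shape)   # torch.Size([4, 4])
# ---------------------------------------------------------------------------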
+from typing import Dict, List, Optional, Tuple, Union + +import torch +from mmdet.structures import DetDataSample +from mmengine.structures import InstanceData + +from .point_data import PointData + + +class Det3DDataSample(DetDataSample): + """A data structure interface of MMDetection3D. They are used as interfaces + between different components. + + The attributes in ``Det3DDataSample`` are divided into several parts: + + - ``proposals`` (InstanceData): Region proposals used in two-stage + detectors. + - ``ignored_instances`` (InstanceData): Instances to be ignored during + training/testing. + - ``gt_instances_3d`` (InstanceData): Ground truth of 3D instance + annotations. + - ``gt_instances`` (InstanceData): Ground truth of 2D instance + annotations. + - ``pred_instances_3d`` (InstanceData): 3D instances of model + predictions. + - For point-cloud 3D object detection task whose input modality is + `use_lidar=True, use_camera=False`, the 3D predictions results are + saved in `pred_instances_3d`. + - For vision-only (monocular/multi-view) 3D object detection task + whose input modality is `use_lidar=False, use_camera=True`, the 3D + predictions are saved in `pred_instances_3d`. + - ``pred_instances`` (InstanceData): 2D instances of model predictions. + - For multi-modality 3D detection task whose input modality is + `use_lidar=True, use_camera=True`, the 2D predictions are saved in + `pred_instances`. + - ``pts_pred_instances_3d`` (InstanceData): 3D instances of model + predictions based on point cloud. + - For multi-modality 3D detection task whose input modality is + `use_lidar=True, use_camera=True`, the 3D predictions based on + point cloud are saved in `pts_pred_instances_3d` to distinguish + with `img_pred_instances_3d` which based on image. + - ``img_pred_instances_3d`` (InstanceData): 3D instances of model + predictions based on image. + - For multi-modality 3D detection task whose input modality is + `use_lidar=True, use_camera=True`, the 3D predictions based on + image are saved in `img_pred_instances_3d` to distinguish with + `pts_pred_instances_3d` which based on point cloud. + - ``gt_pts_seg`` (PointData): Ground truth of point cloud segmentation. + - ``pred_pts_seg`` (PointData): Prediction of point cloud segmentation. + - ``eval_ann_info`` (dict or None): Raw annotation, which will be + passed to evaluator and do the online evaluation. + + Examples: + >>> import torch + >>> from mmengine.structures import InstanceData + + >>> from mmdet3d.structures import Det3DDataSample + >>> from mmdet3d.structures import BaseInstance3DBoxes + + >>> data_sample = Det3DDataSample() + >>> meta_info = dict( + ... img_shape=(800, 1196, 3), + ... pad_shape=(800, 1216, 3)) + >>> gt_instances_3d = InstanceData(metainfo=meta_info) + >>> gt_instances_3d.bboxes_3d = BaseInstance3DBoxes(torch.rand((5, 7))) + >>> gt_instances_3d.labels_3d = torch.randint(0, 3, (5,)) + >>> data_sample.gt_instances_3d = gt_instances_3d + >>> assert 'img_shape' in data_sample.gt_instances_3d.metainfo_keys() + >>> len(data_sample.gt_instances_3d) + 5 + >>> print(data_sample) + + ) at 0x7f7e2a0e8640> + >>> pred_instances = InstanceData(metainfo=meta_info) + >>> pred_instances.bboxes = torch.rand((5, 4)) + >>> pred_instances.scores = torch.rand((5, )) + >>> data_sample = Det3DDataSample(pred_instances=pred_instances) + >>> assert 'pred_instances' in data_sample + + >>> pred_instances_3d = InstanceData(metainfo=meta_info) + >>> pred_instances_3d.bboxes_3d = BaseInstance3DBoxes( + ... 
torch.rand((5, 7))) + >>> pred_instances_3d.scores_3d = torch.rand((5, )) + >>> pred_instances_3d.labels_3d = torch.rand((5, )) + >>> data_sample = Det3DDataSample(pred_instances_3d=pred_instances_3d) + >>> assert 'pred_instances_3d' in data_sample + + >>> data_sample = Det3DDataSample() + >>> gt_instances_3d_data = dict( + ... bboxes_3d=BaseInstance3DBoxes(torch.rand((2, 7))), + ... labels_3d=torch.rand(2)) + >>> gt_instances_3d = InstanceData(**gt_instances_3d_data) + >>> data_sample.gt_instances_3d = gt_instances_3d + >>> assert 'gt_instances_3d' in data_sample + >>> assert 'bboxes_3d' in data_sample.gt_instances_3d + + >>> from mmdet3d.structures import PointData + >>> data_sample = Det3DDataSample() + >>> gt_pts_seg_data = dict( + ... pts_instance_mask=torch.rand(2), + ... pts_semantic_mask=torch.rand(2)) + >>> data_sample.gt_pts_seg = PointData(**gt_pts_seg_data) + >>> print(data_sample) + + ) at 0x7f7e29ff0d60> + """ + + @property + def gt_instances_3d(self) -> InstanceData: + return self._gt_instances_3d + + @gt_instances_3d.setter + def gt_instances_3d(self, value: InstanceData) -> None: + self.set_field(value, '_gt_instances_3d', dtype=InstanceData) + + @gt_instances_3d.deleter + def gt_instances_3d(self) -> None: + del self._gt_instances_3d + + @property + def pred_instances_3d(self) -> InstanceData: + return self._pred_instances_3d + + @pred_instances_3d.setter + def pred_instances_3d(self, value: InstanceData) -> None: + self.set_field(value, '_pred_instances_3d', dtype=InstanceData) + + @pred_instances_3d.deleter + def pred_instances_3d(self) -> None: + del self._pred_instances_3d + + @property + def pts_pred_instances_3d(self) -> InstanceData: + return self._pts_pred_instances_3d + + @pts_pred_instances_3d.setter + def pts_pred_instances_3d(self, value: InstanceData) -> None: + self.set_field(value, '_pts_pred_instances_3d', dtype=InstanceData) + + @pts_pred_instances_3d.deleter + def pts_pred_instances_3d(self) -> None: + del self._pts_pred_instances_3d + + @property + def img_pred_instances_3d(self) -> InstanceData: + return self._img_pred_instances_3d + + @img_pred_instances_3d.setter + def img_pred_instances_3d(self, value: InstanceData) -> None: + self.set_field(value, '_img_pred_instances_3d', dtype=InstanceData) + + @img_pred_instances_3d.deleter + def img_pred_instances_3d(self) -> None: + del self._img_pred_instances_3d + + @property + def gt_pts_seg(self) -> PointData: + return self._gt_pts_seg + + @gt_pts_seg.setter + def gt_pts_seg(self, value: PointData) -> None: + self.set_field(value, '_gt_pts_seg', dtype=PointData) + + @gt_pts_seg.deleter + def gt_pts_seg(self) -> None: + del self._gt_pts_seg + + @property + def pred_pts_seg(self) -> PointData: + return self._pred_pts_seg + + @pred_pts_seg.setter + def pred_pts_seg(self, value: PointData) -> None: + self.set_field(value, '_pred_pts_seg', dtype=PointData) + + @pred_pts_seg.deleter + def pred_pts_seg(self) -> None: + del self._pred_pts_seg + + +SampleList = List[Det3DDataSample] +OptSampleList = Optional[SampleList] +ForwardResults = Union[Dict[str, torch.Tensor], List[Det3DDataSample], + Tuple[torch.Tensor], torch.Tensor] diff --git a/mmdet3d/structures/ops/__init__.py b/mmdet3d/structures/ops/__init__.py new file mode 100755 index 0000000..d71ec30 --- /dev/null +++ b/mmdet3d/structures/ops/__init__.py @@ -0,0 +1,38 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
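# --- Editorial example (not part of the patch) ----------------------------
# A short sketch of how the Det3DDataSample defined above is populated; it
# mirrors the class docstring. Direct module imports are used to match the
# file paths in this patch (the same names are typically also re-exported
# from mmdet3d.structures); the box values are arbitrary.
import torch
from mmengine.structures import InstanceData

from mmdet3d.structures.bbox_3d.lidar_box3d import LiDARInstance3DBoxes
from mmdet3d.structures.det3d_data_sample import Det3DDataSample

data_sample = Det3DDataSample(metainfo=dict(img_shape=(800, 1196, 3)))

gt_instances_3d = InstanceData()
gt_instances_3d.bboxes_3d = LiDARInstance3DBoxes(torch.rand(4, 7))
gt_instances_3d.labels_3d = torch.randint(0, 3, (4, ))
data_sample.gt_instances_3d = gt_instances_3d   # stored via the typed setter

assert 'gt_instances_3d' in data_sample
print(len(data_sample.gt_instances_3d))         # 4
# ---------------------------------------------------------------------------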
+# yapf:disable +from .box_np_ops import (box2d_to_corner_jit, box3d_to_bbox, + box_camera_to_lidar, boxes3d_to_corners3d_lidar, + camera_to_lidar, center_to_corner_box2d, + center_to_corner_box3d, center_to_minmax_2d, + corner_to_standup_nd_jit, corner_to_surfaces_3d, + corner_to_surfaces_3d_jit, corners_nd, + create_anchors_3d_range, depth_to_lidar_points, + depth_to_points, get_frustum, iou_jit, + minmax_to_corner_2d, points_in_convex_polygon_3d_jit, + points_in_convex_polygon_jit, points_in_rbbox, + projection_matrix_to_CRT_kitti, rbbox2d_to_near_bbox, + remove_outside_points, rotation_points_single_angle, + surface_equ_3d) +# yapf:enable +from .iou3d_calculator import (AxisAlignedBboxOverlaps3D, BboxOverlaps3D, + BboxOverlapsNearest3D, + axis_aligned_bbox_overlaps_3d, bbox_overlaps_3d, + bbox_overlaps_nearest_3d) +from .transforms import bbox3d2result, bbox3d2roi, bbox3d_mapping_back + +__all__ = [ + 'box2d_to_corner_jit', 'box3d_to_bbox', 'box_camera_to_lidar', + 'boxes3d_to_corners3d_lidar', 'camera_to_lidar', 'center_to_corner_box2d', + 'center_to_corner_box3d', 'center_to_minmax_2d', + 'corner_to_standup_nd_jit', 'corner_to_surfaces_3d', + 'corner_to_surfaces_3d_jit', 'corners_nd', 'create_anchors_3d_range', + 'depth_to_lidar_points', 'depth_to_points', 'get_frustum', 'iou_jit', + 'minmax_to_corner_2d', 'points_in_convex_polygon_3d_jit', + 'points_in_convex_polygon_jit', 'points_in_rbbox', + 'projection_matrix_to_CRT_kitti', 'rbbox2d_to_near_bbox', + 'remove_outside_points', 'rotation_points_single_angle', 'surface_equ_3d', + 'BboxOverlapsNearest3D', 'BboxOverlaps3D', 'bbox_overlaps_nearest_3d', + 'bbox_overlaps_3d', 'AxisAlignedBboxOverlaps3D', + 'axis_aligned_bbox_overlaps_3d', 'bbox3d_mapping_back', 'bbox3d2roi', + 'bbox3d2result' +] diff --git a/mmdet3d/structures/ops/box_np_ops.py b/mmdet3d/structures/ops/box_np_ops.py new file mode 100755 index 0000000..24eaae7 --- /dev/null +++ b/mmdet3d/structures/ops/box_np_ops.py @@ -0,0 +1,828 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# TODO: clean the functions in this file and move the APIs into box bbox_3d +# in the future +# NOTICE: All functions in this file are valid for LiDAR or depth boxes only +# if we use default parameters. + +import numba +import numpy as np + +from mmdet3d.structures.bbox_3d import (limit_period, points_cam2img, + rotation_3d_in_axis) + + +def camera_to_lidar(points, r_rect, velo2cam): + """Convert points in camera coordinate to lidar coordinate. + + Note: + This function is for KITTI only. + + Args: + points (np.ndarray, shape=[N, 3]): Points in camera coordinate. + r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in + specific camera coordinate (e.g. CAM2) to CAM0. + velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in + camera coordinate to lidar coordinate. + + Returns: + np.ndarray, shape=[N, 3]: Points in lidar coordinate. + """ + points_shape = list(points.shape[0:-1]) + if points.shape[-1] == 3: + points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) + lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) + return lidar_points[..., :3] + + +def box_camera_to_lidar(data, r_rect, velo2cam): + """Convert boxes in camera coordinate to lidar coordinate. + + Note: + This function is for KITTI only. + + Args: + data (np.ndarray, shape=[N, 7]): Boxes in camera coordinate. + r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in + specific camera coordinate (e.g. CAM2) to CAM0. 
+ velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in + camera coordinate to lidar coordinate. + + Returns: + np.ndarray, shape=[N, 3]: Boxes in lidar coordinate. + """ + xyz = data[:, 0:3] + x_size, y_size, z_size = data[:, 3:4], data[:, 4:5], data[:, 5:6] + r = data[:, 6:7] + xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) + # yaw and dims also needs to be converted + r_new = -r - np.pi / 2 + r_new = limit_period(r_new, period=np.pi * 2) + return np.concatenate([xyz_lidar, x_size, z_size, y_size, r_new], axis=1) + + +def corners_nd(dims, origin=0.5): + """Generate relative box corners based on length per dim and origin point. + + Args: + dims (np.ndarray, shape=[N, ndim]): Array of length per dim + origin (list or array or float, optional): origin point relate to + smallest point. Defaults to 0.5 + + Returns: + np.ndarray, shape=[N, 2 ** ndim, ndim]: Returned corners. + point layout example: (2d) x0y0, x0y1, x1y0, x1y1; + (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 + where x0 < x1, y0 < y1, z0 < z1. + """ + ndim = int(dims.shape[1]) + corners_norm = np.stack( + np.unravel_index(np.arange(2**ndim), [2] * ndim), + axis=1).astype(dims.dtype) + # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 + # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 + # so need to convert to a format which is convenient to do other computing. + # for 2d boxes, format is clockwise start with minimum point + # for 3d boxes, please draw lines by your hand. + if ndim == 2: + # generate clockwise box corners + corners_norm = corners_norm[[0, 1, 3, 2]] + elif ndim == 3: + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) + corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape( + [1, 2**ndim, ndim]) + return corners + + +def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): + """Convert kitti locations, dimensions and angles to corners. + format: center(xy), dims(xy), angles(counterclockwise when positive) + + Args: + centers (np.ndarray): Locations in kitti label file with shape (N, 2). + dims (np.ndarray): Dimensions in kitti label file with shape (N, 2). + angles (np.ndarray, optional): Rotation_y in kitti label file with + shape (N). Defaults to None. + origin (list or array or float, optional): origin point relate to + smallest point. Defaults to 0.5. + + Returns: + np.ndarray: Corners with the shape of (N, 4, 2). + """ + # 'length' in kitti format is in x axis. + # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) + # center in kitti format is [0.5, 1.0, 0.5] in xyz. + corners = corners_nd(dims, origin=origin) + # corners: [N, 4, 2] + if angles is not None: + corners = rotation_3d_in_axis(corners, angles) + corners += centers.reshape([-1, 1, 2]) + return corners + + +@numba.jit(nopython=True) +def depth_to_points(depth, trunc_pixel): + """Convert depth map to points. + + Args: + depth (np.array, shape=[H, W]): Depth map which + the row of [0~`trunc_pixel`] are truncated. + trunc_pixel (int): The number of truncated row. + + Returns: + np.ndarray: Points in camera coordinates. 
+ """ + num_pts = np.sum(depth[trunc_pixel:, ] > 0.1) + points = np.zeros((num_pts, 3), dtype=depth.dtype) + x = np.array([0, 0, 1], dtype=depth.dtype) + k = 0 + for i in range(trunc_pixel, depth.shape[0]): + for j in range(depth.shape[1]): + if depth[i, j] > 0.1: + x = np.array([j, i, 1], dtype=depth.dtype) + points[k] = x * depth[i, j] + k += 1 + return points + + +def depth_to_lidar_points(depth, trunc_pixel, P2, r_rect, velo2cam): + """Convert depth map to points in lidar coordinate. + + Args: + depth (np.array, shape=[H, W]): Depth map which + the row of [0~`trunc_pixel`] are truncated. + trunc_pixel (int): The number of truncated row. + P2 (p.array, shape=[4, 4]): Intrinsics of Camera2. + r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in + specific camera coordinate (e.g. CAM2) to CAM0. + velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in + camera coordinate to lidar coordinate. + + Returns: + np.ndarray: Points in lidar coordinates. + """ + pts = depth_to_points(depth, trunc_pixel) + points_shape = list(pts.shape[0:-1]) + points = np.concatenate([pts, np.ones(points_shape + [1])], axis=-1) + points = points @ np.linalg.inv(P2.T) + lidar_points = camera_to_lidar(points, r_rect, velo2cam) + return lidar_points + + +def center_to_corner_box3d(centers, + dims, + angles=None, + origin=(0.5, 1.0, 0.5), + axis=1): + """Convert kitti locations, dimensions and angles to corners. + + Args: + centers (np.ndarray): Locations in kitti label file with shape (N, 3). + dims (np.ndarray): Dimensions in kitti label file with shape (N, 3). + angles (np.ndarray, optional): Rotation_y in kitti label file with + shape (N). Defaults to None. + origin (list or array or float, optional): Origin point relate to + smallest point. Use (0.5, 1.0, 0.5) in camera and (0.5, 0.5, 0) + in lidar. Defaults to (0.5, 1.0, 0.5). + axis (int, optional): Rotation axis. 1 for camera and 2 for lidar. + Defaults to 1. + + Returns: + np.ndarray: Corners with the shape of (N, 8, 3). + """ + # 'length' in kitti format is in x axis. + # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(lwh)(lidar) + # center in kitti format is [0.5, 1.0, 0.5] in xyz. + corners = corners_nd(dims, origin=origin) + # corners: [N, 8, 3] + if angles is not None: + corners = rotation_3d_in_axis(corners, angles, axis=axis) + corners += centers.reshape([-1, 1, 3]) + return corners + + +@numba.jit(nopython=True) +def box2d_to_corner_jit(boxes): + """Convert box2d to corner. + + Args: + boxes (np.ndarray, shape=[N, 5]): Boxes2d with rotation. + + Returns: + box_corners (np.ndarray, shape=[N, 4, 2]): Box corners. + """ + num_box = boxes.shape[0] + corners_norm = np.zeros((4, 2), dtype=boxes.dtype) + corners_norm[1, 1] = 1.0 + corners_norm[2] = 1.0 + corners_norm[3, 0] = 1.0 + corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) + corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape( + 1, 4, 2) + rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) + box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) + for i in range(num_box): + rot_sin = np.sin(boxes[i, -1]) + rot_cos = np.cos(boxes[i, -1]) + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 1] = rot_sin + rot_mat_T[1, 0] = -rot_sin + rot_mat_T[1, 1] = rot_cos + box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] + return box_corners + + +@numba.njit +def corner_to_standup_nd_jit(boxes_corner): + """Convert boxes_corner to aligned (min-max) boxes. + + Args: + boxes_corner (np.ndarray, shape=[N, 2**dim, dim]): Boxes corners. 
+ + Returns: + np.ndarray, shape=[N, dim*2]: Aligned (min-max) boxes. + """ + num_boxes = boxes_corner.shape[0] + ndim = boxes_corner.shape[-1] + result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) + for i in range(num_boxes): + for j in range(ndim): + result[i, j] = np.min(boxes_corner[i, :, j]) + for j in range(ndim): + result[i, j + ndim] = np.max(boxes_corner[i, :, j]) + return result + + +@numba.jit(nopython=True) +def corner_to_surfaces_3d_jit(corners): + """Convert 3d box corners from corner function above to surfaces that + normal vectors all direct to internal. + + Args: + corners (np.ndarray): 3d box corners with the shape of (N, 8, 3). + + Returns: + np.ndarray: Surfaces with the shape of (N, 6, 4, 3). + """ + # box_corners: [N, 8, 3], must from corner functions in this module + num_boxes = corners.shape[0] + surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) + corner_idxes = np.array([ + 0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7 + ]).reshape(6, 4) + for i in range(num_boxes): + for j in range(6): + for k in range(4): + surfaces[i, j, k] = corners[i, corner_idxes[j, k]] + return surfaces + + +def rotation_points_single_angle(points, angle, axis=0): + """Rotate points with a single angle. + + Args: + points (np.ndarray, shape=[N, 3]]): + angle (np.ndarray, shape=[1]]): + axis (int, optional): Axis to rotate at. Defaults to 0. + + Returns: + np.ndarray: Rotated points. + """ + # points: [N, 3] + rot_sin = np.sin(angle) + rot_cos = np.cos(angle) + if axis == 1: + rot_mat_T = np.array( + [[rot_cos, 0, rot_sin], [0, 1, 0], [-rot_sin, 0, rot_cos]], + dtype=points.dtype) + elif axis == 2 or axis == -1: + rot_mat_T = np.array( + [[rot_cos, rot_sin, 0], [-rot_sin, rot_cos, 0], [0, 0, 1]], + dtype=points.dtype) + elif axis == 0: + rot_mat_T = np.array( + [[1, 0, 0], [0, rot_cos, rot_sin], [0, -rot_sin, rot_cos]], + dtype=points.dtype) + else: + raise ValueError('axis should in range') + + return points @ rot_mat_T, rot_mat_T + + +def box3d_to_bbox(box3d, P2): + """Convert box3d in camera coordinates to bbox in image coordinates. + + Args: + box3d (np.ndarray, shape=[N, 7]): Boxes in camera coordinate. + P2 (np.array, shape=[4, 4]): Intrinsics of Camera2. + + Returns: + np.ndarray, shape=[N, 4]: Boxes 2d in image coordinates. + """ + box_corners = center_to_corner_box3d( + box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1) + box_corners_in_image = points_cam2img(box_corners, P2) + # box_corners_in_image: [N, 8, 2] + minxy = np.min(box_corners_in_image, axis=1) + maxxy = np.max(box_corners_in_image, axis=1) + bbox = np.concatenate([minxy, maxxy], axis=1) + return bbox + + +def corner_to_surfaces_3d(corners): + """convert 3d box corners from corner function above to surfaces that + normal vectors all direct to internal. + + Args: + corners (np.ndarray): 3D box corners with shape of (N, 8, 3). + + Returns: + np.ndarray: Surfaces with the shape of (N, 6, 4, 3). 
+ """ + # box_corners: [N, 8, 3], must from corner functions in this module + surfaces = np.array([ + [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], + [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], + [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], + [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], + [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], + [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], + ]).transpose([2, 0, 1, 3]) + return surfaces + + +def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0)): + """Check points in rotated bbox and return indices. + + Note: + This function is for counterclockwise boxes. + + Args: + points (np.ndarray, shape=[N, 3+dim]): Points to query. + rbbox (np.ndarray, shape=[M, 7]): Boxes3d with rotation. + z_axis (int, optional): Indicate which axis is height. + Defaults to 2. + origin (tuple[int], optional): Indicate the position of + box center. Defaults to (0.5, 0.5, 0). + + Returns: + np.ndarray, shape=[N, M]: Indices of points in each box. + """ + # TODO: this function is different from PointCloud3D, be careful + # when start to use nuscene, check the input + rbbox_corners = center_to_corner_box3d( + rbbox[:, :3], rbbox[:, 3:6], rbbox[:, 6], origin=origin, axis=z_axis) + surfaces = corner_to_surfaces_3d(rbbox_corners) + indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) + return indices + + +def minmax_to_corner_2d(minmax_box): + """Convert minmax box to corners2d. + + Args: + minmax_box (np.ndarray, shape=[N, dims]): minmax boxes. + + Returns: + np.ndarray: 2d corners of boxes + """ + ndim = minmax_box.shape[-1] // 2 + center = minmax_box[..., :ndim] + dims = minmax_box[..., ndim:] - center + return center_to_corner_box2d(center, dims, origin=0.0) + + +def create_anchors_3d_range(feature_size, + anchor_range, + sizes=((3.9, 1.6, 1.56), ), + rotations=(0, np.pi / 2), + dtype=np.float32): + """Create anchors 3d by range. + + Args: + feature_size (list[float] | tuple[float]): Feature map size. It is + either a list of a tuple of [D, H, W](in order of z, y, and x). + anchor_range (torch.Tensor | list[float]): Range of anchors with + shape [6]. The order is consistent with that of anchors, i.e., + (x_min, y_min, z_min, x_max, y_max, z_max). + sizes (list[list] | np.ndarray | torch.Tensor, optional): + Anchor size with shape [N, 3], in order of x, y, z. + Defaults to ((3.9, 1.6, 1.56), ). + rotations (list[float] | np.ndarray | torch.Tensor, optional): + Rotations of anchors in a single feature grid. + Defaults to (0, np.pi / 2). + dtype (type, optional): Data type. Defaults to np.float32. + + Returns: + np.ndarray: Range based anchors with shape of + (*feature_size, num_sizes, num_rots, 7). 
+ """ + anchor_range = np.array(anchor_range, dtype) + z_centers = np.linspace( + anchor_range[2], anchor_range[5], feature_size[0], dtype=dtype) + y_centers = np.linspace( + anchor_range[1], anchor_range[4], feature_size[1], dtype=dtype) + x_centers = np.linspace( + anchor_range[0], anchor_range[3], feature_size[2], dtype=dtype) + sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3]) + rotations = np.array(rotations, dtype=dtype) + rets = np.meshgrid( + x_centers, y_centers, z_centers, rotations, indexing='ij') + tile_shape = [1] * 5 + tile_shape[-2] = int(sizes.shape[0]) + for i in range(len(rets)): + rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape) + rets[i] = rets[i][..., np.newaxis] # for concat + sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3]) + tile_size_shape = list(rets[0].shape) + tile_size_shape[3] = 1 + sizes = np.tile(sizes, tile_size_shape) + rets.insert(3, sizes) + ret = np.concatenate(rets, axis=-1) + return np.transpose(ret, [2, 1, 0, 3, 4, 5]) + + +def center_to_minmax_2d(centers, dims, origin=0.5): + """Center to minmax. + + Args: + centers (np.ndarray): Center points. + dims (np.ndarray): Dimensions. + origin (list or array or float, optional): Origin point relate + to smallest point. Defaults to 0.5. + + Returns: + np.ndarray: Minmax points. + """ + if origin == 0.5: + return np.concatenate([centers - dims / 2, centers + dims / 2], + axis=-1) + corners = center_to_corner_box2d(centers, dims, origin=origin) + return corners[:, [0, 2]].reshape([-1, 4]) + + +def rbbox2d_to_near_bbox(rbboxes): + """convert rotated bbox to nearest 'standing' or 'lying' bbox. + + Args: + rbboxes (np.ndarray): Rotated bboxes with shape of + (N, 5(x, y, xdim, ydim, rad)). + + Returns: + np.ndarray: Bounding boxes with the shape of + (N, 4(xmin, ymin, xmax, ymax)). + """ + rots = rbboxes[..., -1] + rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) + cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] + bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) + bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) + return bboxes + + +@numba.jit(nopython=True) +def iou_jit(boxes, query_boxes, mode='iou', eps=0.0): + """Calculate box iou. Note that jit version runs ~10x faster than the + box_overlaps function in mmdet3d.core.evaluation. + + Note: + This function is for counterclockwise boxes. + + Args: + boxes (np.ndarray): Input bounding boxes with shape of (N, 4). + query_boxes (np.ndarray): Query boxes with shape of (K, 4). + mode (str, optional): IoU mode. Defaults to 'iou'. + eps (float, optional): Value added to denominator. Defaults to 0. + + Returns: + np.ndarray: Overlap between boxes and query_boxes + with the shape of [N, K]. + """ + N = boxes.shape[0] + K = query_boxes.shape[0] + overlaps = np.zeros((N, K), dtype=boxes.dtype) + for k in range(K): + box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + eps) * + (query_boxes[k, 3] - query_boxes[k, 1] + eps)) + for n in range(N): + iw = ( + min(boxes[n, 2], query_boxes[k, 2]) - + max(boxes[n, 0], query_boxes[k, 0]) + eps) + if iw > 0: + ih = ( + min(boxes[n, 3], query_boxes[k, 3]) - + max(boxes[n, 1], query_boxes[k, 1]) + eps) + if ih > 0: + if mode == 'iou': + ua = ((boxes[n, 2] - boxes[n, 0] + eps) * + (boxes[n, 3] - boxes[n, 1] + eps) + box_area - + iw * ih) + else: + ua = ((boxes[n, 2] - boxes[n, 0] + eps) * + (boxes[n, 3] - boxes[n, 1] + eps)) + overlaps[n, k] = iw * ih / ua + return overlaps + + +def projection_matrix_to_CRT_kitti(proj): + """Split projection matrix of KITTI. 
+ + Note: + This function is for KITTI only. + + P = C @ [R|T] + C is upper triangular matrix, so we need to inverse CR and use QR + stable for all kitti camera projection matrix. + + Args: + proj (p.array, shape=[4, 4]): Intrinsics of camera. + + Returns: + tuple[np.ndarray]: Splited matrix of C, R and T. + """ + + CR = proj[0:3, 0:3] + CT = proj[0:3, 3] + RinvCinv = np.linalg.inv(CR) + Rinv, Cinv = np.linalg.qr(RinvCinv) + C = np.linalg.inv(Cinv) + R = np.linalg.inv(Rinv) + T = Cinv @ CT + return C, R, T + + +def remove_outside_points(points, rect, Trv2c, P2, image_shape): + """Remove points which are outside of image. + + Note: + This function is for KITTI only. + + Args: + points (np.ndarray, shape=[N, 3+dims]): Total points. + rect (np.ndarray, shape=[4, 4]): Matrix to project points in + specific camera coordinate (e.g. CAM2) to CAM0. + Trv2c (np.ndarray, shape=[4, 4]): Matrix to project points in + camera coordinate to lidar coordinate. + P2 (p.array, shape=[4, 4]): Intrinsics of Camera2. + image_shape (list[int]): Shape of image. + + Returns: + np.ndarray, shape=[N, 3+dims]: Filtered points. + """ + # 5x faster than remove_outside_points_v1(2ms vs 10ms) + C, R, T = projection_matrix_to_CRT_kitti(P2) + image_bbox = [0, 0, image_shape[1], image_shape[0]] + frustum = get_frustum(image_bbox, C) + frustum -= T + frustum = np.linalg.inv(R) @ frustum.T + frustum = camera_to_lidar(frustum.T, rect, Trv2c) + frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) + indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) + points = points[indices.reshape([-1])] + return points + + +def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): + """Get frustum corners in camera coordinates. + + Args: + bbox_image (list[int]): box in image coordinates. + C (np.ndarray): Intrinsics. + near_clip (float, optional): Nearest distance of frustum. + Defaults to 0.001. + far_clip (float, optional): Farthest distance of frustum. + Defaults to 100. + + Returns: + np.ndarray, shape=[8, 3]: coordinates of frustum corners. + """ + fku = C[0, 0] + fkv = -C[1, 1] + u0v0 = C[0:2, 2] + z_points = np.array( + [near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] + b = bbox_image + box_corners = np.array( + [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], + dtype=C.dtype) + near_box_corners = (box_corners - u0v0) / np.array( + [fku / near_clip, -fkv / near_clip], dtype=C.dtype) + far_box_corners = (box_corners - u0v0) / np.array( + [fku / far_clip, -fkv / far_clip], dtype=C.dtype) + ret_xy = np.concatenate([near_box_corners, far_box_corners], + axis=0) # [8, 2] + ret_xyz = np.concatenate([ret_xy, z_points], axis=1) + return ret_xyz + + +def surface_equ_3d(polygon_surfaces): + """ + + Args: + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + [num_polygon, max_num_surfaces, max_num_points_of_surface, 3]. + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + + Returns: + tuple: normal vector and its direction. 
+ """ + # return [a, b, c], d in ax+by+cz+d=0 + # polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3] + surface_vec = polygon_surfaces[:, :, :2, :] - \ + polygon_surfaces[:, :, 1:3, :] + # normal_vec: [..., 3] + normal_vec = np.cross(surface_vec[:, :, 0, :], surface_vec[:, :, 1, :]) + # print(normal_vec.shape, points[..., 0, :].shape) + # d = -np.inner(normal_vec, points[..., 0, :]) + d = np.einsum('aij, aij->ai', normal_vec, polygon_surfaces[:, :, 0, :]) + return normal_vec, -d + + +@numba.njit +def _points_in_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, + num_surfaces): + """ + Args: + points (np.ndarray): Input points with shape of (num_points, 3). + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + (num_polygon, max_num_surfaces, max_num_points_of_surface, 3). + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + normal_vec (np.ndarray): Normal vector of polygon_surfaces. + d (int): Directions of normal vector. + num_surfaces (np.ndarray): Number of surfaces a polygon contains + shape of (num_polygon). + + Returns: + np.ndarray: Result matrix with the shape of [num_points, num_polygon]. + """ + max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3] + num_points = points.shape[0] + num_polygons = polygon_surfaces.shape[0] + ret = np.ones((num_points, num_polygons), dtype=np.bool_) + sign = 0.0 + for i in range(num_points): + for j in range(num_polygons): + for k in range(max_num_surfaces): + if k > num_surfaces[j]: + break + sign = ( + points[i, 0] * normal_vec[j, k, 0] + + points[i, 1] * normal_vec[j, k, 1] + + points[i, 2] * normal_vec[j, k, 2] + d[j, k]) + if sign >= 0: + ret[i, j] = False + break + return ret + + +def points_in_convex_polygon_3d_jit(points, + polygon_surfaces, + num_surfaces=None): + """Check points is in 3d convex polygons. + + Args: + points (np.ndarray): Input points with shape of (num_points, 3). + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + (num_polygon, max_num_surfaces, max_num_points_of_surface, 3). + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + num_surfaces (np.ndarray, optional): Number of surfaces a polygon + contains shape of (num_polygon). Defaults to None. + + Returns: + np.ndarray: Result matrix with the shape of [num_points, num_polygon]. + """ + max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3] + # num_points = points.shape[0] + num_polygons = polygon_surfaces.shape[0] + if num_surfaces is None: + num_surfaces = np.full((num_polygons, ), 9999999, dtype=np.int64) + normal_vec, d = surface_equ_3d(polygon_surfaces[:, :, :3, :]) + # normal_vec: [num_polygon, max_num_surfaces, 3] + # d: [num_polygon, max_num_surfaces] + return _points_in_convex_polygon_3d_jit(points, polygon_surfaces, + normal_vec, d, num_surfaces) + + +@numba.njit +def points_in_convex_polygon_jit(points, polygon, clockwise=False): + """Check points is in 2d convex polygons. True when point in polygon. + + Args: + points (np.ndarray): Input points with the shape of [num_points, 2]. + polygon (np.ndarray): Input polygon with the shape of + [num_polygon, num_points_of_polygon, 2]. + clockwise (bool, optional): Indicate polygon is clockwise. Defaults + to True. + + Returns: + np.ndarray: Result matrix with the shape of [num_points, num_polygon]. 
+ """ + # first convert polygon to directed lines + num_points_of_polygon = polygon.shape[1] + num_points = points.shape[0] + num_polygons = polygon.shape[0] + # vec for all the polygons + if clockwise: + vec1 = polygon - polygon[:, + np.array([num_points_of_polygon - 1] + list( + range(num_points_of_polygon - 1))), :] + else: + vec1 = polygon[:, + np.array([num_points_of_polygon - 1] + + list(range(num_points_of_polygon - + 1))), :] - polygon + ret = np.zeros((num_points, num_polygons), dtype=np.bool_) + success = True + cross = 0.0 + for i in range(num_points): + for j in range(num_polygons): + success = True + for k in range(num_points_of_polygon): + vec = vec1[j, k] + cross = vec[1] * (polygon[j, k, 0] - points[i, 0]) + cross -= vec[0] * (polygon[j, k, 1] - points[i, 1]) + if cross >= 0: + success = False + break + ret[i, j] = success + return ret + + +def boxes3d_to_corners3d_lidar(boxes3d, bottom_center=True): + """Convert kitti center boxes to corners. + + 7 -------- 4 + /| /| + 6 -------- 5 . + | | | | + . 3 -------- 0 + |/ |/ + 2 -------- 1 + + Note: + This function is for LiDAR boxes only. + + Args: + boxes3d (np.ndarray): Boxes with shape of (N, 7) + [x, y, z, x_size, y_size, z_size, ry] in LiDAR coords, + see the definition of ry in KITTI dataset. + bottom_center (bool, optional): Whether z is on the bottom center + of object. Defaults to True. + + Returns: + np.ndarray: Box corners with the shape of [N, 8, 3]. + """ + boxes_num = boxes3d.shape[0] + x_size, y_size, z_size = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5] + x_corners = np.array([ + x_size / 2., -x_size / 2., -x_size / 2., x_size / 2., x_size / 2., + -x_size / 2., -x_size / 2., x_size / 2. + ], + dtype=np.float32).T + y_corners = np.array([ + -y_size / 2., -y_size / 2., y_size / 2., y_size / 2., -y_size / 2., + -y_size / 2., y_size / 2., y_size / 2. + ], + dtype=np.float32).T + if bottom_center: + z_corners = np.zeros((boxes_num, 8), dtype=np.float32) + z_corners[:, 4:8] = z_size.reshape(boxes_num, 1).repeat( + 4, axis=1) # (N, 8) + else: + z_corners = np.array([ + -z_size / 2., -z_size / 2., -z_size / 2., -z_size / 2., + z_size / 2., z_size / 2., z_size / 2., z_size / 2. + ], + dtype=np.float32).T + + ry = boxes3d[:, 6] + zeros, ones = np.zeros( + ry.size, dtype=np.float32), np.ones( + ry.size, dtype=np.float32) + rot_list = np.array([[np.cos(ry), np.sin(ry), zeros], + [-np.sin(ry), np.cos(ry), zeros], + [zeros, zeros, ones]]) # (3, 3, N) + R_list = np.transpose(rot_list, (2, 0, 1)) # (N, 3, 3) + + temp_corners = np.concatenate((x_corners.reshape( + -1, 8, 1), y_corners.reshape(-1, 8, 1), z_corners.reshape(-1, 8, 1)), + axis=2) # (N, 8, 3) + rotated_corners = np.matmul(temp_corners, R_list) # (N, 8, 3) + x_corners = rotated_corners[:, :, 0] + y_corners = rotated_corners[:, :, 1] + z_corners = rotated_corners[:, :, 2] + + x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2] + + x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8) + y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8) + z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8) + + corners = np.concatenate( + (x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), + axis=2) + + return corners.astype(np.float32) diff --git a/mmdet3d/structures/ops/iou3d_calculator.py b/mmdet3d/structures/ops/iou3d_calculator.py new file mode 100755 index 0000000..baec1cb --- /dev/null +++ b/mmdet3d/structures/ops/iou3d_calculator.py @@ -0,0 +1,329 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +from mmdet.structures.bbox import bbox_overlaps + +from mmdet3d.registry import TASK_UTILS +from mmdet3d.structures.bbox_3d import get_box_type + + +@TASK_UTILS.register_module() +class BboxOverlapsNearest3D(object): + """Nearest 3D IoU Calculator. + + Note: + This IoU calculator first finds the nearest 2D boxes in bird eye view + (BEV), and then calculates the 2D IoU using :meth:`bbox_overlaps`. + + Args: + coordinate (str): 'camera', 'lidar', or 'depth' coordinate system. + """ + + def __init__(self, coordinate='lidar'): + assert coordinate in ['camera', 'lidar', 'depth'] + self.coordinate = coordinate + + def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): + """Calculate nearest 3D IoU. + + Note: + If ``is_aligned`` is ``False``, then it calculates the ious between + each bbox of bboxes1 and bboxes2, otherwise it calculates the ious + between each aligned pair of bboxes1 and bboxes2. + + Args: + bboxes1 (torch.Tensor): shape (N, 7+N) + [x, y, z, x_size, y_size, z_size, ry, v]. + bboxes2 (torch.Tensor): shape (M, 7+N) + [x, y, z, x_size, y_size, z_size, ry, v]. + mode (str): "iou" (intersection over union) or iof + (intersection over foreground). + is_aligned (bool): Whether the calculation is aligned. + + Return: + torch.Tensor: If ``is_aligned`` is ``True``, return ious between + bboxes1 and bboxes2 with shape (M, N). If ``is_aligned`` is + ``False``, return shape is M. + """ + return bbox_overlaps_nearest_3d(bboxes1, bboxes2, mode, is_aligned, + self.coordinate) + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(coordinate={self.coordinate}' + return repr_str + + +@TASK_UTILS.register_module() +class BboxOverlaps3D(object): + """3D IoU Calculator. + + Args: + coordinate (str): The coordinate system, valid options are + 'camera', 'lidar', and 'depth'. + """ + + def __init__(self, coordinate): + assert coordinate in ['camera', 'lidar', 'depth'] + self.coordinate = coordinate + + def __call__(self, bboxes1, bboxes2, mode='iou'): + """Calculate 3D IoU using cuda implementation. + + Note: + This function calculate the IoU of 3D boxes based on their volumes. + IoU calculator ``:class:BboxOverlaps3D`` uses this function to + calculate the actual 3D IoUs of boxes. + + Args: + bboxes1 (torch.Tensor): with shape (N, 7+C), + (x, y, z, x_size, y_size, z_size, ry, v*). + bboxes2 (torch.Tensor): with shape (M, 7+C), + (x, y, z, x_size, y_size, z_size, ry, v*). + mode (str): "iou" (intersection over union) or + iof (intersection over foreground). + + Return: + torch.Tensor: Bbox overlaps results of bboxes1 and bboxes2 + with shape (M, N) (aligned mode is not supported currently). + """ + return bbox_overlaps_3d(bboxes1, bboxes2, mode, self.coordinate) + + def __repr__(self): + """str: return a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(coordinate={self.coordinate}' + return repr_str + + +def bbox_overlaps_nearest_3d(bboxes1, + bboxes2, + mode='iou', + is_aligned=False, + coordinate='lidar'): + """Calculate nearest 3D IoU. + + Note: + This function first finds the nearest 2D boxes in bird eye view + (BEV), and then calculates the 2D IoU using :meth:`bbox_overlaps`. + This IoU calculator :class:`BboxOverlapsNearest3D` uses this + function to calculate IoUs of boxes. + + If ``is_aligned`` is ``False``, then it calculates the ious between + each bbox of bboxes1 and bboxes2, otherwise the ious between each + aligned pair of bboxes1 and bboxes2. 
+ + Args: + bboxes1 (torch.Tensor): with shape (N, 7+C), + (x, y, z, x_size, y_size, z_size, ry, v*). + bboxes2 (torch.Tensor): with shape (M, 7+C), + (x, y, z, x_size, y_size, z_size, ry, v*). + mode (str): "iou" (intersection over union) or iof + (intersection over foreground). + is_aligned (bool): Whether the calculation is aligned + + Return: + torch.Tensor: If ``is_aligned`` is ``True``, return ious between + bboxes1 and bboxes2 with shape (M, N). If ``is_aligned`` is + ``False``, return shape is M. + """ + assert bboxes1.size(-1) == bboxes2.size(-1) >= 7 + + box_type, _ = get_box_type(coordinate) + + bboxes1 = box_type(bboxes1, box_dim=bboxes1.shape[-1]) + bboxes2 = box_type(bboxes2, box_dim=bboxes2.shape[-1]) + + # Change the bboxes to bev + # box conversion and iou calculation in torch version on CUDA + # is 10x faster than that in numpy version + bboxes1_bev = bboxes1.nearest_bev + bboxes2_bev = bboxes2.nearest_bev + + ret = bbox_overlaps( + bboxes1_bev, bboxes2_bev, mode=mode, is_aligned=is_aligned) + return ret + + +def bbox_overlaps_3d(bboxes1, bboxes2, mode='iou', coordinate='camera'): + """Calculate 3D IoU using cuda implementation. + + Note: + This function calculates the IoU of 3D boxes based on their volumes. + IoU calculator :class:`BboxOverlaps3D` uses this function to + calculate the actual IoUs of boxes. + + Args: + bboxes1 (torch.Tensor): with shape (N, 7+C), + (x, y, z, x_size, y_size, z_size, ry, v*). + bboxes2 (torch.Tensor): with shape (M, 7+C), + (x, y, z, x_size, y_size, z_size, ry, v*). + mode (str): "iou" (intersection over union) or + iof (intersection over foreground). + coordinate (str): 'camera' or 'lidar' coordinate system. + + Return: + torch.Tensor: Bbox overlaps results of bboxes1 and bboxes2 + with shape (M, N) (aligned mode is not supported currently). + """ + assert bboxes1.size(-1) == bboxes2.size(-1) >= 7 + + box_type, _ = get_box_type(coordinate) + + bboxes1 = box_type(bboxes1, box_dim=bboxes1.shape[-1]) + bboxes2 = box_type(bboxes2, box_dim=bboxes2.shape[-1]) + + return bboxes1.overlaps(bboxes1, bboxes2, mode=mode) + + +@TASK_UTILS.register_module() +class AxisAlignedBboxOverlaps3D(object): + """Axis-aligned 3D Overlaps (IoU) Calculator.""" + + def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): + """Calculate IoU between 2D bboxes. + + Args: + bboxes1 (Tensor): shape (B, m, 6) in + format or empty. + bboxes2 (Tensor): shape (B, n, 6) in + format or empty. + B indicates the batch dim, in shape (B1, B2, ..., Bn). + If ``is_aligned`` is ``True``, then m and n must be equal. + mode (str): "iou" (intersection over union) or "giou" (generalized + intersection over union). + is_aligned (bool, optional): If True, then m and n must be equal. + Defaults to False. + Returns: + Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) + """ + assert bboxes1.size(-1) == bboxes2.size(-1) == 6 + return axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2, mode, + is_aligned) + + def __repr__(self): + """str: a string describing the module""" + repr_str = self.__class__.__name__ + '()' + return repr_str + + +def axis_aligned_bbox_overlaps_3d(bboxes1, + bboxes2, + mode='iou', + is_aligned=False, + eps=1e-6): + """Calculate overlap between two set of axis aligned 3D bboxes. If + ``is_aligned`` is ``False``, then calculate the overlaps between each bbox + of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of + bboxes1 and bboxes2. + + Args: + bboxes1 (Tensor): shape (B, m, 6) in + format or empty. 
+ bboxes2 (Tensor): shape (B, n, 6) in + format or empty. + B indicates the batch dim, in shape (B1, B2, ..., Bn). + If ``is_aligned`` is ``True``, then m and n must be equal. + mode (str): "iou" (intersection over union) or "giou" (generalized + intersection over union). + is_aligned (bool, optional): If True, then m and n must be equal. + Defaults to False. + eps (float, optional): A value added to the denominator for numerical + stability. Defaults to 1e-6. + + Returns: + Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) + + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 0, 10, 10, 10], + >>> [10, 10, 10, 20, 20, 20], + >>> [32, 32, 32, 38, 40, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 0, 10, 20, 20], + >>> [0, 10, 10, 10, 19, 20], + >>> [10, 10, 10, 20, 20, 20], + >>> ]) + >>> overlaps = axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2) + >>> assert overlaps.shape == (3, 3) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) + >>> assert overlaps.shape == (3, ) + Example: + >>> empty = torch.empty(0, 6) + >>> nonempty = torch.FloatTensor([[0, 0, 0, 10, 9, 10]]) + >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) + >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) + >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) + """ + + assert mode in ['iou', 'giou'], f'Unsupported mode {mode}' + # Either the boxes are empty or the length of boxes's last dimension is 6 + assert (bboxes1.size(-1) == 6 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 6 or bboxes2.size(0) == 0) + + # Batch dim must be the same + # Batch dim: (B1, B2, ... Bn) + assert bboxes1.shape[:-2] == bboxes2.shape[:-2] + batch_shape = bboxes1.shape[:-2] + + rows = bboxes1.size(-2) + cols = bboxes2.size(-2) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + if is_aligned: + return bboxes1.new(batch_shape + (rows, )) + else: + return bboxes1.new(batch_shape + (rows, cols)) + + area1 = (bboxes1[..., 3] - + bboxes1[..., 0]) * (bboxes1[..., 4] - bboxes1[..., 1]) * ( + bboxes1[..., 5] - bboxes1[..., 2]) + area2 = (bboxes2[..., 3] - + bboxes2[..., 0]) * (bboxes2[..., 4] - bboxes2[..., 1]) * ( + bboxes2[..., 5] - bboxes2[..., 2]) + + if is_aligned: + lt = torch.max(bboxes1[..., :3], bboxes2[..., :3]) # [B, rows, 3] + rb = torch.min(bboxes1[..., 3:], bboxes2[..., 3:]) # [B, rows, 3] + + wh = (rb - lt).clamp(min=0) # [B, rows, 2] + overlap = wh[..., 0] * wh[..., 1] * wh[..., 2] + + if mode in ['iou', 'giou']: + union = area1 + area2 - overlap + else: + union = area1 + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :3], bboxes2[..., :3]) + enclosed_rb = torch.max(bboxes1[..., 3:], bboxes2[..., 3:]) + else: + lt = torch.max(bboxes1[..., :, None, :3], + bboxes2[..., None, :, :3]) # [B, rows, cols, 3] + rb = torch.min(bboxes1[..., :, None, 3:], + bboxes2[..., None, :, 3:]) # [B, rows, cols, 3] + + wh = (rb - lt).clamp(min=0) # [B, rows, cols, 3] + overlap = wh[..., 0] * wh[..., 1] * wh[..., 2] + + if mode in ['iou', 'giou']: + union = area1[..., None] + area2[..., None, :] - overlap + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :, None, :3], + bboxes2[..., None, :, :3]) + enclosed_rb = torch.max(bboxes1[..., :, None, 3:], + bboxes2[..., None, :, 3:]) + + eps = union.new_tensor([eps]) + union = torch.max(union, eps) + ious = overlap / union + if mode in ['iou']: + return ious + # calculate gious + enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0) + enclose_area = enclose_wh[..., 0] * 
enclose_wh[..., 1] * enclose_wh[..., 2] + enclose_area = torch.max(enclose_area, eps) + gious = ious - (enclose_area - union) / enclose_area + return gious diff --git a/mmdet3d/structures/ops/transforms.py b/mmdet3d/structures/ops/transforms.py new file mode 100755 index 0000000..8e9f700 --- /dev/null +++ b/mmdet3d/structures/ops/transforms.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def bbox3d_mapping_back(bboxes, scale_factor, flip_horizontal, flip_vertical): + """Map bboxes from testing scale to original image scale. + + Args: + bboxes (:obj:`BaseInstance3DBoxes`): Boxes to be mapped back. + scale_factor (float): Scale factor. + flip_horizontal (bool): Whether to flip horizontally. + flip_vertical (bool): Whether to flip vertically. + + Returns: + :obj:`BaseInstance3DBoxes`: Boxes mapped back. + """ + new_bboxes = bboxes.clone() + if flip_horizontal: + new_bboxes.flip('horizontal') + if flip_vertical: + new_bboxes.flip('vertical') + new_bboxes.scale(1 / scale_factor) + + return new_bboxes + + +def bbox3d2roi(bbox_list): + """Convert a list of bounding boxes to roi format. + + Args: + bbox_list (list[torch.Tensor]): A list of bounding boxes + corresponding to a batch of images. + + Returns: + torch.Tensor: Region of interests in shape (n, c), where + the channels are in order of [batch_ind, x, y ...]. + """ + rois_list = [] + for img_id, bboxes in enumerate(bbox_list): + if bboxes.size(0) > 0: + img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) + rois = torch.cat([img_inds, bboxes], dim=-1) + else: + rois = torch.zeros_like(bboxes) + rois_list.append(rois) + rois = torch.cat(rois_list, 0) + return rois + + +# TODO delete this +def bbox3d2result(bboxes, scores, labels, attrs=None): + """Convert detection results to a list of numpy arrays. + + Args: + bboxes (torch.Tensor): Bounding boxes with shape (N, 5). + labels (torch.Tensor): Labels with shape (N, ). + scores (torch.Tensor): Scores with shape (N, ). + attrs (torch.Tensor, optional): Attributes with shape (N, ). + Defaults to None. + + Returns: + dict[str, torch.Tensor]: Bounding box results in cpu mode. + + - boxes_3d (torch.Tensor): 3D boxes. + - scores (torch.Tensor): Prediction scores. + - labels_3d (torch.Tensor): Box labels. + - attrs_3d (torch.Tensor, optional): Box attributes. + """ + result_dict = dict( + bboxes_3d=bboxes.to('cpu'), + scores_3d=scores.cpu(), + labels_3d=labels.cpu()) + + if attrs is not None: + result_dict['attr_labels'] = attrs.cpu() + + return result_dict diff --git a/mmdet3d/structures/point_data.py b/mmdet3d/structures/point_data.py new file mode 100755 index 0000000..f12d4c8 --- /dev/null +++ b/mmdet3d/structures/point_data.py @@ -0,0 +1,161 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections.abc import Sized +from typing import Union + +import numpy as np +import torch +from mmengine.structures import BaseDataElement + +IndexType = Union[str, slice, int, list, torch.LongTensor, + torch.cuda.LongTensor, torch.BoolTensor, + torch.cuda.BoolTensor, np.ndarray] + + +class PointData(BaseDataElement): + """Data structure for point-level annotations or predictions. + + All data items in ``data_fields`` of ``PointData`` meet the following + requirements: + + - They are all one dimension. + - They should have the same length. + + `PointData` is used to save point-level semantic and instance mask, + it also can save `instances_labels` and `instances_scores` temporarily. 
+ In the future, we would consider to move the instance-level info into + `gt_instances_3d` and `pred_instances_3d`. + + Examples: + >>> metainfo = dict( + ... sample_idx=random.randint(0, 100)) + >>> points = np.random.randint(0, 255, (100, 3)) + >>> point_data = PointData(metainfo=metainfo, + ... points=points) + >>> print(len(point_data)) + 100 + + >>> # slice + >>> slice_data = point_data[10:60] + >>> assert len(slice_data) == 50 + + >>> # set + >>> point_data.pts_semantic_mask = torch.randint(0, 255, (100,)) + >>> point_data.pts_instance_mask = torch.randint(0, 255, (100,)) + >>> assert tuple(point_data.pts_semantic_mask.shape) == (100,) + >>> assert tuple(point_data.pts_instance_mask.shape) == (100,) + """ + + def __setattr__(self, name: str, value: Sized) -> None: + """setattr is only used to set data. + + The value must have the attribute of `__len__` and have the same length + of `PointData`. + """ + if name in ('_metainfo_fields', '_data_fields'): + if not hasattr(self, name): + super().__setattr__(name, value) + else: + raise AttributeError(f'{name} has been used as a ' + 'private attribute, which is immutable.') + + else: + assert isinstance(value, + Sized), 'value must contain `__len__` attribute' + # TODO: make sure the input value share the same length + super().__setattr__(name, value) + + __setitem__ = __setattr__ + + def __getitem__(self, item: IndexType) -> 'PointData': + """ + Args: + item (str, int, list, :obj:`slice`, :obj:`numpy.ndarray`, + :obj:`torch.LongTensor`, :obj:`torch.BoolTensor`): + Get the corresponding values according to item. + + Returns: + :obj:`PointData`: Corresponding values. + """ + if isinstance(item, list): + item = np.array(item) + if isinstance(item, np.ndarray): + # The default int type of numpy is platform dependent, int32 for + # windows and int64 for linux. `torch.Tensor` requires the index + # should be int64, therefore we simply convert it to int64 here. + # Mode details in https://github.com/numpy/numpy/issues/9464 + item = item.astype(np.int64) if item.dtype == np.int32 else item + item = torch.from_numpy(item) + assert isinstance( + item, (str, slice, int, torch.LongTensor, torch.cuda.LongTensor, + torch.BoolTensor, torch.cuda.BoolTensor)) + + if isinstance(item, str): + return getattr(self, item) + + if isinstance(item, int): + if item >= len(self) or item < -len(self): # type: ignore + raise IndexError(f'Index {item} out of range!') + else: + # keep the dimension + item = slice(item, None, len(self)) + + new_data = self.__class__(metainfo=self.metainfo) + if isinstance(item, torch.Tensor): + assert item.dim() == 1, 'Only support to get the' \ + ' values along the first dimension.' + if isinstance(item, (torch.BoolTensor, torch.cuda.BoolTensor)): + assert len(item) == len(self), 'The shape of the ' \ + 'input(BoolTensor) ' \ + f'{len(item)} ' \ + 'does not match the shape ' \ + 'of the indexed tensor ' \ + 'in results_field ' \ + f'{len(self)} at ' \ + 'first dimension.' 
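+
+            # The rest of this branch indexes every data field with the
+            # tensor index: tensors and ndarrays support fancy indexing
+            # directly, while str, list and tuple fields (or any object
+            # exposing ``cat``) are gathered element by element and
+            # re-assembled.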
+ + for k, v in self.items(): + if isinstance(v, torch.Tensor): + new_data[k] = v[item] + elif isinstance(v, np.ndarray): + new_data[k] = v[item.cpu().numpy()] + elif isinstance( + v, (str, list, tuple)) or (hasattr(v, '__getitem__') + and hasattr(v, 'cat')): + # convert to indexes from BoolTensor + if isinstance(item, + (torch.BoolTensor, torch.cuda.BoolTensor)): + indexes = torch.nonzero(item).view( + -1).cpu().numpy().tolist() + else: + indexes = item.cpu().numpy().tolist() + slice_list = [] + if indexes: + for index in indexes: + slice_list.append(slice(index, None, len(v))) + else: + slice_list.append(slice(None, 0, None)) + r_list = [v[s] for s in slice_list] + if isinstance(v, (str, list, tuple)): + new_value = r_list[0] + for r in r_list[1:]: + new_value = new_value + r + else: + new_value = v.cat(r_list) + new_data[k] = new_value + else: + raise ValueError( + f'The type of `{k}` is `{type(v)}`, which has no ' + 'attribute of `cat`, so it does not ' + 'support slice with `bool`') + else: + # item is a slice + for k, v in self.items(): + new_data[k] = v[item] + return new_data # type: ignore + + def __len__(self) -> int: + """int: The length of `PointData`.""" + if len(self._data_fields) > 0: + return len(self.values()[0]) + else: + return 0 diff --git a/mmdet3d/structures/points/__init__.py b/mmdet3d/structures/points/__init__.py new file mode 100755 index 0000000..73d2d83 --- /dev/null +++ b/mmdet3d/structures/points/__init__.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_points import BasePoints +from .cam_points import CameraPoints +from .depth_points import DepthPoints +from .lidar_points import LiDARPoints + +__all__ = ['BasePoints', 'CameraPoints', 'DepthPoints', 'LiDARPoints'] + + +def get_points_type(points_type): + """Get the class of points according to coordinate type. + + Args: + points_type (str): The type of points coordinate. + The valid value are "CAMERA", "LIDAR", or "DEPTH". + + Returns: + class: Points type. + """ + if points_type == 'CAMERA': + points_cls = CameraPoints + elif points_type == 'LIDAR': + points_cls = LiDARPoints + elif points_type == 'DEPTH': + points_cls = DepthPoints + else: + raise ValueError('Only "points_type" of "CAMERA", "LIDAR", or "DEPTH"' + f' are supported, got {points_type}') + + return points_cls diff --git a/mmdet3d/structures/points/base_points.py b/mmdet3d/structures/points/base_points.py new file mode 100755 index 0000000..e7d9b8e --- /dev/null +++ b/mmdet3d/structures/points/base_points.py @@ -0,0 +1,440 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from abc import abstractmethod + +import numpy as np +import torch + +from ..bbox_3d.utils import rotation_3d_in_axis + + +class BasePoints(object): + """Base class for Points. + + Args: + tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. + points_dim (int, optional): Number of the dimension of a point. + Each row is (x, y, z). Defaults to 3. + attribute_dims (dict, optional): Dictionary to indicate the + meaning of extra dimension. Defaults to None. + + Attributes: + tensor (torch.Tensor): Float matrix of N x points_dim. + points_dim (int): Integer indicating the dimension of a point. + Each row is (x, y, z, ...). + attribute_dims (bool): Dictionary to indicate the meaning of extra + dimension. Defaults to None. + rotation_axis (int): Default rotation axis for points rotation. 
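+
+    Example:
+        >>> # Illustrative construction with xyz-only points.
+        >>> points = BasePoints(torch.rand(4, 3))
+        >>> points.shape
+        torch.Size([4, 3])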
+ """ + + def __init__(self, tensor, points_dim=3, attribute_dims=None): + if isinstance(tensor, torch.Tensor): + device = tensor.device + else: + device = torch.device('cpu') + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that + # does not depend on the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, points_dim)).to( + dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == \ + points_dim, tensor.size() + + self.tensor = tensor + self.points_dim = points_dim + self.attribute_dims = attribute_dims + self.rotation_axis = 0 + + @property + def coord(self): + """torch.Tensor: Coordinates of each point in shape (N, 3).""" + return self.tensor[:, :3] + + @coord.setter + def coord(self, tensor): + """Set the coordinates of each point.""" + try: + tensor = tensor.reshape(self.shape[0], 3) + except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray + raise ValueError(f'got unexpected shape {tensor.shape}') + if not isinstance(tensor, torch.Tensor): + tensor = self.tensor.new_tensor(tensor) + self.tensor[:, :3] = tensor + + @property + def height(self): + """torch.Tensor: + A vector with height of each point in shape (N, 1), or None.""" + if self.attribute_dims is not None and \ + 'height' in self.attribute_dims.keys(): + return self.tensor[:, self.attribute_dims['height']] + else: + return None + + @height.setter + def height(self, tensor): + """Set the height of each point.""" + try: + tensor = tensor.reshape(self.shape[0]) + except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray + raise ValueError(f'got unexpected shape {tensor.shape}') + if not isinstance(tensor, torch.Tensor): + tensor = self.tensor.new_tensor(tensor) + if self.attribute_dims is not None and \ + 'height' in self.attribute_dims.keys(): + self.tensor[:, self.attribute_dims['height']] = tensor + else: + # add height attribute + if self.attribute_dims is None: + self.attribute_dims = dict() + attr_dim = self.shape[1] + self.tensor = torch.cat([self.tensor, tensor.unsqueeze(1)], dim=1) + self.attribute_dims.update(dict(height=attr_dim)) + self.points_dim += 1 + + @property + def color(self): + """torch.Tensor: + A vector with color of each point in shape (N, 3), or None.""" + if self.attribute_dims is not None and \ + 'color' in self.attribute_dims.keys(): + return self.tensor[:, self.attribute_dims['color']] + else: + return None + + @color.setter + def color(self, tensor): + """Set the color of each point.""" + try: + tensor = tensor.reshape(self.shape[0], 3) + except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray + raise ValueError(f'got unexpected shape {tensor.shape}') + if tensor.max() >= 256 or tensor.min() < 0: + warnings.warn('point got color value beyond [0, 255]') + if not isinstance(tensor, torch.Tensor): + tensor = self.tensor.new_tensor(tensor) + if self.attribute_dims is not None and \ + 'color' in self.attribute_dims.keys(): + self.tensor[:, self.attribute_dims['color']] = tensor + else: + # add color attribute + if self.attribute_dims is None: + self.attribute_dims = dict() + attr_dim = self.shape[1] + self.tensor = torch.cat([self.tensor, tensor], dim=1) + self.attribute_dims.update( + dict(color=[attr_dim, attr_dim + 1, attr_dim + 2])) + self.points_dim += 3 + + @property + def shape(self): + """torch.Shape: Shape of points.""" + return self.tensor.shape + + def shuffle(self): + """Shuffle the points. 
+ + Returns: + torch.Tensor: The shuffled index. + """ + idx = torch.randperm(self.__len__(), device=self.tensor.device) + self.tensor = self.tensor[idx] + return idx + + def rotate(self, rotation, axis=None): + """Rotate points with the given rotation matrix or angle. + + Args: + rotation (float | np.ndarray | torch.Tensor): Rotation matrix + or angle. + axis (int, optional): Axis to rotate at. Defaults to None. + """ + if not isinstance(rotation, torch.Tensor): + rotation = self.tensor.new_tensor(rotation) + assert rotation.shape == torch.Size([3, 3]) or \ + rotation.numel() == 1, f'invalid rotation shape {rotation.shape}' + + if axis is None: + axis = self.rotation_axis + + if rotation.numel() == 1: + rotated_points, rot_mat_T = rotation_3d_in_axis( + self.tensor[:, :3][None], rotation, axis=axis, return_mat=True) + self.tensor[:, :3] = rotated_points.squeeze(0) + rot_mat_T = rot_mat_T.squeeze(0) + else: + # rotation.numel() == 9 + self.tensor[:, :3] = self.tensor[:, :3] @ rotation + rot_mat_T = rotation + + return rot_mat_T + + @abstractmethod + def flip(self, bev_direction='horizontal'): + """Flip the points along given BEV direction. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). + """ + pass + + def translate(self, trans_vector): + """Translate points with the given translation vector. + + Args: + trans_vector (np.ndarray, torch.Tensor): Translation + vector of size 3 or nx3. + """ + if not isinstance(trans_vector, torch.Tensor): + trans_vector = self.tensor.new_tensor(trans_vector) + trans_vector = trans_vector.squeeze(0) + if trans_vector.dim() == 1: + assert trans_vector.shape[0] == 3 + elif trans_vector.dim() == 2: + assert trans_vector.shape[0] == self.tensor.shape[0] and \ + trans_vector.shape[1] == 3 + else: + raise NotImplementedError( + f'Unsupported translation vector of shape {trans_vector.shape}' + ) + self.tensor[:, :3] += trans_vector + + def in_range_3d(self, point_range): + """Check whether the points are in the given range. + + Args: + point_range (list | torch.Tensor): The range of point + (x_min, y_min, z_min, x_max, y_max, z_max) + + Note: + In the original implementation of SECOND, checking whether + a box in the range checks whether the points are in a convex + polygon, we try to reduce the burden for simpler cases. + + Returns: + torch.Tensor: A binary vector indicating whether each point is + inside the reference range. + """ + in_range_flags = ((self.tensor[:, 0] > point_range[0]) + & (self.tensor[:, 1] > point_range[1]) + & (self.tensor[:, 2] > point_range[2]) + & (self.tensor[:, 0] < point_range[3]) + & (self.tensor[:, 1] < point_range[4]) + & (self.tensor[:, 2] < point_range[5])) + return in_range_flags + + @property + def bev(self): + """torch.Tensor: BEV of the points in shape (N, 2).""" + return self.tensor[:, [0, 1]] + + def in_range_bev(self, point_range): + """Check whether the points are in the given range. + + Args: + point_range (list | torch.Tensor): The range of point + in order of (x_min, y_min, x_max, y_max). + + Returns: + torch.Tensor: Indicating whether each point is inside + the reference range. + """ + in_range_flags = ((self.bev[:, 0] > point_range[0]) + & (self.bev[:, 1] > point_range[1]) + & (self.bev[:, 0] < point_range[2]) + & (self.bev[:, 1] < point_range[3])) + return in_range_flags + + @abstractmethod + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`CoordMode`): The target Box mode. 
+ rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BasePoints`: The converted box of the same type + in the `dst` mode. + """ + pass + + def scale(self, scale_factor): + """Scale the points with horizontal and vertical scaling factors. + + Args: + scale_factors (float): Scale factors to scale the points. + """ + self.tensor[:, :3] *= scale_factor + + def __getitem__(self, item): + """ + Note: + The following usage are allowed: + 1. `new_points = points[3]`: + return a `Points` that contains only one point. + 2. `new_points = points[2:10]`: + return a slice of points. + 3. `new_points = points[vector]`: + where vector is a torch.BoolTensor with `length = len(points)`. + Nonzero elements in the vector will be selected. + 4. `new_points = points[3:11, vector]`: + return a slice of points and attribute dims. + 5. `new_points = points[4:12, 2]`: + return a slice of points with single attribute. + Note that the returned Points might share storage with this Points, + subject to Pytorch's indexing semantics. + + Returns: + :obj:`BasePoints`: A new object of + :class:`BasePoints` after indexing. + """ + original_type = type(self) + if isinstance(item, int): + return original_type( + self.tensor[item].view(1, -1), + points_dim=self.points_dim, + attribute_dims=self.attribute_dims) + elif isinstance(item, tuple) and len(item) == 2: + if isinstance(item[1], slice): + start = 0 if item[1].start is None else item[1].start + stop = self.tensor.shape[1] if \ + item[1].stop is None else item[1].stop + step = 1 if item[1].step is None else item[1].step + item = list(item) + item[1] = list(range(start, stop, step)) + item = tuple(item) + elif isinstance(item[1], int): + item = list(item) + item[1] = [item[1]] + item = tuple(item) + p = self.tensor[item[0], item[1]] + + keep_dims = list( + set(item[1]).intersection(set(range(3, self.tensor.shape[1])))) + if self.attribute_dims is not None: + attribute_dims = self.attribute_dims.copy() + for key in self.attribute_dims.keys(): + cur_attribute_dims = attribute_dims[key] + if isinstance(cur_attribute_dims, int): + cur_attribute_dims = [cur_attribute_dims] + intersect_attr = list( + set(cur_attribute_dims).intersection(set(keep_dims))) + if len(intersect_attr) == 1: + attribute_dims[key] = intersect_attr[0] + elif len(intersect_attr) > 1: + attribute_dims[key] = intersect_attr + else: + attribute_dims.pop(key) + else: + attribute_dims = None + elif isinstance(item, (slice, np.ndarray, torch.Tensor)): + p = self.tensor[item] + attribute_dims = self.attribute_dims + else: + raise NotImplementedError(f'Invalid slice {item}!') + + assert p.dim() == 2, \ + f'Indexing on Points with {item} failed to return a matrix!' + return original_type( + p, points_dim=p.shape[1], attribute_dims=attribute_dims) + + def __len__(self): + """int: Number of points in the current object.""" + return self.tensor.shape[0] + + def __repr__(self): + """str: Return a strings that describes the object.""" + return self.__class__.__name__ + '(\n ' + str(self.tensor) + ')' + + @classmethod + def cat(cls, points_list): + """Concatenate a list of Points into a single Points. + + Args: + points_list (list[:obj:`BasePoints`]): List of points. + + Returns: + :obj:`BasePoints`: The concatenated Points. 
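+
+        Example:
+            >>> # Illustrative: merge two point sets with four features each.
+            >>> a = BasePoints(torch.rand(5, 4), points_dim=4)
+            >>> b = BasePoints(torch.rand(3, 4), points_dim=4)
+            >>> len(BasePoints.cat([a, b]))
+            8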
+ """ + assert isinstance(points_list, (list, tuple)) + if len(points_list) == 0: + return cls(torch.empty(0)) + assert all(isinstance(points, cls) for points in points_list) + + # use torch.cat (v.s. layers.cat) + # so the returned points never share storage with input + cat_points = cls( + torch.cat([p.tensor for p in points_list], dim=0), + points_dim=points_list[0].tensor.shape[1], + attribute_dims=points_list[0].attribute_dims) + return cat_points + + def to(self, device): + """Convert current points to a specific device. + + Args: + device (str | :obj:`torch.device`): The name of the device. + + Returns: + :obj:`BasePoints`: A new boxes object on the + specific device. + """ + original_type = type(self) + return original_type( + self.tensor.to(device), + points_dim=self.points_dim, + attribute_dims=self.attribute_dims) + + def clone(self): + """Clone the Points. + + Returns: + :obj:`BasePoints`: Box object with the same properties + as self. + """ + original_type = type(self) + return original_type( + self.tensor.clone(), + points_dim=self.points_dim, + attribute_dims=self.attribute_dims) + + @property + def device(self): + """str: The device of the points are on.""" + return self.tensor.device + + def __iter__(self): + """Yield a point as a Tensor of shape (4,) at a time. + + Returns: + torch.Tensor: A point of shape (4,). + """ + yield from self.tensor + + def new_point(self, data): + """Create a new point object with data. + + The new point and its tensor has the similar properties + as self and self.tensor, respectively. + + Args: + data (torch.Tensor | numpy.array | list): Data to be copied. + + Returns: + :obj:`BasePoints`: A new point object with ``data``, + the object's other properties are similar to ``self``. + """ + new_tensor = self.tensor.new_tensor(data) \ + if not isinstance(data, torch.Tensor) else data.to(self.device) + original_type = type(self) + return original_type( + new_tensor, + points_dim=self.points_dim, + attribute_dims=self.attribute_dims) diff --git a/mmdet3d/structures/points/cam_points.py b/mmdet3d/structures/points/cam_points.py new file mode 100755 index 0000000..570fe0c --- /dev/null +++ b/mmdet3d/structures/points/cam_points.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_points import BasePoints + + +class CameraPoints(BasePoints): + """Points of instances in CAM coordinates. + + Args: + tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. + points_dim (int, optional): Number of the dimension of a point. + Each row is (x, y, z). Defaults to 3. + attribute_dims (dict, optional): Dictionary to indicate the + meaning of extra dimension. Defaults to None. + + Attributes: + tensor (torch.Tensor): Float matrix of N x points_dim. + points_dim (int): Integer indicating the dimension of a point. + Each row is (x, y, z, ...). + attribute_dims (bool): Dictionary to indicate the meaning of extra + dimension. Defaults to None. + rotation_axis (int): Default rotation axis for points rotation. + """ + + def __init__(self, tensor, points_dim=3, attribute_dims=None): + super(CameraPoints, self).__init__( + tensor, points_dim=points_dim, attribute_dims=attribute_dims) + self.rotation_axis = 1 + + def flip(self, bev_direction='horizontal'): + """Flip the points along given BEV direction. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). 
+ """ + if bev_direction == 'horizontal': + self.tensor[:, 0] = -self.tensor[:, 0] + elif bev_direction == 'vertical': + self.tensor[:, 2] = -self.tensor[:, 2] + + @property + def bev(self): + """torch.Tensor: BEV of the points in shape (N, 2).""" + return self.tensor[:, [0, 2]] + + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`CoordMode`): The target Point mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BasePoints`: The converted point of the same type + in the `dst` mode. + """ + from mmdet3d.structures import Coord3DMode + return Coord3DMode.convert_point( + point=self, src=Coord3DMode.CAM, dst=dst, rt_mat=rt_mat) diff --git a/mmdet3d/structures/points/depth_points.py b/mmdet3d/structures/points/depth_points.py new file mode 100755 index 0000000..54af44b --- /dev/null +++ b/mmdet3d/structures/points/depth_points.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_points import BasePoints + + +class DepthPoints(BasePoints): + """Points of instances in DEPTH coordinates. + + Args: + tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. + points_dim (int, optional): Number of the dimension of a point. + Each row is (x, y, z). Defaults to 3. + attribute_dims (dict, optional): Dictionary to indicate the + meaning of extra dimension. Defaults to None. + + Attributes: + tensor (torch.Tensor): Float matrix of N x points_dim. + points_dim (int): Integer indicating the dimension of a point. + Each row is (x, y, z, ...). + attribute_dims (bool): Dictionary to indicate the meaning of extra + dimension. Defaults to None. + rotation_axis (int): Default rotation axis for points rotation. + """ + + def __init__(self, tensor, points_dim=3, attribute_dims=None): + super(DepthPoints, self).__init__( + tensor, points_dim=points_dim, attribute_dims=attribute_dims) + self.rotation_axis = 2 + + def flip(self, bev_direction='horizontal'): + """Flip the points along given BEV direction. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). + """ + if bev_direction == 'horizontal': + self.tensor[:, 0] = -self.tensor[:, 0] + elif bev_direction == 'vertical': + self.tensor[:, 1] = -self.tensor[:, 1] + + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`CoordMode`): The target Point mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BasePoints`: The converted point of the same type + in the `dst` mode. + """ + from mmdet3d.structures import Coord3DMode + return Coord3DMode.convert_point( + point=self, src=Coord3DMode.DEPTH, dst=dst, rt_mat=rt_mat) diff --git a/mmdet3d/structures/points/lidar_points.py b/mmdet3d/structures/points/lidar_points.py new file mode 100755 index 0000000..c9f3c56 --- /dev/null +++ b/mmdet3d/structures/points/lidar_points.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .base_points import BasePoints + + +class LiDARPoints(BasePoints): + """Points of instances in LIDAR coordinates. + + Args: + tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. + points_dim (int, optional): Number of the dimension of a point. + Each row is (x, y, z). Defaults to 3. + attribute_dims (dict, optional): Dictionary to indicate the + meaning of extra dimension. Defaults to None. + + Attributes: + tensor (torch.Tensor): Float matrix of N x points_dim. + points_dim (int): Integer indicating the dimension of a point. + Each row is (x, y, z, ...). + attribute_dims (bool): Dictionary to indicate the meaning of extra + dimension. Defaults to None. + rotation_axis (int): Default rotation axis for points rotation. + """ + + def __init__(self, tensor, points_dim=3, attribute_dims=None): + super(LiDARPoints, self).__init__( + tensor, points_dim=points_dim, attribute_dims=attribute_dims) + self.rotation_axis = 2 + + def flip(self, bev_direction='horizontal'): + """Flip the points along given BEV direction. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). + """ + if bev_direction == 'horizontal': + self.tensor[:, 1] = -self.tensor[:, 1] + elif bev_direction == 'vertical': + self.tensor[:, 0] = -self.tensor[:, 0] + + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`CoordMode`): The target Point mode. + rt_mat (np.ndarray | torch.Tensor, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BasePoints`: The converted point of the same type + in the `dst` mode. + """ + from mmdet3d.structures import Coord3DMode + return Coord3DMode.convert_point( + point=self, src=Coord3DMode.LIDAR, dst=dst, rt_mat=rt_mat) diff --git a/mmdet3d/testing/__init__.py b/mmdet3d/testing/__init__.py new file mode 100755 index 0000000..0674a94 --- /dev/null +++ b/mmdet3d/testing/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .data_utils import (create_data_info_after_loading, + create_dummy_data_info, + create_mono3d_data_info_after_loading) +from .model_utils import (create_detector_inputs, get_detector_cfg, + get_model_cfg, setup_seed) + +__all__ = [ + 'create_dummy_data_info', 'create_data_info_after_loading', + 'create_mono3d_data_info_after_loading', 'create_detector_inputs', + 'get_detector_cfg', 'get_model_cfg', 'setup_seed' +] diff --git a/mmdet3d/testing/data_utils.py b/mmdet3d/testing/data_utils.py new file mode 100755 index 0000000..952a6cf --- /dev/null +++ b/mmdet3d/testing/data_utils.py @@ -0,0 +1,196 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
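+# The helpers below build dummy KITTI-style data infos and run them through
+# the loading transforms, so unit tests do not require the full datasets.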
+import numpy as np + +# create a dummy `results` to test the pipeline +from mmdet3d.datasets import LoadAnnotations3D, LoadPointsFromFile +from mmdet3d.datasets.transforms.loading import LoadImageFromFileMono3D +from mmdet3d.structures import LiDARInstance3DBoxes + + +def create_dummy_data_info(with_ann=True): + + ann_info = { + 'gt_bboxes': + np.array([[712.4, 143., 810.73, 307.92]]), + 'gt_labels': + np.array([1]), + 'gt_bboxes_3d': + LiDARInstance3DBoxes( + np.array( + [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, + -1.5808]])), + 'gt_labels_3d': + np.array([1]), + 'centers_2d': + np.array([[765.04, 214.56]]), + 'depths': + np.array([8.410]), + 'num_lidar_pts': + np.array([377]), + 'difficulty': + np.array([0]), + 'truncated': + np.array([0]), + 'occluded': + np.array([0]), + 'alpha': + np.array([-0.2]), + 'score': + np.array([0.]), + 'index': + np.array([0]), + 'group_id': + np.array([0]) + } + data_info = { + 'sample_id': + 0, + 'images': { + 'CAM0': { + 'cam2img': [[707.0493, 0.0, 604.0814, 0.0], + [0.0, 707.0493, 180.5066, 0.0], + [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] + }, + 'CAM1': { + 'cam2img': [[707.0493, 0.0, 604.0814, -379.7842], + [0.0, 707.0493, 180.5066, 0.0], + [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] + }, + 'CAM2': { + 'img_path': + 'tests/data/kitti/training/image_2/000000.png', + 'height': + 370, + 'width': + 1224, + 'cam2img': [[707.0493, 0.0, 604.0814, 45.75831], + [0.0, 707.0493, 180.5066, -0.3454157], + [0.0, 0.0, 1.0, 0.004981016], [0.0, 0.0, 0.0, 1.0]] + }, + 'CAM3': { + 'cam2img': [[707.0493, 0.0, 604.0814, -334.1081], + [0.0, 707.0493, 180.5066, 2.33066], + [0.0, 0.0, 1.0, 0.003201153], [0.0, 0.0, 0.0, 1.0]] + }, + 'R0_rect': [[ + 0.9999127984046936, 0.010092630051076412, + -0.008511931635439396, 0.0 + ], + [ + -0.010127290152013302, 0.9999405741691589, + -0.004037670791149139, 0.0 + ], + [ + 0.008470674976706505, 0.0041235219687223434, + 0.9999555945396423, 0.0 + ], [0.0, 0.0, 0.0, 1.0]] + }, + 'lidar_points': { + 'num_pts_feats': + 4, + 'lidar_path': + 'tests/data/kitti/training/velodyne_reduced/000000.bin', + 'lidar2cam': [[ + -0.0015960992313921452, -0.9999162554740906, + -0.012840436771512032, -0.022366708144545555 + ], + [ + -0.00527064548805356, 0.012848696671426296, + -0.9999035596847534, -0.05967890843749046 + ], + [ + 0.9999848008155823, -0.0015282672829926014, + -0.005290712229907513, -0.33254900574684143 + ], [0.0, 0.0, 0.0, 1.0]], + 'Tr_velo_to_cam': [[ + 0.006927963811904192, -0.9999722242355347, -0.0027578289154917, + -0.024577289819717407 + ], + [ + -0.0011629819637164474, + 0.0027498360723257065, -0.9999955296516418, + -0.06127237156033516 + ], + [ + 0.999975323677063, 0.006931141018867493, + -0.0011438990477472544, -0.33210289478302 + ], [0.0, 0.0, 0.0, 1.0]], + 'Tr_imu_to_velo': [[ + 0.999997615814209, 0.0007553070900030434, + -0.002035825978964567, -0.8086758852005005 + ], + [ + -0.0007854027207940817, 0.9998897910118103, + -0.014822980388998985, 0.3195559084415436 + ], + [ + 0.002024406101554632, 0.014824540354311466, + 0.9998881220817566, -0.7997230887413025 + ], [0.0, 0.0, 0.0, 1.0]] + }, + 'instances': [{ + 'bbox': [712.4, 143.0, 810.73, 307.92], + 'bbox_label': + -1, + 'bbox_3d': [ + 1.840000033378601, 1.4700000286102295, 8.40999984741211, + 1.2000000476837158, 1.8899999856948853, 0.47999998927116394, + 0.009999999776482582 + ], + 'bbox_label_3d': + -1, + 'center_2d': [765.04, 214.56], + 'depth': + 8.410, + 'num_lidar_pts': + 377, + 'difficulty': + 0, + 'truncated': + 0, + 'occluded': + 0, + 'alpha': + -0.2, + 'score': + 
0.0, + 'index': + 0, + 'group_id': + 0 + }], + 'plane': + None, + 'pts_semantic_mask_path': + 'tests/data/semantickitti/sequences/00/labels/000000.label', + 'pts_panoptic_mask_path': + 'tests/data/semantickitti/sequences/00/labels/000000.label', + } + if with_ann: + data_info['ann_info'] = ann_info + return data_info + + +def create_data_info_after_loading(): + load_anns_transform = LoadAnnotations3D( + with_bbox_3d=True, with_label_3d=True) + load_points_transform = LoadPointsFromFile( + coord_type='LIDAR', load_dim=4, use_dim=3) + data_info = create_dummy_data_info() + data_info = load_points_transform(data_info) + data_info_after_loading = load_anns_transform(data_info) + return data_info_after_loading + + +def create_mono3d_data_info_after_loading(): + load_anns_transform = LoadAnnotations3D( + with_bbox=True, + with_label=True, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True) + load_img_transform = LoadImageFromFileMono3D() + data_info = create_dummy_data_info() + data_info = load_img_transform(data_info) + data_info_after_loading = load_anns_transform(data_info) + return data_info_after_loading diff --git a/mmdet3d/testing/model_utils.py b/mmdet3d/testing/model_utils.py new file mode 100755 index 0000000..da44939 --- /dev/null +++ b/mmdet3d/testing/model_utils.py @@ -0,0 +1,154 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import random +from os.path import dirname, exists, join + +import numpy as np +import torch +from mmengine.structures import InstanceData + +from mmdet3d.structures import (CameraInstance3DBoxes, DepthInstance3DBoxes, + Det3DDataSample, LiDARInstance3DBoxes, + PointData) + + +def setup_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + np.random.seed(seed) + random.seed(seed) + torch.backends.cudnn.deterministic = True + + +def _get_config_directory(): + """Find the predefined detector config directory.""" + try: + # Assume we are running in the source mmdetection3d repo + repo_dpath = dirname(dirname(dirname(__file__))) + except NameError: + # For IPython development when this __file__ is not defined + import mmdet3d + repo_dpath = dirname(dirname(mmdet3d.__file__)) + config_dpath = join(repo_dpath, 'configs') + if not exists(config_dpath): + raise Exception('Cannot find config path') + return config_dpath + + +def _get_config_module(fname): + """Load a configuration as a python module.""" + from mmengine import Config + config_dpath = _get_config_directory() + config_fpath = join(config_dpath, fname) + config_mod = Config.fromfile(config_fpath) + return config_mod + + +def get_model_cfg(fname): + """Grab configs necessary to create a model. + + These are deep copied to allow for safe modification of parameters without + influencing other tests. + """ + config = _get_config_module(fname) + model = copy.deepcopy(config.model) + + return model + + +def get_detector_cfg(fname): + """Grab configs necessary to create a detector. + + These are deep copied to allow for safe modification of parameters without + influencing other tests. 
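+
+    Example:
+        >>> # Illustrative; the path is a placeholder for a config file
+        >>> # name relative to the ``configs`` directory.
+        >>> model_cfg = get_detector_cfg('some_detector/some_config.py')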
+ """ + import mmengine + config = _get_config_module(fname) + model = copy.deepcopy(config.model) + train_cfg = mmengine.Config(copy.deepcopy(config.model.train_cfg)) + test_cfg = mmengine.Config(copy.deepcopy(config.model.test_cfg)) + + model.update(train_cfg=train_cfg) + model.update(test_cfg=test_cfg) + return model + + +def create_detector_inputs(seed=0, + with_points=True, + with_img=False, + img_size=10, + num_gt_instance=20, + num_points=10, + points_feat_dim=4, + num_classes=3, + gt_bboxes_dim=7, + with_pts_semantic_mask=False, + with_pts_instance_mask=False, + with_eval_ann_info=False, + bboxes_3d_type='lidar'): + setup_seed(seed) + assert bboxes_3d_type in ('lidar', 'depth', 'cam') + bbox_3d_class = { + 'lidar': LiDARInstance3DBoxes, + 'depth': DepthInstance3DBoxes, + 'cam': CameraInstance3DBoxes + } + meta_info = dict() + meta_info['depth2img'] = np.array( + [[5.23289349e+02, 3.68831943e+02, 6.10469439e+01], + [1.09560138e+02, 1.97404735e+02, -5.47377738e+02], + [1.25930002e-02, 9.92229998e-01, -1.23769999e-01]]) + meta_info['lidar2img'] = np.array( + [[5.23289349e+02, 3.68831943e+02, 6.10469439e+01], + [1.09560138e+02, 1.97404735e+02, -5.47377738e+02], + [1.25930002e-02, 9.92229998e-01, -1.23769999e-01]]) + + inputs_dict = dict() + + if with_points: + points = torch.rand([num_points, points_feat_dim]) + inputs_dict['points'] = [points] + + if with_img: + if isinstance(img_size, tuple): + img = torch.rand(3, img_size[0], img_size[1]) + meta_info['img_shape'] = img_size + meta_info['ori_shape'] = img_size + else: + img = torch.rand(3, img_size, img_size) + meta_info['img_shape'] = (img_size, img_size) + meta_info['ori_shape'] = (img_size, img_size) + meta_info['scale_factor'] = np.array([1., 1.]) + inputs_dict['img'] = [img] + + gt_instance_3d = InstanceData() + + gt_instance_3d.bboxes_3d = bbox_3d_class[bboxes_3d_type]( + torch.rand([num_gt_instance, gt_bboxes_dim]), box_dim=gt_bboxes_dim) + gt_instance_3d.labels_3d = torch.randint(0, num_classes, [num_gt_instance]) + data_sample = Det3DDataSample( + metainfo=dict(box_type_3d=bbox_3d_class[bboxes_3d_type])) + data_sample.set_metainfo(meta_info) + data_sample.gt_instances_3d = gt_instance_3d + + gt_instance = InstanceData() + gt_instance.labels = torch.randint(0, num_classes, [num_gt_instance]) + gt_instance.bboxes = torch.rand(num_gt_instance, 4) + gt_instance.bboxes[:, + 2:] = gt_instance.bboxes[:, :2] + gt_instance.bboxes[:, + 2:] + + data_sample.gt_instances = gt_instance + data_sample.gt_pts_seg = PointData() + if with_pts_instance_mask: + pts_instance_mask = torch.randint(0, num_gt_instance, [num_points]) + data_sample.gt_pts_seg['pts_instance_mask'] = pts_instance_mask + if with_pts_semantic_mask: + pts_semantic_mask = torch.randint(0, num_classes, [num_points]) + data_sample.gt_pts_seg['pts_semantic_mask'] = pts_semantic_mask + if with_eval_ann_info: + data_sample.eval_ann_info = dict() + else: + data_sample.eval_ann_info = None + + return dict(inputs=inputs_dict, data_samples=[data_sample]) diff --git a/mmdet3d/utils/__init__.py b/mmdet3d/utils/__init__.py new file mode 100755 index 0000000..b5ed456 --- /dev/null +++ b/mmdet3d/utils/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .array_converter import ArrayConverter, array_converter +from .collect_env import collect_env +from .compat_cfg import compat_cfg +from .misc import replace_ceph_backend +from .setup_env import register_all_modules, setup_multi_processes +from .typing_utils import (ConfigType, InstanceList, MultiConfig, + OptConfigType, OptInstanceList, OptMultiConfig, + OptSampleList, OptSamplingResultList) + +__all__ = [ + 'collect_env', 'setup_multi_processes', 'compat_cfg', + 'register_all_modules', 'array_converter', 'ArrayConverter', 'ConfigType', + 'OptConfigType', 'MultiConfig', 'OptMultiConfig', 'InstanceList', + 'OptInstanceList', 'OptSamplingResultList', 'replace_ceph_backend', + 'OptSampleList' +] diff --git a/mmdet3d/utils/array_converter.py b/mmdet3d/utils/array_converter.py new file mode 100755 index 0000000..99d838f --- /dev/null +++ b/mmdet3d/utils/array_converter.py @@ -0,0 +1,351 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools +from inspect import getfullargspec +from typing import Callable, Optional, Tuple, Union + +import numpy as np +import torch + +TemplateArrayType = Union[tuple, list, int, float, np.ndarray, torch.Tensor] +OptArrayType = Optional[Union[np.ndarray, torch.Tensor]] + + +def array_converter(to_torch: bool = True, + apply_to: Tuple[str, ...] = tuple(), + template_arg_name_: Optional[str] = None, + recover: bool = True) -> Callable: + """Wrapper function for data-type agnostic processing. + + First converts input arrays to PyTorch tensors or NumPy ndarrays + for middle calculation, then convert output to original data-type if + `recover=True`. + + Args: + to_torch (bool): Whether convert to PyTorch tensors + for middle calculation. Defaults to True. + apply_to (Tuple[str, ...]): The arguments to which we apply + data-type conversion. Defaults to an empty tuple. + template_arg_name_ (str, optional): Argument serving as the template ( + return arrays should have the same dtype and device + as the template). Defaults to None. If None, we will use the + first argument in `apply_to` as the template argument. + recover (bool): Whether or not recover the wrapped function + outputs to the `template_arg_name_` type. Defaults to True. + + Raises: + ValueError: When template_arg_name_ is not among all args, or + when apply_to contains an arg which is not among all args, + a ValueError will be raised. When the template argument or + an argument to convert is a list or tuple, and cannot be + converted to a NumPy array, a ValueError will be raised. + TypeError: When the type of the template argument or + an argument to convert does not belong to the above range, + or the contents of such an list-or-tuple-type argument + do not share the same data type, a TypeError is raised. + + Returns: + (function): wrapped function. 
+ + Example: + >>> import torch + >>> import numpy as np + >>> + >>> # Use torch addition for a + b, + >>> # and convert return values to the type of a + >>> @array_converter(apply_to=('a', 'b')) + >>> def simple_add(a, b): + >>> return a + b + >>> + >>> a = np.array([1.1]) + >>> b = np.array([2.2]) + >>> simple_add(a, b) + >>> + >>> # Use numpy addition for a + b, + >>> # and convert return values to the type of b + >>> @array_converter(to_torch=False, apply_to=('a', 'b'), + >>> template_arg_name_='b') + >>> def simple_add(a, b): + >>> return a + b + >>> + >>> simple_add() + >>> + >>> # Use torch funcs for floor(a) if flag=True else ceil(a), + >>> # and return the torch tensor + >>> @array_converter(apply_to=('a',), recover=False) + >>> def floor_or_ceil(a, flag=True): + >>> return torch.floor(a) if flag else torch.ceil(a) + >>> + >>> floor_or_ceil(a, flag=False) + """ + + def array_converter_wrapper(func): + """Outer wrapper for the function.""" + + @functools.wraps(func) + def new_func(*args, **kwargs): + """Inner wrapper for the arguments.""" + if len(apply_to) == 0: + return func(*args, **kwargs) + + func_name = func.__name__ + + arg_spec = getfullargspec(func) + + arg_names = arg_spec.args + arg_num = len(arg_names) + default_arg_values = arg_spec.defaults + if default_arg_values is None: + default_arg_values = [] + no_default_arg_num = len(arg_names) - len(default_arg_values) + + kwonly_arg_names = arg_spec.kwonlyargs + kwonly_default_arg_values = arg_spec.kwonlydefaults + if kwonly_default_arg_values is None: + kwonly_default_arg_values = {} + + all_arg_names = arg_names + kwonly_arg_names + + # in case there are args in the form of *args + if len(args) > arg_num: + named_args = args[:arg_num] + nameless_args = args[arg_num:] + else: + named_args = args + nameless_args = [] + + # template argument data type is used for all array-like arguments + if template_arg_name_ is None: + template_arg_name = apply_to[0] + else: + template_arg_name = template_arg_name_ + + if template_arg_name not in all_arg_names: + raise ValueError(f'{template_arg_name} is not among the ' + f'argument list of function {func_name}') + + # inspect apply_to + for arg_to_apply in apply_to: + if arg_to_apply not in all_arg_names: + raise ValueError(f'{arg_to_apply} is not ' + f'an argument of {func_name}') + + new_args = [] + new_kwargs = {} + + converter = ArrayConverter() + target_type = torch.Tensor if to_torch else np.ndarray + + # non-keyword arguments + for i, arg_value in enumerate(named_args): + if arg_names[i] in apply_to: + new_args.append( + converter.convert( + input_array=arg_value, target_type=target_type)) + else: + new_args.append(arg_value) + + if arg_names[i] == template_arg_name: + template_arg_value = arg_value + + kwonly_default_arg_values.update(kwargs) + kwargs = kwonly_default_arg_values + + # keyword arguments and non-keyword arguments using default value + for i in range(len(named_args), len(all_arg_names)): + arg_name = all_arg_names[i] + if arg_name in kwargs: + if arg_name in apply_to: + new_kwargs[arg_name] = converter.convert( + input_array=kwargs[arg_name], + target_type=target_type) + else: + new_kwargs[arg_name] = kwargs[arg_name] + else: + default_value = default_arg_values[i - no_default_arg_num] + if arg_name in apply_to: + new_kwargs[arg_name] = converter.convert( + input_array=default_value, target_type=target_type) + else: + new_kwargs[arg_name] = default_value + if arg_name == template_arg_name: + template_arg_value = kwargs[arg_name] + + # add nameless args provided by 
*args (if exists) + new_args += nameless_args + + return_values = func(*new_args, **new_kwargs) + converter.set_template(template_arg_value) + + def recursive_recover(input_data): + if isinstance(input_data, (tuple, list)): + new_data = [] + for item in input_data: + new_data.append(recursive_recover(item)) + return tuple(new_data) if isinstance(input_data, + tuple) else new_data + elif isinstance(input_data, dict): + new_data = {} + for k, v in input_data.items(): + new_data[k] = recursive_recover(v) + return new_data + elif isinstance(input_data, (torch.Tensor, np.ndarray)): + return converter.recover(input_data) + else: + return input_data + + if recover: + return recursive_recover(return_values) + else: + return return_values + + return new_func + + return array_converter_wrapper + + +class ArrayConverter: + """Utility class for data-type agnostic processing. + + Args: + template_array (tuple | list | int | float | np.ndarray | + torch.Tensor, optional): template array. Defaults to None. + """ + SUPPORTED_NON_ARRAY_TYPES = (int, float, np.int8, np.int16, np.int32, + np.int64, np.uint8, np.uint16, np.uint32, + np.uint64, np.float16, np.float32, np.float64) + + def __init__(self, + template_array: Optional[TemplateArrayType] = None) -> None: + if template_array is not None: + self.set_template(template_array) + + def set_template(self, array: TemplateArrayType) -> None: + """Set template array. + + Args: + array (tuple | list | int | float | np.ndarray | torch.Tensor): + Template array. + + Raises: + ValueError: If input is list or tuple and cannot be converted to + to a NumPy array, a ValueError is raised. + TypeError: If input type does not belong to the above range, + or the contents of a list or tuple do not share the + same data type, a TypeError is raised. + """ + self.array_type = type(array) + self.is_num = False + self.device = 'cpu' + + if isinstance(array, np.ndarray): + self.dtype = array.dtype + elif isinstance(array, torch.Tensor): + self.dtype = array.dtype + self.device = array.device + elif isinstance(array, (list, tuple)): + try: + array = np.array(array) + if array.dtype not in self.SUPPORTED_NON_ARRAY_TYPES: + raise TypeError + self.dtype = array.dtype + except (ValueError, TypeError): + print(f'The following list cannot be converted to' + f' a numpy array of supported dtype:\n{array}') + raise + elif isinstance(array, self.SUPPORTED_NON_ARRAY_TYPES): + self.array_type = np.ndarray + self.is_num = True + self.dtype = np.dtype(type(array)) + else: + raise TypeError(f'Template type {self.array_type}' + f' is not supported.') + + def convert( + self, + input_array: TemplateArrayType, + target_type: Optional[type] = None, + target_array: OptArrayType = None + ) -> Union[np.ndarray, torch.Tensor]: + """Convert input array to target data type. + + Args: + input_array (tuple | list | int | float | np.ndarray | + torch.Tensor): Input array. + target_type (:class:`np.ndarray` or :class:`torch.Tensor`, + optional): Type to which input array is converted. + Defaults to None. + target_array (np.ndarray | torch.Tensor, optional): + Template array to which input array is converted. + Defaults to None. + + Raises: + ValueError: If input is list or tuple and cannot be converted to + to a NumPy array, a ValueError is raised. + TypeError: If input type does not belong to the above range, + or the contents of a list or tuple do not share the + same data type, a TypeError is raised. + + Returns: + np.ndarray or torch.Tensor: The converted array. 
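+
+        Example:
+            >>> # Illustrative: convert a Python list to a torch tensor.
+            >>> converter = ArrayConverter()
+            >>> out = converter.convert([1.0, 2.0], target_type=torch.Tensor)
+            >>> isinstance(out, torch.Tensor)
+            True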
+ """ + if isinstance(input_array, (list, tuple)): + try: + input_array = np.array(input_array) + if input_array.dtype not in self.SUPPORTED_NON_ARRAY_TYPES: + raise TypeError + except (ValueError, TypeError): + print(f'The input cannot be converted to' + f' a single-type numpy array:\n{input_array}') + raise + elif isinstance(input_array, self.SUPPORTED_NON_ARRAY_TYPES): + input_array = np.array(input_array) + array_type = type(input_array) + assert target_type is not None or target_array is not None, \ + 'must specify a target' + if target_type is not None: + assert target_type in (np.ndarray, torch.Tensor), \ + 'invalid target type' + if target_type == array_type: + return input_array + elif target_type == np.ndarray: + # default dtype is float32 + converted_array = input_array.cpu().numpy().astype(np.float32) + else: + # default dtype is float32, device is 'cpu' + converted_array = torch.tensor( + input_array, dtype=torch.float32) + else: + assert isinstance(target_array, (np.ndarray, torch.Tensor)), \ + 'invalid target array type' + if isinstance(target_array, array_type): + return input_array + elif isinstance(target_array, np.ndarray): + converted_array = input_array.cpu().numpy().astype( + target_array.dtype) + else: + converted_array = target_array.new_tensor(input_array) + return converted_array + + def recover( + self, input_array: Union[np.ndarray, torch.Tensor] + ) -> Union[np.ndarray, torch.Tensor]: + """Recover input type to original array type. + + Args: + input_array (np.ndarray | torch.Tensor): Input array. + + Returns: + np.ndarray or torch.Tensor: Converted array. + """ + assert isinstance(input_array, (np.ndarray, torch.Tensor)), \ + 'invalid input array type' + if isinstance(input_array, self.array_type): + return input_array + elif isinstance(input_array, torch.Tensor): + converted_array = input_array.cpu().numpy().astype(self.dtype) + else: + converted_array = torch.tensor( + input_array, dtype=self.dtype, device=self.device) + if self.is_num: + converted_array = converted_array.item() + return converted_array diff --git a/mmdet3d/utils/collect_env.py b/mmdet3d/utils/collect_env.py new file mode 100755 index 0000000..f4de74f --- /dev/null +++ b/mmdet3d/utils/collect_env.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmdet +from mmengine.utils import get_git_hash +from mmengine.utils.dl_utils import collect_env as collect_base_env + +import mmdet3d + + +def collect_env(): + """Collect the information of the running environments.""" + env_info = collect_base_env() + env_info['MMDetection'] = mmdet.__version__ + env_info['MMDetection3D'] = mmdet3d.__version__ + '+' + get_git_hash()[:7] + from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE + env_info['spconv2.0'] = IS_SPCONV2_AVAILABLE + + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/mmdet3d/utils/compat_cfg.py b/mmdet3d/utils/compat_cfg.py new file mode 100755 index 0000000..4d1a5f6 --- /dev/null +++ b/mmdet3d/utils/compat_cfg.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +from mmengine import ConfigDict + + +def compat_cfg(cfg): + """This function would modify some filed to keep the compatibility of + config. + + For example, it will move some args which will be deprecated to the correct + fields. 
+ """ + cfg = copy.deepcopy(cfg) + cfg = compat_imgs_per_gpu(cfg) + cfg = compat_loader_args(cfg) + cfg = compat_runner_args(cfg) + return cfg + + +def compat_runner_args(cfg): + if 'runner' not in cfg: + cfg.runner = ConfigDict({ + 'type': 'EpochBasedRunner', + 'max_epochs': cfg.total_epochs + }) + warnings.warn( + 'config is now expected to have a `runner` section, ' + 'please set `runner` in your config.', UserWarning) + else: + if 'total_epochs' in cfg: + assert cfg.total_epochs == cfg.runner.max_epochs + return cfg + + +def compat_imgs_per_gpu(cfg): + cfg = copy.deepcopy(cfg) + if 'imgs_per_gpu' in cfg.data: + warnings.warn('"imgs_per_gpu" is deprecated in MMDet V2.0. ' + 'Please use "samples_per_gpu" instead') + if 'samples_per_gpu' in cfg.data: + warnings.warn( + f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and ' + f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"' + f'={cfg.data.imgs_per_gpu} is used in this experiments') + else: + warnings.warn('Automatically set "samples_per_gpu"="imgs_per_gpu"=' + f'{cfg.data.imgs_per_gpu} in this experiments') + cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu + return cfg + + +def compat_loader_args(cfg): + """Deprecated sample_per_gpu in cfg.data.""" + + cfg = copy.deepcopy(cfg) + if 'train_dataloader' not in cfg.data: + cfg.data['train_dataloader'] = ConfigDict() + if 'val_dataloader' not in cfg.data: + cfg.data['val_dataloader'] = ConfigDict() + if 'test_dataloader' not in cfg.data: + cfg.data['test_dataloader'] = ConfigDict() + + # special process for train_dataloader + if 'samples_per_gpu' in cfg.data: + + samples_per_gpu = cfg.data.pop('samples_per_gpu') + assert 'samples_per_gpu' not in \ + cfg.data.train_dataloader, ('`samples_per_gpu` are set ' + 'in `data` field and ` ' + 'data.train_dataloader` ' + 'at the same time. ' + 'Please only set it in ' + '`data.train_dataloader`. ') + cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu + + if 'persistent_workers' in cfg.data: + + persistent_workers = cfg.data.pop('persistent_workers') + assert 'persistent_workers' not in \ + cfg.data.train_dataloader, ('`persistent_workers` are set ' + 'in `data` field and ` ' + 'data.train_dataloader` ' + 'at the same time. ' + 'Please only set it in ' + '`data.train_dataloader`. ') + cfg.data.train_dataloader['persistent_workers'] = persistent_workers + + if 'workers_per_gpu' in cfg.data: + + workers_per_gpu = cfg.data.pop('workers_per_gpu') + cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu + cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu + cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu + + # special process for val_dataloader + if 'samples_per_gpu' in cfg.data.val: + # keep default value of `sample_per_gpu` is 1 + assert 'samples_per_gpu' not in \ + cfg.data.val_dataloader, ('`samples_per_gpu` are set ' + 'in `data.val` field and ` ' + 'data.val_dataloader` at ' + 'the same time. ' + 'Please only set it in ' + '`data.val_dataloader`. ') + cfg.data.val_dataloader['samples_per_gpu'] = \ + cfg.data.val.pop('samples_per_gpu') + # special process for val_dataloader + + # in case the test dataset is concatenated + if isinstance(cfg.data.test, dict): + if 'samples_per_gpu' in cfg.data.test: + assert 'samples_per_gpu' not in \ + cfg.data.test_dataloader, ('`samples_per_gpu` are set ' + 'in `data.test` field and ` ' + 'data.test_dataloader` ' + 'at the same time. ' + 'Please only set it in ' + '`data.test_dataloader`. 
+
+            cfg.data.test_dataloader['samples_per_gpu'] = \
+                cfg.data.test.pop('samples_per_gpu')
+
+    elif isinstance(cfg.data.test, list):
+        for ds_cfg in cfg.data.test:
+            if 'samples_per_gpu' in ds_cfg:
+                assert 'samples_per_gpu' not in \
+                    cfg.data.test_dataloader, ('`samples_per_gpu` is set '
+                                               'in the `data.test` field and '
+                                               '`data.test_dataloader` at'
+                                               ' the same time. '
+                                               'Please only set it in '
+                                               '`data.test_dataloader`. ')
+        samples_per_gpu = max(
+            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
+        cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu
+
+    return cfg
diff --git a/mmdet3d/utils/misc.py b/mmdet3d/utils/misc.py
new file mode 100755
index 0000000..e5f4b47
--- /dev/null
+++ b/mmdet3d/utils/misc.py
@@ -0,0 +1,106 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+
+def replace_ceph_backend(cfg):
+    cfg_pretty_text = cfg.pretty_text
+
+    replace_strs = \
+        r'''backend_args = dict(
+        backend='petrel',
+        path_mapping=dict({
+            './data/DATA/': 's3://openmmlab/datasets/detection3d/CEPH/',
+            'data/DATA/': 's3://openmmlab/datasets/detection3d/CEPH/'
+        }))
+    '''
+
+    if 'nuscenes' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 'nuscenes')
+        replace_strs = replace_strs.replace('CEPH', 'nuscenes')
+    elif 'lyft' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 'lyft')
+        replace_strs = replace_strs.replace('CEPH', 'lyft')
+    elif 'waymo' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 'waymo')
+        replace_strs = replace_strs.replace('CEPH', 'waymo')
+    elif 'kitti' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 'kitti')
+        replace_strs = replace_strs.replace('CEPH', 'kitti')
+    elif 'scannet' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 'scannet')
+        replace_strs = replace_strs.replace('CEPH', 'scannet_processed')
+    elif 's3dis' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 's3dis')
+        replace_strs = replace_strs.replace('CEPH', 's3dis_processed')
+    elif 'sunrgbd' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 'sunrgbd')
+        replace_strs = replace_strs.replace('CEPH', 'sunrgbd_processed')
+    elif 'semantickitti' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 'semantickitti')
+        replace_strs = replace_strs.replace('CEPH', 'semantickitti')
+    elif 'nuimages' in cfg_pretty_text:
+        replace_strs = replace_strs.replace('DATA', 'nuimages')
+        replace_strs = replace_strs.replace('CEPH', 'nuimages')
+    else:
+        raise NotImplementedError('Does not support global replacement')
+
+    replace_strs = replace_strs.replace(' ', '').replace('\n', '')
+
+    # use data info file from ceph
+    # cfg_pretty_text = cfg_pretty_text.replace(
+    #     'ann_file', replace_strs + ', ann_file')
+
+    cfg_pretty_text = cfg_pretty_text.replace('backend_args=None', '')
+
+    # replace LoadImageFromFile
+    cfg_pretty_text = cfg_pretty_text.replace(
+        'LoadImageFromFile\'', 'LoadImageFromFile\',' + replace_strs)
+
+    # replace LoadImageFromFileMono3D
+    cfg_pretty_text = cfg_pretty_text.replace(
+        'LoadImageFromFileMono3D\'',
+        'LoadImageFromFileMono3D\',' + replace_strs)
+
+    # replace LoadMultiViewImageFromFiles
+    cfg_pretty_text = cfg_pretty_text.replace(
+        'LoadMultiViewImageFromFiles\'',
+        'LoadMultiViewImageFromFiles\',' + replace_strs)
+
+    # replace LoadPointsFromFile
+    cfg_pretty_text = cfg_pretty_text.replace(
+        'LoadPointsFromFile\'', 'LoadPointsFromFile\',' + replace_strs)
+
+    # replace LoadPointsFromMultiSweeps
+    cfg_pretty_text = cfg_pretty_text.replace(
'LoadPointsFromMultiSweeps\'', + 'LoadPointsFromMultiSweeps\',' + replace_strs) + + # replace LoadAnnotations + cfg_pretty_text = cfg_pretty_text.replace( + 'LoadAnnotations\'', 'LoadAnnotations\',' + replace_strs) + + # replace LoadAnnotations3D + cfg_pretty_text = cfg_pretty_text.replace( + 'LoadAnnotations3D\'', 'LoadAnnotations3D\',' + replace_strs) + + # replace KittiMetric + cfg_pretty_text = cfg_pretty_text.replace('KittiMetric\'', + 'KittiMetric\',' + replace_strs) + + # replace LyftMetric + cfg_pretty_text = cfg_pretty_text.replace('LyftMetric\'', + 'LyftMetric\',' + replace_strs) + + # replace NuScenesMetric + cfg_pretty_text = cfg_pretty_text.replace( + 'NuScenesMetric\'', 'NuScenesMetric\',' + replace_strs) + + # replace WaymoMetric + cfg_pretty_text = cfg_pretty_text.replace('WaymoMetric\'', + 'WaymoMetric\',' + replace_strs) + + # replace dbsampler + cfg_pretty_text = cfg_pretty_text.replace('info_path', + replace_strs + ', info_path') + + cfg = cfg.fromstring(cfg_pretty_text, file_format='.py') + return cfg diff --git a/mmdet3d/utils/setup_env.py b/mmdet3d/utils/setup_env.py new file mode 100755 index 0000000..19c63d9 --- /dev/null +++ b/mmdet3d/utils/setup_env.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import datetime +import os +import platform +import warnings + +import cv2 +from mmengine import DefaultScope +from torch import multiprocessing as mp + + +def setup_multi_processes(cfg): + """Setup multi-processing environment variables.""" + # set multi-process start method as `fork` to speed up the training + if platform.system() != 'Windows': + mp_start_method = cfg.get('mp_start_method', 'fork') + current_method = mp.get_start_method(allow_none=True) + if current_method is not None and current_method != mp_start_method: + warnings.warn( + f'Multi-processing start method `{mp_start_method}` is ' + f'different from the previous setting `{current_method}`.' + f'It will be force set to `{mp_start_method}`. 
You can change ' + f'this behavior by changing `mp_start_method` in your config.') + mp.set_start_method(mp_start_method, force=True) + + # disable opencv multithreading to avoid system being overloaded + opencv_num_threads = cfg.get('opencv_num_threads', 0) + cv2.setNumThreads(opencv_num_threads) + + # setup OMP threads + # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa + workers_per_gpu = cfg.data.get('workers_per_gpu', 1) + if 'train_dataloader' in cfg.data: + workers_per_gpu = \ + max(cfg.data.train_dataloader.get('workers_per_gpu', 1), + workers_per_gpu) + + if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1: + omp_num_threads = 1 + warnings.warn( + f'Setting OMP_NUM_THREADS environment variable for each process ' + f'to be {omp_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) + + # setup MKL threads + if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1: + mkl_num_threads = 1 + warnings.warn( + f'Setting MKL_NUM_THREADS environment variable for each process ' + f'to be {mkl_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) + + +def register_all_modules(init_default_scope: bool = True) -> None: + """Register all modules in mmdet3d into the registries. + + Args: + init_default_scope (bool): Whether initialize the mmdet default scope. + When `init_default_scope=True`, the global default scope will be + set to `mmdet3d`, and all registries will build modules from mmdet3d's + registry node. To understand more about the registry, please refer + to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md + Defaults to True. + """ # noqa + import mmdet3d.datasets # noqa: F401,F403 + import mmdet3d.engine # noqa: F401,F403 + import mmdet3d.evaluation.metrics # noqa: F401,F403 + import mmdet3d.models # noqa: F401,F403 + import mmdet3d.structures # noqa: F401,F403 + import mmdet3d.visualization # noqa: F401,F403 + if init_default_scope: + never_created = DefaultScope.get_current_instance() is None \ + or not DefaultScope.check_instance_created('mmdet3d') + if never_created: + DefaultScope.get_instance('mmdet3d', scope_name='mmdet3d') + return + current_scope = DefaultScope.get_current_instance() + if current_scope.scope_name != 'mmdet3d': + warnings.warn('The current default scope ' + f'"{current_scope.scope_name}" is not "mmdet3d", ' + '`register_all_modules` will force the current' + 'default scope to be "mmdet3d". If this is not ' + 'expected, please set `init_default_scope=False`.') + # avoid name conflict + new_instance_name = f'mmdet3d-{datetime.datetime.now()}' + DefaultScope.get_instance(new_instance_name, scope_name='mmdet3d') diff --git a/mmdet3d/utils/typing_utils.py b/mmdet3d/utils/typing_utils.py new file mode 100755 index 0000000..4e6436c --- /dev/null +++ b/mmdet3d/utils/typing_utils.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
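For reference, a minimal, hedged usage sketch of the `register_all_modules` helper defined in `setup_env.py` above; the config path below is only a placeholder and is not a file added by this patch:

```python
# Hedged usage sketch of `register_all_modules` from `setup_env.py` above.
# The config path is a placeholder; replace it with a real config file.
from mmengine import DefaultScope
from mmengine.config import Config

from mmdet3d.registry import MODELS
from mmdet3d.utils.setup_env import register_all_modules

# Register datasets, models, metrics and visualizers, then switch the
# default scope so configs resolve against mmdet3d's registries.
register_all_modules(init_default_scope=True)
assert DefaultScope.get_current_instance().scope_name == 'mmdet3d'

cfg = Config.fromfile('path/to/a_mmdet3d_config.py')  # placeholder path
model = MODELS.build(cfg.model)
```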
+"""Collecting some commonly used type hint in MMDetection3D.""" +from typing import List, Optional, Union + +from mmdet.models.task_modules.samplers import SamplingResult +from mmengine.config import ConfigDict +from mmengine.structures import InstanceData + +from mmdet3d.structures.det3d_data_sample import Det3DDataSample + +# Type hint of config data +ConfigType = Union[ConfigDict, dict] +OptConfigType = Optional[ConfigType] + +# Type hint of one or more config data +MultiConfig = Union[ConfigType, List[ConfigType]] +OptMultiConfig = Optional[MultiConfig] + +InstanceList = List[InstanceData] +OptInstanceList = Optional[InstanceList] + +SamplingResultList = List[SamplingResult] + +OptSamplingResultList = Optional[SamplingResultList] +SampleList = List[Det3DDataSample] +OptSampleList = Optional[SampleList] diff --git a/mmdet3d/version.py b/mmdet3d/version.py new file mode 100755 index 0000000..4e895f2 --- /dev/null +++ b/mmdet3d/version.py @@ -0,0 +1,28 @@ +# Copyright (c) Open-MMLab. All rights reserved. + +__version__ = '1.1.0' +short_version = __version__ + + +def parse_version_info(version_str): + """Parse a version string into a tuple. + + Args: + version_str (str): The version string. + + Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0), and "2.0.0rc4" is parsed into (2, 0, 0, 'rc4'). + """ + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) diff --git a/mmdet3d/visualization/__init__.py b/mmdet3d/visualization/__init__.py new file mode 100755 index 0000000..1423601 --- /dev/null +++ b/mmdet3d/visualization/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .local_visualizer import Det3DLocalVisualizer +from .vis_utils import (proj_camera_bbox3d_to_img, proj_depth_bbox3d_to_img, + proj_lidar_bbox3d_to_img, to_depth_mode, write_obj, + write_oriented_bbox) + +__all__ = [ + 'Det3DLocalVisualizer', 'write_obj', 'write_oriented_bbox', + 'to_depth_mode', 'proj_lidar_bbox3d_to_img', 'proj_depth_bbox3d_to_img', + 'proj_camera_bbox3d_to_img' +] diff --git a/mmdet3d/visualization/local_visualizer.py b/mmdet3d/visualization/local_visualizer.py new file mode 100755 index 0000000..aaa3ce9 --- /dev/null +++ b/mmdet3d/visualization/local_visualizer.py @@ -0,0 +1,833 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +from typing import List, Optional, Tuple, Union + +import matplotlib.pyplot as plt +import mmcv +import numpy as np +from matplotlib.collections import PatchCollection +from matplotlib.patches import PathPatch +from matplotlib.path import Path +from mmdet.visualization import DetLocalVisualizer +from mmengine.dist import master_only +from mmengine.structures import InstanceData +from mmengine.visualization import Visualizer as MMENGINE_Visualizer +from mmengine.visualization.utils import check_type, tensor2ndarray +from torch import Tensor + +from mmdet3d.registry import VISUALIZERS +from mmdet3d.structures import (BaseInstance3DBoxes, Box3DMode, + CameraInstance3DBoxes, Coord3DMode, + DepthInstance3DBoxes, Det3DDataSample, + LiDARInstance3DBoxes, PointData, + points_cam2img) +from .vis_utils import (proj_camera_bbox3d_to_img, proj_depth_bbox3d_to_img, + proj_lidar_bbox3d_to_img, to_depth_mode) + +try: + import open3d as o3d + from open3d import geometry + from open3d.visualization import Visualizer +except ImportError: + o3d = geometry = Visualizer = None + + +@VISUALIZERS.register_module() +class Det3DLocalVisualizer(DetLocalVisualizer): + """MMDetection3D Local Visualizer. + + - 3D detection and segmentation drawing methods + + - draw_bboxes_3d: draw 3D bounding boxes on point clouds + - draw_proj_bboxes_3d: draw projected 3D bounding boxes on image + - draw_seg_mask: draw segmentation mask via per-point colorization + + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + points (np.ndarray, optional): Points to visualize with shape (N, 3+C). + Defaults to None. + image (np.ndarray, optional): The origin image to draw. The format + should be RGB. Defaults to None. + pcd_mode (int): The point cloud mode (coordinates): 0 represents LiDAR, + 1 represents CAMERA, 2 represents Depth. Defaults to 0. + vis_backends (List[dict], optional): Visual backend config list. + Defaults to None. + save_dir (str, optional): Save file dir for all storage backends. + If it is None, the backend storage will not save any data. + Defaults to None. + bbox_color (str or Tuple[int], optional): Color of bbox lines. + The tuple of color should be in BGR order. Defaults to None. + text_color (str or Tuple[int]): Color of texts. The tuple of color + should be in BGR order. Defaults to (200, 200, 200). + mask_color (str or Tuple[int], optional): Color of masks. The tuple of + color should be in BGR order. Defaults to None. + line_width (int or float): The linewidth of lines. Defaults to 3. + frame_cfg (dict): The coordinate frame config while Open3D + visualization initialization. + Defaults to dict(size=1, origin=[0, 0, 0]). + alpha (int or float): The transparency of bboxes or mask. + Defaults to 0.8. + + Examples: + >>> import numpy as np + >>> import torch + >>> from mmengine.structures import InstanceData + >>> from mmdet3d.structures import (DepthInstance3DBoxes + ... Det3DDataSample) + >>> from mmdet3d.visualization import Det3DLocalVisualizer + + >>> det3d_local_visualizer = Det3DLocalVisualizer() + >>> image = np.random.randint(0, 256, size=(10, 12, 3)).astype('uint8') + >>> points = np.random.rand(1000, 3) + >>> gt_instances_3d = InstanceData() + >>> gt_instances_3d.bboxes_3d = DepthInstance3DBoxes( + ... 
torch.rand((5, 7))) + >>> gt_instances_3d.labels_3d = torch.randint(0, 2, (5,)) + >>> gt_det3d_data_sample = Det3DDataSample() + >>> gt_det3d_data_sample.gt_instances_3d = gt_instances_3d + >>> data_input = dict(img=image, points=points) + >>> det3d_local_visualizer.add_datasample('3D Scene', data_input, + ... gt_det3d_data_sample) + + >>> from mmdet3d.structures import PointData + >>> det3d_local_visualizer = Det3DLocalVisualizer() + >>> points = np.random.rand(1000, 3) + >>> gt_pts_seg = PointData() + >>> gt_pts_seg.pts_semantic_mask = torch.randint(0, 10, (1000, )) + >>> gt_det3d_data_sample = Det3DDataSample() + >>> gt_det3d_data_sample.gt_pts_seg = gt_pts_seg + >>> data_input = dict(points=points) + >>> det3d_local_visualizer.add_datasample('3D Scene', data_input, + ... gt_det3d_data_sample, + ... vis_task='lidar_seg') + """ + + def __init__(self, + name: str = 'visualizer', + points: Optional[np.ndarray] = None, + image: Optional[np.ndarray] = None, + pcd_mode: int = 0, + vis_backends: Optional[List[dict]] = None, + save_dir: Optional[str] = None, + bbox_color: Optional[Union[str, Tuple[int]]] = None, + text_color: Union[str, Tuple[int]] = (200, 200, 200), + mask_color: Optional[Union[str, Tuple[int]]] = None, + line_width: Union[int, float] = 3, + frame_cfg: dict = dict(size=1, origin=[0, 0, 0]), + alpha: Union[int, float] = 0.8) -> None: + super().__init__( + name=name, + image=image, + vis_backends=vis_backends, + save_dir=save_dir, + bbox_color=bbox_color, + text_color=text_color, + mask_color=mask_color, + line_width=line_width, + alpha=alpha) + if points is not None: + self.set_points(points, pcd_mode=pcd_mode, frame_cfg=frame_cfg) + self.pts_seg_num = 0 + + def _clear_o3d_vis(self) -> None: + """Clear open3d vis.""" + + if hasattr(self, 'o3d_vis'): + del self.o3d_vis + del self.pcd + del self.points_colors + + def _initialize_o3d_vis(self, frame_cfg: dict) -> Visualizer: + """Initialize open3d vis according to frame_cfg. + + Args: + frame_cfg (dict): The config to create coordinate frame in open3d + vis. + + Returns: + :obj:`o3d.visualization.Visualizer`: Created open3d vis. + """ + if o3d is None or geometry is None: + raise ImportError( + 'Please run "pip install open3d" to install open3d first.') + o3d_vis = o3d.visualization.Visualizer() + o3d_vis.create_window() + # create coordinate frame + mesh_frame = geometry.TriangleMesh.create_coordinate_frame(**frame_cfg) + o3d_vis.add_geometry(mesh_frame) + return o3d_vis + + @master_only + def set_points(self, + points: np.ndarray, + pcd_mode: int = 0, + vis_mode: str = 'replace', + frame_cfg: dict = dict(size=1, origin=[0, 0, 0]), + points_color: Tuple[float] = (0.5, 0.5, 0.5), + points_size: int = 2, + mode: str = 'xyz') -> None: + """Set the point cloud to draw. + + Args: + points (np.ndarray): Points to visualize with shape (N, 3+C). + pcd_mode (int): The point cloud mode (coordinates): 0 represents + LiDAR, 1 represents CAMERA, 2 represents Depth. Defaults to 0. + vis_mode (str): The visualization mode in Open3D: + + - 'replace': Replace the existing point cloud with input point + cloud. + - 'add': Add input point cloud into existing point cloud. + + Defaults to 'replace'. + frame_cfg (dict): The coordinate frame config for Open3D + visualization initialization. + Defaults to dict(size=1, origin=[0, 0, 0]). + points_color (Tuple[float]): The color of points. + Defaults to (0.5, 0.5, 0.5). + points_size (int): The size of points to show on visualizer. + Defaults to 2. 
+ mode (str): Indicate type of the input points, available mode + ['xyz', 'xyzrgb']. Defaults to 'xyz'. + """ + assert points is not None + assert vis_mode in ('replace', 'add') + check_type('points', points, np.ndarray) + + if not hasattr(self, 'o3d_vis'): + self.o3d_vis = self._initialize_o3d_vis(frame_cfg) + + # for now we convert points into depth mode for visualization + if pcd_mode != Coord3DMode.DEPTH: + points = Coord3DMode.convert(points, pcd_mode, Coord3DMode.DEPTH) + + if hasattr(self, 'pcd') and vis_mode != 'add': + self.o3d_vis.remove_geometry(self.pcd) + + # set points size in Open3D + self.o3d_vis.get_render_option().point_size = points_size + + points = points.copy() + pcd = geometry.PointCloud() + if mode == 'xyz': + pcd.points = o3d.utility.Vector3dVector(points[:, :3]) + points_colors = np.tile( + np.array(points_color), (points.shape[0], 1)) + elif mode == 'xyzrgb': + pcd.points = o3d.utility.Vector3dVector(points[:, :3]) + points_colors = points[:, 3:6] + # normalize to [0, 1] for Open3D drawing + if not ((points_colors >= 0.0) & (points_colors <= 1.0)).all(): + points_colors /= 255.0 + else: + raise NotImplementedError + + pcd.colors = o3d.utility.Vector3dVector(points_colors) + self.o3d_vis.add_geometry(pcd) + self.pcd = pcd + self.points_colors = points_colors + + # TODO: assign 3D Box color according to pred / GT labels + # We draw GT / pred bboxes on the same point cloud scenes + # for better detection performance comparison + def draw_bboxes_3d(self, + bboxes_3d: BaseInstance3DBoxes, + bbox_color: Tuple[float] = (0, 1, 0), + points_in_box_color: Tuple[float] = (1, 0, 0), + rot_axis: int = 2, + center_mode: str = 'lidar_bottom', + mode: str = 'xyz') -> None: + """Draw bbox on visualizer and change the color of points inside + bbox3d. + + Args: + bboxes_3d (:obj:`BaseInstance3DBoxes`): 3D bbox + (x, y, z, x_size, y_size, z_size, yaw) to visualize. + bbox_color (Tuple[float]): The color of 3D bboxes. + Defaults to (0, 1, 0). + points_in_box_color (Tuple[float]): The color of points inside 3D + bboxes. Defaults to (1, 0, 0). + rot_axis (int): Rotation axis of 3D bboxes. Defaults to 2. + center_mode (str): Indicates the center of bbox is bottom center or + gravity center. Available mode + ['lidar_bottom', 'camera_bottom']. Defaults to 'lidar_bottom'. + mode (str): Indicates the type of input points, available mode + ['xyz', 'xyzrgb']. Defaults to 'xyz'. 
+ """ + # Before visualizing the 3D Boxes in point cloud scene + # we need to convert the boxes to Depth mode + check_type('bboxes', bboxes_3d, BaseInstance3DBoxes) + + if not isinstance(bboxes_3d, DepthInstance3DBoxes): + bboxes_3d = bboxes_3d.convert_to(Box3DMode.DEPTH) + + # convert bboxes to numpy dtype + bboxes_3d = tensor2ndarray(bboxes_3d.tensor) + + in_box_color = np.array(points_in_box_color) + + for i in range(len(bboxes_3d)): + center = bboxes_3d[i, 0:3] + dim = bboxes_3d[i, 3:6] + yaw = np.zeros(3) + yaw[rot_axis] = bboxes_3d[i, 6] + rot_mat = geometry.get_rotation_matrix_from_xyz(yaw) + + if center_mode == 'lidar_bottom': + # bottom center to gravity center + center[rot_axis] += dim[rot_axis] / 2 + elif center_mode == 'camera_bottom': + # bottom center to gravity center + center[rot_axis] -= dim[rot_axis] / 2 + box3d = geometry.OrientedBoundingBox(center, rot_mat, dim) + + line_set = geometry.LineSet.create_from_oriented_bounding_box( + box3d) + line_set.paint_uniform_color(bbox_color) + # draw bboxes on visualizer + self.o3d_vis.add_geometry(line_set) + + # change the color of points which are in box + if self.pcd is not None and mode == 'xyz': + indices = box3d.get_point_indices_within_bounding_box( + self.pcd.points) + self.points_colors[indices] = in_box_color + + # update points colors + if self.pcd is not None: + self.pcd.colors = o3d.utility.Vector3dVector(self.points_colors) + self.o3d_vis.update_geometry(self.pcd) + + def set_bev_image(self, + bev_image: Optional[np.ndarray] = None, + bev_shape: int = 900) -> None: + """Set the bev image to draw. + + Args: + bev_image (np.ndarray, optional): The bev image to draw. + Defaults to None. + bev_shape (int): The bev image shape. Defaults to 900. + """ + if bev_image is None: + bev_image = np.zeros((bev_shape, bev_shape, 3), np.uint8) + + self._image = bev_image + self.width, self.height = bev_image.shape[1], bev_image.shape[0] + self._default_font_size = max( + np.sqrt(self.height * self.width) // 90, 10) + self.ax_save.cla() + self.ax_save.axis(False) + self.ax_save.imshow(bev_image, origin='lower') + # plot camera view range + x1 = np.linspace(0, self.width / 2) + x2 = np.linspace(self.width / 2, self.width) + self.ax_save.plot( + x1, + self.width / 2 - x1, + ls='--', + color='grey', + linewidth=1, + alpha=0.5) + self.ax_save.plot( + x2, + x2 - self.width / 2, + ls='--', + color='grey', + linewidth=1, + alpha=0.5) + self.ax_save.plot( + self.width / 2, + 0, + marker='+', + markersize=16, + markeredgecolor='red') + + # TODO: Support bev point cloud visualization + @master_only + def draw_bev_bboxes(self, + bboxes_3d: BaseInstance3DBoxes, + scale: int = 15, + edge_colors: Union[str, Tuple[int], + List[Union[str, Tuple[int]]]] = 'o', + line_styles: Union[str, List[str]] = '-', + line_widths: Union[int, float, List[Union[int, + float]]] = 1, + face_colors: Union[str, Tuple[int], + List[Union[str, + Tuple[int]]]] = 'none', + alpha: Union[int, float] = 1) -> MMENGINE_Visualizer: + """Draw projected 3D boxes on the image. + + Args: + bboxes_3d (:obj:`BaseInstance3DBoxes`): 3D bbox + (x, y, z, x_size, y_size, z_size, yaw) to visualize. + scale (dict): Value to scale the bev bboxes for better + visualization. Defaults to 15. + edge_colors (str or Tuple[int] or List[str or Tuple[int]]): + The colors of bboxes. ``colors`` can have the same length with + lines or just single value. If ``colors`` is single value, all + the lines will have the same colors. Refer to `matplotlib. + colors` for full list of formats that are accepted. 
+ Defaults to 'o'. + line_styles (str or List[str]): The linestyle of lines. + ``line_styles`` can have the same length with texts or just + single value. If ``line_styles`` is single value, all the lines + will have the same linestyle. Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to '-'. + line_widths (int or float or List[int or float]): The linewidth of + lines. ``line_widths`` can have the same length with lines or + just single value. If ``line_widths`` is single value, all the + lines will have the same linewidth. Defaults to 2. + face_colors (str or Tuple[int] or List[str or Tuple[int]]): + The face colors. Defaults to 'none'. + alpha (int or float): The transparency of bboxes. Defaults to 1. + """ + + check_type('bboxes', bboxes_3d, BaseInstance3DBoxes) + bev_bboxes = tensor2ndarray(bboxes_3d.bev) + # scale the bev bboxes for better visualization + bev_bboxes[:, :4] *= scale + ctr, w, h, theta = np.split(bev_bboxes, [2, 3, 4], axis=-1) + cos_value, sin_value = np.cos(theta), np.sin(theta) + vec1 = np.concatenate([w / 2 * cos_value, w / 2 * sin_value], axis=-1) + vec2 = np.concatenate([-h / 2 * sin_value, h / 2 * cos_value], axis=-1) + pt1 = ctr + vec1 + vec2 + pt2 = ctr + vec1 - vec2 + pt3 = ctr - vec1 - vec2 + pt4 = ctr - vec1 + vec2 + poly = np.stack([pt1, pt2, pt3, pt4], axis=-2) + # move the object along x-axis + poly[:, :, 0] += self.width / 2 + poly = [p for p in poly] + return self.draw_polygons( + poly, + alpha=alpha, + edge_colors=edge_colors, + line_styles=line_styles, + line_widths=line_widths, + face_colors=face_colors) + + @master_only + def draw_points_on_image(self, + points: Union[np.ndarray, Tensor], + pts2img: np.ndarray, + sizes: Union[np.ndarray, int] = 10) -> None: + """Draw projected points on the image. + + Args: + points (np.ndarray or Tensor): Points to draw. + pts2img (np.ndarray): The transformation matrix from the coordinate + of point cloud to image plane. + sizes (np.ndarray or int): The marker size. Defaults to 10. + """ + check_type('points', points, (np.ndarray, Tensor)) + points = tensor2ndarray(points) + assert self._image is not None, 'Please set image using `set_image`' + projected_points = points_cam2img(points, pts2img, with_depth=True) + depths = projected_points[:, 2] + colors = (depths % 20) / 20 + # use colormap to obtain the render color + color_map = plt.get_cmap('jet') + self.ax_save.scatter( + projected_points[:, 0], + projected_points[:, 1], + c=colors, + cmap=color_map, + s=sizes, + alpha=0.5, + edgecolors='none') + + # TODO: set bbox color according to palette + @master_only + def draw_proj_bboxes_3d( + self, + bboxes_3d: BaseInstance3DBoxes, + input_meta: dict, + edge_colors: Union[str, Tuple[int], + List[Union[str, Tuple[int]]]] = 'royalblue', + line_styles: Union[str, List[str]] = '-', + line_widths: Union[int, float, List[Union[int, float]]] = 2, + face_colors: Union[str, Tuple[int], + List[Union[str, Tuple[int]]]] = 'royalblue', + alpha: Union[int, float] = 0.4): + """Draw projected 3D boxes on the image. + + Args: + bboxes_3d (:obj:`BaseInstance3DBoxes`): 3D bbox + (x, y, z, x_size, y_size, z_size, yaw) to visualize. + input_meta (dict): Input meta information. + edge_colors (str or Tuple[int] or List[str or Tuple[int]]): + The colors of bboxes. ``colors`` can have the same length with + lines or just single value. If ``colors`` is single value, all + the lines will have the same colors. 
Refer to `matplotlib. + colors` for full list of formats that are accepted. + Defaults to 'royalblue'. + line_styles (str or List[str]): The linestyle of lines. + ``line_styles`` can have the same length with texts or just + single value. If ``line_styles`` is single value, all the lines + will have the same linestyle. Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to '-'. + line_widths (int or float or List[int or float]): The linewidth of + lines. ``line_widths`` can have the same length with lines or + just single value. If ``line_widths`` is single value, all the + lines will have the same linewidth. Defaults to 2. + face_colors (str or Tuple[int] or List[str or Tuple[int]]): + The face colors. Defaults to 'royalblue'. + alpha (int or float): The transparency of bboxes. Defaults to 0.4. + """ + + check_type('bboxes', bboxes_3d, BaseInstance3DBoxes) + + if isinstance(bboxes_3d, DepthInstance3DBoxes): + proj_bbox3d_to_img = proj_depth_bbox3d_to_img + elif isinstance(bboxes_3d, LiDARInstance3DBoxes): + proj_bbox3d_to_img = proj_lidar_bbox3d_to_img + elif isinstance(bboxes_3d, CameraInstance3DBoxes): + proj_bbox3d_to_img = proj_camera_bbox3d_to_img + else: + raise NotImplementedError('unsupported box type!') + + corners_2d = proj_bbox3d_to_img(bboxes_3d, input_meta) + + lines_verts_idx = [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 5, 1, 2, 6] + lines_verts = corners_2d[:, lines_verts_idx, :] + front_polys = corners_2d[:, 4:, :] + codes = [Path.LINETO] * lines_verts.shape[1] + codes[0] = Path.MOVETO + pathpatches = [] + for i in range(len(corners_2d)): + verts = lines_verts[i] + pth = Path(verts, codes) + pathpatches.append(PathPatch(pth)) + + p = PatchCollection( + pathpatches, + facecolors='none', + edgecolors=edge_colors, + linewidths=line_widths, + linestyles=line_styles) + + self.ax_save.add_collection(p) + + # draw a mask on the front of project bboxes + front_polys = [front_poly for front_poly in front_polys] + return self.draw_polygons( + front_polys, + alpha=alpha, + edge_colors=edge_colors, + line_styles=line_styles, + line_widths=line_widths, + face_colors=face_colors) + + @master_only + def draw_seg_mask(self, seg_mask_colors: np.ndarray) -> None: + """Add segmentation mask to visualizer via per-point colorization. + + Args: + seg_mask_colors (np.ndarray): The segmentation mask with shape + (N, 6), whose first 3 dims are point coordinates and last 3 + dims are converted colors. + """ + # we can't draw the colors on existing points + # in case gt and pred mask would overlap + # instead we set a large offset along x-axis for each seg mask + self.pts_seg_num += 1 + offset = (np.array(self.pcd.points).max(0) - + np.array(self.pcd.points).min(0))[0] * 1.2 * self.pts_seg_num + mesh_frame = geometry.TriangleMesh.create_coordinate_frame( + size=1, origin=[offset, 0, 0]) # create coordinate frame for seg + self.o3d_vis.add_geometry(mesh_frame) + seg_points = copy.deepcopy(seg_mask_colors) + seg_points[:, 0] += offset + self.set_points(seg_points, pcd_mode=2, vis_mode='add', mode='xyzrgb') + + def _draw_instances_3d(self, + data_input: dict, + instances: InstanceData, + input_meta: dict, + vis_task: str, + palette: Optional[List[tuple]] = None) -> dict: + """Draw 3D instances of GT or prediction. + + Args: + data_input (dict): The input dict to draw. + instances (:obj:`InstanceData`): Data structure for instance-level + annotations or predictions. 
+ input_meta (dict): Meta information. + vis_task (str): Visualization task, it includes: 'lidar_det', + 'multi-modality_det', 'mono_det'. + palette (List[tuple], optional): Palette information corresponding + to the category. Defaults to None. + + Returns: + dict: The drawn point cloud and image whose channel is RGB. + """ + + # Only visualize when there is at least one instance + if not len(instances) > 0: + return None + + bboxes_3d = instances.bboxes_3d # BaseInstance3DBoxes + + data_3d = dict() + + if vis_task in ['lidar_det', 'multi-modality_det']: + assert 'points' in data_input + points = data_input['points'] + check_type('points', points, (np.ndarray, Tensor)) + points = tensor2ndarray(points) + + if not isinstance(bboxes_3d, DepthInstance3DBoxes): + points, bboxes_3d_depth = to_depth_mode(points, bboxes_3d) + else: + bboxes_3d_depth = bboxes_3d.clone() + + self.set_points(points, pcd_mode=2) + self.draw_bboxes_3d(bboxes_3d_depth) + + data_3d['bboxes_3d'] = tensor2ndarray(bboxes_3d_depth.tensor) + data_3d['points'] = points + + if vis_task in ['mono_det', 'multi-modality_det']: + assert 'img' in data_input + img = data_input['img'] + if isinstance(data_input['img'], Tensor): + img = img.permute(1, 2, 0).numpy() + img = img[..., [2, 1, 0]] # bgr to rgb + self.set_image(img) + self.draw_proj_bboxes_3d(bboxes_3d, input_meta) + if vis_task == 'mono_det' and hasattr(instances, 'centers_2d'): + centers_2d = instances.centers_2d + self.draw_points(centers_2d) + drawn_img = self.get_image() + data_3d['img'] = drawn_img + + return data_3d + + def _draw_pts_sem_seg(self, + points: Union[Tensor, np.ndarray], + pts_seg: PointData, + palette: Optional[List[tuple]] = None, + ignore_index: Optional[int] = None) -> None: + """Draw 3D semantic mask of GT or prediction. + + Args: + points (Tensor or np.ndarray): The input point cloud to draw. + pts_seg (:obj:`PointData`): Data structure for pixel-level + annotations or predictions. + palette (List[tuple], optional): Palette information corresponding + to the category. Defaults to None. + ignore_index (int, optional): Ignore category. Defaults to None. + """ + check_type('points', points, (np.ndarray, Tensor)) + + points = tensor2ndarray(points) + pts_sem_seg = tensor2ndarray(pts_seg.pts_semantic_mask) + palette = np.array(palette) + + if ignore_index is not None: + points = points[pts_sem_seg != ignore_index] + pts_sem_seg = pts_sem_seg[pts_sem_seg != ignore_index] + + pts_color = palette[pts_sem_seg] + seg_color = np.concatenate([points[:, :3], pts_color], axis=1) + + self.set_points(points, pcd_mode=2, vis_mode='add') + self.draw_seg_mask(seg_color) + + @master_only + def show(self, + save_path: Optional[str] = None, + drawn_img_3d: Optional[np.ndarray] = None, + drawn_img: Optional[np.ndarray] = None, + win_name: str = 'image', + wait_time: int = 0, + continue_key: str = ' ') -> None: + """Show the drawn point cloud/image. + + Args: + save_path (str, optional): Path to save open3d visualized results. + Defaults to None. + drawn_img_3d (np.ndarray, optional): The image to show. If + drawn_img_3d is not None, it will show the image got by + Visualizer. Defaults to None. + drawn_img (np.ndarray, optional): The image to show. If drawn_img + is not None, it will show the image got by Visualizer. + Defaults to None. + win_name (str): The image title. Defaults to 'image'. + wait_time (int): Delay in milliseconds. 0 is the special value that + means "forever". Defaults to 0. + continue_key (str): The key for users to continue. Defaults to ' '. 
+ """ + if hasattr(self, 'o3d_vis'): + self.o3d_vis.run() + if save_path is not None: + self.o3d_vis.capture_screen_image(save_path) + self.o3d_vis.destroy_window() + self._clear_o3d_vis() + + if hasattr(self, '_image'): + if drawn_img_3d is not None: + super().show(drawn_img_3d, win_name, wait_time, continue_key) + if drawn_img is not None: + super().show(drawn_img, win_name, wait_time, continue_key) + + # TODO: Support Visualize the 3D results from image and point cloud + # respectively + @master_only + def add_datasample(self, + name: str, + data_input: dict, + data_sample: Optional[Det3DDataSample] = None, + draw_gt: bool = True, + draw_pred: bool = True, + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + o3d_save_path: Optional[str] = None, + vis_task: str = 'mono_det', + pred_score_thr: float = 0.3, + step: int = 0) -> None: + """Draw datasample and save to all backends. + + - If GT and prediction are plotted at the same time, they are displayed + in a stitched image where the left image is the ground truth and the + right image is the prediction. + - If ``show`` is True, all storage backends are ignored, and the images + will be displayed in a local window. + - If ``out_file`` is specified, the drawn image will be saved to + ``out_file``. It is usually used when the display is not available. + + Args: + name (str): The image identifier. + data_input (dict): It should include the point clouds or image + to draw. + data_sample (:obj:`Det3DDataSample`, optional): Prediction + Det3DDataSample. Defaults to None. + draw_gt (bool): Whether to draw GT Det3DDataSample. + Defaults to True. + draw_pred (bool): Whether to draw Prediction Det3DDataSample. + Defaults to True. + show (bool): Whether to display the drawn point clouds and image. + Defaults to False. + wait_time (float): The interval of show (s). Defaults to 0. + out_file (str, optional): Path to output file. Defaults to None. + o3d_save_path (str, optional): Path to save open3d visualized + results. Defaults to None. + vis_task (str): Visualization task. Defaults to 'mono_det'. + pred_score_thr (float): The threshold to visualize the bboxes + and masks. Defaults to 0.3. + step (int): Global step value to record. Defaults to 0. + """ + assert vis_task in ( + 'mono_det', 'multi-view_det', 'lidar_det', 'lidar_seg', + 'multi-modality_det'), f'got unexpected vis_task {vis_task}.' + classes = self.dataset_meta.get('classes', None) + # For object detection datasets, no palette is saved + palette = self.dataset_meta.get('palette', None) + ignore_index = self.dataset_meta.get('ignore_index', None) + + gt_data_3d = None + pred_data_3d = None + gt_img_data = None + pred_img_data = None + + if draw_gt and data_sample is not None: + if 'gt_instances_3d' in data_sample: + gt_data_3d = self._draw_instances_3d( + data_input, data_sample.gt_instances_3d, + data_sample.metainfo, vis_task, palette) + if 'gt_instances' in data_sample: + if len(data_sample.gt_instances) > 0: + assert 'img' in data_input + img = data_input['img'] + if isinstance(data_input['img'], Tensor): + img = data_input['img'].permute(1, 2, 0).numpy() + img = img[..., [2, 1, 0]] # bgr to rgb + gt_img_data = self._draw_instances( + img, data_sample.gt_instances, classes, palette) + if 'gt_pts_seg' in data_sample and vis_task == 'lidar_seg': + assert classes is not None, 'class information is ' \ + 'not provided when ' \ + 'visualizing semantic ' \ + 'segmentation results.' 
+ assert 'points' in data_input + self._draw_pts_sem_seg(data_input['points'], + data_sample.gt_pts_seg, palette, + ignore_index) + + if draw_pred and data_sample is not None: + if 'pred_instances_3d' in data_sample: + pred_instances_3d = data_sample.pred_instances_3d + # .cpu can not be used for BaseInstance3DBoxes + # so we need to use .to('cpu') + pred_instances_3d = pred_instances_3d[ + pred_instances_3d.scores_3d > pred_score_thr].to('cpu') + pred_data_3d = self._draw_instances_3d(data_input, + pred_instances_3d, + data_sample.metainfo, + vis_task, palette) + if 'pred_instances' in data_sample: + if 'img' in data_input and len(data_sample.pred_instances) > 0: + pred_instances = data_sample.pred_instances + pred_instances = pred_instances[ + pred_instances.scores > pred_score_thr].cpu() + img = data_input['img'] + if isinstance(data_input['img'], Tensor): + img = data_input['img'].permute(1, 2, 0).numpy() + img = img[..., [2, 1, 0]] # bgr to rgb + pred_img_data = self._draw_instances( + img, pred_instances, classes, palette) + if 'pred_pts_seg' in data_sample and vis_task == 'lidar_seg': + assert classes is not None, 'class information is ' \ + 'not provided when ' \ + 'visualizing semantic ' \ + 'segmentation results.' + assert 'points' in data_input + self._draw_pts_sem_seg(data_input['points'], + data_sample.pred_pts_seg, palette, + ignore_index) + + # monocular 3d object detection image + if vis_task in ['mono_det', 'multi-modality_det']: + if gt_data_3d is not None and pred_data_3d is not None: + drawn_img_3d = np.concatenate( + (gt_data_3d['img'], pred_data_3d['img']), axis=1) + elif gt_data_3d is not None: + drawn_img_3d = gt_data_3d['img'] + elif pred_data_3d is not None: + drawn_img_3d = pred_data_3d['img'] + else: # both instances of gt and pred are empty + drawn_img_3d = None + else: + drawn_img_3d = None + + # 2d object detection image + if gt_img_data is not None and pred_img_data is not None: + drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1) + elif gt_img_data is not None: + drawn_img = gt_img_data + elif pred_img_data is not None: + drawn_img = pred_img_data + else: + drawn_img = None + + if show: + self.show( + o3d_save_path, + drawn_img_3d, + drawn_img, + win_name=name, + wait_time=wait_time) + + if out_file is not None: + # check the suffix of the name of image file + if not (out_file.endswith('.png') or out_file.endswith('.jpg')): + out_file = f'{out_file}.png' + if drawn_img_3d is not None: + mmcv.imwrite(drawn_img_3d[..., ::-1], out_file) + if drawn_img is not None: + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img_3d, step) diff --git a/mmdet3d/visualization/vis_utils.py b/mmdet3d/visualization/vis_utils.py new file mode 100755 index 0000000..340f28f --- /dev/null +++ b/mmdet3d/visualization/vis_utils.py @@ -0,0 +1,177 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Tuple + +import numpy as np +import torch +import trimesh + +from mmdet3d.structures import (BaseInstance3DBoxes, Box3DMode, + CameraInstance3DBoxes, Coord3DMode, + DepthInstance3DBoxes, LiDARInstance3DBoxes) + + +def write_obj(points: np.ndarray, out_filename: str) -> None: + """Write points into ``obj`` format for meshlab visualization. + + Args: + points (np.ndarray): Points in shape (N, dim). + out_filename (str): Filename to be saved. 
+ """ + N = points.shape[0] + fout = open(out_filename, 'w') + for i in range(N): + if points.shape[1] == 6: + c = points[i, 3:].astype(int) + fout.write( + 'v %f %f %f %d %d %d\n' % + (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2])) + + else: + fout.write('v %f %f %f\n' % + (points[i, 0], points[i, 1], points[i, 2])) + fout.close() + + +def write_oriented_bbox(scene_bbox: np.ndarray, out_filename: str) -> None: + """Export oriented (around Z axis) scene bbox to meshes. + + Args: + scene_bbox (np.ndarray): xyz pos of center and 3 lengths + (x_size, y_size, z_size) and heading angle around Z axis. + Y forward, X right, Z upward, heading angle of positive X is 0, + heading angle of positive Y is 90 degrees. + out_filename (str): Filename. + """ + + def heading2rotmat(heading_angle: float) -> np.ndarray: + rotmat = np.zeros((3, 3)) + rotmat[2, 2] = 1 + cosval = np.cos(heading_angle) + sinval = np.sin(heading_angle) + rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]]) + return rotmat + + def convert_oriented_box_to_trimesh_fmt( + box: np.ndarray) -> trimesh.base.Trimesh: + ctr = box[:3] + lengths = box[3:6] + trns = np.eye(4) + trns[0:3, 3] = ctr + trns[3, 3] = 1.0 + trns[0:3, 0:3] = heading2rotmat(box[6]) + box_trimesh_fmt = trimesh.creation.box(lengths, trns) + return box_trimesh_fmt + + if len(scene_bbox) == 0: + scene_bbox = np.zeros((1, 7)) + scene = trimesh.scene.Scene() + for box in scene_bbox: + scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box)) + + mesh_list = trimesh.util.concatenate(scene.dump()) + # save to obj file + trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='obj') + + +def to_depth_mode( + points: np.ndarray, + bboxes: BaseInstance3DBoxes) -> Tuple[np.ndarray, BaseInstance3DBoxes]: + """Convert points and bboxes to Depth Coord and Depth Box mode.""" + if points is not None: + points = Coord3DMode.convert_point(points.copy(), Coord3DMode.LIDAR, + Coord3DMode.DEPTH) + if bboxes is not None: + bboxes = Box3DMode.convert(bboxes.clone(), Box3DMode.LIDAR, + Box3DMode.DEPTH) + return points, bboxes + + +# TODO: refactor lidar2img to img_meta +def proj_lidar_bbox3d_to_img(bboxes_3d: LiDARInstance3DBoxes, + input_meta: dict) -> np.ndarray: + """Project the 3D bbox on 2D plane. + + Args: + bboxes_3d (:obj:`LiDARInstance3DBoxes`): 3D bbox in lidar coordinate + system to visualize. + input_meta (dict): Meta information. + """ + corners_3d = bboxes_3d.corners.cpu().numpy() + num_bbox = corners_3d.shape[0] + pts_4d = np.concatenate( + [corners_3d.reshape(-1, 3), + np.ones((num_bbox * 8, 1))], axis=-1) + lidar2img = copy.deepcopy(input_meta['lidar2img']).reshape(4, 4) + if isinstance(lidar2img, torch.Tensor): + lidar2img = lidar2img.cpu().numpy() + pts_2d = pts_4d @ lidar2img.T + + pts_2d[:, 2] = np.clip(pts_2d[:, 2], a_min=1e-5, a_max=1e5) + pts_2d[:, 0] /= pts_2d[:, 2] + pts_2d[:, 1] /= pts_2d[:, 2] + imgfov_pts_2d = pts_2d[..., :2].reshape(num_bbox, 8, 2) + + return imgfov_pts_2d + + +# TODO: remove third parameter in all functions here in favour of img_metas +def proj_depth_bbox3d_to_img(bboxes_3d: DepthInstance3DBoxes, + input_meta: dict) -> np.ndarray: + """Project the 3D bbox on 2D plane and draw on input image. + + Args: + bboxes_3d (:obj:`DepthInstance3DBoxes`): 3D bbox in depth coordinate + system to visualize. + input_meta (dict): Meta information. 
+ """ + from mmdet3d.models import apply_3d_transformation + from mmdet3d.structures import points_cam2img + + input_meta = copy.deepcopy(input_meta) + corners_3d = bboxes_3d.corners + num_bbox = corners_3d.shape[0] + points_3d = corners_3d.reshape(-1, 3) + + # first reverse the data transformations + xyz_depth = apply_3d_transformation( + points_3d, 'DEPTH', input_meta, reverse=True) + + # project to 2d to get image coords (uv) + uv_origin = points_cam2img(xyz_depth, + xyz_depth.new_tensor(input_meta['depth2img'])) + uv_origin = (uv_origin - 1).round() + imgfov_pts_2d = uv_origin[..., :2].reshape(num_bbox, 8, 2).numpy() + + return imgfov_pts_2d + + +# project the camera bboxes 3d to image +def proj_camera_bbox3d_to_img(bboxes_3d: CameraInstance3DBoxes, + input_meta: dict) -> np.ndarray: + """Project the 3D bbox on 2D plane and draw on input image. + + Args: + bboxes_3d (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate + system to visualize. + input_meta (dict): Meta information. + """ + from mmdet3d.structures import points_cam2img + + cam2img = copy.deepcopy(input_meta['cam2img']) + corners_3d = bboxes_3d.corners + num_bbox = corners_3d.shape[0] + points_3d = corners_3d.reshape(-1, 3) + if not isinstance(cam2img, torch.Tensor): + cam2img = torch.from_numpy(np.array(cam2img)) + + assert (cam2img.shape == torch.Size([3, 3]) + or cam2img.shape == torch.Size([4, 4])) + cam2img = cam2img.float().cpu() + + # project to 2d to get image coords (uv) + uv_origin = points_cam2img(points_3d, cam2img) + uv_origin = (uv_origin - 1).round() + imgfov_pts_2d = uv_origin[..., :2].reshape(num_bbox, 8, 2).numpy() + + return imgfov_pts_2d diff --git a/model-index.yml b/model-index.yml new file mode 100755 index 0000000..672e665 --- /dev/null +++ b/model-index.yml @@ -0,0 +1,30 @@ +Import: + - configs/3dssd/metafile.yml + - configs/centerpoint/metafile.yml + - configs/dgcnn/metafile.yml + - configs/dynamic_voxelization/metafile.yml + - configs/fcos3d/metafile.yml + - configs/free_anchor/metafile.yml + - configs/groupfree3d/metafile.yml + - configs/h3dnet/metafile.yml + - configs/imvotenet/metafile.yml + - configs/imvoxelnet/metafile.yml + - configs/monoflex/metafile.yml + - configs/mvxnet/metafile.yml + - configs/nuimages/metafile.yml + - configs/paconv/metafile.yml + - configs/parta2/metafile.yml + - configs/pgd/metafile.yml + - configs/point_rcnn/metafile.yml + - configs/pointnet2/metafile.yml + - configs/pointpillars/metafile.yml + - configs/regnet/metafile.yml + - configs/second/metafile.yml + - configs/smoke/metafile.yml + - configs/ssn/metafile.yml + - configs/votenet/metafile.yml + - configs/minkunet/metafile.yml + - configs/cylinder3d/metafile.yml + - configs/pv_rcnn/metafile.yml + - configs/fcaf3d/metafile.yml + - configs/spvcnn/metafile.yml diff --git a/projects/BEVFusion/README.md b/projects/BEVFusion/README.md new file mode 100755 index 0000000..101f7b3 --- /dev/null +++ b/projects/BEVFusion/README.md @@ -0,0 +1,126 @@ +# BEVFusion: Multi-Task Multi-Sensor Fusion with Unified Bird's-Eye View Representation + +> [BEVFusion: Multi-Task Multi-Sensor Fusion with Unified Bird's-Eye View Representation](https://arxiv.org/abs/2205.13542) + + + +## Abstract + +Multi-sensor fusion is essential for an accurate and reliable autonomous driving system. Recent approaches are based on point-level fusion: augmenting the LiDAR point cloud with camera features. 
However, the camera-to-LiDAR projection throws away the semantic density of camera features, hindering the effectiveness of such methods, especially for semantic-oriented tasks (such as 3D scene segmentation). In this paper, we break this deeply-rooted convention with BEVFusion, an efficient and generic multi-task multi-sensor fusion framework. It unifies multi-modal features in the shared bird's-eye view (BEV) representation space, which nicely preserves both geometric and semantic information. To achieve this, we diagnose and lift key efficiency bottlenecks in the view transformation with optimized BEV pooling, reducing latency by more than 40x. BEVFusion is fundamentally task-agnostic and seamlessly supports different 3D perception tasks with almost no architectural changes. It establishes the new state of the art on nuScenes, achieving 1.3% higher mAP and NDS on 3D object detection and 13.6% higher mIoU on BEV map segmentation, with 1.9x lower computation cost. Code to reproduce our +results is available at https://github.com/mit-han-lab/bevfusion. + +
+
+## Introduction
+
+We implement BEVFusion and provide the results and pretrained checkpoints on the NuScenes dataset.
+
+## Usage
+
+### Compiling operations on CUDA
+
+**Note** that the voxelization OP in the original implementation of `BEVFusion` is different from the implementation in MMCV. If you want to use the original pretrained model [here](https://github.com/mit-han-lab/bevfusion/blob/main/README.md), you need to compile and use the original implementation of the voxelization OP:
+
+```shell
+python projects/BEVFusion/setup.py develop
+```
+
+### Training commands
+
+In MMDetection3D's root directory, run the following command to train the model:
+
+```bash
+python tools/train.py projects/BEVFusion/configs/bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py
+```
+
+For multi-GPU training, run:
+
+```bash
+python -m torch.distributed.launch --nnodes=1 --node_rank=0 --nproc_per_node=${NUM_GPUS} --master_port=29506 --master_addr="127.0.0.1" tools/train.py projects/BEVFusion/configs/bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py
+```
+
+### Testing commands
+
+In MMDetection3D's root directory, run the following command to test the model:
+
+```bash
+python tools/test.py projects/BEVFusion/configs/bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py ${CHECKPOINT_PATH}
+```
+
+## Results and models
+
+### NuScenes
+
+| Backbone | Voxel type (voxel size) | NMS | Mem (GB) | Inf time (fps) | NDS | mAP | Download |
+| :------------------------------------------------------------------------------: | :---------------------: | :-: | :------: | :------------: | :---: | :---: | :-------------------------------------------------------------------------------------------------------: |
+| [SECFPN](./configs/bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py) | voxel (0.075) | × | - | - | 71.62 | 68.77 | [converted_model](https://drive.google.com/file/d/1QkvbYDk4G2d6SZoeJqish13qSyXA4lp3/view?usp=share_link) |
+
+## Citation
+
+```latex
+@inproceedings{liu2022bevfusion,
+  title={BEVFusion: Multi-Task Multi-Sensor Fusion with Unified Bird's-Eye View Representation},
+  author={Liu, Zhijian and Tang, Haotian and Amini, Alexander and Yang, Xingyu and Mao, Huizi and Rus, Daniela and Han, Song},
+  booktitle={IEEE International Conference on Robotics and Automation (ICRA)},
+  year={2023}
+}
+```
+
+## Checklist
+
+- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.
+
+  - [x] Finish the code
+
+  - [x] Basic docstrings & proper citation
+
+  - [x] Test-time correctness
+
+  - [x] A full README
+
+- [ ] Milestone 2: Indicates a successful model implementation.
+
+  - [ ] Training-time correctness
+
+- [ ] Milestone 3: Good to be a part of our core package!
+
+  - [ ] Type hints and docstrings
+
+  - [ ] Unit tests
+
+  - [ ] Code polishing
+
+  - [ ] Metafile.yml
+
+- [ ] Move your modules into the core package following the codebase's file hierarchy structure.
+
+- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure.
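As a quick, hedged smoke test that the compiled ops and a converted checkpoint load correctly (the checkpoint path is a placeholder, and it is assumed that the project config pulls in the BEVFusion modules, e.g. via `custom_imports`):

```python
# Hedged smoke test: build BEVFusion from the project config and a converted
# checkpoint. The checkpoint path is a placeholder.
from mmdet3d.apis import init_model

config_file = ('projects/BEVFusion/configs/'
               'bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py')
checkpoint_file = 'checkpoints/bevfusion_converted.pth'  # placeholder

model = init_model(config_file, checkpoint_file, device='cuda:0')
print(type(model).__name__)  # expected to print: BEVFusion
```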
diff --git a/projects/BEVFusion/bevfusion/__init__.py b/projects/BEVFusion/bevfusion/__init__.py new file mode 100755 index 0000000..c36fc64 --- /dev/null +++ b/projects/BEVFusion/bevfusion/__init__.py @@ -0,0 +1,18 @@ +from .bevfusion import BEVFusion +from .bevfusion_necks import GeneralizedLSSFPN +from .depth_lss import DepthLSSTransform +from .loading import BEVLoadMultiViewImageFromFiles +from .sparse_encoder import BEVFusionSparseEncoder +from .transformer import TransformerDecoderLayer +from .transforms_3d import GridMask, ImageAug3D +from .transfusion_head import ConvFuser, TransFusionHead +from .utils import (BBoxBEVL1Cost, HeuristicAssigner3D, HungarianAssigner3D, + IoU3DCost) + +__all__ = [ + 'BEVFusion', 'TransFusionHead', 'ConvFuser', 'ImageAug3D', 'GridMask', + 'GeneralizedLSSFPN', 'HungarianAssigner3D', 'BBoxBEVL1Cost', 'IoU3DCost', + 'HeuristicAssigner3D', 'DepthLSSTransform', + 'BEVLoadMultiViewImageFromFiles', 'BEVFusionSparseEncoder', + 'TransformerDecoderLayer' +] diff --git a/projects/BEVFusion/bevfusion/bevfusion.py b/projects/BEVFusion/bevfusion/bevfusion.py new file mode 100755 index 0000000..a823528 --- /dev/null +++ b/projects/BEVFusion/bevfusion/bevfusion.py @@ -0,0 +1,242 @@ +from typing import Dict, List, Optional + +import numpy as np +import torch +from torch import Tensor +from torch.nn import functional as F + +from mmdet3d.models import Base3DDetector +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from mmdet3d.utils import OptConfigType, OptMultiConfig, OptSampleList +from .ops import Voxelization + + +@MODELS.register_module() +class BEVFusion(Base3DDetector): + + def __init__( + self, + data_preprocessor: OptConfigType = None, + pts_voxel_encoder: Optional[dict] = None, + pts_middle_encoder: Optional[dict] = None, + fusion_layer: Optional[dict] = None, + img_backbone: Optional[dict] = None, + pts_backbone: Optional[dict] = None, + vtransform: Optional[dict] = None, + img_neck: Optional[dict] = None, + pts_neck: Optional[dict] = None, + bbox_head: Optional[dict] = None, + init_cfg: OptMultiConfig = None, + seg_head: Optional[dict] = None, + **kwargs, + ) -> None: + voxelize_cfg = data_preprocessor.pop('voxelize_cfg') + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + self.voxelize_reduce = voxelize_cfg.pop('voxelize_reduce') + self.pts_voxel_layer = Voxelization(**voxelize_cfg) + + self.pts_voxel_encoder = MODELS.build(pts_voxel_encoder) + + self.img_backbone = MODELS.build(img_backbone) + self.img_neck = MODELS.build(img_neck) + self.vtransform = MODELS.build(vtransform) + self.pts_middle_encoder = MODELS.build(pts_middle_encoder) + + self.fusion_layer = MODELS.build(fusion_layer) + + self.pts_backbone = MODELS.build(pts_backbone) + self.pts_neck = MODELS.build(pts_neck) + + self.bbox_head = MODELS.build(bbox_head) + # hard code here where using converted checkpoint of original + # implementation of `BEVFusion` + self.use_converted_checkpoint = True + + self.init_weights() + + def _forward(self, + batch_inputs: Tensor, + batch_data_samples: OptSampleList = None): + """Network forward process. + + Usually includes backbone, neck and head forward without any post- + processing. 
+ """ + pass + + def init_weights(self) -> None: + if self.img_backbone is not None: + self.img_backbone.init_weights() + + @property + def with_bbox_head(self): + """bool: Whether the detector has a box head.""" + return hasattr(self, 'bbox_head') and self.bbox_head is not None + + @property + def with_seg_head(self): + """bool: Whether the detector has a segmentation head. + """ + return hasattr(self, 'seg_head') and self.seg_head is not None + + def extract_img_feat( + self, + x, + points, + lidar2image, + camera_intrinsics, + camera2lidar, + img_aug_matrix, + lidar_aug_matrix, + img_metas, + ) -> torch.Tensor: + B, N, C, H, W = x.size() + x = x.view(B * N, C, H, W) + + x = self.img_backbone(x) + x = self.img_neck(x) + + if not isinstance(x, torch.Tensor): + x = x[0] + + BN, C, H, W = x.size() + x = x.view(B, int(BN / B), C, H, W) + + x = self.vtransform( + x, + points, + lidar2image, + camera_intrinsics, + camera2lidar, + img_aug_matrix, + lidar_aug_matrix, + img_metas, + ) + return x + + def extract_pts_feat(self, batch_inputs_dict) -> torch.Tensor: + points = batch_inputs_dict['points'] + feats, coords, sizes = self.voxelize(points) + batch_size = coords[-1, 0] + 1 + x = self.pts_middle_encoder(feats, coords, batch_size) + return x + + @torch.no_grad() + def voxelize(self, points): + feats, coords, sizes = [], [], [] + for k, res in enumerate(points): + ret = self.pts_voxel_layer(res) + if len(ret) == 3: + # hard voxelize + f, c, n = ret + else: + assert len(ret) == 2 + f, c = ret + n = None + feats.append(f) + coords.append(F.pad(c, (1, 0), mode='constant', value=k)) + if n is not None: + sizes.append(n) + + feats = torch.cat(feats, dim=0) + coords = torch.cat(coords, dim=0) + if len(sizes) > 0: + sizes = torch.cat(sizes, dim=0) + if self.voxelize_reduce: + feats = feats.sum( + dim=1, keepdim=False) / sizes.type_as(feats).view(-1, 1) + feats = feats.contiguous() + + return feats, coords, sizes + + def predict(self, batch_inputs_dict: Dict[str, Optional[Tensor]], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + """Forward of testing. + + Args: + batch_inputs_dict (dict): The model input dict which include + 'points' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`. + + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input sample. Each Det3DDataSample usually contain + 'pred_instances_3d'. And the ``pred_instances_3d`` usually + contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bbox_3d (:obj:`BaseInstance3DBoxes`): Prediction of bboxes, + contains a tensor with shape (num_instances, 7). 
+ """ + batch_input_metas = [item.metainfo for item in batch_data_samples] + feats = self.extract_feat(batch_inputs_dict, batch_input_metas) + + if self.with_bbox_head: + outputs = self.bbox_head.predict(feats, batch_input_metas) + if self.use_converted_checkpoint: + outputs[0]['bboxes_3d'].tensor[:, 6] = -outputs[0][ + 'bboxes_3d'].tensor[:, 6] - np.pi / 2 + outputs[0]['bboxes_3d'].tensor[:, 3:5] = outputs[0][ + 'bboxes_3d'].tensor[:, [4, 3]] + + res = self.add_pred_to_datasample(batch_data_samples, outputs) + + return res + + def extract_feat( + self, + batch_inputs_dict, + batch_input_metas, + **kwargs, + ): + imgs = batch_inputs_dict.get('imgs', None) + points = batch_inputs_dict.get('points', None) + + lidar2image, camera_intrinsics, camera2lidar = [], [], [] + img_aug_matrix, lidar_aug_matrix = [], [] + for i, meta in enumerate(batch_input_metas): + lidar2image.append(meta['lidar2img']) + camera_intrinsics.append(meta['cam2img']) + camera2lidar.append(meta['cam2lidar']) + img_aug_matrix.append(meta.get('img_aug_matrix', np.eye(4))) + lidar_aug_matrix.append(meta.get('lidar_aug_matrix', np.eye(4))) + + lidar2image = imgs.new_tensor(np.asarray(lidar2image)) + camera_intrinsics = imgs.new_tensor(np.array(camera_intrinsics)) + camera2lidar = imgs.new_tensor(np.asarray(camera2lidar)) + img_aug_matrix = imgs.new_tensor(np.asarray(img_aug_matrix)) + lidar_aug_matrix = imgs.new_tensor(np.asarray(lidar_aug_matrix)) + img_feature = self.extract_img_feat(imgs, points, lidar2image, + camera_intrinsics, camera2lidar, + img_aug_matrix, lidar_aug_matrix, + batch_input_metas) + pts_feature = self.extract_pts_feat(batch_inputs_dict) + + features = [img_feature, pts_feature] + + if self.fusion_layer is not None: + x = self.fusion_layer(features) + else: + assert len(features) == 1, features + x = features[0] + + x = self.pts_backbone(x) + x = self.pts_neck(x) + + return x + + def loss(self, batch_inputs_dict: Dict[str, Optional[Tensor]], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + pass diff --git a/projects/BEVFusion/bevfusion/bevfusion_necks.py b/projects/BEVFusion/bevfusion/bevfusion_necks.py new file mode 100755 index 0000000..4fc79c3 --- /dev/null +++ b/projects/BEVFusion/bevfusion/bevfusion_necks.py @@ -0,0 +1,99 @@ +# modify from https://github.com/mit-han-lab/bevfusion +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class GeneralizedLSSFPN(BaseModule): + + def __init__( + self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=dict(type='BN2d'), + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(mode='bilinear', align_corners=True), + ) -> None: + super().__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1: + self.backbone_end_level = self.num_ins - 1 + # assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + + 
self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i] + + (in_channels[i + 1] if i == self.backbone_end_level - + 1 else out_channels), + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False, + ) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False, + ) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + def forward(self, inputs): + """Forward function.""" + # upsample -> cat -> conv1x1 -> conv3x3 + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [inputs[i + self.start_level] for i in range(len(inputs))] + + # build top-down path + used_backbone_levels = len(laterals) - 1 + for i in range(used_backbone_levels - 1, -1, -1): + x = F.interpolate( + laterals[i + 1], + size=laterals[i].shape[2:], + **self.upsample_cfg, + ) + laterals[i] = torch.cat([laterals[i], x], dim=1) + laterals[i] = self.lateral_convs[i](laterals[i]) + laterals[i] = self.fpn_convs[i](laterals[i]) + + # build outputs + outs = [laterals[i] for i in range(used_backbone_levels)] + return tuple(outs) diff --git a/projects/BEVFusion/bevfusion/depth_lss.py b/projects/BEVFusion/bevfusion/depth_lss.py new file mode 100755 index 0000000..f336c92 --- /dev/null +++ b/projects/BEVFusion/bevfusion/depth_lss.py @@ -0,0 +1,354 @@ +# modify from https://github.com/mit-han-lab/bevfusion +from typing import Tuple + +import torch +from torch import nn + +from mmdet3d.registry import MODELS +from .ops import bev_pool + + +def gen_dx_bx(xbound, ybound, zbound): + dx = torch.Tensor([row[2] for row in [xbound, ybound, zbound]]) + bx = torch.Tensor( + [row[0] + row[2] / 2.0 for row in [xbound, ybound, zbound]]) + nx = torch.LongTensor([(row[1] - row[0]) / row[2] + for row in [xbound, ybound, zbound]]) + return dx, bx, nx + + +class BaseTransform(nn.Module): + + def __init__( + self, + in_channels: int, + out_channels: int, + image_size: Tuple[int, int], + feature_size: Tuple[int, int], + xbound: Tuple[float, float, float], + ybound: Tuple[float, float, float], + zbound: Tuple[float, float, float], + dbound: Tuple[float, float, float], + ) -> None: + super().__init__() + self.in_channels = in_channels + self.image_size = image_size + self.feature_size = feature_size + self.xbound = xbound + self.ybound = ybound + self.zbound = zbound + self.dbound = dbound + + dx, bx, nx = gen_dx_bx(self.xbound, self.ybound, self.zbound) + self.dx = nn.Parameter(dx, requires_grad=False) + self.bx = nn.Parameter(bx, requires_grad=False) + self.nx = nn.Parameter(nx, requires_grad=False) + + self.C = out_channels + self.frustum = self.create_frustum() + self.D = self.frustum.shape[0] + self.fp16_enabled = False + + def create_frustum(self): + iH, iW = self.image_size + fH, fW = self.feature_size + + ds = ( + torch.arange(*self.dbound, + dtype=torch.float).view(-1, 1, 1).expand(-1, fH, fW)) + D, _, _ = ds.shape + + xs = ( + torch.linspace(0, iW - 1, fW, + dtype=torch.float).view(1, 1, fW).expand(D, fH, fW)) + ys = ( + torch.linspace(0, iH - 1, fH, + dtype=torch.float).view(1, fH, 1).expand(D, fH, fW)) + + frustum = torch.stack((xs, ys, ds), -1) + return nn.Parameter(frustum, requires_grad=False) + + def get_geometry( + self, + camera2lidar_rots, + camera2lidar_trans, + intrins, + post_rots, + post_trans, + **kwargs, 
+ ): + B, N, _ = camera2lidar_trans.shape + + # undo post-transformation + # B x N x D x H x W x 3 + points = self.frustum - post_trans.view(B, N, 1, 1, 1, 3) + points = ( + torch.inverse(post_rots).view(B, N, 1, 1, 1, 3, + 3).matmul(points.unsqueeze(-1))) + # cam_to_lidar + points = torch.cat( + ( + points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3], + points[:, :, :, :, :, 2:3], + ), + 5, + ) + combine = camera2lidar_rots.matmul(torch.inverse(intrins)) + points = combine.view(B, N, 1, 1, 1, 3, 3).matmul(points).squeeze(-1) + points += camera2lidar_trans.view(B, N, 1, 1, 1, 3) + + if 'extra_rots' in kwargs: + extra_rots = kwargs['extra_rots'] + points = ( + extra_rots.view(B, 1, 1, 1, 1, 3, + 3).repeat(1, N, 1, 1, 1, 1, 1).matmul( + points.unsqueeze(-1)).squeeze(-1)) + if 'extra_trans' in kwargs: + extra_trans = kwargs['extra_trans'] + points += extra_trans.view(B, 1, 1, 1, 1, + 3).repeat(1, N, 1, 1, 1, 1) + + return points + + def get_cam_feats(self, x): + raise NotImplementedError + + def bev_pool(self, geom_feats, x): + B, N, D, H, W, C = x.shape + Nprime = B * N * D * H * W + + # flatten x + x = x.reshape(Nprime, C) + + # flatten indices + geom_feats = ((geom_feats - (self.bx - self.dx / 2.0)) / + self.dx).long() + geom_feats = geom_feats.view(Nprime, 3) + batch_ix = torch.cat([ + torch.full([Nprime // B, 1], ix, device=x.device, dtype=torch.long) + for ix in range(B) + ]) + geom_feats = torch.cat((geom_feats, batch_ix), 1) + + # filter out points that are outside box + kept = ((geom_feats[:, 0] >= 0) + & (geom_feats[:, 0] < self.nx[0]) + & (geom_feats[:, 1] >= 0) + & (geom_feats[:, 1] < self.nx[1]) + & (geom_feats[:, 2] >= 0) + & (geom_feats[:, 2] < self.nx[2])) + x = x[kept] + geom_feats = geom_feats[kept] + + x = bev_pool(x, geom_feats, B, self.nx[2], self.nx[0], self.nx[1]) + + # collapse Z + final = torch.cat(x.unbind(dim=2), 1) + + return final + + def forward( + self, + img, + points, + lidar2image, + camera_intrinsics, + camera2lidar, + img_aug_matrix, + lidar_aug_matrix, + **kwargs, + ): + intrins = camera_intrinsics[..., :3, :3] + post_rots = img_aug_matrix[..., :3, :3] + post_trans = img_aug_matrix[..., :3, 3] + camera2lidar_rots = camera2lidar[..., :3, :3] + camera2lidar_trans = camera2lidar[..., :3, 3] + + extra_rots = lidar_aug_matrix[..., :3, :3] + extra_trans = lidar_aug_matrix[..., :3, 3] + + geom = self.get_geometry( + camera2lidar_rots, + camera2lidar_trans, + intrins, + post_rots, + post_trans, + extra_rots=extra_rots, + extra_trans=extra_trans, + ) + + x = self.get_cam_feats(img) + x = self.bev_pool(geom, x) + return x + + +class BaseDepthTransform(BaseTransform): + + def forward( + self, + img, + points, + lidar2image, + cam_intrinsic, + camera2lidar, + img_aug_matrix, + lidar_aug_matrix, + metas, + **kwargs, + ): + intrins = cam_intrinsic[..., :3, :3] + post_rots = img_aug_matrix[..., :3, :3] + post_trans = img_aug_matrix[..., :3, 3] + camera2lidar_rots = camera2lidar[..., :3, :3] + camera2lidar_trans = camera2lidar[..., :3, 3] + + # print(img.shape, self.image_size, self.feature_size) + + batch_size = len(points) + depth = torch.zeros(batch_size, img.shape[1], 1, + *self.image_size).to(points[0].device) + + for b in range(batch_size): + cur_coords = points[b][:, :3] + cur_img_aug_matrix = img_aug_matrix[b] + cur_lidar_aug_matrix = lidar_aug_matrix[b] + cur_lidar2image = lidar2image[b] + + # inverse aug + cur_coords -= cur_lidar_aug_matrix[:3, 3] + cur_coords = torch.inverse(cur_lidar_aug_matrix[:3, :3]).matmul( + cur_coords.transpose(1, 0)) + # lidar2image 
+ cur_coords = cur_lidar2image[:, :3, :3].matmul(cur_coords) + cur_coords += cur_lidar2image[:, :3, 3].reshape(-1, 3, 1) + # get 2d coords + dist = cur_coords[:, 2, :] + cur_coords[:, 2, :] = torch.clamp(cur_coords[:, 2, :], 1e-5, 1e5) + cur_coords[:, :2, :] /= cur_coords[:, 2:3, :] + + # imgaug + cur_coords = cur_img_aug_matrix[:, :3, :3].matmul(cur_coords) + cur_coords += cur_img_aug_matrix[:, :3, 3].reshape(-1, 3, 1) + cur_coords = cur_coords[:, :2, :].transpose(1, 2) + + # normalize coords for grid sample + cur_coords = cur_coords[..., [1, 0]] + + on_img = ((cur_coords[..., 0] < self.image_size[0]) + & (cur_coords[..., 0] >= 0) + & (cur_coords[..., 1] < self.image_size[1]) + & (cur_coords[..., 1] >= 0)) + for c in range(on_img.shape[0]): + masked_coords = cur_coords[c, on_img[c]].long() + masked_dist = dist[c, on_img[c]] + depth[b, c, 0, masked_coords[:, 0], + masked_coords[:, 1]] = masked_dist + + extra_rots = lidar_aug_matrix[..., :3, :3] + extra_trans = lidar_aug_matrix[..., :3, 3] + geom = self.get_geometry( + camera2lidar_rots, + camera2lidar_trans, + intrins, + post_rots, + post_trans, + extra_rots=extra_rots, + extra_trans=extra_trans, + ) + + x = self.get_cam_feats(img, depth) + x = self.bev_pool(geom, x) + return x + + +@MODELS.register_module() +class DepthLSSTransform(BaseDepthTransform): + + def __init__( + self, + in_channels: int, + out_channels: int, + image_size: Tuple[int, int], + feature_size: Tuple[int, int], + xbound: Tuple[float, float, float], + ybound: Tuple[float, float, float], + zbound: Tuple[float, float, float], + dbound: Tuple[float, float, float], + downsample: int = 1, + ) -> None: + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + image_size=image_size, + feature_size=feature_size, + xbound=xbound, + ybound=ybound, + zbound=zbound, + dbound=dbound, + ) + self.dtransform = nn.Sequential( + nn.Conv2d(1, 8, 1), + nn.BatchNorm2d(8), + nn.ReLU(True), + nn.Conv2d(8, 32, 5, stride=4, padding=2), + nn.BatchNorm2d(32), + nn.ReLU(True), + nn.Conv2d(32, 64, 5, stride=2, padding=2), + nn.BatchNorm2d(64), + nn.ReLU(True), + ) + self.depthnet = nn.Sequential( + nn.Conv2d(in_channels + 64, in_channels, 3, padding=1), + nn.BatchNorm2d(in_channels), + nn.ReLU(True), + nn.Conv2d(in_channels, in_channels, 3, padding=1), + nn.BatchNorm2d(in_channels), + nn.ReLU(True), + nn.Conv2d(in_channels, self.D + self.C, 1), + ) + if downsample > 1: + assert downsample == 2, downsample + self.downsample = nn.Sequential( + nn.Conv2d( + out_channels, out_channels, 3, padding=1, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU(True), + nn.Conv2d( + out_channels, + out_channels, + 3, + stride=downsample, + padding=1, + bias=False, + ), + nn.BatchNorm2d(out_channels), + nn.ReLU(True), + nn.Conv2d( + out_channels, out_channels, 3, padding=1, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU(True), + ) + else: + self.downsample = nn.Identity() + + def get_cam_feats(self, x, d): + B, N, C, fH, fW = x.shape + + d = d.view(B * N, *d.shape[2:]) + x = x.view(B * N, C, fH, fW) + + d = self.dtransform(d) + x = torch.cat([d, x], dim=1) + x = self.depthnet(x) + + depth = x[:, :self.D].softmax(dim=1) + x = depth.unsqueeze(1) * x[:, self.D:(self.D + self.C)].unsqueeze(2) + + x = x.view(B, N, self.C, self.D, fH, fW) + x = x.permute(0, 1, 3, 4, 5, 2) + return x + + def forward(self, *args, **kwargs): + x = super().forward(*args, **kwargs) + x = self.downsample(x) + return x diff --git a/projects/BEVFusion/bevfusion/loading.py b/projects/BEVFusion/bevfusion/loading.py 
new file mode 100755 index 0000000..8615be7 --- /dev/null +++ b/projects/BEVFusion/bevfusion/loading.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Optional + +import mmcv +import numpy as np +from mmengine.fileio import get + +from mmdet3d.datasets.transforms import LoadMultiViewImageFromFiles +from mmdet3d.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class BEVLoadMultiViewImageFromFiles(LoadMultiViewImageFromFiles): + """Load multi channel images from a list of separate channel files. + + ``BEVLoadMultiViewImageFromFiles`` adds the following keys for the + convenience of view transforms in the forward: + - 'cam2lidar' + - 'lidar2img' + + Args: + to_float32 (bool): Whether to convert the img to float32. + Defaults to False. + color_type (str): Color type of the file. Defaults to 'unchanged'. + backend_args (dict, optional): Arguments to instantiate the + corresponding backend. Defaults to None. + num_views (int): Number of view in a frame. Defaults to 5. + num_ref_frames (int): Number of frame in loading. Defaults to -1. + test_mode (bool): Whether is test mode in loading. Defaults to False. + set_default_scale (bool): Whether to set default scale. + Defaults to True. + """ + + def transform(self, results: dict) -> Optional[dict]: + """Call function to load multi-view image from files. + + Args: + results (dict): Result dict containing multi-view image filenames. + + Returns: + dict: The result dict containing the multi-view image data. + Added keys and values are described below. + + - filename (str): Multi-view image filenames. + - img (np.ndarray): Multi-view image arrays. + - img_shape (tuple[int]): Shape of multi-view image arrays. + - ori_shape (tuple[int]): Shape of original image arrays. + - pad_shape (tuple[int]): Shape of padded image arrays. + - scale_factor (float): Scale factor. + - img_norm_cfg (dict): Normalization configuration of images. 
+ """ + # TODO: consider split the multi-sweep part out of this pipeline + # Derive the mask and transform for loading of multi-sweep data + if self.num_ref_frames > 0: + # init choice with the current frame + init_choice = np.array([0], dtype=np.int64) + num_frames = len(results['img_filename']) // self.num_views - 1 + if num_frames == 0: # no previous frame, then copy cur frames + choices = np.random.choice( + 1, self.num_ref_frames, replace=True) + elif num_frames >= self.num_ref_frames: + # NOTE: suppose the info is saved following the order + # from latest to earlier frames + if self.test_mode: + choices = np.arange(num_frames - self.num_ref_frames, + num_frames) + 1 + # NOTE: +1 is for selecting previous frames + else: + choices = np.random.choice( + num_frames, self.num_ref_frames, replace=False) + 1 + elif num_frames > 0 and num_frames < self.num_ref_frames: + if self.test_mode: + base_choices = np.arange(num_frames) + 1 + random_choices = np.random.choice( + num_frames, + self.num_ref_frames - num_frames, + replace=True) + 1 + choices = np.concatenate([base_choices, random_choices]) + else: + choices = np.random.choice( + num_frames, self.num_ref_frames, replace=True) + 1 + else: + raise NotImplementedError + choices = np.concatenate([init_choice, choices]) + select_filename = [] + for choice in choices: + select_filename += results['img_filename'][choice * + self.num_views: + (choice + 1) * + self.num_views] + results['img_filename'] = select_filename + for key in ['cam2img', 'lidar2cam']: + if key in results: + select_results = [] + for choice in choices: + select_results += results[key][choice * + self.num_views:(choice + + 1) * + self.num_views] + results[key] = select_results + for key in ['ego2global']: + if key in results: + select_results = [] + for choice in choices: + select_results += [results[key][choice]] + results[key] = select_results + # Transform lidar2cam to + # [cur_lidar]2[prev_img] and [cur_lidar]2[prev_cam] + for key in ['lidar2cam']: + if key in results: + # only change matrices of previous frames + for choice_idx in range(1, len(choices)): + pad_prev_ego2global = np.eye(4) + prev_ego2global = results['ego2global'][choice_idx] + pad_prev_ego2global[:prev_ego2global. + shape[0], :prev_ego2global. + shape[1]] = prev_ego2global + pad_cur_ego2global = np.eye(4) + cur_ego2global = results['ego2global'][0] + pad_cur_ego2global[:cur_ego2global. + shape[0], :cur_ego2global. 
+ shape[1]] = cur_ego2global + cur2prev = np.linalg.inv(pad_prev_ego2global).dot( + pad_cur_ego2global) + for result_idx in range(choice_idx * self.num_views, + (choice_idx + 1) * + self.num_views): + results[key][result_idx] = \ + results[key][result_idx].dot(cur2prev) + # Support multi-view images with different shapes + # TODO: record the origin shape and padded shape + filename, cam2img, lidar2cam, cam2lidar, lidar2img = [], [], [], [], [] + for _, cam_item in results['images'].items(): + filename.append(cam_item['img_path']) + lidar2cam.append(cam_item['lidar2cam']) + + lidar2cam_array = np.array(cam_item['lidar2cam']).astype( + np.float32) + lidar2cam_rot = lidar2cam_array[:3, :3] + lidar2cam_trans = lidar2cam_array[:3, 3:4] + camera2lidar = np.eye(4) + camera2lidar[:3, :3] = lidar2cam_rot.T + camera2lidar[:3, 3:4] = -1 * np.matmul( + lidar2cam_rot.T, lidar2cam_trans.reshape(3, 1)) + cam2lidar.append(camera2lidar) + + cam2img_array = np.eye(4).astype(np.float32) + cam2img_array[:3, :3] = np.array(cam_item['cam2img']).astype( + np.float32) + cam2img.append(cam2img_array) + lidar2img.append(cam2img_array @ lidar2cam_array) + + results['img_path'] = filename + results['cam2img'] = np.stack(cam2img, axis=0) + results['lidar2cam'] = np.stack(lidar2cam, axis=0) + results['cam2lidar'] = np.stack(cam2lidar, axis=0) + results['lidar2img'] = np.stack(lidar2img, axis=0) + + results['ori_cam2img'] = copy.deepcopy(results['cam2img']) + + # img is of shape (h, w, c, num_views) + # h and w can be different for different views + img_bytes = [ + get(name, backend_args=self.backend_args) for name in filename + ] + imgs = [ + mmcv.imfrombytes( + img_byte, + flag=self.color_type, + backend='pillow', + channel_order='rgb') for img_byte in img_bytes + ] + # handle the image with different shape + img_shapes = np.stack([img.shape for img in imgs], axis=0) + img_shape_max = np.max(img_shapes, axis=0) + img_shape_min = np.min(img_shapes, axis=0) + assert img_shape_min[-1] == img_shape_max[-1] + if not np.all(img_shape_max == img_shape_min): + pad_shape = img_shape_max[:2] + else: + pad_shape = None + if pad_shape is not None: + imgs = [ + mmcv.impad(img, shape=pad_shape, pad_val=0) for img in imgs + ] + img = np.stack(imgs, axis=-1) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + # unravel to list, see `DefaultFormatBundle` in formating.py + # which will transpose each image separately and then stack into array + results['img'] = [img[..., i] for i in range(img.shape[-1])] + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + # Set initial values for default meta_keys + results['pad_shape'] = img.shape[:2] + if self.set_default_scale: + results['scale_factor'] = 1.0 + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + results['num_views'] = self.num_views + results['num_ref_frames'] = self.num_ref_frames + return results diff --git a/projects/BEVFusion/bevfusion/ops/__init__.py b/projects/BEVFusion/bevfusion/ops/__init__.py new file mode 100755 index 0000000..03830c3 --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/__init__.py @@ -0,0 +1,7 @@ +from .bev_pool import bev_pool +from .voxel import DynamicScatter, Voxelization, dynamic_scatter, voxelization + +__all__ = [ + 'bev_pool', 'Voxelization', 'voxelization', 'dynamic_scatter', + 'DynamicScatter' +] diff --git 
a/projects/BEVFusion/bevfusion/ops/bev_pool/__init__.py b/projects/BEVFusion/bevfusion/ops/bev_pool/__init__.py new file mode 100755 index 0000000..842b03c --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/bev_pool/__init__.py @@ -0,0 +1,3 @@ +from .bev_pool import bev_pool + +__all__ = ['bev_pool'] diff --git a/projects/BEVFusion/bevfusion/ops/bev_pool/bev_pool.py b/projects/BEVFusion/bevfusion/ops/bev_pool/bev_pool.py new file mode 100755 index 0000000..46cf532 --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/bev_pool/bev_pool.py @@ -0,0 +1,94 @@ +import torch + +from . import bev_pool_ext + + +class QuickCumsum(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, geom_feats, ranks): + x = x.cumsum(0) + kept = torch.ones(x.shape[0], device=x.device, dtype=torch.bool) + kept[:-1] = ranks[1:] != ranks[:-1] + + x, geom_feats = x[kept], geom_feats[kept] + x = torch.cat((x[:1], x[1:] - x[:-1])) + + # save kept for backward + ctx.save_for_backward(kept) + + # no gradient for geom_feats + ctx.mark_non_differentiable(geom_feats) + + return x, geom_feats + + @staticmethod + def backward(ctx, gradx, gradgeom): + (kept, ) = ctx.saved_tensors + back = torch.cumsum(kept, 0) + back[kept] -= 1 + + val = gradx[back] + + return val, None, None + + +class QuickCumsumCuda(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, geom_feats, ranks, B, D, H, W): + kept = torch.ones(x.shape[0], device=x.device, dtype=torch.bool) + kept[1:] = ranks[1:] != ranks[:-1] + interval_starts = torch.where(kept)[0].int() + interval_lengths = torch.zeros_like(interval_starts) + interval_lengths[:-1] = interval_starts[1:] - interval_starts[:-1] + interval_lengths[-1] = x.shape[0] - interval_starts[-1] + geom_feats = geom_feats.int() + + out = bev_pool_ext.bev_pool_forward( + x, + geom_feats, + interval_lengths, + interval_starts, + B, + D, + H, + W, + ) + + ctx.save_for_backward(interval_starts, interval_lengths, geom_feats) + ctx.saved_shapes = B, D, H, W + return out + + @staticmethod + def backward(ctx, out_grad): + interval_starts, interval_lengths, geom_feats = ctx.saved_tensors + B, D, H, W = ctx.saved_shapes + + out_grad = out_grad.contiguous() + x_grad = bev_pool_ext.bev_pool_backward( + out_grad, + geom_feats, + interval_lengths, + interval_starts, + B, + D, + H, + W, + ) + + return x_grad, None, None, None, None, None, None + + +def bev_pool(feats, coords, B, D, H, W): + assert feats.shape[0] == coords.shape[0] + + ranks = ( + coords[:, 0] * (W * D * B) + coords[:, 1] * (D * B) + + coords[:, 2] * B + coords[:, 3]) + indices = ranks.argsort() + feats, coords, ranks = feats[indices], coords[indices], ranks[indices] + + x = QuickCumsumCuda.apply(feats, coords, ranks, B, D, H, W) + x = x.permute(0, 4, 1, 2, 3).contiguous() + return x diff --git a/projects/BEVFusion/bevfusion/ops/bev_pool/src/bev_pool.cpp b/projects/BEVFusion/bevfusion/ops/bev_pool/src/bev_pool.cpp new file mode 100755 index 0000000..b6a04e0 --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/bev_pool/src/bev_pool.cpp @@ -0,0 +1,94 @@ +#include +#include + +// CUDA function declarations +void bev_pool(int b, int d, int h, int w, int n, int c, int n_intervals, const float* x, + const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* out); + +void bev_pool_grad(int b, int d, int h, int w, int n, int c, int n_intervals, const float* out_grad, + const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* x_grad); + + +/* + Function: pillar pooling (forward, cuda) + 
Args: + x : input features, FloatTensor[n, c] + geom_feats : input coordinates, IntTensor[n, 4] + interval_lengths : starting position for pooled point, IntTensor[n_intervals] + interval_starts : how many points in each pooled point, IntTensor[n_intervals] + Return: + out : output features, FloatTensor[b, d, h, w, c] +*/ +at::Tensor bev_pool_forward( + const at::Tensor _x, + const at::Tensor _geom_feats, + const at::Tensor _interval_lengths, + const at::Tensor _interval_starts, + int b, int d, int h, int w +) { + int n = _x.size(0); + int c = _x.size(1); + int n_intervals = _interval_lengths.size(0); + const at::cuda::OptionalCUDAGuard device_guard(device_of(_x)); + const float* x = _x.data_ptr(); + const int* geom_feats = _geom_feats.data_ptr(); + const int* interval_lengths = _interval_lengths.data_ptr(); + const int* interval_starts = _interval_starts.data_ptr(); + + auto options = + torch::TensorOptions().dtype(_x.dtype()).device(_x.device()); + at::Tensor _out = torch::zeros({b, d, h, w, c}, options); + float* out = _out.data_ptr(); + bev_pool( + b, d, h, w, n, c, n_intervals, x, + geom_feats, interval_starts, interval_lengths, out + ); + return _out; +} + + +/* + Function: pillar pooling (backward, cuda) + Args: + out_grad : input features, FloatTensor[b, d, h, w, c] + geom_feats : input coordinates, IntTensor[n, 4] + interval_lengths : starting position for pooled point, IntTensor[n_intervals] + interval_starts : how many points in each pooled point, IntTensor[n_intervals] + Return: + x_grad : output features, FloatTensor[n, 4] +*/ +at::Tensor bev_pool_backward( + const at::Tensor _out_grad, + const at::Tensor _geom_feats, + const at::Tensor _interval_lengths, + const at::Tensor _interval_starts, + int b, int d, int h, int w +) { + int n = _geom_feats.size(0); + int c = _out_grad.size(4); + int n_intervals = _interval_lengths.size(0); + const at::cuda::OptionalCUDAGuard device_guard(device_of(_out_grad)); + const float* out_grad = _out_grad.data_ptr(); + const int* geom_feats = _geom_feats.data_ptr(); + const int* interval_lengths = _interval_lengths.data_ptr(); + const int* interval_starts = _interval_starts.data_ptr(); + + auto options = + torch::TensorOptions().dtype(_out_grad.dtype()).device(_out_grad.device()); + at::Tensor _x_grad = torch::zeros({n, c}, options); + float* x_grad = _x_grad.data_ptr(); + + bev_pool_grad( + b, d, h, w, n, c, n_intervals, out_grad, + geom_feats, interval_starts, interval_lengths, x_grad + ); + + return _x_grad; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("bev_pool_forward", &bev_pool_forward, + "bev_pool_forward"); + m.def("bev_pool_backward", &bev_pool_backward, + "bev_pool_backward"); +} diff --git a/projects/BEVFusion/bevfusion/ops/bev_pool/src/bev_pool_cuda.cu b/projects/BEVFusion/bevfusion/ops/bev_pool/src/bev_pool_cuda.cu new file mode 100755 index 0000000..ea4e407 --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/bev_pool/src/bev_pool_cuda.cu @@ -0,0 +1,98 @@ +#include +#include + +/* + Function: pillar pooling + Args: + b : batch size + d : depth of the feature map + h : height of pooled feature map + w : width of pooled feature map + n : number of input points + c : number of channels + n_intervals : number of unique points + x : input features, FloatTensor[n, c] + geom_feats : input coordinates, IntTensor[n, 4] + interval_lengths : starting position for pooled point, IntTensor[n_intervals] + interval_starts : how many points in each pooled point, IntTensor[n_intervals] + out : output features, FloatTensor[b, d, h, w, c] 
+*/ +__global__ void bev_pool_kernel(int b, int d, int h, int w, int n, int c, int n_intervals, + const float *__restrict__ x, + const int *__restrict__ geom_feats, + const int *__restrict__ interval_starts, + const int *__restrict__ interval_lengths, + float* __restrict__ out) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int index = idx / c; + int cur_c = idx % c; + if (index >= n_intervals) return; + int interval_start = interval_starts[index]; + int interval_length = interval_lengths[index]; + const int* cur_geom_feats = geom_feats + interval_start * 4; + const float* cur_x = x + interval_start * c + cur_c; + float* cur_out = out + cur_geom_feats[3] * d * h * w * c + + cur_geom_feats[2] * h * w * c + cur_geom_feats[0] * w * c + + cur_geom_feats[1] * c + cur_c; + float psum = 0; + for(int i = 0; i < interval_length; i++){ + psum += cur_x[i * c]; + } + *cur_out = psum; +} + + +/* + Function: pillar pooling backward + Args: + b : batch size + d : depth of the feature map + h : height of pooled feature map + w : width of pooled feature map + n : number of input points + c : number of channels + n_intervals : number of unique points + out_grad : gradient of the BEV fmap from top, FloatTensor[b, d, h, w, c] + geom_feats : input coordinates, IntTensor[n, 4] + interval_lengths : starting position for pooled point, IntTensor[n_intervals] + interval_starts : how many points in each pooled point, IntTensor[n_intervals] + x_grad : gradient of the image fmap, FloatTensor +*/ +__global__ void bev_pool_grad_kernel(int b, int d, int h, int w, int n, int c, int n_intervals, + const float *__restrict__ out_grad, + const int *__restrict__ geom_feats, + const int *__restrict__ interval_starts, + const int *__restrict__ interval_lengths, + float* __restrict__ x_grad) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int index = idx / c; + int cur_c = idx % c; + if (index >= n_intervals) return; + int interval_start = interval_starts[index]; + int interval_length = interval_lengths[index]; + + const int* cur_geom_feats = geom_feats + interval_start * 4; + float* cur_x_grad = x_grad + interval_start * c + cur_c; + + const float* cur_out_grad = out_grad + cur_geom_feats[3] * d * h * w * c + + cur_geom_feats[2] * h * w * c + cur_geom_feats[0] * w * c + + cur_geom_feats[1] * c + cur_c; + for(int i = 0; i < interval_length; i++){ + cur_x_grad[i * c] = *cur_out_grad; + } + +} + +void bev_pool(int b, int d, int h, int w, int n, int c, int n_intervals, const float* x, + const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* out) { + bev_pool_kernel<<<(int)ceil(((double)n_intervals * c / 256)), 256>>>( + b, d, h, w, n, c, n_intervals, x, geom_feats, interval_starts, interval_lengths, out + ); +} + +void bev_pool_grad(int b, int d, int h, int w, int n, int c, int n_intervals, const float* out_grad, + const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* x_grad) { + bev_pool_grad_kernel<<<(int)ceil(((double)n_intervals * c / 256)), 256>>>( + b, d, h, w, n, c, n_intervals, out_grad, geom_feats, interval_starts, interval_lengths, x_grad + ); +} diff --git a/projects/BEVFusion/bevfusion/ops/voxel/__init__.py b/projects/BEVFusion/bevfusion/ops/voxel/__init__.py new file mode 100755 index 0000000..a74fb63 --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/__init__.py @@ -0,0 +1,4 @@ +from .scatter_points import DynamicScatter, dynamic_scatter +from .voxelize import Voxelization, voxelization + +__all__ = ['Voxelization', 'voxelization', 
'dynamic_scatter', 'DynamicScatter'] diff --git a/projects/BEVFusion/bevfusion/ops/voxel/scatter_points.py b/projects/BEVFusion/bevfusion/ops/voxel/scatter_points.py new file mode 100755 index 0000000..1862abd --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/scatter_points.py @@ -0,0 +1,112 @@ +import torch +from torch import nn +from torch.autograd import Function + +from .voxel_layer import (dynamic_point_to_voxel_backward, + dynamic_point_to_voxel_forward) + + +class _dynamic_scatter(Function): + + @staticmethod + def forward(ctx, feats, coors, reduce_type='max'): + """convert kitti points(N, >=3) to voxels. + + Args: + feats: [N, C] float tensor. points features to be reduced + into voxels. + coors: [N, ndim] int tensor. corresponding voxel coordinates + (specifically multi-dim voxel index) of each points. + reduce_type: str. reduce op. support 'max', 'sum' and 'mean' + Returns: + tuple + voxel_feats: [M, C] float tensor. reduced features. input features + that shares the same voxel coordinates are reduced to one row + coordinates: [M, ndim] int tensor, voxel coordinates. + """ + results = dynamic_point_to_voxel_forward(feats, coors, reduce_type) + (voxel_feats, voxel_coors, point2voxel_map, + voxel_points_count) = results + ctx.reduce_type = reduce_type + ctx.save_for_backward(feats, voxel_feats, point2voxel_map, + voxel_points_count) + ctx.mark_non_differentiable(voxel_coors) + return voxel_feats, voxel_coors + + @staticmethod + def backward(ctx, grad_voxel_feats, grad_voxel_coors=None): + (feats, voxel_feats, point2voxel_map, + voxel_points_count) = ctx.saved_tensors + grad_feats = torch.zeros_like(feats) + # TODO: whether to use index put or use cuda_backward + # To use index put, need point to voxel index + dynamic_point_to_voxel_backward( + grad_feats, + grad_voxel_feats.contiguous(), + feats, + voxel_feats, + point2voxel_map, + voxel_points_count, + ctx.reduce_type, + ) + return grad_feats, None, None + + +dynamic_scatter = _dynamic_scatter.apply + + +class DynamicScatter(nn.Module): + + def __init__(self, voxel_size, point_cloud_range, average_points: bool): + super(DynamicScatter, self).__init__() + """Scatters points into voxels, used in the voxel encoder with + dynamic voxelization + + **Note**: The CPU and GPU implementation get the same output, but + have numerical difference after summation and division (e.g., 5e-7). 
+ + Args: + average_points (bool): whether to use avg pooling to scatter + points into voxel voxel_size (list): list [x, y, z] size + of three dimension + point_cloud_range (list): + [x_min, y_min, z_min, x_max, y_max, z_max] + """ + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.average_points = average_points + + def forward_single(self, points, coors): + reduce = 'mean' if self.average_points else 'max' + return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce) + + def forward(self, points, coors): + """ + Args: + input: NC points + """ + if coors.size(-1) == 3: + return self.forward_single(points, coors) + else: + batch_size = coors[-1, 0] + 1 + voxels, voxel_coors = [], [] + for i in range(batch_size): + inds = torch.where(coors[:, 0] == i) + voxel, voxel_coor = self.forward_single( + points[inds], coors[inds][:, 1:]) + coor_pad = nn.functional.pad( + voxel_coor, (1, 0), mode='constant', value=i) + voxel_coors.append(coor_pad) + voxels.append(voxel) + features = torch.cat(voxels, dim=0) + feature_coors = torch.cat(voxel_coors, dim=0) + + return features, feature_coors + + def __repr__(self): + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'voxel_size=' + str(self.voxel_size) + tmpstr += ', point_cloud_range=' + str(self.point_cloud_range) + tmpstr += ', average_points=' + str(self.average_points) + tmpstr += ')' + return tmpstr diff --git a/projects/BEVFusion/bevfusion/ops/voxel/src/scatter_points_cpu.cpp b/projects/BEVFusion/bevfusion/ops/voxel/src/scatter_points_cpu.cpp new file mode 100755 index 0000000..c22b8ae --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/src/scatter_points_cpu.cpp @@ -0,0 +1,122 @@ +#include +#include +// #include "voxelization.h" + +namespace { + +template +void determin_max_points_kernel( + torch::TensorAccessor coor, + torch::TensorAccessor point_to_voxelidx, + torch::TensorAccessor num_points_per_voxel, + torch::TensorAccessor coor_to_voxelidx, int& voxel_num, + int& max_points, const int num_points) { + int voxelidx, num; + for (int i = 0; i < num_points; ++i) { + if (coor[i][0] == -1) continue; + voxelidx = coor_to_voxelidx[coor[i][0]][coor[i][1]][coor[i][2]]; + + // record voxel + if (voxelidx == -1) { + voxelidx = voxel_num; + voxel_num += 1; + coor_to_voxelidx[coor[i][0]][coor[i][1]][coor[i][2]] = voxelidx; + } + + // put points into voxel + num = num_points_per_voxel[voxelidx]; + point_to_voxelidx[i] = num; + num_points_per_voxel[voxelidx] += 1; + + // update max points per voxel + max_points = std::max(max_points, num + 1); + } + + return; +} + +template +void scatter_point_to_voxel_kernel( + const torch::TensorAccessor points, + torch::TensorAccessor coor, + torch::TensorAccessor point_to_voxelidx, + torch::TensorAccessor coor_to_voxelidx, + torch::TensorAccessor voxels, + torch::TensorAccessor voxel_coors, const int num_features, + const int num_points, const int NDim) { + for (int i = 0; i < num_points; ++i) { + int num = point_to_voxelidx[i]; + int voxelidx = coor_to_voxelidx[coor[i][0]][coor[i][1]][coor[i][2]]; + for (int k = 0; k < num_features; ++k) { + voxels[voxelidx][num][k] = points[i][k]; + } + for (int k = 0; k < NDim; ++k) { + voxel_coors[voxelidx][k] = coor[i][k]; + } + } +} + +} // namespace + +namespace voxelization { + +std::vector dynamic_point_to_voxel_cpu( + const at::Tensor& points, const at::Tensor& voxel_mapping, + const std::vector voxel_size, const std::vector coors_range) { + // current version tooks about 0.02s_0.03s for one frame on cpu + // check device + 
AT_ASSERTM(points.device().is_cpu(), "points must be a CPU tensor"); + + const int NDim = voxel_mapping.size(1); + const int num_points = points.size(0); + const int num_features = points.size(1); + + std::vector grid_size(NDim); + for (int i = 0; i < NDim; ++i) { + grid_size[i] = + round((coors_range[NDim + i] - coors_range[i]) / voxel_size[i]); + } + + at::Tensor num_points_per_voxel = at::zeros( + { + num_points, + }, + voxel_mapping.options()); + at::Tensor coor_to_voxelidx = -at::ones( + {grid_size[2], grid_size[1], grid_size[0]}, voxel_mapping.options()); + at::Tensor point_to_voxelidx = -at::ones( + { + num_points, + }, + voxel_mapping.options()); + + int voxel_num = 0; + int max_points = 0; + AT_DISPATCH_ALL_TYPES(voxel_mapping.scalar_type(), "determin_max_point", [&] { + determin_max_points_kernel( + voxel_mapping.accessor(), + point_to_voxelidx.accessor(), + num_points_per_voxel.accessor(), + coor_to_voxelidx.accessor(), voxel_num, max_points, + num_points); + }); + + at::Tensor voxels = + at::zeros({voxel_num, max_points, num_features}, points.options()); + at::Tensor voxel_coors = + at::zeros({voxel_num, NDim}, points.options().dtype(at::kInt)); + + AT_DISPATCH_ALL_TYPES(points.scalar_type(), "scatter_point_to_voxel", [&] { + scatter_point_to_voxel_kernel( + points.accessor(), voxel_mapping.accessor(), + point_to_voxelidx.accessor(), + coor_to_voxelidx.accessor(), voxels.accessor(), + voxel_coors.accessor(), num_features, num_points, NDim); + }); + + at::Tensor num_points_per_voxel_out = + num_points_per_voxel.slice(/*dim=*/0, /*start=*/0, /*end=*/voxel_num); + return {voxels, voxel_coors, num_points_per_voxel_out}; +} + +} // namespace voxelization diff --git a/projects/BEVFusion/bevfusion/ops/voxel/src/scatter_points_cuda.cu b/projects/BEVFusion/bevfusion/ops/voxel/src/scatter_points_cuda.cu new file mode 100755 index 0000000..2ed1869 --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/src/scatter_points_cuda.cu @@ -0,0 +1,310 @@ +#include +#include +#include + +#include + +typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; + +#define CHECK_CUDA(x) \ + TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +namespace { +int const threadsPerBlock = 512; +int const maxGridDim = 50000; +} // namespace + +__device__ __forceinline__ static void reduceMax(float *address, float val) { + int *address_as_i = reinterpret_cast(address); + int old = *address_as_i, assumed; + do { + assumed = old; + old = atomicCAS(address_as_i, assumed, + __float_as_int(fmaxf(val, __int_as_float(assumed)))); + } while (assumed != old || __int_as_float(old) < val); +} + +__device__ __forceinline__ static void reduceMax(double *address, double val) { + unsigned long long *address_as_ull = + reinterpret_cast(address); + unsigned long long old = *address_as_ull, assumed; + do { + assumed = old; + old = atomicCAS( + address_as_ull, assumed, + __double_as_longlong(fmax(val, __longlong_as_double(assumed)))); + } while (assumed != old || __longlong_as_double(old) < val); +} + +// get rid of meaningless warnings when compiling host code +#ifdef __CUDA_ARCH__ +__device__ __forceinline__ static void reduceAdd(float *address, float val) { +#if (__CUDA_ARCH__ < 200) +#warning \ + "compute capability lower than 2.x. 
fall back to use CAS version of atomicAdd for float32" + int *address_as_i = reinterpret_cast(address); + int old = *address_as_i, assumed; + do { + assumed = old; + old = atomicCAS(address_as_i, assumed, + __float_as_int(val + __int_as_float(assumed))); + } while (assumed != old); +#else + atomicAdd(address, val); +#endif +} + +__device__ __forceinline__ static void reduceAdd(double *address, double val) { +#if (__CUDA_ARCH__ < 600) +#warning \ + "compute capability lower than 6.x. fall back to use CAS version of atomicAdd for float64" + unsigned long long *address_as_ull = + reinterpret_cast(address); + unsigned long long old = *address_as_ull, assumed; + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __double_as_longlong(val + __longlong_as_double(assumed))); + } while (assumed != old); +#else + atomicAdd(address, val); +#endif +} +#endif + +template +__global__ void +feats_reduce_kernel(const T *feats, const int32_t *coors_map, + T *reduced_feats, // shall be 0 at initialization + const int num_input, const int num_feats, + const reduce_t reduce_type) { + for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; + x += gridDim.x * blockDim.x) { + int32_t reduce_to = coors_map[x]; + if (reduce_to == -1) continue; + + const T *feats_offset = feats + x * num_feats; + T *reduced_feats_offset = reduced_feats + reduce_to * num_feats; + if (reduce_type == reduce_t::MAX) { + for (int i = 0; i < num_feats; i++) { + reduceMax(&reduced_feats_offset[i], feats_offset[i]); + } + } else { + for (int i = 0; i < num_feats; i++) { + reduceAdd(&reduced_feats_offset[i], feats_offset[i]); + } + } + } +} + +template +__global__ void add_reduce_traceback_grad_kernel( + T *grad_feats, const T *grad_reduced_feats, const int32_t *coors_map, + const int32_t *reduce_count, const int num_input, const int num_feats, + const reduce_t reduce_type) { + for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; + x += gridDim.x * blockDim.x) { + int32_t reduce_to = coors_map[x]; + if (reduce_to == -1) { + continue; + } + + const int input_offset = x * num_feats; + T *grad_feats_offset = grad_feats + input_offset; + const int reduced_offset = reduce_to * num_feats; + const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; + + if (reduce_type == reduce_t::SUM) { + for (int i = 0; i < num_feats; i++) { + grad_feats_offset[i] = grad_reduced_feats_offset[i]; + } + } else if (reduce_type == reduce_t::MEAN) { + for (int i = 0; i < num_feats; i++) { + grad_feats_offset[i] = grad_reduced_feats_offset[i] / + static_cast(reduce_count[reduce_to]); + } + } + } +} + +template +__global__ void max_reduce_traceback_scatter_idx_kernel( + const T *feats, const T *reduced_feats, int32_t *reduce_from, + const int32_t *coors_map, const int num_input, const int num_feats) { + for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; + x += gridDim.x * blockDim.x) { + int32_t reduce_to = coors_map[x]; + + const int input_offset = x * num_feats; + const T *feats_offset = feats + input_offset; + + if (reduce_to == -1) { + continue; + } + + const int reduced_offset = reduce_to * num_feats; + const T *reduced_feats_offset = reduced_feats + reduced_offset; + int32_t *reduce_from_offset = reduce_from + reduced_offset; + + for (int i = 0; i < num_feats; i++) { + if (feats_offset[i] == reduced_feats_offset[i]) { + atomicMin(&reduce_from_offset[i], static_cast(x)); + } + } + } +} + +template +__global__ void max_reduce_scatter_grad_kernel(T *grad_feats, + const T *grad_reduced_feats, + 
const int32_t *reduce_from, + const int num_reduced, + const int num_feats) { + for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_reduced; + x += gridDim.x * blockDim.x) { + const int reduced_offset = x * num_feats; + const int32_t *scatter_to_offset = reduce_from + reduced_offset; + const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; + + for (int i = 0; i < num_feats; i++) { + grad_feats[scatter_to_offset[i] * num_feats + i] = + grad_reduced_feats_offset[i]; + } + } +} + +namespace voxelization { + +std::vector dynamic_point_to_voxel_forward_gpu( + const at::Tensor &feats, const at::Tensor &coors, + const reduce_t reduce_type) { + CHECK_INPUT(feats); + CHECK_INPUT(coors); + + const int num_input = feats.size(0); + const int num_feats = feats.size(1); + + if (num_input == 0) + return {feats.clone().detach(), + coors.clone().detach(), + coors.new_empty({0}, torch::kInt32), + coors.new_empty({0}, torch::kInt32)}; + + at::Tensor out_coors; + at::Tensor coors_map; + at::Tensor reduce_count; + + auto coors_clean = coors.masked_fill(coors.lt(0).any(-1, true), -1); + + std::tie(out_coors, coors_map, reduce_count) = + at::unique_dim(coors_clean, 0, true, true, true); + + if (out_coors.index({0, 0}).lt(0).item()) { + // the first element of out_coors (-1,-1,-1) and should be removed + out_coors = out_coors.slice(0, 1); + reduce_count = reduce_count.slice(0, 1); + coors_map = coors_map - 1; + } + + coors_map = coors_map.to(torch::kInt32); + reduce_count = reduce_count.to(torch::kInt32); + + auto reduced_feats = + at::empty({out_coors.size(0), num_feats}, feats.options()); + + AT_DISPATCH_FLOATING_TYPES( + feats.scalar_type(), "feats_reduce_kernel", ([&] { + if (reduce_type == reduce_t::MAX) + reduced_feats.fill_(-std::numeric_limits::infinity()); + else + reduced_feats.fill_(static_cast(0)); + + dim3 blocks(std::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), + maxGridDim)); + dim3 threads(threadsPerBlock); + feats_reduce_kernel<<>>( + feats.data_ptr(), coors_map.data_ptr(), + reduced_feats.data_ptr(), num_input, num_feats, reduce_type); + if (reduce_type == reduce_t::MEAN) + reduced_feats /= reduce_count.unsqueeze(-1).to(reduced_feats.dtype()); + })); + AT_CUDA_CHECK(cudaGetLastError()); + + return {reduced_feats, out_coors, coors_map, reduce_count}; +} + +void dynamic_point_to_voxel_backward_gpu(at::Tensor &grad_feats, + const at::Tensor &grad_reduced_feats, + const at::Tensor &feats, + const at::Tensor &reduced_feats, + const at::Tensor &coors_map, + const at::Tensor &reduce_count, + const reduce_t reduce_type) { + CHECK_INPUT(grad_feats); + CHECK_INPUT(grad_reduced_feats); + CHECK_INPUT(feats); + CHECK_INPUT(reduced_feats); + CHECK_INPUT(coors_map); + CHECK_INPUT(reduce_count); + + const int num_input = feats.size(0); + const int num_reduced = reduced_feats.size(0); + const int num_feats = feats.size(1); + + grad_feats.fill_(0); + // copy voxel grad to points + + if (num_input == 0 || num_reduced == 0) return; + + if (reduce_type == reduce_t::MEAN || reduce_type == reduce_t::SUM) { + AT_DISPATCH_FLOATING_TYPES( + grad_reduced_feats.scalar_type(), "add_reduce_traceback_grad_kernel", + ([&] { + dim3 blocks(std::min( + at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); + dim3 threads(threadsPerBlock); + add_reduce_traceback_grad_kernel<<>>( + grad_feats.data_ptr(), + grad_reduced_feats.data_ptr(), + coors_map.data_ptr(), reduce_count.data_ptr(), + num_input, num_feats, reduce_type); + })); + AT_CUDA_CHECK(cudaGetLastError()); + } else { + auto 
reduce_from = at::full({num_reduced, num_feats}, num_input, + coors_map.options().dtype(torch::kInt32)); + AT_DISPATCH_FLOATING_TYPES( + grad_reduced_feats.scalar_type(), + "max_reduce_traceback_scatter_idx_kernel", ([&] { + dim3 blocks(std::min( + at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); + dim3 threads(threadsPerBlock); + max_reduce_traceback_scatter_idx_kernel<<>>( + feats.data_ptr(), reduced_feats.data_ptr(), + reduce_from.data_ptr(), coors_map.data_ptr(), + num_input, num_feats); + })); + AT_CUDA_CHECK(cudaGetLastError()); + + AT_DISPATCH_FLOATING_TYPES( + grad_reduced_feats.scalar_type(), + "max_reduce_traceback_scatter_idx_kernel", ([&] { + dim3 blocks(std::min( + at::cuda::ATenCeilDiv(num_reduced, threadsPerBlock), maxGridDim)); + dim3 threads(threadsPerBlock); + max_reduce_scatter_grad_kernel<<>>( + grad_feats.data_ptr(), + grad_reduced_feats.data_ptr(), + reduce_from.data_ptr(), num_reduced, num_feats); + })); + AT_CUDA_CHECK(cudaGetLastError()); + } + return; +} + +} // namespace voxelization diff --git a/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.cpp b/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.cpp new file mode 100755 index 0000000..f83348e --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.cpp @@ -0,0 +1,13 @@ +#include +#include "voxelization.h" + +namespace voxelization { + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("hard_voxelize", &hard_voxelize, "hard voxelize"); + m.def("dynamic_voxelize", &dynamic_voxelize, "dynamic voxelization"); + m.def("dynamic_point_to_voxel_forward", &dynamic_point_to_voxel_forward, "dynamic point to voxel forward"); + m.def("dynamic_point_to_voxel_backward", &dynamic_point_to_voxel_backward, "dynamic point to voxel backward"); +} + +} // namespace voxelization diff --git a/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.h b/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.h new file mode 100755 index 0000000..765b30a --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.h @@ -0,0 +1,142 @@ +#pragma once +#include + +typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; + +namespace voxelization { + +int hard_voxelize_cpu(const at::Tensor &points, at::Tensor &voxels, + at::Tensor &coors, at::Tensor &num_points_per_voxel, + const std::vector voxel_size, + const std::vector coors_range, + const int max_points, const int max_voxels, + const int NDim = 3); + +void dynamic_voxelize_cpu(const at::Tensor &points, at::Tensor &coors, + const std::vector voxel_size, + const std::vector coors_range, + const int NDim = 3); + +std::vector dynamic_point_to_voxel_cpu( + const at::Tensor &points, const at::Tensor &voxel_mapping, + const std::vector voxel_size, const std::vector coors_range); + +#ifdef WITH_CUDA +int hard_voxelize_gpu(const at::Tensor &points, at::Tensor &voxels, + at::Tensor &coors, at::Tensor &num_points_per_voxel, + const std::vector voxel_size, + const std::vector coors_range, + const int max_points, const int max_voxels, + const int NDim = 3); + +int nondisterministic_hard_voxelize_gpu(const at::Tensor &points, at::Tensor &voxels, + at::Tensor &coors, at::Tensor &num_points_per_voxel, + const std::vector voxel_size, + const std::vector coors_range, + const int max_points, const int max_voxels, + const int NDim = 3); + +void dynamic_voxelize_gpu(const at::Tensor &points, at::Tensor &coors, + const std::vector voxel_size, + const std::vector coors_range, + const int NDim = 3); + +std::vector 
dynamic_point_to_voxel_forward_gpu(const torch::Tensor &feats, + const torch::Tensor &coors, + const reduce_t reduce_type); + +void dynamic_point_to_voxel_backward_gpu(torch::Tensor &grad_feats, + const torch::Tensor &grad_reduced_feats, + const torch::Tensor &feats, + const torch::Tensor &reduced_feats, + const torch::Tensor &coors_idx, + const torch::Tensor &reduce_count, + const reduce_t reduce_type); +#endif + +// Interface for Python +inline int hard_voxelize(const at::Tensor &points, at::Tensor &voxels, + at::Tensor &coors, at::Tensor &num_points_per_voxel, + const std::vector voxel_size, + const std::vector coors_range, + const int max_points, const int max_voxels, + const int NDim = 3, const bool deterministic = true) { + if (points.device().is_cuda()) { +#ifdef WITH_CUDA + if (deterministic) { + return hard_voxelize_gpu(points, voxels, coors, num_points_per_voxel, + voxel_size, coors_range, max_points, max_voxels, + NDim); + } + return nondisterministic_hard_voxelize_gpu(points, voxels, coors, num_points_per_voxel, + voxel_size, coors_range, max_points, max_voxels, + NDim); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return hard_voxelize_cpu(points, voxels, coors, num_points_per_voxel, + voxel_size, coors_range, max_points, max_voxels, + NDim); +} + +inline void dynamic_voxelize(const at::Tensor &points, at::Tensor &coors, + const std::vector voxel_size, + const std::vector coors_range, + const int NDim = 3) { + if (points.device().is_cuda()) { +#ifdef WITH_CUDA + return dynamic_voxelize_gpu(points, coors, voxel_size, coors_range, NDim); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return dynamic_voxelize_cpu(points, coors, voxel_size, coors_range, NDim); +} + +inline reduce_t convert_reduce_type(const std::string &reduce_type) { + if (reduce_type == "max") + return reduce_t::MAX; + else if (reduce_type == "sum") + return reduce_t::SUM; + else if (reduce_type == "mean") + return reduce_t::MEAN; + else TORCH_CHECK(false, "do not support reduce type " + reduce_type) + return reduce_t::SUM; +} + +inline std::vector dynamic_point_to_voxel_forward(const torch::Tensor &feats, + const torch::Tensor &coors, + const std::string &reduce_type) { + if (feats.device().is_cuda()) { +#ifdef WITH_CUDA + return dynamic_point_to_voxel_forward_gpu(feats, coors, convert_reduce_type(reduce_type)); +#else + TORCH_CHECK(false, "Not compiled with GPU support"); +#endif + } + TORCH_CHECK(false, "do not support cpu yet"); + return std::vector(); +} + +inline void dynamic_point_to_voxel_backward(torch::Tensor &grad_feats, + const torch::Tensor &grad_reduced_feats, + const torch::Tensor &feats, + const torch::Tensor &reduced_feats, + const torch::Tensor &coors_idx, + const torch::Tensor &reduce_count, + const std::string &reduce_type) { + if (grad_feats.device().is_cuda()) { +#ifdef WITH_CUDA + dynamic_point_to_voxel_backward_gpu( + grad_feats, grad_reduced_feats, feats, reduced_feats, coors_idx, reduce_count, + convert_reduce_type(reduce_type)); + return; +#else + TORCH_CHECK(false, "Not compiled with GPU support"); +#endif + } + TORCH_CHECK(false, "do not support cpu yet"); +} + +} // namespace voxelization diff --git a/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization_cpu.cpp b/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization_cpu.cpp new file mode 100755 index 0000000..1f87e26 --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization_cpu.cpp @@ -0,0 +1,173 @@ +#include +#include +// #include "voxelization.h" + +namespace { + 
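+// CPU reference kernels for the voxelization op. dynamic_voxelize_kernel maps
+// every point to an integer voxel coordinate via
+//   c_j = floor((points[i][j] - coors_range[j]) / voxel_size[j])
+// and writes -1 for every coordinate of a point that falls outside the range;
+// hard_voxelize_kernel then groups points by coordinate, keeping at most
+// max_points points per voxel and at most max_voxels voxels in total.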
+template +void dynamic_voxelize_kernel(const torch::TensorAccessor points, + torch::TensorAccessor coors, + const std::vector voxel_size, + const std::vector coors_range, + const std::vector grid_size, + const int num_points, const int num_features, + const int NDim) { + const int ndim_minus_1 = NDim - 1; + bool failed = false; + // int coor[NDim]; + int* coor = new int[NDim](); + int c; + + for (int i = 0; i < num_points; ++i) { + failed = false; + for (int j = 0; j < NDim; ++j) { + c = floor((points[i][j] - coors_range[j]) / voxel_size[j]); + // necessary to rm points out of range + if ((c < 0 || c >= grid_size[j])) { + failed = true; + break; + } + coor[j] = c; + } + + for (int k = 0; k < NDim; ++k) { + if (failed) + coors[i][k] = -1; + else + coors[i][k] = coor[k]; + } + } + + delete[] coor; + return; +} + +template +void hard_voxelize_kernel(const torch::TensorAccessor points, + torch::TensorAccessor voxels, + torch::TensorAccessor coors, + torch::TensorAccessor num_points_per_voxel, + torch::TensorAccessor coor_to_voxelidx, + int& voxel_num, const std::vector voxel_size, + const std::vector coors_range, + const std::vector grid_size, + const int max_points, const int max_voxels, + const int num_points, const int num_features, + const int NDim) { + // declare a temp coors + at::Tensor temp_coors = at::zeros( + {num_points, NDim}, at::TensorOptions().dtype(at::kInt).device(at::kCPU)); + + // First use dynamic voxelization to get coors, + // then check max points/voxels constraints + dynamic_voxelize_kernel(points, temp_coors.accessor(), + voxel_size, coors_range, grid_size, + num_points, num_features, NDim); + + int voxelidx, num; + auto coor = temp_coors.accessor(); + + for (int i = 0; i < num_points; ++i) { + // T_int* coor = temp_coors.data_ptr() + i * NDim; + + if (coor[i][0] == -1) continue; + + voxelidx = coor_to_voxelidx[coor[i][0]][coor[i][1]][coor[i][2]]; + + // record voxel + if (voxelidx == -1) { + voxelidx = voxel_num; + if (max_voxels != -1 && voxel_num >= max_voxels) continue; + voxel_num += 1; + + coor_to_voxelidx[coor[i][0]][coor[i][1]][coor[i][2]] = voxelidx; + + for (int k = 0; k < NDim; ++k) { + coors[voxelidx][k] = coor[i][k]; + } + } + + // put points into voxel + num = num_points_per_voxel[voxelidx]; + if (max_points == -1 || num < max_points) { + for (int k = 0; k < num_features; ++k) { + voxels[voxelidx][num][k] = points[i][k]; + } + num_points_per_voxel[voxelidx] += 1; + } + } + + return; +} + +} // namespace + +namespace voxelization { + +int hard_voxelize_cpu(const at::Tensor& points, at::Tensor& voxels, + at::Tensor& coors, at::Tensor& num_points_per_voxel, + const std::vector voxel_size, + const std::vector coors_range, + const int max_points, const int max_voxels, + const int NDim = 3) { + // current version tooks about 0.02s_0.03s for one frame on cpu + // check device + AT_ASSERTM(points.device().is_cpu(), "points must be a CPU tensor"); + + std::vector grid_size(NDim); + const int num_points = points.size(0); + const int num_features = points.size(1); + + for (int i = 0; i < NDim; ++i) { + grid_size[i] = + round((coors_range[NDim + i] - coors_range[i]) / voxel_size[i]); + } + + // coors, num_points_per_voxel, coor_to_voxelidx are int Tensor + // printf("cpu coor_to_voxelidx size: [%d, %d, %d]\n", grid_size[2], + // grid_size[1], grid_size[0]); + at::Tensor coor_to_voxelidx = + -at::ones({grid_size[2], grid_size[1], grid_size[0]}, coors.options()); + + int voxel_num = 0; + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + points.scalar_type(), 
"hard_voxelize_forward", [&] { + hard_voxelize_kernel( + points.accessor(), voxels.accessor(), + coors.accessor(), num_points_per_voxel.accessor(), + coor_to_voxelidx.accessor(), voxel_num, voxel_size, + coors_range, grid_size, max_points, max_voxels, num_points, + num_features, NDim); + }); + + return voxel_num; +} + +void dynamic_voxelize_cpu(const at::Tensor& points, at::Tensor& coors, + const std::vector voxel_size, + const std::vector coors_range, + const int NDim = 3) { + // check device + AT_ASSERTM(points.device().is_cpu(), "points must be a CPU tensor"); + + std::vector grid_size(NDim); + const int num_points = points.size(0); + const int num_features = points.size(1); + + for (int i = 0; i < NDim; ++i) { + grid_size[i] = + round((coors_range[NDim + i] - coors_range[i]) / voxel_size[i]); + } + + // coors, num_points_per_voxel, coor_to_voxelidx are int Tensor + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + points.scalar_type(), "hard_voxelize_forward", [&] { + dynamic_voxelize_kernel( + points.accessor(), coors.accessor(), + voxel_size, coors_range, grid_size, num_points, num_features, NDim); + }); + + return; +} + +} // namespace voxelization diff --git a/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization_cuda.cu b/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization_cuda.cu new file mode 100755 index 0000000..f2c4f5a --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization_cuda.cu @@ -0,0 +1,530 @@ +#include +#include +#include +#include + +#include + +#define CHECK_CUDA(x) \ + TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +namespace { +int const threadsPerBlock = sizeof(unsigned long long) * 8; +} + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +template +__global__ void dynamic_voxelize_kernel( + const T* points, T_int* coors, const float voxel_x, const float voxel_y, + const float voxel_z, const float coors_x_min, const float coors_y_min, + const float coors_z_min, const float coors_x_max, const float coors_y_max, + const float coors_z_max, const int grid_x, const int grid_y, + const int grid_z, const int num_points, const int num_features, + const int NDim) { + // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; + CUDA_1D_KERNEL_LOOP(index, num_points) { + // To save some computation + auto points_offset = points + index * num_features; + auto coors_offset = coors + index * NDim; + int c_x = floor((points_offset[0] - coors_x_min) / voxel_x); + if (c_x < 0 || c_x >= grid_x) { + coors_offset[0] = -1; + return; + } + + int c_y = floor((points_offset[1] - coors_y_min) / voxel_y); + if (c_y < 0 || c_y >= grid_y) { + coors_offset[0] = -1; + coors_offset[1] = -1; + return; + } + + int c_z = floor((points_offset[2] - coors_z_min) / voxel_z); + if (c_z < 0 || c_z >= grid_z) { + coors_offset[0] = -1; + coors_offset[1] = -1; + coors_offset[2] = -1; + } else { + coors_offset[0] = c_x; + coors_offset[1] = c_y; + coors_offset[2] = c_z; + } + } +} + +template +__global__ void assign_point_to_voxel(const int nthreads, const T* points, + T_int* point_to_voxelidx, + T_int* coor_to_voxelidx, T* voxels, + const int max_points, + const int num_features, + const int num_points, const int NDim) { + CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { + // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; 
+ int index = thread_idx / num_features; + + int num = point_to_voxelidx[index]; + int voxelidx = coor_to_voxelidx[index]; + if (num > -1 && voxelidx > -1) { + auto voxels_offset = + voxels + voxelidx * max_points * num_features + num * num_features; + + int k = thread_idx % num_features; + voxels_offset[k] = points[thread_idx]; + } + } +} + +template +__global__ void assign_voxel_coors(const int nthreads, T_int* coor, + T_int* point_to_voxelidx, + T_int* coor_to_voxelidx, T_int* voxel_coors, + const int num_points, const int NDim) { + CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { + // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; + // if (index >= num_points) return; + int index = thread_idx / NDim; + int num = point_to_voxelidx[index]; + int voxelidx = coor_to_voxelidx[index]; + if (num == 0 && voxelidx > -1) { + auto coors_offset = voxel_coors + voxelidx * NDim; + int k = thread_idx % NDim; + coors_offset[k] = coor[thread_idx]; + } + } +} + +template +__global__ void point_to_voxelidx_kernel(const T_int* coor, + T_int* point_to_voxelidx, + T_int* point_to_pointidx, + const int max_points, + const int max_voxels, + const int num_points, const int NDim) { + CUDA_1D_KERNEL_LOOP(index, num_points) { + auto coor_offset = coor + index * NDim; + // skip invalid points + if ((index >= num_points) || (coor_offset[0] == -1)) return; + + int num = 0; + int coor_x = coor_offset[0]; + int coor_y = coor_offset[1]; + int coor_z = coor_offset[2]; + // only calculate the coors before this coor[index] + for (int i = 0; i < index; ++i) { + auto prev_coor = coor + i * NDim; + if (prev_coor[0] == -1) continue; + + // Find all previous points that have the same coors + // if find the same coor, record it + if ((prev_coor[0] == coor_x) && (prev_coor[1] == coor_y) && + (prev_coor[2] == coor_z)) { + num++; + if (num == 1) { + // point to the same coor that first show up + point_to_pointidx[index] = i; + } else if (num >= max_points) { + // out of boundary + return; + } + } + } + if (num == 0) { + point_to_pointidx[index] = index; + } + if (num < max_points) { + point_to_voxelidx[index] = num; + } + } +} + +template +__global__ void determin_voxel_num( + // const T_int* coor, + T_int* num_points_per_voxel, T_int* point_to_voxelidx, + T_int* point_to_pointidx, T_int* coor_to_voxelidx, T_int* voxel_num, + const int max_points, const int max_voxels, const int num_points) { + // only calculate the coors before this coor[index] + for (int i = 0; i < num_points; ++i) { + // if (coor[i][0] == -1) + // continue; + int point_pos_in_voxel = point_to_voxelidx[i]; + // record voxel + if (point_pos_in_voxel == -1) { + // out of max_points or invalid point + continue; + } else if (point_pos_in_voxel == 0) { + // record new voxel + int voxelidx = voxel_num[0]; + if (voxel_num[0] >= max_voxels) continue; + voxel_num[0] += 1; + coor_to_voxelidx[i] = voxelidx; + num_points_per_voxel[voxelidx] = 1; + } else { + int point_idx = point_to_pointidx[i]; + int voxelidx = coor_to_voxelidx[point_idx]; + if (voxelidx != -1) { + coor_to_voxelidx[i] = voxelidx; + num_points_per_voxel[voxelidx] += 1; + } + } + } +} + +__global__ void nondisterministic_get_assign_pos( + const int nthreads, const int32_t *coors_map, int32_t *pts_id, + int32_t *coors_count, int32_t *reduce_count, int32_t *coors_order) { + CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { + int coors_idx = coors_map[thread_idx]; + if (coors_idx > -1) { + int32_t coors_pts_pos = atomicAdd(&reduce_count[coors_idx], 1); + pts_id[thread_idx] = coors_pts_pos; + if 
(coors_pts_pos == 0) { + coors_order[coors_idx] = atomicAdd(coors_count, 1); + } + } + } +} + +template +__global__ void nondisterministic_assign_point_voxel( + const int nthreads, const T *points, const int32_t *coors_map, + const int32_t *pts_id, const int32_t *coors_in, + const int32_t *reduce_count, const int32_t *coors_order, + T *voxels, int32_t *coors, int32_t *pts_count, const int max_voxels, + const int max_points, const int num_features, const int NDim) { + CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { + int coors_idx = coors_map[thread_idx]; + int coors_pts_pos = pts_id[thread_idx]; + if (coors_idx > -1) { + int coors_pos = coors_order[coors_idx]; + if (coors_pos < max_voxels && coors_pts_pos < max_points) { + auto voxels_offset = + voxels + (coors_pos * max_points + coors_pts_pos) * num_features; + auto points_offset = points + thread_idx * num_features; + for (int k = 0; k < num_features; k++) { + voxels_offset[k] = points_offset[k]; + } + if (coors_pts_pos == 0) { + pts_count[coors_pos] = min(reduce_count[coors_idx], max_points); + auto coors_offset = coors + coors_pos * NDim; + auto coors_in_offset = coors_in + coors_idx * NDim; + for (int k = 0; k < NDim; k++) { + coors_offset[k] = coors_in_offset[k]; + } + } + } + } + } +} + +namespace voxelization { + +int hard_voxelize_gpu(const at::Tensor& points, at::Tensor& voxels, + at::Tensor& coors, at::Tensor& num_points_per_voxel, + const std::vector voxel_size, + const std::vector coors_range, + const int max_points, const int max_voxels, + const int NDim = 3) { + // current version tooks about 0.04s for one frame on cpu + // check device + CHECK_INPUT(points); + + at::cuda::CUDAGuard device_guard(points.device()); + + const int num_points = points.size(0); + const int num_features = points.size(1); + + const float voxel_x = voxel_size[0]; + const float voxel_y = voxel_size[1]; + const float voxel_z = voxel_size[2]; + const float coors_x_min = coors_range[0]; + const float coors_y_min = coors_range[1]; + const float coors_z_min = coors_range[2]; + const float coors_x_max = coors_range[3]; + const float coors_y_max = coors_range[4]; + const float coors_z_max = coors_range[5]; + + const int grid_x = round((coors_x_max - coors_x_min) / voxel_x); + const int grid_y = round((coors_y_max - coors_y_min) / voxel_y); + const int grid_z = round((coors_z_max - coors_z_min) / voxel_z); + + // map points to voxel coors + at::Tensor temp_coors = + at::zeros({num_points, NDim}, points.options().dtype(at::kInt)); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096)); + dim3 block(512); + + // 1. link point to corresponding voxel coors + AT_DISPATCH_ALL_TYPES( + points.scalar_type(), "hard_voxelize_kernel", ([&] { + dynamic_voxelize_kernel + <<>>( + points.contiguous().data_ptr(), + temp_coors.contiguous().data_ptr(), voxel_x, voxel_y, + voxel_z, coors_x_min, coors_y_min, coors_z_min, coors_x_max, + coors_y_max, coors_z_max, grid_x, grid_y, grid_z, num_points, + num_features, NDim); + })); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + + // 2. 
map point to the idx of the corresponding voxel, find duplicate coor + // create some temporary variables + auto point_to_pointidx = -at::ones( + { + num_points, + }, + points.options().dtype(at::kInt)); + auto point_to_voxelidx = -at::ones( + { + num_points, + }, + points.options().dtype(at::kInt)); + + dim3 map_grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096)); + dim3 map_block(512); + AT_DISPATCH_ALL_TYPES( + temp_coors.scalar_type(), "determin_duplicate", ([&] { + point_to_voxelidx_kernel + <<>>( + temp_coors.contiguous().data_ptr(), + point_to_voxelidx.contiguous().data_ptr(), + point_to_pointidx.contiguous().data_ptr(), max_points, + max_voxels, num_points, NDim); + })); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + + // 3. determined voxel num and voxel's coor index + // make the logic in the CUDA device could accelerate about 10 times + auto coor_to_voxelidx = -at::ones( + { + num_points, + }, + points.options().dtype(at::kInt)); + auto voxel_num = at::zeros( + { + 1, + }, + points.options().dtype(at::kInt)); // must be zero from the beginning + + AT_DISPATCH_ALL_TYPES( + temp_coors.scalar_type(), "determin_duplicate", ([&] { + determin_voxel_num<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + num_points_per_voxel.contiguous().data_ptr(), + point_to_voxelidx.contiguous().data_ptr(), + point_to_pointidx.contiguous().data_ptr(), + coor_to_voxelidx.contiguous().data_ptr(), + voxel_num.contiguous().data_ptr(), max_points, max_voxels, + num_points); + })); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + + // 4. copy point features to voxels + // Step 4 & 5 could be parallel + auto pts_output_size = num_points * num_features; + dim3 cp_grid(std::min(at::cuda::ATenCeilDiv(pts_output_size, 512), 4096)); + dim3 cp_block(512); + AT_DISPATCH_ALL_TYPES( + points.scalar_type(), "assign_point_to_voxel", ([&] { + assign_point_to_voxel + <<>>( + pts_output_size, points.contiguous().data_ptr(), + point_to_voxelidx.contiguous().data_ptr(), + coor_to_voxelidx.contiguous().data_ptr(), + voxels.contiguous().data_ptr(), max_points, num_features, + num_points, NDim); + })); + // cudaDeviceSynchronize(); + // AT_CUDA_CHECK(cudaGetLastError()); + + // 5. 
copy coors of each voxels + auto coors_output_size = num_points * NDim; + dim3 coors_cp_grid( + std::min(at::cuda::ATenCeilDiv(coors_output_size, 512), 4096)); + dim3 coors_cp_block(512); + AT_DISPATCH_ALL_TYPES( + points.scalar_type(), "assign_point_to_voxel", ([&] { + assign_voxel_coors<<>>( + coors_output_size, temp_coors.contiguous().data_ptr(), + point_to_voxelidx.contiguous().data_ptr(), + coor_to_voxelidx.contiguous().data_ptr(), + coors.contiguous().data_ptr(), num_points, NDim); + })); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + + auto voxel_num_cpu = voxel_num.to(at::kCPU); + int voxel_num_int = voxel_num_cpu.data_ptr()[0]; + + return voxel_num_int; +} + +int nondisterministic_hard_voxelize_gpu( + const at::Tensor &points, at::Tensor &voxels, + at::Tensor &coors, at::Tensor &num_points_per_voxel, + const std::vector voxel_size, + const std::vector coors_range, + const int max_points, const int max_voxels, + const int NDim = 3) { + + CHECK_INPUT(points); + + at::cuda::CUDAGuard device_guard(points.device()); + + const int num_points = points.size(0); + const int num_features = points.size(1); + + if (num_points == 0) + return 0; + + const float voxel_x = voxel_size[0]; + const float voxel_y = voxel_size[1]; + const float voxel_z = voxel_size[2]; + const float coors_x_min = coors_range[0]; + const float coors_y_min = coors_range[1]; + const float coors_z_min = coors_range[2]; + const float coors_x_max = coors_range[3]; + const float coors_y_max = coors_range[4]; + const float coors_z_max = coors_range[5]; + + const int grid_x = round((coors_x_max - coors_x_min) / voxel_x); + const int grid_y = round((coors_y_max - coors_y_min) / voxel_y); + const int grid_z = round((coors_z_max - coors_z_min) / voxel_z); + + // map points to voxel coors + at::Tensor temp_coors = + at::zeros({num_points, NDim}, points.options().dtype(torch::kInt32)); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096)); + dim3 block(512); + + // 1. 
link point to corresponding voxel coors + AT_DISPATCH_ALL_TYPES( + points.scalar_type(), "hard_voxelize_kernel", ([&] { + dynamic_voxelize_kernel + <<>>( + points.contiguous().data_ptr(), + temp_coors.contiguous().data_ptr(), voxel_x, voxel_y, + voxel_z, coors_x_min, coors_y_min, coors_z_min, coors_x_max, + coors_y_max, coors_z_max, grid_x, grid_y, grid_z, num_points, + num_features, NDim); + })); + + at::Tensor coors_map; + at::Tensor coors_count; + at::Tensor coors_order; + at::Tensor reduce_count; + at::Tensor pts_id; + + auto coors_clean = temp_coors.masked_fill(temp_coors.lt(0).any(-1, true), -1); + + std::tie(temp_coors, coors_map, reduce_count) = + at::unique_dim(coors_clean, 0, true, true, false); + + if (temp_coors.index({0, 0}).lt(0).item()) { + // the first element of temp_coors is (-1,-1,-1) and should be removed + temp_coors = temp_coors.slice(0, 1); + coors_map = coors_map - 1; + } + + int num_coors = temp_coors.size(0); + temp_coors = temp_coors.to(torch::kInt32); + coors_map = coors_map.to(torch::kInt32); + + coors_count = coors_map.new_zeros(1); + coors_order = coors_map.new_empty(num_coors); + reduce_count = coors_map.new_zeros(num_coors); + pts_id = coors_map.new_zeros(num_points); + + dim3 cp_grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096)); + dim3 cp_block(512); + AT_DISPATCH_ALL_TYPES(points.scalar_type(), "get_assign_pos", ([&] { + nondisterministic_get_assign_pos<<>>( + num_points, + coors_map.contiguous().data_ptr(), + pts_id.contiguous().data_ptr(), + coors_count.contiguous().data_ptr(), + reduce_count.contiguous().data_ptr(), + coors_order.contiguous().data_ptr()); + })); + + AT_DISPATCH_ALL_TYPES( + points.scalar_type(), "assign_point_to_voxel", ([&] { + nondisterministic_assign_point_voxel + <<>>( + num_points, points.contiguous().data_ptr(), + coors_map.contiguous().data_ptr(), + pts_id.contiguous().data_ptr(), + temp_coors.contiguous().data_ptr(), + reduce_count.contiguous().data_ptr(), + coors_order.contiguous().data_ptr(), + voxels.contiguous().data_ptr(), + coors.contiguous().data_ptr(), + num_points_per_voxel.contiguous().data_ptr(), + max_voxels, max_points, + num_features, NDim); + })); + AT_CUDA_CHECK(cudaGetLastError()); + return max_voxels < num_coors ? 
max_voxels : num_coors; +} + +void dynamic_voxelize_gpu(const at::Tensor& points, at::Tensor& coors, + const std::vector voxel_size, + const std::vector coors_range, + const int NDim = 3) { + // current version tooks about 0.04s for one frame on cpu + // check device + CHECK_INPUT(points); + + at::cuda::CUDAGuard device_guard(points.device()); + + const int num_points = points.size(0); + const int num_features = points.size(1); + + const float voxel_x = voxel_size[0]; + const float voxel_y = voxel_size[1]; + const float voxel_z = voxel_size[2]; + const float coors_x_min = coors_range[0]; + const float coors_y_min = coors_range[1]; + const float coors_z_min = coors_range[2]; + const float coors_x_max = coors_range[3]; + const float coors_y_max = coors_range[4]; + const float coors_z_max = coors_range[5]; + + const int grid_x = round((coors_x_max - coors_x_min) / voxel_x); + const int grid_y = round((coors_y_max - coors_y_min) / voxel_y); + const int grid_z = round((coors_z_max - coors_z_min) / voxel_z); + + const int col_blocks = at::cuda::ATenCeilDiv(num_points, threadsPerBlock); + dim3 blocks(col_blocks); + dim3 threads(threadsPerBlock); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_ALL_TYPES(points.scalar_type(), "dynamic_voxelize_kernel", [&] { + dynamic_voxelize_kernel<<>>( + points.contiguous().data_ptr(), + coors.contiguous().data_ptr(), voxel_x, voxel_y, voxel_z, + coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max, + coors_z_max, grid_x, grid_y, grid_z, num_points, num_features, NDim); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + + return; +} + +} // namespace voxelization diff --git a/projects/BEVFusion/bevfusion/ops/voxel/voxelize.py b/projects/BEVFusion/bevfusion/ops/voxel/voxelize.py new file mode 100755 index 0000000..00a9374 --- /dev/null +++ b/projects/BEVFusion/bevfusion/ops/voxel/voxelize.py @@ -0,0 +1,161 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch +from torch import nn +from torch.autograd import Function +from torch.nn.modules.utils import _pair + +from .voxel_layer import dynamic_voxelize, hard_voxelize + + +class _Voxelization(Function): + + @staticmethod + def forward(ctx, + points, + voxel_size, + coors_range, + max_points=35, + max_voxels=20000, + deterministic=True): + """convert kitti points(N, >=3) to voxels. + + Args: + points: [N, ndim] float tensor. points[:, :3] contain xyz points + and points[:, 3:] contain other information like reflectivity + voxel_size: [3] list/tuple or array, float. xyz, indicate voxel + size + coors_range: [6] list/tuple or array, float. indicate voxel + range. format: xyzxyz, minmax + max_points: int. indicate maximum points contained in a voxel. if + max_points=-1, it means using dynamic_voxelize + max_voxels: int. indicate maximum voxels this function create. + for second, 20000 is a good choice. Users should shuffle points + before call this function because max_voxels may drop points. + deterministic: bool. whether to invoke the non-deterministic + version of hard-voxelization implementations. non-deterministic + version is considerablly fast but is not deterministic. only + affects hard voxelization. default True. 
for more information + of this argument and the implementation insights, please refer + to the following links: + https://github.com/open-mmlab/mmdetection3d/issues/894 + https://github.com/open-mmlab/mmdetection3d/pull/904 + it is an experimental feature and we will appreciate it if + you could share with us the failing cases. + + Returns: + voxels: [M, max_points, ndim] float tensor. only contain points + and returned when max_points != -1. + coordinates: [M, 3] int32 tensor, always returned. + num_points_per_voxel: [M] int32 tensor. Only returned when + max_points != -1. + """ + if max_points == -1 or max_voxels == -1: + coors = points.new_zeros(size=(points.size(0), 3), dtype=torch.int) + dynamic_voxelize(points, coors, voxel_size, coors_range, 3) + return coors + else: + voxels = points.new_zeros( + size=(max_voxels, max_points, points.size(1))) + coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int) + num_points_per_voxel = points.new_zeros( + size=(max_voxels, ), dtype=torch.int) + voxel_num = hard_voxelize( + points, + voxels, + coors, + num_points_per_voxel, + voxel_size, + coors_range, + max_points, + max_voxels, + 3, + deterministic, + ) + # select the valid voxels + voxels_out = voxels[:voxel_num] + coors_out = coors[:voxel_num] + num_points_per_voxel_out = num_points_per_voxel[:voxel_num] + return voxels_out, coors_out, num_points_per_voxel_out + + +voxelization = _Voxelization.apply + + +class Voxelization(nn.Module): + + def __init__(self, + voxel_size, + point_cloud_range, + max_num_points, + max_voxels=20000, + deterministic=True): + super(Voxelization, self).__init__() + """ + Args: + voxel_size (list): list [x, y, z] size of three dimension + point_cloud_range (list): + [x_min, y_min, z_min, x_max, y_max, z_max] + max_num_points (int): max number of points per voxel + max_voxels (tuple or int): max number of voxels in + (training, testing) time + deterministic: bool. whether to invoke the non-deterministic + version of hard-voxelization implementations. non-deterministic + version is considerablly fast but is not deterministic. only + affects hard voxelization. default True. for more information + of this argument and the implementation insights, please refer + to the following links: + https://github.com/open-mmlab/mmdetection3d/issues/894 + https://github.com/open-mmlab/mmdetection3d/pull/904 + it is an experimental feature and we will appreciate it if + you could share with us the failing cases. 
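+
+        Example (illustrative values only; the voxel size and point limits
+        below are placeholders, the range matches the comment further down
+        in this method)::
+
+            >>> voxel_layer = Voxelization(
+            ...     voxel_size=[0.05, 0.05, 0.1],
+            ...     point_cloud_range=[0, -40, -3, 70.4, 40, 1],
+            ...     max_num_points=35,
+            ...     max_voxels=(16000, 40000))
+            >>> voxels, coors, num_points = voxel_layer(points)  # points: (N, C)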
+ """ + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.max_num_points = max_num_points + if isinstance(max_voxels, tuple): + self.max_voxels = max_voxels + else: + self.max_voxels = _pair(max_voxels) + self.deterministic = deterministic + + point_cloud_range = torch.tensor( + point_cloud_range, dtype=torch.float32) + # [0, -40, -3, 70.4, 40, 1] + voxel_size = torch.tensor(voxel_size, dtype=torch.float32) + grid_size = (point_cloud_range[3:] - + point_cloud_range[:3]) / voxel_size + grid_size = torch.round(grid_size).long() + input_feat_shape = grid_size[:2] + self.grid_size = grid_size + # the origin shape is as [x-len, y-len, z-len] + # [w, h, d] -> [d, h, w] removed + self.pcd_shape = [*input_feat_shape, 1] # [::-1] + + def forward(self, input): + """ + Args: + input: NC points + """ + if self.training: + max_voxels = self.max_voxels[0] + else: + max_voxels = self.max_voxels[1] + + return voxelization( + input, + self.voxel_size, + self.point_cloud_range, + self.max_num_points, + max_voxels, + self.deterministic, + ) + + def __repr__(self): + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'voxel_size=' + str(self.voxel_size) + tmpstr += ', point_cloud_range=' + str(self.point_cloud_range) + tmpstr += ', max_num_points=' + str(self.max_num_points) + tmpstr += ', max_voxels=' + str(self.max_voxels) + tmpstr += ', deterministic=' + str(self.deterministic) + tmpstr += ')' + return tmpstr diff --git a/projects/BEVFusion/bevfusion/sparse_encoder.py b/projects/BEVFusion/bevfusion/sparse_encoder.py new file mode 100755 index 0000000..68bf2bc --- /dev/null +++ b/projects/BEVFusion/bevfusion/sparse_encoder.py @@ -0,0 +1,151 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet3d.models.layers import make_sparse_convmodule +from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE +from mmdet3d.models.middle_encoders import SparseEncoder +from mmdet3d.registry import MODELS + +if IS_SPCONV2_AVAILABLE: + from spconv.pytorch import SparseConvTensor +else: + from mmcv.ops import SparseConvTensor + + +@MODELS.register_module() +class BEVFusionSparseEncoder(SparseEncoder): + r"""Sparse encoder for BEVFusion. The difference between this + implementation and that of ``SparseEncoder`` is that the shape order of 3D + conv is (H, W, D) in ``BEVFusionSparseEncoder`` rather than (D, H, W) in + ``SparseEncoder``. This difference comes from the implementation of + ``voxelization``. + + Args: + in_channels (int): The number of input channels. + sparse_shape (list[int]): The sparse shape of input tensor. + order (list[str], optional): Order of conv module. + Defaults to ('conv', 'norm', 'act'). + norm_cfg (dict, optional): Config of normalization layer. Defaults to + dict(type='BN1d', eps=1e-3, momentum=0.01). + base_channels (int, optional): Out channels for conv_input layer. + Defaults to 16. + output_channels (int, optional): Out channels for conv_out layer. + Defaults to 128. + encoder_channels (tuple[tuple[int]], optional): + Convolutional channels of each encode block. + Defaults to ((16, ), (32, 32, 32), (64, 64, 64), (64, 64, 64)). + encoder_paddings (tuple[tuple[int]], optional): + Paddings of each encode block. + Defaults to ((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1, 1)). + block_type (str, optional): Type of the block to use. + Defaults to 'conv_module'. + return_middle_feats (bool): Whether output middle features. + Default to False. 
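+
+    Example (illustrative only; the channel and shape values below are
+    placeholders rather than settings from a particular config)::
+
+        >>> encoder = BEVFusionSparseEncoder(
+        ...     in_channels=5, sparse_shape=[1440, 1440, 41])
+        >>> # voxel_features: (N, 5), coors: (N, 4) as (batch_idx, z, y, x)
+        >>> bev_feats = encoder(voxel_features, coors, batch_size=2)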
+ """ + + def __init__(self, + in_channels, + sparse_shape, + order=('conv', 'norm', 'act'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + base_channels=16, + output_channels=128, + encoder_channels=((16, ), (32, 32, 32), (64, 64, 64), (64, 64, + 64)), + encoder_paddings=((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1, + 1)), + block_type='conv_module', + return_middle_feats=False): + super(SparseEncoder, self).__init__() + assert block_type in ['conv_module', 'basicblock'] + self.sparse_shape = sparse_shape + self.in_channels = in_channels + self.order = order + self.base_channels = base_channels + self.output_channels = output_channels + self.encoder_channels = encoder_channels + self.encoder_paddings = encoder_paddings + self.stage_num = len(self.encoder_channels) + self.fp16_enabled = False + self.return_middle_feats = return_middle_feats + # Spconv init all weight on its own + + assert isinstance(order, tuple) and len(order) == 3 + assert set(order) == {'conv', 'norm', 'act'} + + if self.order[0] != 'conv': # pre activate + self.conv_input = make_sparse_convmodule( + in_channels, + self.base_channels, + 3, + norm_cfg=norm_cfg, + padding=1, + indice_key='subm1', + conv_type='SubMConv3d', + order=('conv', )) + else: # post activate + self.conv_input = make_sparse_convmodule( + in_channels, + self.base_channels, + 3, + norm_cfg=norm_cfg, + padding=1, + indice_key='subm1', + conv_type='SubMConv3d') + + encoder_out_channels = self.make_encoder_layers( + make_sparse_convmodule, + norm_cfg, + self.base_channels, + block_type=block_type) + + self.conv_out = make_sparse_convmodule( + encoder_out_channels, + self.output_channels, + kernel_size=(1, 1, 3), + stride=(1, 1, 2), + norm_cfg=norm_cfg, + padding=0, + indice_key='spconv_down2', + conv_type='SparseConv3d') + + def forward(self, voxel_features, coors, batch_size): + """Forward of SparseEncoder. + + Args: + voxel_features (torch.Tensor): Voxel features in shape (N, C). + coors (torch.Tensor): Coordinates in shape (N, 4), + the columns in the order of (batch_idx, z_idx, y_idx, x_idx). + batch_size (int): Batch size. + + Returns: + torch.Tensor | tuple[torch.Tensor, list]: Return spatial features + include: + + - spatial_features (torch.Tensor): Spatial features are out from + the last layer. + - encode_features (List[SparseConvTensor], optional): Middle layer + output features. When self.return_middle_feats is True, the + module returns middle features. + """ + coors = coors.int() + input_sp_tensor = SparseConvTensor(voxel_features, coors, + self.sparse_shape, batch_size) + x = self.conv_input(input_sp_tensor) + + encode_features = [] + for encoder_layer in self.encoder_layers: + x = encoder_layer(x) + encode_features.append(x) + + # for detection head + # [200, 176, 5] -> [200, 176, 2] + out = self.conv_out(encode_features[-1]) + spatial_features = out.dense() + + N, C, H, W, D = spatial_features.shape + spatial_features = spatial_features.permute(0, 1, 4, 2, 3).contiguous() + spatial_features = spatial_features.view(N, C * D, H, W) + + if self.return_middle_feats: + return spatial_features, encode_features + else: + return spatial_features diff --git a/projects/BEVFusion/bevfusion/transformer.py b/projects/BEVFusion/bevfusion/transformer.py new file mode 100755 index 0000000..b69d2c6 --- /dev/null +++ b/projects/BEVFusion/bevfusion/transformer.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
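+# This module provides the decoder layer used by the TransFusion head's query
+# decoder in this project (see transfusion_head.py, which builds its decoder
+# layers from the `decoder_layer` config): a DETR-style decoder layer whose
+# query/key positions come from a small learned Conv1d embedding
+# (PositionEncodingLearned) rather than a fixed sinusoidal encoding.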
+from mmdet.models import DetrTransformerDecoderLayer +from torch import Tensor, nn + +from mmdet3d.registry import MODELS + + +class PositionEncodingLearned(nn.Module): + """Absolute pos embedding, learned.""" + + def __init__(self, input_channel, num_pos_feats=288): + super().__init__() + self.position_embedding_head = nn.Sequential( + nn.Conv1d(input_channel, num_pos_feats, kernel_size=1), + nn.BatchNorm1d(num_pos_feats), nn.ReLU(inplace=True), + nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1)) + + def forward(self, xyz): + xyz = xyz.transpose(1, 2).contiguous() + position_embedding = self.position_embedding_head(xyz) + return position_embedding + + +@MODELS.register_module() +class TransformerDecoderLayer(DetrTransformerDecoderLayer): + + def __init__(self, + pos_encoding_cfg=dict(input_channel=2, num_pos_feats=128), + **kwargs): + super().__init__(**kwargs) + self.self_posembed = PositionEncodingLearned(**pos_encoding_cfg) + self.cross_posembed = PositionEncodingLearned(**pos_encoding_cfg) + + def forward(self, + query: Tensor, + key: Tensor = None, + value: Tensor = None, + query_pos: Tensor = None, + key_pos: Tensor = None, + self_attn_mask: Tensor = None, + cross_attn_mask: Tensor = None, + key_padding_mask: Tensor = None, + **kwargs) -> Tensor: + """ + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + key (Tensor, optional): The input key, has shape (bs, num_keys, + dim). If `None`, the `query` will be used. Defaults to `None`. + value (Tensor, optional): The input value, has the same shape as + `key`, as in `nn.MultiheadAttention.forward`. If `None`, the + `key` will be used. Defaults to `None`. + query_pos (Tensor, optional): The positional encoding for `query`, + has the same shape as `query`. If not `None`, it will be added + to `query` before forward function. Defaults to `None`. + key_pos (Tensor, optional): The positional encoding for `key`, has + the same shape as `key`. If not `None`, it will be added to + `key` before forward function. If None, and `query_pos` has the + same shape as `key`, then `query_pos` will be used for + `key_pos`. Defaults to None. + self_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + cross_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor, optional): The `key_padding_mask` of + `self_attn` input. ByteTensor, has shape (bs, num_value). + Defaults to None. + + Returns: + Tensor: forwarded results, has shape (bs, num_queries, dim). + """ + if self.self_posembed is not None and query_pos is not None: + query_pos = self.self_posembed(query_pos).transpose(1, 2) + else: + query_pos = None + if self.cross_posembed is not None and key_pos is not None: + key_pos = self.cross_posembed(key_pos).transpose(1, 2) + else: + key_pos = None + query = query.transpose(1, 2) + key = key.transpose(1, 2) + # Note that the `value` (equal to `query`) is encoded with `query_pos`. + # This is different from the standard DETR Decoder Layer. + query = self.self_attn( + query=query, + key=query, + value=query + query_pos, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=self_attn_mask, + **kwargs) + query = self.norms[0](query) + # Note that the `value` (equal to `key`) is encoded with `key_pos`. + # This is different from the standard DETR Decoder Layer. 
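+        # Cross-attention: the object queries attend to the flattened key
+        # features. In this project the keys are the fused BEV features and
+        # `key_pos` are their BEV grid coordinates (see transfusion_head.py).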
+ query = self.cross_attn( + query=query, + key=key, + value=key + key_pos, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=cross_attn_mask, + key_padding_mask=key_padding_mask, + **kwargs) + query = self.norms[1](query) + query = self.ffn(query) + query = self.norms[2](query) + + query = query.transpose(1, 2) + return query diff --git a/projects/BEVFusion/bevfusion/transforms_3d.py b/projects/BEVFusion/bevfusion/transforms_3d.py new file mode 100755 index 0000000..35116f5 --- /dev/null +++ b/projects/BEVFusion/bevfusion/transforms_3d.py @@ -0,0 +1,195 @@ +# modify from https://github.com/mit-han-lab/bevfusion +from typing import Any, Dict + +import numpy as np +import torch +from mmcv.transforms import BaseTransform +from PIL import Image + +from mmdet3d.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class ImageAug3D(BaseTransform): + + def __init__(self, final_dim, resize_lim, bot_pct_lim, rot_lim, rand_flip, + is_train): + self.final_dim = final_dim + self.resize_lim = resize_lim + self.bot_pct_lim = bot_pct_lim + self.rand_flip = rand_flip + self.rot_lim = rot_lim + self.is_train = is_train + + def sample_augmentation(self, results): + H, W = results['ori_shape'] + fH, fW = self.final_dim + if self.is_train: + resize = np.random.uniform(*self.resize_lim) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int( + (1 - np.random.uniform(*self.bot_pct_lim)) * newH) - fH + crop_w = int(np.random.uniform(0, max(0, newW - fW))) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + if self.rand_flip and np.random.choice([0, 1]): + flip = True + rotate = np.random.uniform(*self.rot_lim) + else: + resize = np.mean(self.resize_lim) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int((1 - np.mean(self.bot_pct_lim)) * newH) - fH + crop_w = int(max(0, newW - fW) / 2) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + rotate = 0 + return resize, resize_dims, crop, flip, rotate + + def img_transform(self, img, rotation, translation, resize, resize_dims, + crop, flip, rotate): + # adjust image + img = Image.fromarray(img.astype('uint8'), mode='RGB') + img = img.resize(resize_dims) + img = img.crop(crop) + if flip: + img = img.transpose(method=Image.FLIP_LEFT_RIGHT) + img = img.rotate(rotate) + + # post-homography transformation + rotation *= resize + translation -= torch.Tensor(crop[:2]) + if flip: + A = torch.Tensor([[-1, 0], [0, 1]]) + b = torch.Tensor([crop[2] - crop[0], 0]) + rotation = A.matmul(rotation) + translation = A.matmul(translation) + b + theta = rotate / 180 * np.pi + A = torch.Tensor([ + [np.cos(theta), np.sin(theta)], + [-np.sin(theta), np.cos(theta)], + ]) + b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2 + b = A.matmul(-b) + b + rotation = A.matmul(rotation) + translation = A.matmul(translation) + b + + return img, rotation, translation + + def transform(self, data: Dict[str, Any]) -> Dict[str, Any]: + imgs = data['img'] + new_imgs = [] + transforms = [] + for img in imgs: + resize, resize_dims, crop, flip, rotate = self.sample_augmentation( + data) + post_rot = torch.eye(2) + post_tran = torch.zeros(2) + new_img, rotation, translation = self.img_transform( + img, + post_rot, + post_tran, + resize=resize, + resize_dims=resize_dims, + crop=crop, + flip=flip, + rotate=rotate, + ) + transform = torch.eye(4) + transform[:2, :2] = rotation + transform[:2, 3] = translation + new_imgs.append(np.array(new_img).astype(np.float32)) + 
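+            # `transform` is the 4x4 matrix of this image-space augmentation
+            # (resize/rotation in [:2, :2], crop/flip translation in [:2, 3]);
+            # it is stored in `img_aug_matrix` below so later geometry-aware
+            # steps can account for the augmentation.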
transforms.append(transform.numpy()) + data['img'] = new_imgs + # update the calibration matrices + data['img_aug_matrix'] = transforms + return data + + +@TRANSFORMS.register_module() +class GridMask(BaseTransform): + + def __init__( + self, + use_h, + use_w, + max_epoch, + rotate=1, + offset=False, + ratio=0.5, + mode=0, + prob=1.0, + fixed_prob=False, + ): + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + self.epoch = None + self.max_epoch = max_epoch + self.fixed_prob = fixed_prob + + def set_epoch(self, epoch): + self.epoch = epoch + if not self.fixed_prob: + self.set_prob(self.epoch, self.max_epoch) + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * self.epoch / self.max_epoch + + def transform(self, results): + if np.random.rand() > self.prob: + return results + imgs = results['img'] + h = imgs[0].shape[0] + w = imgs[0].shape[1] + self.d1 = 2 + self.d2 = min(h, w) + hh = int(1.5 * h) + ww = int(1.5 * w) + d = np.random.randint(self.d1, self.d2) + if self.ratio == 1: + self.length = np.random.randint(1, d) + else: + self.length = min(max(int(d * self.ratio + 0.5), 1), d - 1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh // d): + s = d * i + st_h + t = min(s + self.length, hh) + mask[s:t, :] *= 0 + if self.use_w: + for i in range(ww // d): + s = d * i + st_w + t = min(s + self.length, ww) + mask[:, s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh - h) // 2:(hh - h) // 2 + h, + (ww - w) // 2:(ww - w) // 2 + w] + + mask = mask.astype(np.float32) + mask = mask[:, :, None] + if self.mode == 1: + mask = 1 - mask + + # mask = mask.expand_as(imgs[0]) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h, w) - 0.5)).float() + offset = (1 - mask) * offset + imgs = [x * mask + offset for x in imgs] + else: + imgs = [x * mask for x in imgs] + + results.update(img=imgs) + return results diff --git a/projects/BEVFusion/bevfusion/transfusion_head.py b/projects/BEVFusion/bevfusion/transfusion_head.py new file mode 100755 index 0000000..59dbdf8 --- /dev/null +++ b/projects/BEVFusion/bevfusion/transfusion_head.py @@ -0,0 +1,841 @@ +# modify from https://github.com/mit-han-lab/bevfusion +import copy +from typing import List + +import numpy as np +import torch +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_conv_layer +from mmdet.models.task_modules import (AssignResult, PseudoSampler, + build_assigner, build_bbox_coder, + build_sampler) +from mmdet.models.utils import multi_apply +from mmengine.structures import InstanceData +from torch import nn + +from mmdet3d.models import circle_nms, draw_heatmap_gaussian, gaussian_radius +from mmdet3d.models.dense_heads.centerpoint_head import SeparateHead +from mmdet3d.models.layers import nms_bev +from mmdet3d.registry import MODELS +from mmdet3d.structures import xywhr2xyxyr + + +def clip_sigmoid(x, eps=1e-4): + y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) + return y + + +@MODELS.register_module() +class ConvFuser(nn.Sequential): + + def __init__(self, in_channels: int, out_channels: int) -> None: + self.in_channels = in_channels + self.out_channels = out_channels + super().__init__( + nn.Conv2d( + sum(in_channels), out_channels, 3, padding=1, bias=False), + 
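+            # BN + ReLU follow the 3x3 conv that fuses the channel-concatenated
+            # camera/LiDAR BEV features (forward() concatenates the inputs
+            # along dim=1 before calling this Sequential).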
nn.BatchNorm2d(out_channels), + nn.ReLU(True), + ) + + def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor: + return super().forward(torch.cat(inputs, dim=1)) + + +@MODELS.register_module() +class TransFusionHead(nn.Module): + + def __init__( + self, + num_proposals=128, + auxiliary=True, + in_channels=128 * 3, + hidden_channel=128, + num_classes=4, + # config for Transformer + num_decoder_layers=3, + decoder_layer=dict(), + num_heads=8, + nms_kernel_size=1, + bn_momentum=0.1, + # config for FFN + common_heads=dict(), + num_heatmap_convs=2, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + bias='auto', + # loss + loss_cls=dict(type='mmdet.GaussianFocalLoss', reduction='mean'), + loss_bbox=dict(type='mmdet.L1Loss', reduction='mean'), + loss_heatmap=dict(type='mmdet.GaussianFocalLoss', reduction='mean'), + # others + train_cfg=None, + test_cfg=None, + bbox_coder=None, + ): + super(TransFusionHead, self).__init__() + + self.fp16_enabled = False + + self.num_classes = num_classes + self.num_proposals = num_proposals + self.auxiliary = auxiliary + self.in_channels = in_channels + self.num_heads = num_heads + self.num_decoder_layers = num_decoder_layers + self.bn_momentum = bn_momentum + self.nms_kernel_size = nms_kernel_size + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + if not self.use_sigmoid_cls: + self.num_classes += 1 + self.loss_cls = MODELS.build(loss_cls) + self.loss_bbox = MODELS.build(loss_bbox) + self.loss_heatmap = MODELS.build(loss_heatmap) + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.sampling = False + + # a shared convolution + self.shared_conv = build_conv_layer( + dict(type='Conv2d'), + in_channels, + hidden_channel, + kernel_size=3, + padding=1, + bias=bias, + ) + + layers = [] + layers.append( + ConvModule( + hidden_channel, + hidden_channel, + kernel_size=3, + padding=1, + bias=bias, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict(type='BN2d'), + )) + layers.append( + build_conv_layer( + dict(type='Conv2d'), + hidden_channel, + num_classes, + kernel_size=3, + padding=1, + bias=bias, + )) + self.heatmap_head = nn.Sequential(*layers) + self.class_encoding = nn.Conv1d(num_classes, hidden_channel, 1) + + # transformer decoder layers for object query with LiDAR feature + self.decoder = nn.ModuleList() + for i in range(self.num_decoder_layers): + self.decoder.append(MODELS.build(decoder_layer)) + + # Prediction Head + self.prediction_heads = nn.ModuleList() + for i in range(self.num_decoder_layers): + heads = copy.deepcopy(common_heads) + heads.update(dict(heatmap=(self.num_classes, num_heatmap_convs))) + self.prediction_heads.append( + SeparateHead( + hidden_channel, + heads, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=bias, + )) + + self.init_weights() + self._init_assigner_sampler() + + # Position Embedding for Cross-Attention, which is re-used during training # noqa: E501 + x_size = self.test_cfg['grid_size'][0] // self.test_cfg[ + 'out_size_factor'] + y_size = self.test_cfg['grid_size'][1] // self.test_cfg[ + 'out_size_factor'] + self.bev_pos = self.create_2D_grid(x_size, y_size) + + self.img_feat_pos = None + self.img_feat_collapsed_pos = None + + def create_2D_grid(self, x_size, y_size): + meshgrid = [[0, x_size - 1, x_size], [0, y_size - 1, y_size]] + # NOTE: modified + batch_x, batch_y = torch.meshgrid( + *[torch.linspace(it[0], it[1], it[2]) for it in meshgrid]) + batch_x = batch_x + 0.5 + batch_y = batch_y + 0.5 + coord_base = torch.cat([batch_x[None], 
batch_y[None]], dim=0)[None] + coord_base = coord_base.view(1, 2, -1).permute(0, 2, 1) + return coord_base + + def init_weights(self): + # initialize transformer + for m in self.decoder.parameters(): + if m.dim() > 1: + nn.init.xavier_uniform_(m) + if hasattr(self, 'query'): + nn.init.xavier_normal_(self.query) + self.init_bn_momentum() + + def init_bn_momentum(self): + for m in self.modules(): + if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)): + m.momentum = self.bn_momentum + + def _init_assigner_sampler(self): + """Initialize the target assigner and sampler of the head.""" + if self.train_cfg is None: + return + + if self.sampling: + self.bbox_sampler = build_sampler(self.train_cfg.sampler) + else: + self.bbox_sampler = PseudoSampler() + if isinstance(self.train_cfg.assigner, dict): + self.bbox_assigner = build_assigner(self.train_cfg.assigner) + elif isinstance(self.train_cfg.assigner, list): + self.bbox_assigner = [ + build_assigner(res) for res in self.train_cfg.assigner + ] + + def forward_single(self, inputs, metas): + """Forward function for CenterPoint. + Args: + inputs (torch.Tensor): Input feature map with the shape of + [B, 512, 128(H), 128(W)]. (consistent with L748) + Returns: + list[dict]: Output results for tasks. + """ + batch_size = inputs.shape[0] + fusion_feat = self.shared_conv(inputs) + + ################################# + # image to BEV + ################################# + fusion_feat_flatten = fusion_feat.view(batch_size, + fusion_feat.shape[1], + -1) # [BS, C, H*W] + bev_pos = self.bev_pos.repeat(batch_size, 1, 1).to(fusion_feat.device) + + ################################# + # query initialization + ################################# + dense_heatmap = self.heatmap_head(fusion_feat) + heatmap = dense_heatmap.detach().sigmoid() + padding = self.nms_kernel_size // 2 + local_max = torch.zeros_like(heatmap) + # equals to nms radius = voxel_size * out_size_factor * kenel_size + local_max_inner = F.max_pool2d( + heatmap, kernel_size=self.nms_kernel_size, stride=1, padding=0) + local_max[:, :, padding:(-padding), + padding:(-padding)] = local_max_inner + # for Pedestrian & Traffic_cone in nuScenes + if self.test_cfg['dataset'] == 'nuScenes': + local_max[:, 8, ] = F.max_pool2d( + heatmap[:, 8], kernel_size=1, stride=1, padding=0) + local_max[:, 9, ] = F.max_pool2d( + heatmap[:, 9], kernel_size=1, stride=1, padding=0) + elif self.test_cfg[ + 'dataset'] == 'Waymo': # for Pedestrian & Cyclist in Waymo + local_max[:, 1, ] = F.max_pool2d( + heatmap[:, 1], kernel_size=1, stride=1, padding=0) + local_max[:, 2, ] = F.max_pool2d( + heatmap[:, 2], kernel_size=1, stride=1, padding=0) + heatmap = heatmap * (heatmap == local_max) + heatmap = heatmap.view(batch_size, heatmap.shape[1], -1) + + # top num_proposals among all classes + top_proposals = heatmap.view(batch_size, -1).argsort( + dim=-1, descending=True)[..., :self.num_proposals] + top_proposals_class = top_proposals // heatmap.shape[-1] + top_proposals_index = top_proposals % heatmap.shape[-1] + query_feat = fusion_feat_flatten.gather( + index=top_proposals_index[:, None, :].expand( + -1, fusion_feat_flatten.shape[1], -1), + dim=-1, + ) + self.query_labels = top_proposals_class + + # add category embedding + one_hot = F.one_hot( + top_proposals_class, + num_classes=self.num_classes).permute(0, 2, 1) + query_cat_encoding = self.class_encoding(one_hot.float()) + query_feat += query_cat_encoding + + query_pos = bev_pos.gather( + index=top_proposals_index[:, None, :].permute(0, 2, 1).expand( + -1, -1, bev_pos.shape[-1]), + 
dim=1, + ) + ################################# + # transformer decoder layer (Fusion feature as K,V) + ################################# + ret_dicts = [] + for i in range(self.num_decoder_layers): + # Transformer Decoder Layer + # :param query: B C Pq :param query_pos: B Pq 3/6 + query_feat = self.decoder[i]( + query_feat, + key=fusion_feat_flatten, + query_pos=query_pos, + key_pos=bev_pos) + + # Prediction + res_layer = self.prediction_heads[i](query_feat) + res_layer['center'] = res_layer['center'] + query_pos.permute( + 0, 2, 1) + ret_dicts.append(res_layer) + + # for next level positional embedding + query_pos = res_layer['center'].detach().clone().permute(0, 2, 1) + + ret_dicts[0]['query_heatmap_score'] = heatmap.gather( + index=top_proposals_index[:, + None, :].expand(-1, self.num_classes, + -1), + dim=-1, + ) # [bs, num_classes, num_proposals] + ret_dicts[0]['dense_heatmap'] = dense_heatmap + + if self.auxiliary is False: + # only return the results of last decoder layer + return [ret_dicts[-1]] + + # return all the layer's results for auxiliary superivison + new_res = {} + for key in ret_dicts[0].keys(): + if key not in [ + 'dense_heatmap', 'dense_heatmap_old', 'query_heatmap_score' + ]: + new_res[key] = torch.cat( + [ret_dict[key] for ret_dict in ret_dicts], dim=-1) + else: + new_res[key] = ret_dicts[0][key] + return [new_res] + + def forward(self, feats, metas): + """Forward pass. + + Args: + feats (list[torch.Tensor]): Multi-level features, e.g., + features produced by FPN. + Returns: + tuple(list[dict]): Output results. first index by level, second + index by layer + """ + if isinstance(feats, torch.Tensor): + feats = [feats] + res = multi_apply(self.forward_single, feats, [metas]) + assert len(res) == 1, 'only support one level features.' + return res + + def predict(self, batch_feats, batch_input_metas): + preds_dicts = self(batch_feats, batch_input_metas) + res = self.predict_by_feat(preds_dicts, batch_input_metas) + return res + + def predict_by_feat(self, + preds_dicts, + metas, + img=None, + rescale=False, + for_roi=False): + """Generate bboxes from bbox head predictions. + + Args: + preds_dicts (tuple[list[dict]]): Prediction results. + Returns: + list[list[dict]]: Decoded bbox, scores and labels for each layer + & each batch. 
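+                Concretely the head returns one ``InstanceData`` per sample in
+                the batch, holding ``bboxes_3d``, ``scores_3d`` and
+                ``labels_3d``; only one prediction layer is supported, so the
+                per-layer list is squeezed out before returning.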
+ """ + rets = [] + for layer_id, preds_dict in enumerate(preds_dicts): + batch_size = preds_dict[0]['heatmap'].shape[0] + batch_score = preds_dict[0]['heatmap'][ + ..., -self.num_proposals:].sigmoid() + # if self.loss_iou.loss_weight != 0: + # batch_score = torch.sqrt(batch_score * preds_dict[0]['iou'][..., -self.num_proposals:].sigmoid()) # noqa: E501 + one_hot = F.one_hot( + self.query_labels, + num_classes=self.num_classes).permute(0, 2, 1) + batch_score = batch_score * preds_dict[0][ + 'query_heatmap_score'] * one_hot + + batch_center = preds_dict[0]['center'][..., -self.num_proposals:] + batch_height = preds_dict[0]['height'][..., -self.num_proposals:] + batch_dim = preds_dict[0]['dim'][..., -self.num_proposals:] + batch_rot = preds_dict[0]['rot'][..., -self.num_proposals:] + batch_vel = None + if 'vel' in preds_dict[0]: + batch_vel = preds_dict[0]['vel'][..., -self.num_proposals:] + + temp = self.bbox_coder.decode( + batch_score, + batch_rot, + batch_dim, + batch_center, + batch_height, + batch_vel, + filter=True, + ) + + if self.test_cfg['dataset'] == 'nuScenes': + self.tasks = [ + dict( + num_class=8, + class_names=[], + indices=[0, 1, 2, 3, 4, 5, 6, 7], + radius=-1, + ), + dict( + num_class=1, + class_names=['pedestrian'], + indices=[8], + radius=0.175, + ), + dict( + num_class=1, + class_names=['traffic_cone'], + indices=[9], + radius=0.175, + ), + ] + elif self.test_cfg['dataset'] == 'Waymo': + self.tasks = [ + dict( + num_class=1, + class_names=['Car'], + indices=[0], + radius=0.7), + dict( + num_class=1, + class_names=['Pedestrian'], + indices=[1], + radius=0.7), + dict( + num_class=1, + class_names=['Cyclist'], + indices=[2], + radius=0.7), + ] + + ret_layer = [] + for i in range(batch_size): + boxes3d = temp[i]['bboxes'] + scores = temp[i]['scores'] + labels = temp[i]['labels'] + # adopt circle nms for different categories + if self.test_cfg['nms_type'] is not None: + keep_mask = torch.zeros_like(scores) + for task in self.tasks: + task_mask = torch.zeros_like(scores) + for cls_idx in task['indices']: + task_mask += labels == cls_idx + task_mask = task_mask.bool() + if task['radius'] > 0: + if self.test_cfg['nms_type'] == 'circle': + boxes_for_nms = torch.cat( + [ + boxes3d[task_mask][:, :2], + scores[:, None][task_mask], + ], + dim=1, + ) + task_keep_indices = torch.tensor( + circle_nms( + boxes_for_nms.detach().cpu().numpy(), + task['radius'], + )) + else: + boxes_for_nms = xywhr2xyxyr( + metas[i]['box_type_3d']( + boxes3d[task_mask][:, :7], 7).bev) + top_scores = scores[task_mask] + task_keep_indices = nms_bev( + boxes_for_nms, + top_scores, + thresh=task['radius'], + pre_maxsize=self.test_cfg['pre_maxsize'], + post_max_size=self. 
+ test_cfg['post_maxsize'], + ) + else: + task_keep_indices = torch.arange(task_mask.sum()) + if task_keep_indices.shape[0] != 0: + keep_indices = torch.where( + task_mask != 0)[0][task_keep_indices] + keep_mask[keep_indices] = 1 + keep_mask = keep_mask.bool() + ret = dict( + bboxes=boxes3d[keep_mask], + scores=scores[keep_mask], + labels=labels[keep_mask], + ) + else: # no nms + ret = dict(bboxes=boxes3d, scores=scores, labels=labels) + + temp_instances = InstanceData() + ret['bboxes'][:, 2] = ret[ + 'bboxes'][:, 2] - ret['bboxes'][:, 5] * 0.5 # noqa: E501 + temp_instances.bboxes_3d = metas[0]['box_type_3d']( + ret['bboxes'], box_dim=ret['bboxes'].shape[-1]) + temp_instances.scores_3d = ret['scores'] + temp_instances.labels_3d = ret['labels'].int() + + ret_layer.append(temp_instances) + + rets.append(ret_layer) + assert len( + rets + ) == 1, f'only support one layer now, but get {len(rets)} layers' + + return rets[0] + + def get_targets(self, gt_bboxes_3d, gt_labels_3d, preds_dict): + """Generate training targets. + Args: + gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes. + gt_labels_3d (torch.Tensor): Labels of boxes. + preds_dicts (tuple of dict): first index by layer (default 1) + Returns: + tuple[torch.Tensor]: Tuple of target including \ + the following results in order. + - torch.Tensor: classification target. [BS, num_proposals] + - torch.Tensor: classification weights (mask) + [BS, num_proposals] + - torch.Tensor: regression target. [BS, num_proposals, 8] + - torch.Tensor: regression weights. [BS, num_proposals, 8] + """ + # change preds_dict into list of dict (index by batch_id) + # preds_dict[0]['center'].shape [bs, 3, num_proposal] + list_of_pred_dict = [] + for batch_idx in range(len(gt_bboxes_3d)): + pred_dict = {} + for key in preds_dict[0].keys(): + pred_dict[key] = preds_dict[0][key][batch_idx:batch_idx + 1] + list_of_pred_dict.append(pred_dict) + + assert len(gt_bboxes_3d) == len(list_of_pred_dict) + + res_tuple = multi_apply( + self.get_targets_single, + gt_bboxes_3d, + gt_labels_3d, + list_of_pred_dict, + np.arange(len(gt_labels_3d)), + ) + labels = torch.cat(res_tuple[0], dim=0) + label_weights = torch.cat(res_tuple[1], dim=0) + bbox_targets = torch.cat(res_tuple[2], dim=0) + bbox_weights = torch.cat(res_tuple[3], dim=0) + ious = torch.cat(res_tuple[4], dim=0) + num_pos = np.sum(res_tuple[5]) + matched_ious = np.mean(res_tuple[6]) + heatmap = torch.cat(res_tuple[7], dim=0) + return ( + labels, + label_weights, + bbox_targets, + bbox_weights, + ious, + num_pos, + matched_ious, + heatmap, + ) + + def get_targets_single(self, gt_bboxes_3d, gt_labels_3d, preds_dict, + batch_idx): + """Generate training targets for a single sample. + Args: + gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes. + gt_labels_3d (torch.Tensor): Labels of boxes. + preds_dict (dict): dict of prediction result for a single sample + Returns: + tuple[torch.Tensor]: Tuple of target including \ + the following results in order. + - torch.Tensor: classification target. [1, num_proposals] + - torch.Tensor: classification weights (mask) [1, num_proposals] # noqa: E501 + - torch.Tensor: regression target. [1, num_proposals, 8] + - torch.Tensor: regression weights. [1, num_proposals, 8] + - torch.Tensor: iou target. [1, num_proposals] + - int: number of positive proposals + """ + num_proposals = preds_dict['center'].shape[-1] + + # get pred boxes, carefully ! 
don't change the network outputs + score = copy.deepcopy(preds_dict['heatmap'].detach()) + center = copy.deepcopy(preds_dict['center'].detach()) + height = copy.deepcopy(preds_dict['height'].detach()) + dim = copy.deepcopy(preds_dict['dim'].detach()) + rot = copy.deepcopy(preds_dict['rot'].detach()) + if 'vel' in preds_dict.keys(): + vel = copy.deepcopy(preds_dict['vel'].detach()) + else: + vel = None + + boxes_dict = self.bbox_coder.decode( + score, rot, dim, center, height, + vel) # decode the prediction to real world metric bbox + bboxes_tensor = boxes_dict[0]['bboxes'] + gt_bboxes_tensor = gt_bboxes_3d.tensor.to(score.device) + # each layer should do label assign separately. + if self.auxiliary: + num_layer = self.num_decoder_layers + else: + num_layer = 1 + + assign_result_list = [] + for idx_layer in range(num_layer): + bboxes_tensor_layer = bboxes_tensor[self.num_proposals * + idx_layer:self.num_proposals * + (idx_layer + 1), :] + score_layer = score[..., self.num_proposals * + idx_layer:self.num_proposals * + (idx_layer + 1), ] + + if self.train_cfg.assigner.type == 'HungarianAssigner3D': + assign_result = self.bbox_assigner.assign( + bboxes_tensor_layer, + gt_bboxes_tensor, + gt_labels_3d, + score_layer, + self.train_cfg, + ) + elif self.train_cfg.assigner.type == 'HeuristicAssigner': + assign_result = self.bbox_assigner.assign( + bboxes_tensor_layer, + gt_bboxes_tensor, + None, + gt_labels_3d, + self.query_labels[batch_idx], + ) + else: + raise NotImplementedError + assign_result_list.append(assign_result) + + # combine assign result of each layer + assign_result_ensemble = AssignResult( + num_gts=sum([res.num_gts for res in assign_result_list]), + gt_inds=torch.cat([res.gt_inds for res in assign_result_list]), + max_overlaps=torch.cat( + [res.max_overlaps for res in assign_result_list]), + labels=torch.cat([res.labels for res in assign_result_list]), + ) + sampling_result = self.bbox_sampler.sample(assign_result_ensemble, + bboxes_tensor, + gt_bboxes_tensor) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + assert len(pos_inds) + len(neg_inds) == num_proposals + + # create target for loss computation + bbox_targets = torch.zeros([num_proposals, self.bbox_coder.code_size + ]).to(center.device) + bbox_weights = torch.zeros([num_proposals, self.bbox_coder.code_size + ]).to(center.device) + ious = assign_result_ensemble.max_overlaps + ious = torch.clamp(ious, min=0.0, max=1.0) + labels = bboxes_tensor.new_zeros(num_proposals, dtype=torch.long) + label_weights = bboxes_tensor.new_zeros( + num_proposals, dtype=torch.long) + + if gt_labels_3d is not None: # default label is -1 + labels += self.num_classes + + # both pos and neg have classification loss, only pos has regression + # and iou loss + if len(pos_inds) > 0: + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_gt_bboxes) + + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + + if gt_labels_3d is None: + labels[pos_inds] = 1 + else: + labels[pos_inds] = gt_labels_3d[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # # compute dense heatmap targets + device = labels.device + gt_bboxes_3d = torch.cat( + [gt_bboxes_3d.gravity_center, gt_bboxes_3d.tensor[:, 3:]], + dim=1).to(device) + grid_size = torch.tensor(self.train_cfg['grid_size']) + pc_range = 
torch.tensor(self.train_cfg['point_cloud_range']) + voxel_size = torch.tensor(self.train_cfg['voxel_size']) + feature_map_size = (grid_size[:2] // self.train_cfg['out_size_factor'] + ) # [x_len, y_len] + heatmap = gt_bboxes_3d.new_zeros(self.num_classes, feature_map_size[1], + feature_map_size[0]) + for idx in range(len(gt_bboxes_3d)): + width = gt_bboxes_3d[idx][3] + length = gt_bboxes_3d[idx][4] + width = width / voxel_size[0] / self.train_cfg['out_size_factor'] + length = length / voxel_size[1] / self.train_cfg['out_size_factor'] + if width > 0 and length > 0: + radius = gaussian_radius( + (length, width), + min_overlap=self.train_cfg['gaussian_overlap']) + radius = max(self.train_cfg['min_radius'], int(radius)) + x, y = gt_bboxes_3d[idx][0], gt_bboxes_3d[idx][1] + + coor_x = ((x - pc_range[0]) / voxel_size[0] / + self.train_cfg['out_size_factor']) + coor_y = ((y - pc_range[1]) / voxel_size[1] / + self.train_cfg['out_size_factor']) + + center = torch.tensor([coor_x, coor_y], + dtype=torch.float32, + device=device) + center_int = center.to(torch.int32) + + # original + # draw_heatmap_gaussian(heatmap[gt_labels_3d[idx]], center_int, radius) # noqa: E501 + # NOTE: fix + draw_heatmap_gaussian(heatmap[gt_labels_3d[idx]], + center_int[[1, 0]], radius) + + mean_iou = ious[pos_inds].sum() / max(len(pos_inds), 1) + return ( + labels[None], + label_weights[None], + bbox_targets[None], + bbox_weights[None], + ious[None], + int(pos_inds.shape[0]), + float(mean_iou), + heatmap[None], + ) + + def loss(self, gt_bboxes_3d, gt_labels_3d, preds_dicts, **kwargs): + """Loss function for CenterHead. + + Args: + gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground + truth gt boxes. + gt_labels_3d (list[torch.Tensor]): Labels of boxes. + preds_dicts (list[list[dict]]): Output of forward function. + Returns: + dict[str:torch.Tensor]: Loss of heatmap and bbox of each task. 
+ """ + ( + labels, + label_weights, + bbox_targets, + bbox_weights, + ious, + num_pos, + matched_ious, + heatmap, + ) = self.get_targets(gt_bboxes_3d, gt_labels_3d, preds_dicts[0]) + if hasattr(self, 'on_the_image_mask'): + label_weights = label_weights * self.on_the_image_mask + bbox_weights = bbox_weights * self.on_the_image_mask[:, :, None] + num_pos = bbox_weights.max(-1).values.sum() + preds_dict = preds_dicts[0][0] + loss_dict = dict() + + # compute heatmap loss + loss_heatmap = self.loss_heatmap( + clip_sigmoid(preds_dict['dense_heatmap']), + heatmap, + avg_factor=max(heatmap.eq(1).float().sum().item(), 1), + ) + loss_dict['loss_heatmap'] = loss_heatmap + + # compute loss for each layer + for idx_layer in range( + self.num_decoder_layers if self.auxiliary else 1): + if idx_layer == self.num_decoder_layers - 1 or ( + idx_layer == 0 and self.auxiliary is False): + prefix = 'layer_-1' + else: + prefix = f'layer_{idx_layer}' + + layer_labels = labels[..., idx_layer * + self.num_proposals:(idx_layer + 1) * + self.num_proposals, ].reshape(-1) + layer_label_weights = label_weights[ + ..., idx_layer * self.num_proposals:(idx_layer + 1) * + self.num_proposals, ].reshape(-1) + layer_score = preds_dict['heatmap'][..., idx_layer * + self.num_proposals:(idx_layer + + 1) * + self.num_proposals, ] + layer_cls_score = layer_score.permute(0, 2, 1).reshape( + -1, self.num_classes) + layer_loss_cls = self.loss_cls( + layer_cls_score, + layer_labels, + layer_label_weights, + avg_factor=max(num_pos, 1), + ) + + layer_center = preds_dict['center'][..., idx_layer * + self.num_proposals:(idx_layer + + 1) * + self.num_proposals, ] + layer_height = preds_dict['height'][..., idx_layer * + self.num_proposals:(idx_layer + + 1) * + self.num_proposals, ] + layer_rot = preds_dict['rot'][..., idx_layer * + self.num_proposals:(idx_layer + 1) * + self.num_proposals, ] + layer_dim = preds_dict['dim'][..., idx_layer * + self.num_proposals:(idx_layer + 1) * + self.num_proposals, ] + preds = torch.cat( + [layer_center, layer_height, layer_dim, layer_rot], + dim=1).permute(0, 2, 1) # [BS, num_proposals, code_size] + if 'vel' in preds_dict.keys(): + layer_vel = preds_dict['vel'][..., idx_layer * + self.num_proposals:(idx_layer + + 1) * + self.num_proposals, ] + preds = torch.cat([ + layer_center, layer_height, layer_dim, layer_rot, layer_vel + ], + dim=1).permute( + 0, 2, + 1) # [BS, num_proposals, code_size] + code_weights = self.train_cfg.get('code_weights', None) + layer_bbox_weights = bbox_weights[:, idx_layer * + self.num_proposals:(idx_layer + + 1) * + self.num_proposals, :, ] + layer_reg_weights = layer_bbox_weights * layer_bbox_weights.new_tensor( # noqa: E501 + code_weights) + layer_bbox_targets = bbox_targets[:, idx_layer * + self.num_proposals:(idx_layer + + 1) * + self.num_proposals, :, ] + layer_loss_bbox = self.loss_bbox( + preds, + layer_bbox_targets, + layer_reg_weights, + avg_factor=max(num_pos, 1)) + + loss_dict[f'{prefix}_loss_cls'] = layer_loss_cls + loss_dict[f'{prefix}_loss_bbox'] = layer_loss_bbox + # loss_dict[f'{prefix}_loss_iou'] = layer_loss_iou + + loss_dict['matched_ious'] = layer_loss_cls.new_tensor(matched_ious) + + return loss_dict diff --git a/projects/BEVFusion/bevfusion/utils.py b/projects/BEVFusion/bevfusion/utils.py new file mode 100755 index 0000000..0ce5472 --- /dev/null +++ b/projects/BEVFusion/bevfusion/utils.py @@ -0,0 +1,306 @@ +# modify from https://github.com/mit-han-lab/bevfusion +import torch +from mmdet.models.task_modules import AssignResult, BaseAssigner, BaseBBoxCoder + 
+try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class TransFusionBBoxCoder(BaseBBoxCoder): + + def __init__( + self, + pc_range, + out_size_factor, + voxel_size, + post_center_range=None, + score_threshold=None, + code_size=8, + ): + self.pc_range = pc_range + self.out_size_factor = out_size_factor + self.voxel_size = voxel_size + self.post_center_range = post_center_range + self.score_threshold = score_threshold + self.code_size = code_size + + def encode(self, dst_boxes): + targets = torch.zeros([dst_boxes.shape[0], + self.code_size]).to(dst_boxes.device) + targets[:, 0] = (dst_boxes[:, 0] - self.pc_range[0]) / ( + self.out_size_factor * self.voxel_size[0]) + targets[:, 1] = (dst_boxes[:, 1] - self.pc_range[1]) / ( + self.out_size_factor * self.voxel_size[1]) + targets[:, 3] = dst_boxes[:, 3].log() + targets[:, 4] = dst_boxes[:, 4].log() + targets[:, 5] = dst_boxes[:, 5].log() + # bottom center to gravity center + targets[:, 2] = dst_boxes[:, 2] + dst_boxes[:, 5] * 0.5 + targets[:, 6] = torch.sin(dst_boxes[:, 6]) + targets[:, 7] = torch.cos(dst_boxes[:, 6]) + if self.code_size == 10: + targets[:, 8:10] = dst_boxes[:, 7:] + return targets + + def decode(self, heatmap, rot, dim, center, height, vel, filter=False): + """Decode bboxes. + Args: + heat (torch.Tensor): Heatmap with the shape of + [B, num_cls, num_proposals]. + rot (torch.Tensor): Rotation with the shape of + [B, 1, num_proposals]. + dim (torch.Tensor): Dim of the boxes with the shape of + [B, 3, num_proposals]. + center (torch.Tensor): bev center of the boxes with the shape of + [B, 2, num_proposals]. (in feature map metric) + height (torch.Tensor): height of the boxes with the shape of + [B, 2, num_proposals]. (in real world metric) + vel (torch.Tensor): Velocity with the shape of + [B, 2, num_proposals]. + filter: if False, return all box without checking score and + center_range + Returns: + list[dict]: Decoded boxes. 
+ """ + # class label + final_preds = heatmap.max(1, keepdims=False).indices + final_scores = heatmap.max(1, keepdims=False).values + + # change size to real world metric + center[:, + 0, :] = center[:, + 0, :] * self.out_size_factor * self.voxel_size[ + 0] + self.pc_range[0] + center[:, + 1, :] = center[:, + 1, :] * self.out_size_factor * self.voxel_size[ + 1] + self.pc_range[1] + dim[:, 0, :] = dim[:, 0, :].exp() + dim[:, 1, :] = dim[:, 1, :].exp() + dim[:, 2, :] = dim[:, 2, :].exp() + height = height - dim[:, + 2:3, :] * 0.5 # gravity center to bottom center + rots, rotc = rot[:, 0:1, :], rot[:, 1:2, :] + rot = torch.atan2(rots, rotc) + + if vel is None: + final_box_preds = torch.cat([center, height, dim, rot], + dim=1).permute(0, 2, 1) + else: + final_box_preds = torch.cat([center, height, dim, rot, vel], + dim=1).permute(0, 2, 1) + + predictions_dicts = [] + for i in range(heatmap.shape[0]): + boxes3d = final_box_preds[i] + scores = final_scores[i] + labels = final_preds[i] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + predictions_dicts.append(predictions_dict) + + if filter is False: + return predictions_dicts + + # use score threshold + if self.score_threshold is not None: + thresh_mask = final_scores > self.score_threshold + + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=heatmap.device) + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(2) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(2) + + predictions_dicts = [] + for i in range(heatmap.shape[0]): + cmask = mask[i, :] + if self.score_threshold: + cmask &= thresh_mask[i] + + boxes3d = final_box_preds[i, cmask] + scores = final_scores[i, cmask] + labels = final_preds[i, cmask] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + + predictions_dicts.append(predictions_dict) + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + + return predictions_dicts + + +@TASK_UTILS.register_module() +class BBoxBEVL1Cost(object): + + def __init__(self, weight): + self.weight = weight + + def __call__(self, bboxes, gt_bboxes, train_cfg): + pc_start = bboxes.new(train_cfg['point_cloud_range'][0:2]) + pc_range = bboxes.new( + train_cfg['point_cloud_range'][3:5]) - bboxes.new( + train_cfg['point_cloud_range'][0:2]) + # normalize the box center to [0, 1] + normalized_bboxes_xy = (bboxes[:, :2] - pc_start) / pc_range + normalized_gt_bboxes_xy = (gt_bboxes[:, :2] - pc_start) / pc_range + reg_cost = torch.cdist( + normalized_bboxes_xy, normalized_gt_bboxes_xy, p=1) + return reg_cost * self.weight + + +@TASK_UTILS.register_module() +class IoU3DCost(object): + + def __init__(self, weight): + self.weight = weight + + def __call__(self, iou): + iou_cost = -iou + return iou_cost * self.weight + + +@TASK_UTILS.register_module() +class HeuristicAssigner3D(BaseAssigner): + + def __init__(self, + dist_thre=100, + iou_calculator=dict(type='BboxOverlaps3D')): + self.dist_thre = dist_thre # distance in meter + self.iou_calculator = TASK_UTILS.build(iou_calculator) + + def assign(self, + bboxes, + gt_bboxes, + gt_bboxes_ignore=None, + gt_labels=None, + query_labels=None): + dist_thre = self.dist_thre + num_gts, num_bboxes = len(gt_bboxes), len(bboxes) + + bev_dist = torch.norm( + bboxes[:, 0:2][None, :, :] - gt_bboxes[:, 0:2][:, None, :], + dim=-1) # [num_gts, num_bboxes] + if query_labels 
is not None: + # only match the gt box and query with same category + not_same_class = (query_labels[None] != gt_labels[:, None]) + bev_dist += not_same_class * dist_thre + + # for each gt box, assign it to the nearest pred box + nearest_values, nearest_indices = bev_dist.min(1) # [num_gts] + assigned_gt_inds = torch.ones([ + num_bboxes, + ]).to(bboxes) * 0 + assigned_gt_vals = torch.ones([ + num_bboxes, + ]).to(bboxes) * 10000 + assigned_gt_labels = torch.ones([ + num_bboxes, + ]).to(bboxes) * -1 + for idx_gts in range(num_gts): + # for idx_pred in torch.where(bev_dist[idx_gts] < dist_thre)[0]: + # # each gt match to all the pred box within some radius + idx_pred = nearest_indices[ + idx_gts] # each gt only match to the nearest pred box + if bev_dist[idx_gts, idx_pred] <= dist_thre: + # if this pred box is assigned, then compare + if bev_dist[idx_gts, idx_pred] < assigned_gt_vals[idx_pred]: + assigned_gt_vals[idx_pred] = bev_dist[idx_gts, idx_pred] + # for AssignResult, 0 is negative, -1 is ignore, 1-based + # indices are positive + assigned_gt_inds[idx_pred] = idx_gts + 1 + assigned_gt_labels[idx_pred] = gt_labels[idx_gts] + + max_overlaps = torch.zeros([ + num_bboxes, + ]).to(bboxes) + matched_indices = torch.where(assigned_gt_inds > 0) + matched_iou = self.iou_calculator( + gt_bboxes[assigned_gt_inds[matched_indices].long() - 1], + bboxes[matched_indices]).diag() + max_overlaps[matched_indices] = matched_iou + + return AssignResult( + num_gts, + assigned_gt_inds.long(), + max_overlaps, + labels=assigned_gt_labels) + + +@TASK_UTILS.register_module() +class HungarianAssigner3D(BaseAssigner): + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxBEVL1Cost', weight=1.0), + iou_cost=dict(type='IoU3DCost', weight=1.0), + iou_calculator=dict(type='BboxOverlaps3D')): + self.cls_cost = TASK_UTILS.build(cls_cost) + self.reg_cost = TASK_UTILS.build(reg_cost) + self.iou_cost = TASK_UTILS.build(iou_cost) + self.iou_calculator = TASK_UTILS.build(iou_calculator) + + def assign(self, bboxes, gt_bboxes, gt_labels, cls_pred, train_cfg): + num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assigned_labels = bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + + # 2. compute the weighted costs + # see mmdetection/mmdet/core/bbox/match_costs/match_cost.py + cls_cost = self.cls_cost(cls_pred[0].T, gt_labels) + reg_cost = self.reg_cost(bboxes, gt_bboxes, train_cfg) + iou = self.iou_calculator(bboxes, gt_bboxes) + iou_cost = self.iou_cost(iou) + + # weighted sum of above three costs + cost = cls_cost + reg_cost + iou_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to(bboxes.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to(bboxes.device) + + # 4. 
assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + + max_overlaps = torch.zeros_like(iou.max(1).values) + max_overlaps[matched_row_inds] = iou[matched_row_inds, + matched_col_inds] + # max_overlaps = iou.max(1).values + return AssignResult( + num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/projects/BEVFusion/configs/bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py b/projects/BEVFusion/configs/bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py new file mode 100755 index 0000000..8f12892 --- /dev/null +++ b/projects/BEVFusion/configs/bevfusion_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py @@ -0,0 +1,430 @@ +_base_ = ['mmdet3d::_base_/default_runtime.py'] +custom_imports = dict( + imports=['projects.BEVFusion.bevfusion'], allow_failed_imports=False) + +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +voxel_size = [0.075, 0.075, 0.2] +point_cloud_range = [-54.0, -54.0, -5.0, 54.0, 54.0, 3.0] +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] + +metainfo = dict(classes=class_names) +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +data_prefix = dict( + pts='samples/LIDAR_TOP', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT', + sweeps='sweeps/LIDAR_TOP') +input_modality = dict(use_lidar=True, use_camera=True) +backend_args = None + +model = dict( + type='BEVFusion', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=False, + pad_size_divisor=32, + voxelize_cfg=dict( + max_num_points=10, + point_cloud_range=[-54.0, -54.0, -5.0, 54.0, 54.0, 3.0], + voxel_size=[0.075, 0.075, 0.2], + max_voxels=[120000, 160000], + voxelize_reduce=True)), + img_backbone=dict( + type='mmdet.SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.2, + patch_norm=True, + out_indices=[1, 2, 3], + with_cp=False, + convert_weights=True, + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa: E501 + )), + img_neck=dict( + type='GeneralizedLSSFPN', + in_channels=[192, 384, 768], + out_channels=256, + start_level=0, + num_outs=3, + norm_cfg=dict(type='BN2d', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True), + upsample_cfg=dict(mode='bilinear', align_corners=False)), + vtransform=dict( + type='DepthLSSTransform', + in_channels=256, + out_channels=80, + image_size=[256, 704], + feature_size=[32, 88], + xbound=[-54.0, 54.0, 0.3], + ybound=[-54.0, 54.0, 0.3], + zbound=[-10.0, 10.0, 20.0], + dbound=[1.0, 60.0, 0.5], + downsample=2), + pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + pts_middle_encoder=dict( + 
type='BEVFusionSparseEncoder', + in_channels=5, + sparse_shape=[1440, 1440, 41], + order=('conv', 'norm', 'act'), + norm_cfg=dict(type='SyncBN', eps=0.001, momentum=0.01), + encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128, + 128)), + encoder_paddings=((0, 0, 1), (0, 0, 1), (0, 0, (1, 1, 0)), (0, 0)), + block_type='basicblock'), + fusion_layer=dict( + type='ConvFuser', in_channels=[80, 256], out_channels=256), + pts_backbone=dict( + type='SECOND', + in_channels=256, + out_channels=[128, 256], + layer_nums=[5, 5], + layer_strides=[1, 2], + norm_cfg=dict(type='SyncBN', eps=0.001, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + out_channels=[256, 256], + upsample_strides=[1, 2], + norm_cfg=dict(type='SyncBN', eps=0.001, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + bbox_head=dict( + type='TransFusionHead', + num_proposals=200, + auxiliary=True, + in_channels=512, + hidden_channel=128, + num_classes=10, + nms_kernel_size=3, + bn_momentum=0.1, + num_decoder_layers=1, + decoder_layer=dict( + type='TransformerDecoderLayer', + self_attn_cfg=dict(embed_dims=128, num_heads=8, dropout=0.1), + cross_attn_cfg=dict(embed_dims=128, num_heads=8, dropout=0.1), + ffn_cfg=dict( + embed_dims=128, + feedforward_channels=256, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True), + ), + norm_cfg=dict(type='LN'), + pos_encoding_cfg=dict(input_channel=2, num_pos_feats=128)), + train_cfg=dict( + dataset='nuScenes', + point_cloud_range=[-54.0, -54.0, -5.0, 54.0, 54.0, 3.0], + grid_size=[1440, 1440, 41], + voxel_size=[0.075, 0.075, 0.2], + out_size_factor=8, + gaussian_overlap=0.1, + min_radius=2, + pos_weight=-1, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + assigner=dict( + type='HungarianAssigner3D', + iou_calculator=dict(type='BboxOverlaps3D', coordinate='lidar'), + cls_cost=dict( + type='mmdet.FocalLossCost', + gamma=2.0, + alpha=0.25, + weight=0.15), + reg_cost=dict(type='BBoxBEVL1Cost', weight=0.25), + iou_cost=dict(type='IoU3DCost', weight=0.25))), + test_cfg=dict( + dataset='nuScenes', + grid_size=[1440, 1440, 41], + out_size_factor=8, + voxel_size=[0.075, 0.075], + pc_range=[-54.0, -54.0], + nms_type=None), + common_heads=dict( + center=[2, 2], height=[1, 2], dim=[3, 2], rot=[2, 2], vel=[2, 2]), + bbox_coder=dict( + type='TransFusionBBoxCoder', + pc_range=[-54.0, -54.0], + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + score_threshold=0.0, + out_size_factor=8, + voxel_size=[0.075, 0.075], + code_size=10), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0), + loss_heatmap=dict( + type='mmdet.GaussianFocalLoss', reduction='mean', loss_weight=1.0), + loss_bbox=dict( + type='mmdet.L1Loss', reduction='mean', loss_weight=0.25))) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'nuscenes_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + classes=class_names, + sample_groups=dict( + car=5, + truck=5, + bus=5, + trailer=5, + construction_vehicle=5, + traffic_cone=5, + barrier=5, + motorcycle=5, + bicycle=5, + pedestrian=5), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + reduce_beams=32, + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ 
+ dict( + type='BEVLoadMultiViewImageFromFiles', + to_float32=True, + color_type='color', + backend_args=backend_args), + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + reduce_beams=32, + load_augmented=None, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + load_dim=5, + use_dim=5, + reduce_beams=32, + pad_empty_sweeps=True, + remove_close=True, + load_augmented=None, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False), + # dict(type='ObjectSampling', db_sampler=db_sampler), + dict( + type='ImageAug3D', + final_dim=[256, 704], + resize_lim=[0.38, 0.55], + bot_pct_lim=[0.0, 0.0], + rot_lim=[-5.4, 5.4], + rand_flip=True, + is_train=True), + dict( + type='GlobalRotScaleTrans', + resize_lim=[0.9, 1.1], + rot_lim=[-0.78539816, 0.78539816], + trans_lim=0.5, + is_train=True), + dict(type='RandomFlip3D'), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='ObjectNameFilter', + classes=[ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', + 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + ]), + dict( + type='GridMask', + use_h=True, + use_w=True, + max_epoch=6, + rotate=1, + offset=False, + ratio=0.5, + mode=1, + prob=0.0, + fixed_prob=True), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'img', 'gt_bboxes_3d', 'gt_labels_3d', 'gt_bboxes', + 'gt_labels' + ]) +] + +test_pipeline = [ + dict( + type='BEVLoadMultiViewImageFromFiles', + to_float32=True, + color_type='color', + backend_args=backend_args), + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + backend_args=backend_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + load_dim=5, + use_dim=5, + pad_empty_sweeps=True, + remove_close=True, + backend_args=backend_args), + dict( + type='ImageAug3D', + final_dim=[256, 704], + resize_lim=[0.48, 0.48], + bot_pct_lim=[0.0, 0.0], + rot_lim=[0.0, 0.0], + rand_flip=False, + is_train=False), + dict( + type='PointsRangeFilter', + point_cloud_range=[-54.0, -54.0, -5.0, 54.0, 54.0, 3.0]), + dict( + type='Pack3DDetInputs', + keys=['img', 'points', 'gt_bboxes_3d', 'gt_labels_3d'], + meta_keys=[ + 'cam2img', 'ori_cam2img', 'lidar2cam', 'lidar2img', 'cam2lidar', + 'ori_lidar2img', 'img_aug_matrix', 'box_type_3d', 'sample_idx', + 'lidar_path', 'img_path' + ]) +] + +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_train.pkl', + pipeline=train_pipeline, + metainfo=metainfo, + modality=input_modality, + test_mode=False, + data_prefix=data_prefix, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR', + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=0, + # persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_val.pkl', + pipeline=test_pipeline, + metainfo=metainfo, + modality=input_modality, + data_prefix=data_prefix, + test_mode=True, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='NuScenesMetric', + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.33333333, + by_epoch=False, + begin=0, + end=500), + dict( + type='CosineAnnealingLR', + begin=0, + T_max=6, + end=6, + by_epoch=True, + eta_min_ratio=1e-3), + # momentum scheduler + # During the first 8 epochs, momentum increases from 1 to 0.85 / 0.95 + # during the next 12 epochs, momentum increases from 0.85 / 0.95 to 1 + dict( + type='CosineAnnealingMomentum', + eta_min=0.85 / 0.95, + begin=0, + end=2.4, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + eta_min=1, + begin=2.4, + end=6, + by_epoch=True, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=6, val_interval=6) +val_cfg = dict() +test_cfg = dict() + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.01), + clip_grad=dict(max_norm=35, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (4 GPUs) x (4 samples per GPU). 
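+# For example, with `enable=True` (or the `--auto-scale-lr` flag of
+# tools/train.py), MMEngine scales the optimizer lr linearly by
+# actual_batch_size / base_batch_size, so training on 8 GPUs with 4 samples
+# each (total batch size 32) would use lr = 0.0002 * 32 / 16 = 0.0004.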
+auto_scale_lr = dict(enable=False, base_batch_size=16) + +default_hooks = dict( + logger=dict(type='LoggerHook', interval=50), + checkpoint=dict(type='CheckpointHook', interval=5)) diff --git a/projects/BEVFusion/setup.py b/projects/BEVFusion/setup.py new file mode 100755 index 0000000..41aa96b --- /dev/null +++ b/projects/BEVFusion/setup.py @@ -0,0 +1,71 @@ +import os +from setuptools import setup + +import torch +from torch.utils.cpp_extension import (BuildExtension, CppExtension, + CUDAExtension) + + +def make_cuda_ext(name, + module, + sources, + sources_cuda=[], + extra_args=[], + extra_include_path=[]): + + define_macros = [] + extra_compile_args = {'cxx': [] + extra_args} + + if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1': + define_macros += [('WITH_CUDA', None)] + extension = CUDAExtension + extra_compile_args['nvcc'] = extra_args + [ + '-D__CUDA_NO_HALF_OPERATORS__', + '-D__CUDA_NO_HALF_CONVERSIONS__', + '-D__CUDA_NO_HALF2_OPERATORS__', + '-gencode=arch=compute_70,code=sm_70', + '-gencode=arch=compute_75,code=sm_75', + '-gencode=arch=compute_80,code=sm_80', + '-gencode=arch=compute_86,code=sm_86', + ] + sources += sources_cuda + else: + print('Compiling {} without CUDA'.format(name)) + extension = CppExtension + + return extension( + name='{}.{}'.format(module, name), + sources=[os.path.join(*module.split('.'), p) for p in sources], + include_dirs=extra_include_path, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + + +if __name__ == '__main__': + setup( + name='bev_pool', + ext_modules=[ + make_cuda_ext( + name='bev_pool_ext', + module='projects.BEVFusion.bevfusion.ops.bev_pool', + sources=[ + 'src/bev_pool.cpp', + 'src/bev_pool_cuda.cu', + ], + ), + make_cuda_ext( + name='voxel_layer', + module='projects.BEVFusion.bevfusion.ops.voxel', + sources=[ + 'src/voxelization.cpp', + 'src/scatter_points_cpu.cpp', + 'src/scatter_points_cuda.cu', + 'src/voxelization_cpu.cpp', + 'src/voxelization_cuda.cu', + ], + ), + ], + cmdclass={'build_ext': BuildExtension}, + zip_safe=False, + ) diff --git a/projects/CenterFormer/README.md b/projects/CenterFormer/README.md new file mode 100755 index 0000000..9d81f1b --- /dev/null +++ b/projects/CenterFormer/README.md @@ -0,0 +1,82 @@ +# CenterFormer: Center-based Transformer for 3D Object Detection + +> [CenterFormer: Center-based Transformer for 3D Object Detection](https://arxiv.org/abs/2209.05588) + + + +## Abstract + +Query-based transformer has shown great potential in con- +structing long-range attention in many image-domain tasks, but has +rarely been considered in LiDAR-based 3D object detection due to the +overwhelming size of the point cloud data. In this paper, we propose +CenterFormer, a center-based transformer network for 3D object de- +tection. CenterFormer first uses a center heatmap to select center candi- +dates on top of a standard voxel-based point cloud encoder. It then uses +the feature of the center candidate as the query embedding in the trans- +former. To further aggregate features from multiple frames, we design +an approach to fuse features through cross-attention. Lastly, regression +heads are added to predict the bounding box on the output center feature +representation. Our design reduces the convergence difficulty and compu- +tational complexity of the transformer structure. The results show signif- +icant improvements over the strong baseline of anchor-free object detec- +tion networks. 
CenterFormer achieves state-of-the-art performance for a +single model on the Waymo Open Dataset, with 73.7% mAPH on the val- +idation set and 75.6% mAPH on the test set, significantly outperforming +all previously published CNN and transformer-based methods. Our code +is publicly available at https://github.com/TuSimple/centerformer + +
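+The center-as-query idea can be summarized with a short, self-contained
+sketch (illustrative only, not the code of this project): the per-class
+heatmap is reduced to a best-class score per BEV location, the top-k
+locations are kept as center candidates, and the BEV features at those
+locations become the query embeddings fed to the transformer decoder. All
+names, shapes and the value of `k` below are assumptions for illustration.
+
+```python
+import torch
+
+
+def select_center_queries(heatmap, bev_feat, k=500):
+    """heatmap: (B, num_classes, H, W) after sigmoid; bev_feat: (B, C, H, W)."""
+    # best class score per BEV location -> (B, H*W)
+    scores, labels = heatmap.flatten(2).max(dim=1)
+    # keep the k highest-scoring locations as center candidates
+    top_scores, top_pos = scores.topk(k, dim=1)
+    top_labels = labels.gather(1, top_pos)
+    # gather the BEV feature at each candidate as a query embedding
+    flat_feat = bev_feat.flatten(2).permute(0, 2, 1)  # (B, H*W, C)
+    queries = flat_feat.gather(
+        1, top_pos.unsqueeze(-1).expand(-1, -1, flat_feat.size(-1)))
+    return queries, top_labels, top_scores, top_pos
+
+
+queries, labels, scores, pos = select_center_queries(
+    torch.rand(2, 3, 128, 128).sigmoid(), torch.rand(2, 64, 128, 128))
+print(queries.shape)  # torch.Size([2, 500, 64])
+```
+
+In the `DeformableDecoderRPN` implementation of this project, the same
+selection is additionally adjusted during training so that ground-truth
+center locations are pushed into the kept proposals before label assignment.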
+## Introduction
+
+We implement CenterFormer and provide the results and checkpoints on the Waymo dataset.
+
+## Usage
+
+### Training commands
+
+In MMDetection3D's root directory, run the following command to train the model:
+
+```bash
+python tools/train.py projects/CenterFormer/configs/centerformer_voxel01_second-atten_secfpn-atten_4xb4-cyclic-20e_waymoD5-3d-3class.py
+```
+
+For multi-GPU training, run:
+
+```bash
+python -m torch.distributed.launch --nnodes=1 --node_rank=0 --nproc_per_node=${NUM_GPUS} --master_port=29506 --master_addr="127.0.0.1" tools/train.py projects/CenterFormer/configs/centerformer_voxel01_second-atten_secfpn-atten_4xb4-cyclic-20e_waymoD5-3d-3class.py --launcher pytorch
+```
+
+### Testing commands
+
+In MMDetection3D's root directory, run the following command to test the model:
+
+```bash
+python tools/test.py projects/CenterFormer/configs/centerformer_voxel01_second-atten_secfpn-atten_4xb4-cyclic-20e_waymoD5-3d-3class.py ${CHECKPOINT_PATH}
+```
+
+## Results and models
+
+### Waymo
+
+| Backbone | Load Interval | Voxel type (voxel size) | Multi-Class NMS | Multi-frames | Mem (GB) | Inf time (fps) | mAP@L1 | mAPH@L1 | mAP@L2 | **mAPH@L2** | Download |
+| :------: | :-----------: | :---------------------: | :-------------: | :----------: | :------: | :------------: | :----: | :-----: | :----: | :---------: | :------: |
+| [SECFPN_WithAttention](./configs/centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class.py) | 5 | voxel (0.1) | ✓ | × | 14.8 | | 72.2 | 69.5 | 65.9 | 63.3 | [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/centerformer/centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class/centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class_20221227_205613-70c9ad37.log) |
+
+**Note** that `SECFPN_WithAttention` denotes SECOND and SECONDFPN, both equipped with ChannelAttention and SpatialAttention.
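+To double-check the settings behind the entry above, the config can be loaded
+and inspected with MMEngine. This is only a usage sketch; the file name is
+taken from the table link above and should be adapted to the actual file in
+`projects/CenterFormer/configs/`.
+
+```python
+from mmengine.config import Config
+
+cfg = Config.fromfile(
+    'projects/CenterFormer/configs/'
+    'centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class.py')
+print(cfg.model.type)                   # expected: 'CenterFormer'
+print(cfg.train_dataloader.batch_size)  # samples per GPU used for the result above
+```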
+ +## Citation + +```latex +@InProceedings{Zhou_centerformer, +title = {CenterFormer: Center-based Transformer for 3D Object Detection}, +author = {Zhou, Zixiang and Zhao, Xiangchen and Wang, Yu and Wang, Panqu and Foroosh, Hassan}, +booktitle = {ECCV}, +year = {2022} +} +``` diff --git a/projects/CenterFormer/centerformer/__init__.py b/projects/CenterFormer/centerformer/__init__.py new file mode 100755 index 0000000..3bd38cd --- /dev/null +++ b/projects/CenterFormer/centerformer/__init__.py @@ -0,0 +1,11 @@ +from .bbox_ops import nms_iou3d +from .centerformer import CenterFormer +from .centerformer_backbone import (DeformableDecoderRPN, + MultiFrameDeformableDecoderRPN) +from .centerformer_head import CenterFormerBboxHead +from .losses import FastFocalLoss + +__all__ = [ + 'CenterFormer', 'DeformableDecoderRPN', 'CenterFormerBboxHead', + 'FastFocalLoss', 'nms_iou3d', 'MultiFrameDeformableDecoderRPN' +] diff --git a/projects/CenterFormer/centerformer/bbox_ops.py b/projects/CenterFormer/centerformer/bbox_ops.py new file mode 100755 index 0000000..dca5d76 --- /dev/null +++ b/projects/CenterFormer/centerformer/bbox_ops.py @@ -0,0 +1,41 @@ +import torch +from mmcv.utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['iou3d_nms3d_forward']) + + +def nms_iou3d(boxes, scores, thresh, pre_maxsize=None, post_max_size=None): + """NMS function GPU implementation (using IoU3D). The difference between + this implementation and nms3d in MMCV is that we add `pre_maxsize` and + `post_max_size` before and after NMS respectively. + + Args: + boxes (Tensor): Input boxes with the shape of [N, 7] + ([cx, cy, cz, l, w, h, theta]). + scores (Tensor): Scores of boxes with the shape of [N]. + thresh (float): Overlap threshold of NMS. + pre_max_size (int, optional): Max size of boxes before NMS. + Defaults to None. + post_max_size (int, optional): Max size of boxes after NMS. + Defaults to None. + + Returns: + Tensor: Indexes after NMS. + """ + # TODO: directly refactor ``nms3d`` in MMCV + assert boxes.size(1) == 7, 'Input boxes shape should be (N, 7)' + order = scores.sort(0, descending=True)[1] + if pre_maxsize is not None: + order = order[:pre_maxsize] + boxes = boxes[order].contiguous() + + keep = boxes.new_zeros(boxes.size(0), dtype=torch.long) + num_out = boxes.new_zeros(size=(), dtype=torch.long) + ext_module.iou3d_nms3d_forward( + boxes, keep, num_out, nms_overlap_thresh=thresh) + keep = order[keep[:num_out].to(boxes.device)].contiguous() + + if post_max_size is not None: + keep = keep[:post_max_size] + + return keep diff --git a/projects/CenterFormer/centerformer/centerformer.py b/projects/CenterFormer/centerformer/centerformer.py new file mode 100755 index 0000000..6b8b64d --- /dev/null +++ b/projects/CenterFormer/centerformer/centerformer.py @@ -0,0 +1,180 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional + +import torch +from torch import Tensor +from torch.nn.modules.batchnorm import _BatchNorm + +from mmdet3d.models.detectors import Base3DDetector +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample + + +@MODELS.register_module() +class CenterFormer(Base3DDetector): + """Base class of center-based 3D detector. + + Args: + voxel_encoder (dict, optional): Point voxelization + encoder layer. Defaults to None. + middle_encoder (dict, optional): Middle encoder layer + of points cloud modality. Defaults to None. + pts_fusion_layer (dict, optional): Fusion layer. + Defaults to None. 
+ backbone (dict, optional): Backbone of extracting + points features. Defaults to None. + neck (dict, optional): Neck of extracting + points features. Defaults to None. + bbox_head (dict, optional): Bboxes head of + point cloud modality. Defaults to None. + train_cfg (dict, optional): Train config of model. + Defaults to None. + test_cfg (dict, optional): Train config of model. + Defaults to None. + init_cfg (dict, optional): Initialize config of + model. Defaults to None. + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`Det3DDataPreprocessor`. Defaults to None. + """ + + def __init__(self, + voxel_encoder: Optional[dict] = None, + middle_encoder: Optional[dict] = None, + backbone: Optional[dict] = None, + neck: Optional[dict] = None, + bbox_head: Optional[dict] = None, + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None, + data_preprocessor: Optional[dict] = None, + **kwargs): + super(CenterFormer, self).__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor, **kwargs) + + if voxel_encoder: + self.voxel_encoder = MODELS.build(voxel_encoder) + if middle_encoder: + self.middle_encoder = MODELS.build(middle_encoder) + if backbone: + backbone.update(train_cfg=train_cfg, test_cfg=test_cfg) + self.backbone = MODELS.build(backbone) + if neck is not None: + self.neck = MODELS.build(neck) + if bbox_head: + bbox_head.update(train_cfg=train_cfg, test_cfg=test_cfg) + self.bbox_head = MODELS.build(bbox_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def init_weights(self): + for m in self.modules(): + if isinstance(m, _BatchNorm): + torch.nn.init.uniform_(m.weight) + + @property + def with_bbox(self): + """bool: Whether the detector has a 3D box head.""" + return hasattr(self, 'bbox_head') and self.bbox_head is not None + + @property + def with_backbone(self): + """bool: Whether the detector has a 3D backbone.""" + return hasattr(self, 'backbone') and self.backbone is not None + + @property + def with_voxel_encoder(self): + """bool: Whether the detector has a voxel encoder.""" + return hasattr(self, + 'voxel_encoder') and self.voxel_encoder is not None + + @property + def with_middle_encoder(self): + """bool: Whether the detector has a middle encoder.""" + return hasattr(self, + 'middle_encoder') and self.middle_encoder is not None + + def _forward(self): + pass + + def extract_feat(self, batch_inputs_dict: dict, + batch_input_metas: List[dict]) -> tuple: + """Extract features from images and points. + Args: + batch_inputs_dict (dict): Dict of batch inputs. It + contains + - points (List[tensor]): Point cloud of multiple inputs. + - imgs (tensor): Image tensor with shape (B, C, H, W). + batch_input_metas (list[dict]): Meta information of multiple inputs + in a batch. + Returns: + tuple: Two elements in tuple arrange as + image features and point cloud features. + """ + voxel_dict = batch_inputs_dict.get('voxels', None) + voxel_features, feature_coors = self.voxel_encoder( + voxel_dict['voxels'], voxel_dict['coors']) + batch_size = voxel_dict['coors'][-1, 0].item() + 1 + x = self.middle_encoder(voxel_features, feature_coors, batch_size) + + return x + + def loss(self, batch_inputs_dict: Dict[List, torch.Tensor], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + """ + Args: + batch_inputs_dict (dict): The model input dict which include + 'points' and `imgs` keys. + - points (list[torch.Tensor]): Point cloud of each sample. 
+ - imgs (torch.Tensor): Tensor of batch images, has shape + (B, C, H ,W) + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`, . + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + + batch_input_metas = [item.metainfo for item in batch_data_samples] + pts_feats = self.extract_feat(batch_inputs_dict, batch_input_metas) + preds, batch_tatgets = self.backbone(pts_feats, batch_data_samples) + preds = self.bbox_head(preds) + losses = dict() + losses.update(self.bbox_head.loss(preds, batch_tatgets)) + return losses + # return self.bbox_head.predict(preds, batch_tatgets) + + def predict(self, batch_inputs_dict: Dict[str, Optional[Tensor]], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + """Forward of testing. + Args: + batch_inputs_dict (dict): The model input dict which include + 'points' keys. + - points (list[torch.Tensor]): Point cloud of each sample. + batch_data_samples (List[:obj:`Det3DDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance_3d`. + Returns: + list[:obj:`Det3DDataSample`]: Detection results of the + input sample. Each Det3DDataSample usually contain + 'pred_instances_3d'. And the ``pred_instances_3d`` usually + contains following keys. + - scores_3d (Tensor): Classification scores, has a shape + (num_instances, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bbox_3d (:obj:`BaseInstance3DBoxes`): Prediction of bboxes, + contains a tensor with shape (num_instances, 7). + """ + batch_input_metas = [item.metainfo for item in batch_data_samples] + pts_feats = self.extract_feat(batch_inputs_dict, batch_input_metas) + preds, _ = self.backbone(pts_feats, batch_data_samples) + + preds = self.bbox_head(preds) + results_list_3d = self.bbox_head.predict(preds, batch_input_metas) + + detsamples = self.add_pred_to_datasample(batch_data_samples, + results_list_3d) + return detsamples diff --git a/projects/CenterFormer/centerformer/centerformer_backbone.py b/projects/CenterFormer/centerformer/centerformer_backbone.py new file mode 100755 index 0000000..1c62471 --- /dev/null +++ b/projects/CenterFormer/centerformer/centerformer_backbone.py @@ -0,0 +1,980 @@ +# modify from https://github.com/TuSimple/centerformer/blob/master/det3d/models/necks/rpn_transformer.py # noqa + +from typing import List, Tuple + +import numpy as np +import torch +from mmcv.cnn import build_norm_layer +from mmdet.models.utils import multi_apply +from mmengine.logging import print_log +from mmengine.structures import InstanceData +from torch import Tensor, nn + +from mmdet3d.models.utils import draw_heatmap_gaussian, gaussian_radius +from mmdet3d.registry import MODELS +from mmdet3d.structures import center_to_corner_box2d +from .transformer import DeformableTransformerDecoder + + +class ChannelAttention(nn.Module): + + def __init__(self, in_planes, ratio=16): + super(ChannelAttention, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.max_pool = nn.AdaptiveMaxPool2d(1) + + self.fc = nn.Sequential( + nn.Conv2d(in_planes, in_planes // 16, 1, bias=False), + nn.ReLU(), + nn.Conv2d(in_planes // 16, in_planes, 1, bias=False), + ) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + avg_out = self.fc(self.avg_pool(x)) + max_out = self.fc(self.max_pool(x)) + out = avg_out + max_out + return self.sigmoid(out) * x + + +class SpatialAttention(nn.Module): + + def __init__(self, kernel_size=7): + 
super(SpatialAttention, self).__init__() + + self.conv1 = nn.Conv2d( + 2, 1, kernel_size, padding=kernel_size // 2, bias=False) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + avg_out = torch.mean(x, dim=1, keepdim=True) + max_out, _ = torch.max(x, dim=1, keepdim=True) + y = torch.cat([avg_out, max_out], dim=1) + y = self.conv1(y) + return self.sigmoid(y) * x + + +class MultiFrameSpatialAttention(nn.Module): + + def __init__(self, kernel_size=7): + super(MultiFrameSpatialAttention, self).__init__() + + self.conv1 = nn.Conv2d( + 2, 1, kernel_size, padding=kernel_size // 2, bias=False) + self.sigmoid = nn.Sigmoid() + + def forward(self, curr, prev): + avg_out = torch.mean(curr, dim=1, keepdim=True) + max_out, _ = torch.max(curr, dim=1, keepdim=True) + y = torch.cat([avg_out, max_out], dim=1) + y = self.conv1(y) + return self.sigmoid(y) * prev + + +class BaseDecoderRPN(nn.Module): + + def __init__( + self, + layer_nums, # [2,2,2] + ds_num_filters, # [128,256,64] + num_input_features, # 256 + transformer_config=None, + hm_head_layer=2, + corner_head_layer=2, + corner=False, + assign_label_window_size=1, + classes=3, + use_gt_training=False, + norm_cfg=None, + logger=None, + init_bias=-2.19, + score_threshold=0.1, + obj_num=500, + **kwargs): + super(BaseDecoderRPN, self).__init__() + self._layer_strides = [1, 2, -4] + self._num_filters = ds_num_filters + self._layer_nums = layer_nums + self._num_input_features = num_input_features + self.score_threshold = score_threshold + self.transformer_config = transformer_config + self.corner = corner + self.obj_num = obj_num + self.use_gt_training = use_gt_training + self.window_size = assign_label_window_size**2 + self.cross_attention_kernel_size = [3, 3, 3] + self.batch_id = None + + if norm_cfg is None: + norm_cfg = dict(type='BN', eps=1e-3, momentum=0.01) + self._norm_cfg = norm_cfg + + assert len(self._layer_strides) == len(self._layer_nums) + assert len(self._num_filters) == len(self._layer_nums) + assert self.transformer_config is not None + + in_filters = [ + self._num_input_features, + self._num_filters[0], + self._num_filters[1], + ] + blocks = [] + + for i, layer_num in enumerate(self._layer_nums): + block, num_out_filters = self._make_layer( + in_filters[i], + self._num_filters[i], + layer_num, + stride=self._layer_strides[i], + ) + blocks.append(block) + self.blocks = nn.ModuleList(blocks) + self.up = nn.Sequential( + nn.ConvTranspose2d( + self._num_filters[0], + self._num_filters[2], + 2, + stride=2, + bias=False), + build_norm_layer(self._norm_cfg, self._num_filters[2])[1], + nn.ReLU()) + # heatmap prediction + hm_head = [] + for i in range(hm_head_layer - 1): + hm_head.append( + nn.Conv2d( + self._num_filters[-1] * 2, + 64, + kernel_size=3, + stride=1, + padding=1, + bias=True, + )) + hm_head.append(build_norm_layer(self._norm_cfg, 64)[1]) + hm_head.append(nn.ReLU()) + + hm_head.append( + nn.Conv2d( + 64, classes, kernel_size=3, stride=1, padding=1, bias=True)) + hm_head[-1].bias.data.fill_(init_bias) + self.hm_head = nn.Sequential(*hm_head) + + if self.corner: + self.corner_head = [] + for i in range(corner_head_layer - 1): + self.corner_head.append( + nn.Conv2d( + self._num_filters[-1] * 2, + 64, + kernel_size=3, + stride=1, + padding=1, + bias=True, + )) + self.corner_head.append( + build_norm_layer(self._norm_cfg, 64)[1]) + self.corner_head.append(nn.ReLU()) + + self.corner_head.append( + nn.Conv2d( + 64, 1, kernel_size=3, stride=1, padding=1, bias=True)) + self.corner_head[-1].bias.data.fill_(init_bias) + self.corner_head = 
nn.Sequential(*self.corner_head) + + def _make_layer(self, inplanes, planes, num_blocks, stride=1): + + if stride > 0: + block = [ + nn.ZeroPad2d(1), + nn.Conv2d(inplanes, planes, 3, stride=stride, bias=False), + build_norm_layer(self._norm_cfg, planes)[1], + nn.ReLU(), + ] + else: + block = [ + nn.ConvTranspose2d( + inplanes, planes, -stride, stride=-stride, bias=False), + build_norm_layer(self._norm_cfg, planes)[1], + nn.ReLU(), + ] + + for j in range(num_blocks): + block.append(nn.Conv2d(planes, planes, 3, padding=1, bias=False)) + block.append(build_norm_layer(self._norm_cfg, planes)[1], ) + block.append(nn.ReLU()) + + block.append(ChannelAttention(planes)) + block.append(SpatialAttention()) + block = nn.Sequential(*block) + + return block, planes + + def forward(self, x, example=None): + pass + + def get_multi_scale_feature(self, center_pos, feats): + """ + Args: + center_pos: center coor at the lowest scale feature map [B 500 2] + feats: multi scale BEV feature 3*[B C H W] + Returns: + neighbor_feat: [B 500 K C] + neighbor_pos: [B 500 K 2] + """ + kernel_size = self.cross_attention_kernel_size + batch, num_cls, H, W = feats[0].size() + + center_num = center_pos.shape[1] + + relative_pos_list = [] + neighbor_feat_list = [] + for i, k in enumerate(kernel_size): + neighbor_coords = torch.arange(-(k // 2), (k // 2) + 1) + neighbor_coords = torch.flatten( + torch.stack( + torch.meshgrid([neighbor_coords, neighbor_coords]), dim=0), + 1, + ) # [2, k] + neighbor_coords = (neighbor_coords.permute( + 1, + 0).contiguous().to(center_pos)) # relative coordinate [k, 2] + neighbor_coords = (center_pos[:, :, None, :] // (2**i) + + neighbor_coords[None, None, :, :] + ) # coordinates [B, 500, k, 2] + neighbor_coords = torch.clamp( + neighbor_coords, min=0, + max=H // (2**i) - 1) # prevent out of bound + feat_id = (neighbor_coords[:, :, :, 1] * (W // (2**i)) + + neighbor_coords[:, :, :, 0]) # pixel id [B, 500, k] + feat_id = feat_id.reshape(batch, -1) # pixel id [B, 500*k] + selected_feat = ( + feats[i].reshape(batch, num_cls, (H * W) // (4**i)).permute( + 0, 2, 1).contiguous()[self.batch_id.repeat(1, k**2), + feat_id]) # B, 500*k, C + neighbor_feat_list.append( + selected_feat.reshape(batch, center_num, -1, + num_cls)) # B, 500, k, C + relative_pos_list.append(neighbor_coords * (2**i)) # B, 500, k, 2 + + neighbor_pos = torch.cat(relative_pos_list, dim=2) # B, 500, K, 2/3 + neighbor_feats = torch.cat(neighbor_feat_list, dim=2) # B, 500, K, C + return neighbor_feats, neighbor_pos + + def get_multi_scale_feature_multiframe(self, center_pos, feats, timeframe): + """ + Args: + center_pos: center coor at the lowest scale feature map [B 500 2] + feats: multi scale BEV feature (3+k)*[B C H W] + timeframe: timeframe [B,k] + Returns: + neighbor_feat: [B 500 K C] + neighbor_pos: [B 500 K 2] + neighbor_time: [B 500 K 1] + """ + kernel_size = self.cross_attention_kernel_size + batch, num_cls, H, W = feats[0].size() + + center_num = center_pos.shape[1] + + relative_pos_list = [] + neighbor_feat_list = [] + timeframe_list = [] + for i, k in enumerate(kernel_size): + neighbor_coords = torch.arange(-(k // 2), (k // 2) + 1) + neighbor_coords = torch.flatten( + torch.stack( + torch.meshgrid([neighbor_coords, neighbor_coords]), dim=0), + 1, + ) # [2, k] + neighbor_coords = (neighbor_coords.permute( + 1, + 0).contiguous().to(center_pos)) # relative coordinate [k, 2] + neighbor_coords = (center_pos[:, :, None, :] // (2**i) + + neighbor_coords[None, None, :, :] + ) # coordinates [B, 500, k, 2] + neighbor_coords = 
torch.clamp( + neighbor_coords, min=0, + max=H // (2**i) - 1) # prevent out of bound + feat_id = (neighbor_coords[:, :, :, 1] * (W // (2**i)) + + neighbor_coords[:, :, :, 0]) # pixel id [B, 500, k] + feat_id = feat_id.reshape(batch, -1) # pixel id [B, 500*k] + selected_feat = ( + feats[i].reshape(batch, num_cls, (H * W) // (4**i)).permute( + 0, 2, 1).contiguous()[self.batch_id.repeat(1, k**2), + feat_id]) # B, 500*k, C + neighbor_feat_list.append( + selected_feat.reshape(batch, center_num, -1, + num_cls)) # B, 500, k, C + relative_pos_list.append(neighbor_coords * (2**i)) # B, 500, k, 2 + timeframe_list.append( + torch.full_like(neighbor_coords[:, :, :, 0:1], 0)) # B, 500, k + if i == 0: + # add previous frame feature + for frame_num in range(feats[-1].shape[1]): + selected_feat = (feats[-1][:, frame_num, :, :, :].reshape( + batch, num_cls, (H * W) // (4**i)).permute( + 0, 2, + 1).contiguous()[self.batch_id.repeat(1, k**2), + feat_id]) # B, 500*k, C + neighbor_feat_list.append( + selected_feat.reshape(batch, center_num, -1, num_cls)) + relative_pos_list.append(neighbor_coords * (2**i)) + time = timeframe[:, frame_num + 1].to(selected_feat) # B + timeframe_list.append( + time[:, None, None, None] * torch.full_like( + neighbor_coords[:, :, :, 0:1], 1)) # B, 500, k + + neighbor_pos = torch.cat(relative_pos_list, dim=2) # B, 500, K, 2/3 + neighbor_feats = torch.cat(neighbor_feat_list, dim=2) # B, 500, K, C + neighbor_time = torch.cat(timeframe_list, dim=2) # B, 500, K, 1 + + return neighbor_feats, neighbor_pos, neighbor_time + + +@MODELS.register_module() +class DeformableDecoderRPN(BaseDecoderRPN): + """The original implementation of the CenterFormer modules. + + It fuses the backbone, neck and heatmap head into one module. The backbone + is `SECOND` with attention and the neck is `SECONDFPN` with attention. + + TODO: split this module into backbone, neck and head. 
+ """ + + def __init__(self, + layer_nums, + ds_num_filters, + num_input_features, + tasks=dict(), + transformer_config=None, + hm_head_layer=2, + corner_head_layer=2, + corner=False, + parametric_embedding=False, + assign_label_window_size=1, + classes=3, + use_gt_training=False, + norm_cfg=None, + logger=None, + init_bias=-2.19, + score_threshold=0.1, + obj_num=500, + train_cfg=None, + test_cfg=None, + **kwargs): + super(DeformableDecoderRPN, self).__init__( + layer_nums, + ds_num_filters, + num_input_features, + transformer_config, + hm_head_layer, + corner_head_layer, + corner, + assign_label_window_size, + classes, + use_gt_training, + norm_cfg, + logger, + init_bias, + score_threshold, + obj_num, + ) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.tasks = tasks + self.class_names = [t['class_names'] for t in tasks] + + self.transformer_decoder = DeformableTransformerDecoder( + self._num_filters[-1] * 2, + depth=transformer_config.depth, + n_heads=transformer_config.n_heads, + dim_single_head=transformer_config.dim_single_head, + dim_ffn=transformer_config.dim_ffn, + dropout=transformer_config.dropout, + out_attention=transformer_config.out_attn, + n_points=transformer_config.get('n_points', 9), + ) + self.pos_embedding_type = transformer_config.get( + 'pos_embedding_type', 'linear') + if self.pos_embedding_type == 'linear': + self.pos_embedding = nn.Linear(2, self._num_filters[-1] * 2) + else: + raise NotImplementedError() + self.parametric_embedding = parametric_embedding + if self.parametric_embedding: + self.query_embed = nn.Embedding(self.obj_num, + self._num_filters[-1] * 2) + nn.init.uniform_(self.query_embed.weight, -1.0, 1.0) + + print_log('Finish RPN_transformer_deformable Initialization', + 'current') + + def _sigmoid(self, x): + y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4) + return y + + def forward(self, x, batch_data_samples): + + batch_gt_instance_3d = [] + for data_sample in batch_data_samples: + batch_gt_instance_3d.append(data_sample.gt_instances_3d) + + # FPN + x = self.blocks[0](x) + x_down = self.blocks[1](x) + x_up = torch.cat([self.blocks[2](x_down), self.up(x)], dim=1) + + # heatmap head + hm = self.hm_head(x_up) + + if self.corner and self.corner_head.training: + corner_hm = self.corner_head(x_up) + corner_hm = self._sigmoid(corner_hm) + + # find top K center location + hm = self._sigmoid(hm) + batch, num_cls, H, W = hm.size() + + scores, labels = torch.max( + hm.reshape(batch, num_cls, H * W), dim=1) # b,H*W + self.batch_id = torch.from_numpy(np.indices( + (batch, self.obj_num))[0]).to(labels) + + if self.training: + heatmaps, anno_boxes, gt_inds, gt_masks, corner_heatmaps, cat_labels = self.get_targets( # noqa: E501 + batch_gt_instance_3d) + batch_targets = dict( + ind=gt_inds, + mask=gt_masks, + hm=heatmaps, + anno_box=anno_boxes, + corners=corner_heatmaps, + cat=cat_labels) + inds = gt_inds[0][:, (self.window_size // 2)::self.window_size] + masks = gt_masks[0][:, (self.window_size // 2)::self.window_size] + batch_id_gt = torch.from_numpy( + np.indices((batch, inds.shape[1]))[0]).to(labels) + scores[batch_id_gt, inds] = scores[batch_id_gt, inds] + masks + order = scores.sort(1, descending=True)[1] + order = order[:, :self.obj_num] + scores[batch_id_gt, inds] = scores[batch_id_gt, inds] - masks + else: + order = scores.sort(1, descending=True)[1] + order = order[:, :self.obj_num] + batch_targets = None + + scores = torch.gather(scores, 1, order) + labels = torch.gather(labels, 1, order) + mask = scores > self.score_threshold + + 
ct_feat = x_up.reshape(batch, -1, H * W).transpose(2, 1).contiguous() + ct_feat = ct_feat[self.batch_id, order] # B, 500, C + + # create position embedding for each center + y_coor = order // W + x_coor = order - y_coor * W + y_coor, x_coor = y_coor.to(ct_feat), x_coor.to(ct_feat) + y_coor, x_coor = y_coor / H, x_coor / W + pos_features = torch.stack([x_coor, y_coor], dim=2) + + if self.parametric_embedding: + ct_feat = self.query_embed.weight + ct_feat = ct_feat.unsqueeze(0).expand(batch, -1, -1) + + # run transformer + src = torch.cat( + ( + x_up.reshape(batch, -1, H * W).transpose(2, 1).contiguous(), + x.reshape(batch, -1, + (H * W) // 4).transpose(2, 1).contiguous(), + x_down.reshape(batch, -1, + (H * W) // 16).transpose(2, 1).contiguous(), + ), + dim=1, + ) # B ,sum(H*W), C + spatial_shapes = torch.as_tensor( + [(H, W), (H // 2, W // 2), (H // 4, W // 4)], + dtype=torch.long, + device=ct_feat.device, + ) + level_start_index = torch.cat(( + spatial_shapes.new_zeros((1, )), + spatial_shapes.prod(1).cumsum(0)[:-1], + )) + + transformer_out = self.transformer_decoder( + ct_feat, + self.pos_embedding, + src, + spatial_shapes, + level_start_index, + center_pos=pos_features, + ) # (B,N,C) + + ct_feat = (transformer_out['ct_feat'].transpose(2, 1).contiguous() + ) # B, C, 500 + + out_dict = { + 'hm': hm, + 'scores': scores, + 'labels': labels, + 'order': order, + 'ct_feat': ct_feat, + 'mask': mask, + } + if 'out_attention' in transformer_out: + out_dict.update( + {'out_attention': transformer_out['out_attention']}) + if self.corner and self.corner_head.training: + out_dict.update({'corner_hm': corner_hm}) + + return out_dict, batch_targets + + def get_targets( + self, + batch_gt_instances_3d: List[InstanceData], + ) -> Tuple[List[Tensor]]: + """Generate targets. How each output is transformed: Each nested list + is transposed so that all same-index elements in each sub-list (1, ..., + N) become the new sub-lists. + + [ [a0, a1, a2, ... ], [b0, b1, b2, ... ], ... ] + ==> [ [a0, b0, ... ], [a1, b1, ... ], [a2, b2, ... ] ] + The new transposed nested list is converted into a list of N + tensors generated by concatenating tensors in the new sub-lists. + [ tensor0, tensor1, tensor2, ... ] + Args: + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instances. It usually includes ``bboxes_3d`` and + ``labels_3d`` attributes. + Returns: + Returns: + tuple[list[torch.Tensor]]: Tuple of target including + the following results in order. + - list[torch.Tensor]: Heatmap scores. + - list[torch.Tensor]: Ground truth boxes. + - list[torch.Tensor]: Indexes indicating the + position of the valid boxes. + - list[torch.Tensor]: Masks indicating which + boxes are valid. + - list[torch.Tensor]: catagrate labels. 
+ """ + heatmaps, anno_boxes, inds, masks, corner_heatmaps, cat_labels = multi_apply( # noqa: E501 + self.get_targets_single, batch_gt_instances_3d) + # Transpose heatmaps + heatmaps = list(map(list, zip(*heatmaps))) + heatmaps = [torch.stack(hms_) for hms_ in heatmaps] + # Transpose heatmaps + corner_heatmaps = list(map(list, zip(*corner_heatmaps))) + corner_heatmaps = [torch.stack(hms_) for hms_ in corner_heatmaps] + # Transpose anno_boxes + anno_boxes = list(map(list, zip(*anno_boxes))) + anno_boxes = [torch.stack(anno_boxes_) for anno_boxes_ in anno_boxes] + # Transpose inds + inds = list(map(list, zip(*inds))) + inds = [torch.stack(inds_) for inds_ in inds] + # Transpose inds + masks = list(map(list, zip(*masks))) + masks = [torch.stack(masks_) for masks_ in masks] + # Transpose cat_labels + cat_labels = list(map(list, zip(*cat_labels))) + cat_labels = [torch.stack(labels_) for labels_ in cat_labels] + return heatmaps, anno_boxes, inds, masks, corner_heatmaps, cat_labels + + def get_targets_single(self, + gt_instances_3d: InstanceData) -> Tuple[Tensor]: + """Generate training targets for a single sample. + Args: + gt_instances_3d (:obj:`InstanceData`): Gt_instances of + single data sample. It usually includes + ``bboxes_3d`` and ``labels_3d`` attributes. + Returns: + tuple[list[torch.Tensor]]: Tuple of target including + the following results in order. + - list[torch.Tensor]: Heatmap scores. + - list[torch.Tensor]: Ground truth boxes. + - list[torch.Tensor]: Indexes indicating the position + of the valid boxes. + - list[torch.Tensor]: Masks indicating which boxes + are valid. + - list[torch.Tensor]: catagrate labels. + """ + gt_labels_3d = gt_instances_3d.labels_3d + gt_bboxes_3d = gt_instances_3d.bboxes_3d + device = gt_labels_3d.device + gt_bboxes_3d = torch.cat( + (gt_bboxes_3d.gravity_center, gt_bboxes_3d.tensor[:, 3:]), + dim=1).to(device) + max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg'] + grid_size = torch.tensor(self.train_cfg['grid_size']) + pc_range = torch.tensor(self.train_cfg['point_cloud_range']) + voxel_size = torch.tensor(self.train_cfg['voxel_size']) + + feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor'] + + # reorganize the gt_dict by tasks + task_masks = [] + flag = 0 + for class_name in self.class_names: + task_masks.append([ + torch.where(gt_labels_3d == class_name.index(i) + flag) + for i in class_name + ]) + flag += len(class_name) + + task_boxes = [] + task_classes = [] + flag2 = 0 + for idx, mask in enumerate(task_masks): + task_box = [] + task_class = [] + for m in mask: + task_box.append(gt_bboxes_3d[m]) + # 0 is background for each task, so we need to add 1 here. 
+ task_class.append(gt_labels_3d[m] + 1 - flag2) + task_boxes.append(torch.cat(task_box, axis=0).to(device)) + task_classes.append(torch.cat(task_class).long().to(device)) + flag2 += len(mask) + draw_gaussian = draw_heatmap_gaussian + heatmaps, anno_boxes, inds, masks, corner_heatmaps, cat_labels = [], [], [], [], [], [] # noqa: E501 + + for idx in range(len(self.tasks)): + heatmap = gt_bboxes_3d.new_zeros( + (len(self.class_names[idx]), feature_map_size[1], + feature_map_size[0])) + corner_heatmap = torch.zeros( + (1, feature_map_size[1], feature_map_size[0]), + dtype=torch.float32, + device=device) + + anno_box = gt_bboxes_3d.new_zeros((max_objs, 8), + dtype=torch.float32) + + ind = gt_labels_3d.new_zeros((max_objs), dtype=torch.int64) + mask = gt_bboxes_3d.new_zeros((max_objs), dtype=torch.uint8) + cat_label = gt_bboxes_3d.new_zeros((max_objs), dtype=torch.int64) + + num_objs = min(task_boxes[idx].shape[0], max_objs) + + for k in range(num_objs): + cls_id = task_classes[idx][k] - 1 + + # gt boxes [xyzlwhr] + length = task_boxes[idx][k][3] + width = task_boxes[idx][k][4] + length = length / voxel_size[0] / self.train_cfg[ + 'out_size_factor'] + width = width / voxel_size[1] / self.train_cfg[ + 'out_size_factor'] + + if width > 0 and length > 0: + radius = gaussian_radius( + (width, length), + min_overlap=self.train_cfg['gaussian_overlap']) + radius = max(self.train_cfg['min_radius'], int(radius)) + + # be really careful for the coordinate system of + # your box annotation. + x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][ + 1], task_boxes[idx][k][2] + + coor_x = ( + x - pc_range[0] + ) / voxel_size[0] / self.train_cfg['out_size_factor'] + coor_y = ( + y - pc_range[1] + ) / voxel_size[1] / self.train_cfg['out_size_factor'] + + center = torch.tensor([coor_x, coor_y], + dtype=torch.float32, + device=device) + center_int = center.to(torch.int32) + + # throw out not in range objects to avoid out of array + # area when creating the heatmap + if not (0 <= center_int[0] < feature_map_size[0] + and 0 <= center_int[1] < feature_map_size[1]): + continue + + draw_gaussian(heatmap[cls_id], center_int, radius) + + radius = radius // 2 + # # draw four corner and center TODO: use torch + rot = task_boxes[idx][k][6] + corner_keypoints = center_to_corner_box2d( + center.unsqueeze(0).cpu().numpy(), + torch.tensor([[length, width]], + dtype=torch.float32).numpy(), + angles=rot, + origin=0.5) + corner_keypoints = torch.from_numpy(corner_keypoints).to( + center) + + draw_gaussian(corner_heatmap[0], center_int, radius) + draw_gaussian( + corner_heatmap[0], + (corner_keypoints[0, 0] + corner_keypoints[0, 1]) / 2, + radius) + draw_gaussian( + corner_heatmap[0], + (corner_keypoints[0, 2] + corner_keypoints[0, 3]) / 2, + radius) + draw_gaussian( + corner_heatmap[0], + (corner_keypoints[0, 0] + corner_keypoints[0, 3]) / 2, + radius) + draw_gaussian( + corner_heatmap[0], + (corner_keypoints[0, 1] + corner_keypoints[0, 2]) / 2, + radius) + + new_idx = k + x, y = center_int[0], center_int[1] + + assert (y * feature_map_size[0] + x < + feature_map_size[0] * feature_map_size[1]) + + ind[new_idx] = y * feature_map_size[0] + x + mask[new_idx] = 1 + cat_label[new_idx] = cls_id + # TODO: support other outdoor dataset + # vx, vy = task_boxes[idx][k][7:] + rot = task_boxes[idx][k][6] + box_dim = task_boxes[idx][k][3:6] + box_dim = box_dim.log() + anno_box[new_idx] = torch.cat([ + center - torch.tensor([x, y], device=device), + z.unsqueeze(0), box_dim, + torch.sin(rot).unsqueeze(0), + torch.cos(rot).unsqueeze(0) + ]) + + 
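As a small worked example (not from the patch) of the centre-encoding arithmetic above: a ground-truth centre in metres is mapped to feature-map pixels using the voxel size, point-cloud range and output stride that this project's Waymo config uses, and the integer pixel is flattened into the index stored in `ind`. All concrete numbers below are illustrative.

```python
import torch

voxel_size = (0.1, 0.1)        # metres per voxel (x, y)
pc_range = (-75.2, -75.2)      # lower x/y bound of the point-cloud range
out_size_factor = 4            # stride of the BEV feature map
feature_map_size = (376, 376)  # 1504-voxel grid / out_size_factor

x, y = 10.3, -20.0             # ground-truth centre in metres (LiDAR frame)
coor_x = (x - pc_range[0]) / voxel_size[0] / out_size_factor   # 213.75
coor_y = (y - pc_range[1]) / voxel_size[1] / out_size_factor   # 138.0
center = torch.tensor([coor_x, coor_y])
center_int = center.to(torch.int32)                            # (213, 138)

# flattened position stored in `ind`, plus the sub-pixel offset kept in the
# first two channels of `anno_box`
ind = int(center_int[1]) * feature_map_size[0] + int(center_int[0])  # 52101
offset = center - center_int.float()                                 # (0.75, 0.0)
```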
heatmaps.append(heatmap) + corner_heatmaps.append(corner_heatmap) + anno_boxes.append(anno_box) + masks.append(mask) + inds.append(ind) + cat_labels.append(cat_label) + return heatmaps, anno_boxes, inds, masks, corner_heatmaps, cat_labels + + +@MODELS.register_module() +class MultiFrameDeformableDecoderRPN(BaseDecoderRPN): + """The original implementation of CenterFormer modules. + + The difference between this module and + `DeformableDecoderRPN` is that this module uses information from multi + frames. + + TODO: split this module into backbone、neck and head. + """ + + def __init__( + self, + layer_nums, # [2,2,2] + ds_num_filters, # [128,256,64] + num_input_features, # 256 + transformer_config=None, + hm_head_layer=2, + corner_head_layer=2, + corner=False, + parametric_embedding=False, + assign_label_window_size=1, + classes=3, + use_gt_training=False, + norm_cfg=None, + logger=None, + init_bias=-2.19, + score_threshold=0.1, + obj_num=500, + frame=1, + **kwargs): + super(MultiFrameDeformableDecoderRPN, self).__init__( + layer_nums, + ds_num_filters, + num_input_features, + transformer_config, + hm_head_layer, + corner_head_layer, + corner, + assign_label_window_size, + classes, + use_gt_training, + norm_cfg, + logger, + init_bias, + score_threshold, + obj_num, + ) + self.frame = frame + + self.out = nn.Sequential( + nn.Conv2d( + self._num_filters[0] * frame, + self._num_filters[0], + 3, + padding=1, + bias=False, + ), + build_norm_layer(self._norm_cfg, self._num_filters[0])[1], + nn.ReLU(), + ) + self.mtf_attention = MultiFrameSpatialAttention() + self.time_embedding = nn.Linear(1, self._num_filters[0]) + + self.transformer_decoder = DeformableTransformerDecoder( + self._num_filters[-1] * 2, + depth=transformer_config.depth, + n_heads=transformer_config.n_heads, + n_levels=2 + self.frame, + dim_single_head=transformer_config.dim_single_head, + dim_ffn=transformer_config.dim_ffn, + dropout=transformer_config.dropout, + out_attention=transformer_config.out_attn, + n_points=transformer_config.get('n_points', 9), + ) + self.pos_embedding_type = transformer_config.get( + 'pos_embedding_type', 'linear') + if self.pos_embedding_type == 'linear': + self.pos_embedding = nn.Linear(2, self._num_filters[-1] * 2) + else: + raise NotImplementedError() + self.parametric_embedding = parametric_embedding + if self.parametric_embedding: + self.query_embed = nn.Embedding(self.obj_num, + self._num_filters[-1] * 2) + nn.init.uniform_(self.query_embed.weight, -1.0, 1.0) + + print_log('Finish RPN_transformer_deformable Initialization', + 'current') + + def forward(self, x, example=None): + + # FPN + x = self.blocks[0](x) + x_down = self.blocks[1](x) + x_up = torch.cat([self.blocks[2](x_down), self.up(x)], dim=1) + + # take out the BEV feature on current frame + x = torch.split(x, self.frame) + x_up = torch.split(x_up, self.frame) + x_down = torch.split(x_down, self.frame) + x_prev = torch.stack([t[1:] for t in x_up], dim=0) # B,K,C,H,W + x = torch.stack([t[0] for t in x], dim=0) + x_down = torch.stack([t[0] for t in x_down], dim=0) + + x_up = torch.stack([t[0] for t in x_up], dim=0) # B,C,H,W + # use spatial attention in current frame on previous feature + x_prev_cat = self.mtf_attention( + x_up, + x_prev.reshape(x_up.shape[0], -1, x_up.shape[2], + x_up.shape[3])) # B,K*C,H,W + # time embedding + x_up_fuse = torch.cat((x_up, x_prev_cat), dim=1) + self.time_embedding( + example['times'][:, :, None].to(x_up)).reshape( + x_up.shape[0], -1, 1, 1) + # fuse mtf feature + x_up_fuse = self.out(x_up_fuse) + + # heatmap 
head + hm = self.hm_head(x_up_fuse) + + if self.corner and self.corner_head.training: + corner_hm = self.corner_head(x_up_fuse) + corner_hm = torch.sigmoid(corner_hm) + + # find top K center location + hm = torch.sigmoid(hm) + batch, num_cls, H, W = hm.size() + + scores, labels = torch.max( + hm.reshape(batch, num_cls, H * W), dim=1) # b,H*W + self.batch_id = torch.from_numpy(np.indices( + (batch, self.obj_num))[0]).to(labels) + + if self.use_gt_training and self.hm_head.training: + gt_inds = example['ind'][0][:, (self.window_size // + 2)::self.window_size] + gt_masks = example['mask'][0][:, (self.window_size // + 2)::self.window_size] + batch_id_gt = torch.from_numpy( + np.indices((batch, gt_inds.shape[1]))[0]).to(labels) + scores[batch_id_gt, + gt_inds] = scores[batch_id_gt, gt_inds] + gt_masks + order = scores.sort(1, descending=True)[1] + order = order[:, :self.obj_num] + scores[batch_id_gt, + gt_inds] = scores[batch_id_gt, gt_inds] - gt_masks + else: + order = scores.sort(1, descending=True)[1] + order = order[:, :self.obj_num] + + scores = torch.gather(scores, 1, order) + labels = torch.gather(labels, 1, order) + mask = scores > self.score_threshold + + ct_feat = (x_up.reshape(batch, -1, + H * W).transpose(2, + 1).contiguous()[self.batch_id, + order] + ) # B, 500, C + + # create position embedding for each center + y_coor = order // W + x_coor = order - y_coor * W + y_coor, x_coor = y_coor.to(ct_feat), x_coor.to(ct_feat) + y_coor, x_coor = y_coor / H, x_coor / W + pos_features = torch.stack([x_coor, y_coor], dim=2) + + if self.parametric_embedding: + ct_feat = self.query_embed.weight + ct_feat = ct_feat.unsqueeze(0).expand(batch, -1, -1) + + # run transformer + src_list = [ + x_up.reshape(batch, -1, H * W).transpose(2, 1).contiguous(), + x.reshape(batch, -1, (H * W) // 4).transpose(2, 1).contiguous(), + x_down.reshape(batch, -1, (H * W) // 16).transpose(2, + 1).contiguous(), + ] + for frame in range(x_prev.shape[1]): + src_list.append(x_prev[:, frame].reshape(batch, + -1, (H * W)).transpose( + 2, 1).contiguous()) + src = torch.cat(src_list, dim=1) # B ,sum(H*W), C + spatial_list = [(H, W), (H // 2, W // 2), (H // 4, W // 4)] + spatial_list += [(H, W) for frame in range(x_prev.shape[1])] + spatial_shapes = torch.as_tensor( + spatial_list, dtype=torch.long, device=ct_feat.device) + level_start_index = torch.cat(( + spatial_shapes.new_zeros((1, )), + spatial_shapes.prod(1).cumsum(0)[:-1], + )) + + transformer_out = self.transformer_decoder( + ct_feat, + self.pos_embedding, + src, + spatial_shapes, + level_start_index, + center_pos=pos_features, + ) # (B,N,C) + + ct_feat = (transformer_out['ct_feat'].transpose(2, 1).contiguous() + ) # B, C, 500 + + out_dict = { + 'hm': hm, + 'scores': scores, + 'labels': labels, + 'order': order, + 'ct_feat': ct_feat, + 'mask': mask, + } + if 'out_attention' in transformer_out: + out_dict.update( + {'out_attention': transformer_out['out_attention']}) + if self.corner and self.corner_head.training: + out_dict.update({'corner_hm': corner_hm}) + + return out_dict diff --git a/projects/CenterFormer/centerformer/centerformer_head.py b/projects/CenterFormer/centerformer/centerformer_head.py new file mode 100755 index 0000000..f1e5cbe --- /dev/null +++ b/projects/CenterFormer/centerformer/centerformer_head.py @@ -0,0 +1,582 @@ +# ------------------------------------------------------------------------------ +# Portions of this code are from +# det3d (https://github.com/poodarchu/Det3D/tree/56402d4761a5b73acd23080f537599b0888cce07) # noqa +# Copyright (c) 2019 
朱本金 +# Licensed under the MIT License +# ------------------------------------------------------------------------------ + +import copy +import logging + +import numpy as np +import torch +from mmcv.cnn import build_norm_layer +from mmcv.ops import boxes_iou3d +from mmengine.logging import print_log +from mmengine.model import kaiming_init +from mmengine.structures import InstanceData +from torch import nn + +from mmdet3d.models.layers import circle_nms, nms_bev +from mmdet3d.registry import MODELS +from .bbox_ops import nms_iou3d +from .losses import FastFocalLoss + + +class SepHead(nn.Module): + """TODO: This module is the original implementation in CenterFormer and it + has few differences with ``SeperateHead`` in `mmdet3d` but refactor this + module will lower the performance a little. + """ + + def __init__( + self, + in_channels, + heads, + head_conv=64, + final_kernel=1, + bn=False, + init_bias=-2.19, + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + **kwargs, + ): + super(SepHead, self).__init__(**kwargs) + + self.heads = heads + for head in self.heads: + classes, num_conv = self.heads[head] + + fc = [] + for i in range(num_conv - 1): + fc.append( + nn.Conv1d( + in_channels, + head_conv, + kernel_size=final_kernel, + stride=1, + padding=final_kernel // 2, + bias=True, + )) + if bn: + fc.append(build_norm_layer(norm_cfg, head_conv)[1]) + fc.append(nn.ReLU()) + + fc.append( + nn.Conv1d( + head_conv, + classes, + kernel_size=final_kernel, + stride=1, + padding=final_kernel // 2, + bias=True, + )) + + if 'hm' in head: + fc[-1].bias.data.fill_(init_bias) + else: + for m in fc: + if isinstance(m, nn.Conv1d): + kaiming_init(m) + + fc = nn.Sequential(*fc) + self.__setattr__(head, fc) + + def forward(self, x, y): + for head in self.heads: + x[head] = self.__getattr__(head)(y) + + return x + + +@MODELS.register_module() +class CenterFormerBboxHead(nn.Module): + + def __init__(self, + in_channels, + tasks, + weight=0.25, + iou_weight=1, + corner_weight=1, + code_weights=[], + common_heads=dict(), + logger=None, + init_bias=-2.19, + share_conv_channel=64, + assign_label_window_size=1, + iou_loss=False, + corner_loss=False, + iou_factor=[1, 1, 4], + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + bbox_code_size=7, + test_cfg=None, + **kawrgs): + super(CenterFormerBboxHead, self).__init__() + + num_classes = [len(t['class_names']) for t in tasks] + self.class_names = [t['class_names'] for t in tasks] + self.code_weights = code_weights + self.bbox_code_size = 7 + self.weight = weight # weight between hm loss and loc loss + self.iou_weight = iou_weight + self.corner_weight = corner_weight + self.iou_factor = iou_factor + + self.in_channels = in_channels + self.num_classes = num_classes + self.test_cfg = test_cfg + + self.crit = FastFocalLoss(assign_label_window_size) + self.crit_reg = torch.nn.L1Loss(reduction='none') + self.use_iou_loss = iou_loss + if self.use_iou_loss: + self.crit_iou = torch.nn.SmoothL1Loss(reduction='none') + self.corner_loss = corner_loss + if self.corner_loss: + self.corner_crit = torch.nn.MSELoss(reduction='none') + + self.box_n_dim = 9 if 'vel' in common_heads else 7 + self.use_direction_classifier = False + + if not logger: + logger = logging.getLogger('CenterFormerBboxHead') + self.logger = logger + + logger.info(f'num_classes: {num_classes}') + + # a shared convolution + self.shared_conv = nn.Sequential( + nn.Conv1d( + in_channels, share_conv_channel, kernel_size=1, bias=True), + build_norm_layer(norm_cfg, share_conv_channel)[1], + nn.ReLU(inplace=True), + ) 
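A usage sketch of `SepHead` as defined above, assuming the CenterFormer project package is importable from an mmdetection3d dev install (illustrative only): each key of `common_heads` becomes a small Conv1d branch applied to the per-proposal features, and the outputs are written back into the dict that is passed in.

```python
import torch

# assumes `projects/` is on the import path, e.g. run from the repo root
from projects.CenterFormer.centerformer.centerformer_head import SepHead

common_heads = {'reg': (2, 2), 'height': (1, 2), 'dim': (3, 2),
                'rot': (2, 2), 'iou': (1, 2)}  # (output_channels, num_conv)
head = SepHead(
    in_channels=64,
    heads=common_heads,
    head_conv=64,
    bn=True,
    norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))

proposal_feat = torch.randn(2, 64, 500)  # (B, C, num_proposals) after shared conv
ret = head(dict(), proposal_feat)        # results are written into the dict
assert ret['reg'].shape == (2, 2, 500) and ret['dim'].shape == (2, 3, 500)
```

Because the proposals are a fixed-size set of centre queries rather than a dense map, Conv1d with kernel size 1 over the proposal axis is effectively a per-proposal MLP.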
+ + self.tasks = nn.ModuleList() + print_log(f'Use HM Bias: {init_bias}', 'current') + + for num_cls in num_classes: + heads = copy.deepcopy(common_heads) + self.tasks.append( + SepHead( + share_conv_channel, + heads, + bn=True, + init_bias=init_bias, + final_kernel=1, + norm_cfg=norm_cfg)) + + logger.info('Finish CenterHeadIoU Initialization') + + def forward(self, x, *kwargs): + ret_dicts = [] + + y = self.shared_conv(x['ct_feat'].float()) + + for task in self.tasks: + ret_dicts.append(task(x, y)) + + return ret_dicts + + def _sigmoid(self, x): + y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4) + return y + + def loss(self, preds_dicts, example, **kwargs): + losses = {} + for task_id, preds_dict in enumerate(preds_dicts): + # heatmap focal loss + hm_loss = self.crit( + preds_dict['hm'], + example['hm'][task_id], + example['ind'][task_id], + example['mask'][task_id], + example['cat'][task_id], + ) + + target_box = example['anno_box'][task_id] + + if self.corner_loss: + corner_loss = self.corner_crit(preds_dict['corner_hm'], + example['corners'][task_id]) + corner_mask = (example['corners'][task_id] > 0).to(corner_loss) + corner_loss = (corner_loss * corner_mask).sum() / ( + corner_mask.sum() + 1e-4) + losses.update({ + f'{task_id}_corner_loss': + corner_loss * self.corner_weight + }) + + # reconstruct the anno_box from multiple reg heads + if 'vel' in preds_dict: + preds_dict['anno_box'] = torch.cat( + ( + preds_dict['reg'], + preds_dict['height'], + preds_dict['dim'], + preds_dict['vel'], + preds_dict['rot'], + ), + dim=1, + ) + else: + preds_dict['anno_box'] = torch.cat( + ( + preds_dict['reg'], + preds_dict['height'], + preds_dict['dim'], + preds_dict['rot'], + ), + dim=1, + ) + target_box = target_box[..., [0, 1, 2, 3, 4, 5, -2, + -1]] # remove vel target + + # Regression loss for dimension, offset, height, rotation + # get corresponding gt box # B, 500 + target_box, selected_mask, selected_cls = get_corresponding_box( + preds_dict['order'], + example['ind'][task_id], + example['mask'][task_id], + example['cat'][task_id], + target_box, + ) + mask = selected_mask.float().unsqueeze(2) + + weights = self.code_weights + + box_loss = self.crit_reg( + preds_dict['anno_box'].transpose(1, 2) * mask, + target_box * mask) + box_loss = box_loss / (mask.sum() + 1e-4) + box_loss = box_loss.transpose(2, 0).sum(dim=2).sum(dim=1) + + loc_loss = (box_loss * box_loss.new_tensor(weights)).sum() + + if self.use_iou_loss: + with torch.no_grad(): + preds_box = get_box( + preds_dict['anno_box'], + preds_dict['order'], + self.test_cfg, + preds_dict['hm'].shape[2], + preds_dict['hm'].shape[3], + ) + cur_gt = get_box_gt( + target_box, + preds_dict['order'], + self.test_cfg, + preds_dict['hm'].shape[2], + preds_dict['hm'].shape[3], + ) + + iou_targets = boxes_iou3d( + preds_box.reshape(-1, 7), cur_gt.reshape( + -1, 7))[range(preds_box.reshape(-1, 7).shape[0]), + range(cur_gt.reshape(-1, 7).shape[0])] + iou_targets[torch.isnan(iou_targets)] = 0 + iou_targets = 2 * iou_targets - 1 + iou_loss = self.crit_iou(preds_dict['iou'].reshape(-1), + iou_targets) * mask.reshape(-1) + iou_loss = iou_loss.sum() / (mask.sum() + 1e-4) + + losses.update( + {f'{task_id}_iou_loss': iou_loss * self.iou_weight}) + + losses.update({ + f'{task_id}_hm_loss': hm_loss, + f'{task_id}_loc_loss': loc_loss * self.weight + }) + + return losses + + def predict(self, preds_dicts, batch_input_metas, **kwargs): + """decode, nms, then return the detection result. 
+ + Additionally support double flip testing + """ + rets = [] + + post_center_range = self.test_cfg.post_center_limit_range + if len(post_center_range) > 0: + post_center_range = torch.tensor( + post_center_range, + dtype=preds_dicts[0]['scores'].dtype, + device=preds_dicts[0]['scores'].device, + ) + + for task_id, preds_dict in enumerate(preds_dicts): + # convert B C N to B N C + for key, val in preds_dict.items(): + if torch.is_tensor(preds_dict[key]): + if len(preds_dict[key].shape) == 3: + preds_dict[key] = val.permute(0, 2, 1).contiguous() + + batch_score = preds_dict['scores'] + batch_label = preds_dict['labels'] + batch_mask = preds_dict['mask'] + if self.use_iou_loss: + batch_iou = preds_dict['iou'].squeeze(2) + else: + batch_iou = None + + batch_dim = torch.exp(preds_dict['dim']) + + batch_rots = preds_dict['rot'][..., 0:1] + batch_rotc = preds_dict['rot'][..., 1:2] + + batch_reg = preds_dict['reg'] + batch_hei = preds_dict['height'] + batch_rot = torch.atan2(batch_rots, batch_rotc) + if self.use_iou_loss: + batch_iou = (batch_iou + 1) * 0.5 + batch_iou = torch.clamp(batch_iou, min=0.0, max=1.0) + + batch, _, H, W = preds_dict['hm'].size() + + ys, xs = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)]) + ys = ys.view(1, H, W).repeat(batch, 1, 1).to(batch_score) + xs = xs.view(1, H, W).repeat(batch, 1, 1).to(batch_score) + + obj_num = preds_dict['order'].shape[1] + batch_id = np.indices((batch, obj_num))[0] + batch_id = torch.from_numpy(batch_id).to(preds_dict['order']) + + xs = ( + xs.view(batch, -1, 1)[batch_id, preds_dict['order']] + + batch_reg[:, :, 0:1]) + ys = ( + ys.view(batch, -1, 1)[batch_id, preds_dict['order']] + + batch_reg[:, :, 1:2]) + + xs = ( + xs * self.test_cfg.out_size_factor * + self.test_cfg.voxel_size[0] + self.test_cfg.pc_range[0]) + ys = ( + ys * self.test_cfg.out_size_factor * + self.test_cfg.voxel_size[1] + self.test_cfg.pc_range[1]) + + if 'vel' in preds_dict: + batch_vel = preds_dict['vel'] + batch_box_preds = torch.cat( + [xs, ys, batch_hei, batch_dim, batch_vel, batch_rot], + dim=2) + else: + batch_box_preds = torch.cat( + [xs, ys, batch_hei, batch_dim, batch_rot], dim=2) + + if self.test_cfg.get('per_class_nms', False): + pass + else: + rets.append( + self.post_processing( + batch_input_metas, + batch_box_preds, + batch_score, + batch_label, + self.test_cfg, + post_center_range, + task_id, + batch_mask, + batch_iou, + )) + + # Merge branches results + ret_list = [] + num_samples = len(rets[0]) + + ret_list = [] + for i in range(num_samples): + temp_instances = InstanceData() + for k in rets[0][i].keys(): + if k == 'bboxes': + bboxes = torch.cat([ret[i][k] for ret in rets]) + bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + bboxes = batch_input_metas[i]['box_type_3d']( + bboxes, self.bbox_code_size) + elif k == 'labels': + flag = 0 + for j, num_class in enumerate(self.num_classes): + rets[j][i][k] += flag + flag += num_class + labels = torch.cat([ret[i][k] for ret in rets]) + elif k == 'scores': + scores = torch.cat([ret[i][k] for ret in rets]) + + temp_instances.bboxes_3d = bboxes + temp_instances.scores_3d = scores + temp_instances.labels_3d = labels + ret_list.append(temp_instances) + + return ret_list + + def post_processing( + self, + img_metas, + batch_box_preds, + batch_score, + batch_label, + test_cfg, + post_center_range, + task_id, + batch_mask, + batch_iou, + ): + batch_size = len(batch_score) + + prediction_dicts = [] + for i in range(batch_size): + box_preds = batch_box_preds[i] + scores = batch_score[i] + labels = batch_label[i] 
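For symmetry with the target-encoding example earlier, here is an illustrative sketch (not from the patch) of the decode performed in `predict`: the proposal's grid position plus the predicted sub-pixel offset is mapped back to metres, dimensions leave log space via `exp`, and yaw is recovered from its (sin, cos) encoding with `atan2`. All tensor values below are made up.

```python
import torch

voxel_size = (0.1, 0.1)
pc_range = (-75.2, -75.2)
out_size_factor = 4

grid_xy = torch.tensor([213.0, 138.0])      # proposal pixel on the feature map
reg = torch.tensor([0.75, 0.0])             # predicted sub-pixel offset
log_dim = torch.tensor([1.50, 0.65, 0.45])  # predicted log(l, w, h)
rot_sincos = torch.tensor([0.0, 1.0])       # predicted (sin, cos) of yaw

xy_metres = (grid_xy + reg) * out_size_factor * torch.tensor(voxel_size) \
    + torch.tensor(pc_range)                # -> tensor([10.3, -20.0])
dims = torch.exp(log_dim)                   # box size back in metres
yaw = torch.atan2(rot_sincos[0], rot_sincos[1])  # -> 0.0 rad
```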
+ mask = batch_mask[i] + + distance_mask = (box_preds[..., :3] >= post_center_range[:3]).all( + 1) & (box_preds[..., :3] <= post_center_range[3:]).all(1) + + mask = mask & distance_mask + + box_preds = box_preds[mask] + scores = scores[mask] + labels = labels[mask] + + if self.use_iou_loss: + iou_factor = torch.LongTensor(self.iou_factor).to(labels) + ious = batch_iou[i][mask] + ious = torch.pow(ious, iou_factor[labels]) + scores = scores * ious + + boxes_for_nms = box_preds[:, [0, 1, 2, 3, 4, 5, -1]] + + if test_cfg.get('circular_nms', False): + centers = boxes_for_nms[:, [0, 1]] + boxes = torch.cat([centers, scores.view(-1, 1)], dim=1) + selected = _circle_nms( + boxes, + min_radius=test_cfg.min_radius[task_id], + post_max_size=test_cfg.nms.nms_post_max_size, + ) + elif test_cfg.nms.get('use_multi_class_nms', False): + # multi class nms + selected = [] + for c in range(3): + class_mask = labels == c + if class_mask.sum() > 0: + class_idx = class_mask.nonzero() + select = nms_iou3d( + boxes_for_nms[class_mask].float(), + scores[class_mask].float(), + thresh=test_cfg.nms.nms_iou_threshold[c], + pre_maxsize=test_cfg.nms.nms_pre_max_size[c], + post_max_size=test_cfg.nms.nms_post_max_size[c], + ) + selected.append(class_idx[select, 0]) + if len(selected) > 0: + selected = torch.cat(selected, dim=0) + else: + selected = nms_bev( + boxes_for_nms.float(), + scores.float(), + thresh=test_cfg.nms.nms_iou_threshold, + pre_max_size=test_cfg.nms.nms_pre_max_size, + post_max_size=test_cfg.nms.nms_post_max_size, + ) + + selected_boxes = box_preds[selected] + selected_scores = scores[selected] + selected_labels = labels[selected] + + prediction_dict = { + 'bboxes': selected_boxes, + 'scores': selected_scores, + 'labels': selected_labels, + } + + prediction_dicts.append(prediction_dict) + + return prediction_dicts + + +def _circle_nms(boxes, min_radius, post_max_size=83): + """NMS according to center distance.""" + keep = np.array(circle_nms(boxes.cpu().numpy(), + thresh=min_radius))[:post_max_size] + + keep = torch.from_numpy(keep).long().to(boxes.device) + + return keep + + +def get_box(pred_boxs, order, test_cfg, H, W): + batch = pred_boxs.shape[0] + obj_num = order.shape[1] + ys, xs = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)]) + ys = ys.view(1, H, W).repeat(batch, 1, 1).to(pred_boxs) + xs = xs.view(1, H, W).repeat(batch, 1, 1).to(pred_boxs) + + batch_id = np.indices((batch, obj_num))[0] + batch_id = torch.from_numpy(batch_id).to(order) + xs = xs.view(batch, H * W)[batch_id, order].unsqueeze(1) + pred_boxs[:, + 0:1] + ys = ys.view(batch, H * W)[batch_id, order].unsqueeze(1) + pred_boxs[:, + 1:2] + + xs = xs * test_cfg.out_size_factor * test_cfg.voxel_size[ + 0] + test_cfg.pc_range[0] + ys = ys * test_cfg.out_size_factor * test_cfg.voxel_size[ + 1] + test_cfg.pc_range[1] + + rot = torch.atan2(pred_boxs[:, 6:7], pred_boxs[:, 7:8]) + pred = torch.cat( + [xs, ys, pred_boxs[:, 2:3], + torch.exp(pred_boxs[:, 3:6]), rot], dim=1) + + return torch.transpose(pred, 1, 2).contiguous() # B M 7 + + +def get_box_gt(gt_boxs, order, test_cfg, H, W): + batch = gt_boxs.shape[0] + obj_num = order.shape[1] + ys, xs = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)]) + ys = ys.view(1, H, W).repeat(batch, 1, 1).to(gt_boxs) + xs = xs.view(1, H, W).repeat(batch, 1, 1).to(gt_boxs) + + batch_id = np.indices((batch, obj_num))[0] + batch_id = torch.from_numpy(batch_id).to(order) + + batch_gt_dim = torch.exp(gt_boxs[..., 3:6]) + batch_gt_hei = gt_boxs[..., 2:3] + batch_gt_rot = torch.atan2(gt_boxs[..., -2:-1], 
gt_boxs[..., -1:]) + xs = xs.view(batch, H * W)[batch_id, order].unsqueeze(2) + gt_boxs[..., + 0:1] + ys = ys.view(batch, H * W)[batch_id, order].unsqueeze(2) + gt_boxs[..., + 1:2] + + xs = xs * test_cfg.out_size_factor * test_cfg.voxel_size[ + 0] + test_cfg.pc_range[0] + ys = ys * test_cfg.out_size_factor * test_cfg.voxel_size[ + 1] + test_cfg.pc_range[1] + + batch_box_targets = torch.cat( + [xs, ys, batch_gt_hei, batch_gt_dim, batch_gt_rot], dim=-1) + + return batch_box_targets # B M 7 + + +def get_corresponding_box(x_ind, y_ind, y_mask, y_cls, target_box): + # find the id in y which has the same ind in x + select_target = torch.zeros(x_ind.shape[0], x_ind.shape[1], + target_box.shape[2]).to(target_box) + select_mask = torch.zeros_like(x_ind).to(y_mask) + select_cls = torch.zeros_like(x_ind).to(y_cls) + + for i in range(x_ind.shape[0]): + idx = torch.arange(y_ind[i].shape[-1]).to(x_ind) + idx = idx[y_mask[i]] + box_cls = y_cls[i][y_mask[i]] + valid_y_ind = y_ind[i][y_mask[i]] + match = (x_ind[i].unsqueeze(1) == valid_y_ind.unsqueeze(0)).nonzero() + select_target[i, match[:, 0]] = target_box[i, idx[match[:, 1]]] + select_mask[i, match[:, 0]] = 1 + select_cls[i, match[:, 0]] = box_cls[match[:, 1]] + + return select_target, select_mask, select_cls diff --git a/projects/CenterFormer/centerformer/losses.py b/projects/CenterFormer/centerformer/losses.py new file mode 100755 index 0000000..e59dc8f --- /dev/null +++ b/projects/CenterFormer/centerformer/losses.py @@ -0,0 +1,58 @@ +# modify from https://github.com/TuSimple/centerformer/blob/master/det3d/models/losses/centernet_loss.py # noqa + +import torch +from torch import nn + +from mmdet3d.registry import MODELS + + +def _gather_feat(feat, ind, mask=None): + dim = feat.size(2) + ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) + feat = feat.gather(1, ind) + if mask is not None: + mask = mask.unsqueeze(2).expand_as(feat) + feat = feat[mask] + feat = feat.view(-1, dim) + return feat + + +def _transpose_and_gather_feat(feat, ind): + feat = feat.permute(0, 2, 3, 1).contiguous() + feat = feat.view(feat.size(0), -1, feat.size(3)) + feat = _gather_feat(feat, ind) + return feat + + +@MODELS.register_module() +class FastFocalLoss(nn.Module): + """Reimplemented focal loss, exactly the same as the CornerNet version. + + Faster and costs much less memory. 
+ """ + + def __init__(self, focal_factor=2): + super(FastFocalLoss, self).__init__() + self.focal_factor = focal_factor + + def forward(self, out, target, ind, mask, cat): + ''' + Args: + out, target: B x C x H x W + ind, mask: B x M + cat (category id for peaks): B x M + ''' + mask = mask.float() + gt = torch.pow(1 - target, 4) + neg_loss = torch.log(1 - out) * torch.pow(out, self.focal_factor) * gt + neg_loss = neg_loss.sum() + + pos_pred_pix = _transpose_and_gather_feat(out, ind) # B x M x C + pos_pred = pos_pred_pix.gather(2, cat.unsqueeze(2)) # B x M + num_pos = mask.sum() + pos_loss = torch.log(pos_pred) * torch.pow( + 1 - pos_pred, self.focal_factor) * mask.unsqueeze(2) + pos_loss = pos_loss.sum() + if num_pos == 0: + return -neg_loss + return -(pos_loss + neg_loss) / num_pos diff --git a/projects/CenterFormer/centerformer/multi_scale_deform_attn.py b/projects/CenterFormer/centerformer/multi_scale_deform_attn.py new file mode 100755 index 0000000..6c39af9 --- /dev/null +++ b/projects/CenterFormer/centerformer/multi_scale_deform_attn.py @@ -0,0 +1,229 @@ +# modify from https://github.com/TuSimple/centerformer/blob/master/det3d/models/ops/modules/ms_deform_attn.py # noqa + +import math +from typing import Optional + +import torch +import torch.nn.functional as F +from mmcv.utils import ext_loader +from torch import Tensor, nn +from torch.autograd.function import Function, once_differentiable +from torch.nn.init import constant_, xavier_uniform_ + +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + + +class MultiScaleDeformableAttnFunction(Function): + + @staticmethod + def forward(ctx, value: torch.Tensor, value_spatial_shapes: torch.Tensor, + value_level_start_index: torch.Tensor, + sampling_locations: torch.Tensor, + attention_weights: torch.Tensor, + im2col_step: torch.Tensor) -> torch.Tensor: + """GPU/MLU version of multi-scale deformable attention. + + Args: + value (torch.Tensor): The value has shape + (bs, num_keys, mum_heads, embed_dims//num_heads) + value_spatial_shapes (torch.Tensor): Spatial shape of + each feature map, has shape (num_levels, 2), + last dimension 2 represent (h, w) + sampling_locations (torch.Tensor): The location of sampling points, + has shape + (bs ,num_queries, num_heads, num_levels, num_points, 2), + the last dimension 2 represent (x, y). + attention_weights (torch.Tensor): The weight of sampling points + used when calculate the attention, has shape + (bs ,num_queries, num_heads, num_levels, num_points), + im2col_step (torch.Tensor): The step used in image to column. + Returns: + torch.Tensor: has shape (bs, num_queries, embed_dims) + """ + + ctx.im2col_step = im2col_step + output = ext_module.ms_deform_attn_forward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + im2col_step=ctx.im2col_step) + ctx.save_for_backward(value, value_spatial_shapes, + value_level_start_index, sampling_locations, + attention_weights) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output: torch.Tensor) -> tuple: + """GPU/MLU version of backward function. + + Args: + grad_output (torch.Tensor): Gradient of output tensor of forward. + Returns: + tuple[Tensor]: Gradient of input tensors in forward. 
+ """ + value, value_spatial_shapes, value_level_start_index,\ + sampling_locations, attention_weights = ctx.saved_tensors + grad_value = torch.zeros_like(value) + grad_sampling_loc = torch.zeros_like(sampling_locations) + grad_attn_weight = torch.zeros_like(attention_weights) + + ext_module.ms_deform_attn_backward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + grad_output.contiguous(), + grad_value, + grad_sampling_loc, + grad_attn_weight, + im2col_step=ctx.im2col_step) + + return grad_value, None, None, \ + grad_sampling_loc, grad_attn_weight, None + + +class MSDeformAttn(nn.Module): + """Multi-Scale Deformable Attention Module. Note that the difference + between this implementation and the implementation in MMCV is that the + dimension of input and hidden embedding in the multi-attention-head can be + specified respectively. + + Args: + dim_model (int, optional): The input and output dimension in the model. + Defaults to 256. + dim_single_head (int, optional): hidden dimension in the single head. + Defaults to 64. + n_levels (int, optional): number of feature levels. Defaults to 4. + n_heads (int, optional): number of attention heads. Defaults to 8. + n_points (int, optional): number of sampling points per attention head + per feature level. Defaults to 4. + out_sample_loc (bool, optional): Whether to return the sampling + location. Defaults to False. + """ + + def __init__(self, + dim_model=256, + dim_single_head=64, + n_levels=4, + n_heads=8, + n_points=4, + out_sample_loc=False): + super().__init__() + + self.im2col_step = 64 + + self.dim_model = dim_model + self.dim_single_head = dim_single_head + self.n_levels = n_levels + self.n_heads = n_heads + self.n_points = n_points + + self.out_sample_loc = out_sample_loc + + self.sampling_offsets = nn.Linear(dim_model, + n_heads * n_levels * n_points * 2) + self.attention_weights = nn.Linear(dim_model, + n_heads * n_levels * n_points) + self.value_proj = nn.Linear(dim_model, dim_single_head * n_heads) + self.output_proj = nn.Linear(dim_single_head * n_heads, dim_model) + + self._reset_parameters() + + def _reset_parameters(self): + constant_(self.sampling_offsets.weight.data, 0.) + thetas = torch.arange( + self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.n_heads, 1, 1, 2).repeat(1, self.n_levels, + self.n_points, 1) + for i in range(self.n_points): + grid_init[:, :, i, :] *= i + 1 + with torch.no_grad(): + self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + constant_(self.attention_weights.weight.data, 0.) + constant_(self.attention_weights.bias.data, 0.) + xavier_uniform_(self.value_proj.weight.data) + constant_(self.value_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) + + def forward(self, + query: Tensor, + reference_points: Tensor, + input_flatten: Tensor, + input_spatial_shapes: Tensor, + input_level_start_index: Tensor, + input_padding_mask: Optional[Tensor] = None): + """Forward Function of MultiScaleDeformAttention. + + Args: + query (Tensor): (N, num_query, C) + reference_points (Tensor): (N, num_query, n_levels, 2). The + normalized reference points with shape + (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. 
+ or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. + input_flatten (Tensor): _description_ + input_spatial_shapes (Tensor): Spatial shape of features in + different levels. With shape (num_levels, 2), + last dimension represents (h, w). + input_level_start_index (Tensor): The start index of each level. + A tensor has shape ``(num_levels, )`` and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + input_padding_mask (Optional[Tensor], optional): The padding mask + for value. Defaults to None. + + Returns: + Tuple[Tensor, Tensor]: forwarded results. + """ + N, Len_q, _ = query.shape + N, Len_in, _ = input_flatten.shape + assert (input_spatial_shapes[:, 0] * + input_spatial_shapes[:, 1]).sum() == Len_in + + value = self.value_proj(input_flatten) + if input_padding_mask is not None: + value = value.masked_fill(input_padding_mask[..., None], float(0)) + value = value.view(N, Len_in, self.n_heads, self.dim_single_head) + sampling_offsets = self.sampling_offsets(query).view( + N, Len_q, self.n_heads, self.n_levels, self.n_points, 2) + attention_weights = self.attention_weights(query).view( + N, Len_q, self.n_heads, self.n_levels * self.n_points) + attention_weights = F.softmax(attention_weights, + -1).view(N, Len_q, self.n_heads, + self.n_levels, self.n_points) + # N, Len_q, n_heads, n_levels, n_points, 2 + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack( + [input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], + -1).to(sampling_offsets) + + sampling_locations = reference_points[:, :, None, :, None, :] + \ + sampling_offsets / offset_normalizer[None, None, None, :, None, :] # noqa: E501 + elif reference_points.shape[-1] == 4: + sampling_locations = reference_points[:, :, None, :, None, :2] \ + + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 # noqa: E501 + else: + raise ValueError( + 'Last dim of reference_points must be 2 or 4, but get {} instead.' 
# noqa: E501 + .format(reference_points.shape[-1])) + output = MultiScaleDeformableAttnFunction.apply( + value, input_spatial_shapes, input_level_start_index, + sampling_locations, attention_weights, self.im2col_step) + output = self.output_proj(output) + if self.out_sample_loc: + return output, torch.cat( + (sampling_locations, attention_weights[:, :, :, :, :, None]), + dim=-1) + else: + return output, None diff --git a/projects/CenterFormer/centerformer/transformer.py b/projects/CenterFormer/centerformer/transformer.py new file mode 100755 index 0000000..88b8ff2 --- /dev/null +++ b/projects/CenterFormer/centerformer/transformer.py @@ -0,0 +1,261 @@ +# modify from https://github.com/TuSimple/centerformer/blob/master/det3d/models/utils/transformer.py # noqa + +import torch +from einops import rearrange +from mmcv.cnn.bricks.activation import GELU +from torch import einsum, nn + +from .multi_scale_deform_attn import MSDeformAttn + + +class PreNorm(nn.Module): + + def __init__(self, dim, fn): + super().__init__() + self.norm = nn.LayerNorm(dim) + self.fn = fn + + def forward(self, x, y=None, **kwargs): + if y is not None: + return self.fn(self.norm(x), self.norm(y), **kwargs) + else: + return self.fn(self.norm(x), **kwargs) + + +class FFN(nn.Module): + + def __init__(self, dim, hidden_dim, dropout=0.0): + super().__init__() + self.net = nn.Sequential( + nn.Linear(dim, hidden_dim), + GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, dim), + nn.Dropout(dropout), + ) + + def forward(self, x): + return self.net(x) + + +class SelfAttention(nn.Module): + + def __init__(self, + dim, + n_heads=8, + dim_single_head=64, + dropout=0.0, + out_attention=False): + super().__init__() + inner_dim = dim_single_head * n_heads + project_out = not (n_heads == 1 and dim_single_head == dim) + + self.n_heads = n_heads + self.scale = dim_single_head**-0.5 + self.out_attention = out_attention + + self.attend = nn.Softmax(dim=-1) + self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False) + + self.to_out = ( + nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout)) + if project_out else nn.Identity()) + + def forward(self, x): + _, _, _, h = *x.shape, self.n_heads + qkv = self.to_qkv(x).chunk(3, dim=-1) + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), qkv) + + dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale + + attn = self.attend(dots) + + out = einsum('b h i j, b h j d -> b h i d', attn, v) + out = rearrange(out, 'b h n d -> b n (h d)') + + if self.out_attention: + return self.to_out(out), attn + else: + return self.to_out(out) + + +class DeformableCrossAttention(nn.Module): + + def __init__( + self, + dim_model=256, + dim_single_head=64, + dropout=0.3, + n_levels=3, + n_heads=6, + n_points=9, + out_sample_loc=False, + ): + super().__init__() + + # cross attention + self.cross_attn = MSDeformAttn( + dim_model, + dim_single_head, + n_levels, + n_heads, + n_points, + out_sample_loc=out_sample_loc) + self.dropout = nn.Dropout(dropout) + self.out_sample_loc = out_sample_loc + + @staticmethod + def with_pos_embed(tensor, pos): + return tensor if pos is None else tensor + pos + + def forward( + self, + tgt, + src, + query_pos=None, + reference_points=None, + src_spatial_shapes=None, + level_start_index=None, + src_padding_mask=None, + ): + # cross attention + tgt2, sampling_locations = self.cross_attn( + self.with_pos_embed(tgt, query_pos), + reference_points, + src, + src_spatial_shapes, + level_start_index, + src_padding_mask, + ) + tgt = self.dropout(tgt2) + + if 
self.out_sample_loc: + return tgt, sampling_locations + else: + return tgt + + +class DeformableTransformerDecoder(nn.Module): + """Deformable transformer decoder. + + Note that the ``DeformableDetrTransformerDecoder`` in MMDet has different + interfaces in multi-head-attention which is customized here. For example, + 'embed_dims' is not a position argument in our customized multi-head-self- + attention, but is required in MMDet. Thus, we can not directly use the + ``DeformableDetrTransformerDecoder`` in MMDET. + """ + + def __init__( + self, + dim, + n_levels=3, + depth=2, + n_heads=4, + dim_single_head=32, + dim_ffn=256, + dropout=0.0, + out_attention=False, + n_points=9, + ): + super().__init__() + self.out_attention = out_attention + self.layers = nn.ModuleList([]) + self.depth = depth + self.n_levels = n_levels + self.n_points = n_points + + for _ in range(depth): + self.layers.append( + nn.ModuleList([ + PreNorm( + dim, + SelfAttention( + dim, + n_heads=n_heads, + dim_single_head=dim_single_head, + dropout=dropout, + out_attention=self.out_attention, + ), + ), + PreNorm( + dim, + DeformableCrossAttention( + dim, + dim_single_head, + n_levels=n_levels, + n_heads=n_heads, + dropout=dropout, + n_points=n_points, + out_sample_loc=self.out_attention, + ), + ), + PreNorm(dim, FFN(dim, dim_ffn, dropout=dropout)), + ])) + + def forward(self, x, pos_embedding, src, src_spatial_shapes, + level_start_index, center_pos): + if self.out_attention: + out_cross_attention_list = [] + if pos_embedding is not None: + center_pos_embedding = pos_embedding(center_pos) + reference_points = center_pos[:, :, + None, :].repeat(1, 1, self.n_levels, 1) + for i, (self_attn, cross_attn, ff) in enumerate(self.layers): + if self.out_attention: + if center_pos_embedding is not None: + x_att, self_att = self_attn(x + center_pos_embedding) + x = x_att + x + x_att, cross_att = cross_attn( + x, + src, + query_pos=center_pos_embedding, + reference_points=reference_points, + src_spatial_shapes=src_spatial_shapes, + level_start_index=level_start_index, + ) + else: + x_att, self_att = self_attn(x) + x = x_att + x + x_att, cross_att = cross_attn( + x, + src, + query_pos=None, + reference_points=reference_points, + src_spatial_shapes=src_spatial_shapes, + level_start_index=level_start_index, + ) + out_cross_attention_list.append(cross_att) + else: + if center_pos_embedding is not None: + x_att = self_attn(x + center_pos_embedding) + x = x_att + x + x_att = cross_attn( + x, + src, + query_pos=center_pos_embedding, + reference_points=reference_points, + src_spatial_shapes=src_spatial_shapes, + level_start_index=level_start_index, + ) + else: + x_att = self_attn(x) + x = x_att + x + x_att = cross_attn( + x, + src, + query_pos=None, + reference_points=reference_points, + src_spatial_shapes=src_spatial_shapes, + level_start_index=level_start_index, + ) + + x = x_att + x + x = ff(x) + x + + out_dict = {'ct_feat': x} + if self.out_attention: + out_dict.update({ + 'out_attention': + torch.stack(out_cross_attention_list, dim=2) + }) + return out_dict diff --git a/projects/CenterFormer/configs/centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class.py b/projects/CenterFormer/configs/centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class.py new file mode 100755 index 0000000..22a7152 --- /dev/null +++ b/projects/CenterFormer/configs/centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class.py @@ -0,0 +1,308 @@ +_base_ = ['mmdet3d::_base_/default_runtime.py'] 
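Before the config continues, a minimal sketch (not part of the patch) of the bookkeeping tensors the decoder above expects: every BEV level is flattened to (B, H_l*W_l, C) and concatenated, while `spatial_shapes` and `level_start_index` record where each level begins inside the concatenated source, mirroring how the RPN modules build them. Sizes are toy values.

```python
import torch

B, C = 2, 128
levels = [(64, 64), (32, 32), (16, 16)]  # toy (H, W) per BEV level
feats = [torch.randn(B, C, h, w) for h, w in levels]

# flatten every level to (B, H*W, C) and concatenate along the token axis
src = torch.cat(
    [f.reshape(B, C, h * w).transpose(2, 1) for f, (h, w) in zip(feats, levels)],
    dim=1)  # (B, sum(H_l * W_l), C)
spatial_shapes = torch.as_tensor(levels, dtype=torch.long)
level_start_index = torch.cat(
    (spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
# -> tensor([0, 4096, 5120]); the deformable attention indexes each level
# of `src` with these offsets
```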
+custom_imports = dict( + imports=['projects.CenterFormer.centerformer'], allow_failed_imports=False) + +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +voxel_size = [0.1, 0.1, 0.15] +point_cloud_range = [-75.2, -75.2, -2, 75.2, 75.2, 4] +class_names = ['Car', 'Pedestrian', 'Cyclist'] +tasks = [dict(num_class=3, class_names=['car', 'pedestrian', 'cyclist'])] +metainfo = dict(classes=class_names) +input_modality = dict(use_lidar=True, use_camera=False) +backend_args = None + +model = dict( + type='CenterFormer', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + voxel=True, + voxel_type='dynamic', + voxel_layer=dict( + max_num_points=-1, + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(-1, -1))), + voxel_encoder=dict( + type='DynamicSimpleVFE', + point_cloud_range=point_cloud_range, + voxel_size=voxel_size), + middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[41, 1504, 1504], + order=('conv', 'norm', 'act'), + norm_cfg=dict(type='naiveSyncBN1d', eps=0.001, momentum=0.01), + encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128, + 128)), + encoder_paddings=((1, 1, 1), (1, 1, 1), (1, 1, [0, 1, 1]), (1, 1)), + block_type='basicblock'), + backbone=dict( + type='DeformableDecoderRPN', + layer_nums=[5, 5, 1], + ds_num_filters=[256, 256, 128], + num_input_features=256, + tasks=tasks, + use_gt_training=True, + corner=True, + assign_label_window_size=1, + obj_num=500, + norm_cfg=dict(type='SyncBN', eps=1e-3, momentum=0.01), + transformer_config=dict( + depth=2, + n_heads=6, + dim_single_head=64, + dim_ffn=256, + dropout=0.3, + out_attn=False, + n_points=15, + ), + ), + bbox_head=dict( + type='CenterFormerBboxHead', + in_channels=256, + tasks=tasks, + dataset='waymo', + weight=2, + corner_loss=True, + iou_loss=True, + assign_label_window_size=1, + norm_cfg=dict(type='SyncBN', eps=1e-3, momentum=0.01), + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + common_heads={ + 'reg': (2, 2), + 'height': (1, 2), + 'dim': (3, 2), + 'rot': (2, 2), + 'iou': (1, 2) + }, # (output_channel, num_conv) + ), + train_cfg=dict( + grid_size=[1504, 1504, 40], + voxel_size=voxel_size, + out_size_factor=4, + dense_reg=1, + gaussian_overlap=0.1, + point_cloud_range=point_cloud_range, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), + test_cfg=dict( + post_center_limit_range=[-80, -80, -10.0, 80, 80, 10.0], + nms=dict( + use_rotate_nms=False, + use_multi_class_nms=True, + nms_pre_max_size=[1600, 1600, 800], + nms_post_max_size=[200, 200, 100], + nms_iou_threshold=[0.8, 0.55, 0.55], + ), + score_threshold=0.1, + pc_range=[-75.2, -75.2], + out_size_factor=4, + voxel_size=[0.1, 0.1], + obj_num=1000, + )) + +data_root = 'data/waymo/kitti_format/' +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=5, Cyclist=5)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args), + backend_args=backend_args) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + norm_intensity=True, + 
backend_args=backend_args), + # Add this if using `MultiFrameDeformableDecoderRPN` + # dict( + # type='LoadPointsFromMultiSweeps', + # sweeps_num=9, + # load_dim=6, + # use_dim=[0, 1, 2, 3, 4], + # pad_empty_sweeps=True, + # remove_close=True), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05], + translation_std=[0.5, 0.5, 0]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + norm_intensity=True, + backend_args=backend_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range) + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] + +dataset_type = 'WaymoDataset' +train_dataloader = dict( + batch_size=4, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='waymo_infos_train.pkl', + data_prefix=dict(pts='training/velodyne', sweeps='training/velodyne'), + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + metainfo=metainfo, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + # load one frame every five frames + load_interval=5, + backend_args=backend_args)) +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(pts='training/velodyne', sweeps='training/velodyne'), + ann_file='waymo_infos_val.pkl', + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + metainfo=metainfo, + box_type_3d='LiDAR', + backend_args=backend_args)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='WaymoMetric', + ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl', + waymo_bin_file='./data/waymo/waymo_format/gt.bin', + data_root='./data/waymo/waymo_format', + backend_args=backend_args, + convert_kitti_format=False, + idx2metainfo='./data/waymo/waymo_format/idx2metainfo.pkl') +test_evaluator = val_evaluator + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') + +# For waymo dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 20. Please change the interval accordingly if you do not +# use a default schedule. 
+# optimizer +lr = 3e-4 +# This schedule is mainly used by models on nuScenes dataset +# max_norm=10 is better for SECOND +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01, betas=(0.9, 0.99)), + clip_grad=dict(max_norm=35, norm_type=2)) +# learning rate +param_scheduler = [ + # learning rate scheduler + # During the first 8 epochs, learning rate increases from 0 to lr * 10 + # during the next 12 epochs, learning rate decreases from lr * 10 to + # lr * 1e-4 + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=lr * 10, + begin=0, + end=8, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=12, + eta_min=lr * 1e-4, + begin=8, + end=20, + by_epoch=True, + convert_to_iter_based=True), + # momentum scheduler + # During the first 8 epochs, momentum increases from 0 to 0.85 / 0.95 + # during the next 12 epochs, momentum increases from 0.85 / 0.95 to 1 + dict( + type='CosineAnnealingMomentum', + T_max=8, + eta_min=0.85 / 0.95, + begin=0, + end=8, + by_epoch=True, + convert_to_iter_based=True), + dict( + type='CosineAnnealingMomentum', + T_max=12, + eta_min=1, + begin=8, + end=20, + by_epoch=True, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=20) +val_cfg = dict() +test_cfg = dict() + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (4 GPUs) x (4 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) + +default_hooks = dict( + logger=dict(type='LoggerHook', interval=50), + checkpoint=dict(type='CheckpointHook', interval=5)) +custom_hooks = [dict(type='DisableObjectSampleHook', disable_after_epoch=15)] diff --git a/projects/DETR3D/README.md b/projects/DETR3D/README.md new file mode 100755 index 0000000..c5fd4bd --- /dev/null +++ b/projects/DETR3D/README.md @@ -0,0 +1,147 @@ +DETR3D: 3D Object Detection from Multi-view Images via 3D-to-2D Queries + +> [DETR3D: 3D Object Detection from Multi-view Images via 3D-to-2D Queries](https://arxiv.org/abs/2110.06922) + + + +## Abstract + +We introduce a framework for multi-camera 3D object detection. In +contrast to existing works, which estimate 3D bounding boxes directly from +monocular images or use depth prediction networks to generate input for 3D object +detection from 2D information, our method manipulates predictions directly +in 3D space. Our architecture extracts 2D features from multiple camera images +and then uses a sparse set of 3D object queries to index into these 2D features, +linking 3D positions to multi-view images using camera transformation matrices. +Finally, our model makes a bounding box prediction per object query, using a +set-to-set loss to measure the discrepancy between the ground-truth and the prediction. +This top-down approach outperforms its bottom-up counterpart in which +object bounding box prediction follows per-pixel depth estimation, since it does +not suffer from the compounding error introduced by a depth prediction model. +Moreover, our method does not require post-processing such as non-maximum +suppression, dramatically improving inference speed. We achieve state-of-the-art +performance on the nuScenes autonomous driving benchmark. + +
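The core mechanism in the abstract — back-projecting each 3D object query into every camera view via the camera transformation matrices and gathering 2D features at the projected locations — can be sketched roughly as below. This is a minimal, single-feature-level illustration with assumed tensor shapes; it is not the project's actual `Detr3DCrossAtten` module, which also handles multiple FPN levels and learned attention weights.

```python
import torch
import torch.nn.functional as F


def sample_img_feats(ref_points, lidar2img, img_feats, eps=1e-5):
    """Project 3D query reference points into all camera views and sample
    2D features there (single feature level, assumed shapes).

    ref_points: (B, Q, 3) reference points in LiDAR coordinates.
    lidar2img:  (B, N, 4, 4) LiDAR-to-image projection matrices.
    img_feats:  (B, N, C, H, W) image features from one FPN level.
    """
    B, Q, _ = ref_points.shape
    N = lidar2img.shape[1]
    C, H, W = img_feats.shape[-3:]

    # homogeneous points broadcast against every camera: (B, N, Q, 4, 1)
    pts = torch.cat([ref_points, ref_points.new_ones(B, Q, 1)], dim=-1)
    pts = pts[:, None].expand(B, N, Q, 4).unsqueeze(-1)
    cam_pts = (lidar2img[:, :, None] @ pts).squeeze(-1)        # (B, N, Q, 4)

    in_front = cam_pts[..., 2] > eps                           # visibility mask
    uv = cam_pts[..., :2] / cam_pts[..., 2:3].clamp(min=eps)   # pixel coordinates

    # normalize to [-1, 1] for grid_sample (feature stride assumed folded in)
    grid = torch.stack([uv[..., 0] / W, uv[..., 1] / H], dim=-1) * 2 - 1
    sampled = F.grid_sample(
        img_feats.flatten(0, 1),               # (B*N, C, H, W)
        grid.flatten(0, 1).unsqueeze(2),       # (B*N, Q, 1, 2)
        align_corners=False).squeeze(-1)       # (B*N, C, Q)

    sampled = sampled.view(B, N, C, Q) * in_front[:, :, None]
    # average over the cameras that actually see each query
    return sampled.sum(1) / in_front.sum(1).clamp(min=1)[:, None]   # (B, C, Q)
```

The queries refined with these sampled features are what the decoder layers ultimately regress into 3D boxes.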
+
+## Introduction
+
+This directory contains an implementation of DETR3D (https://arxiv.org/abs/2110.06922), built on top of MMDetection3D.
+We have updated DETR3D to be compatible with the latest mmdet3d-dev1.x. The codebase and config files have all been adapted to the new mmdet3d version. All previously released pretrained models have been verified against the results listed below; newly trained models are yet to be uploaded.
+
+## Environment Setup
+
+We require mmdet \<= v3.0.0rc5. Versions of mmdet later than v3.0.0rc5 have refactored the DETR series and its config files, but our configs and code have not yet been updated accordingly.
+
+## Train
+
+1. Download the [pretrained backbone weights](https://drive.google.com/drive/folders/1h5bDg7Oh9hKvkFL-dRhu5-ahrEp2lRNN?usp=sharing) to pretrained/
+
+2. For example, to train DETR3D on 8 GPUs, please use
+
+```bash
+bash tools/dist_train.sh projects/DETR3D/configs/detr3d_r101_gridmask.py 8 --cfg-options load_from=pretrained/fcos3d.pth
+```
+
+## Evaluation using pretrained models
+
+1. Download the newly trained weights accordingly.
+
+   | Backbone | mAP | NDS | Download |
+   | :------------------------------------------------------------------------------------------------------: | :--: | :--: | :------------------------------------------------------------------------------------------------------------------------------------------------------: |
+   | [DETR3D, ResNet101 w/ DCN, evaluation on val set](./configs/detr3d_r101_gridmask.py) | 35.5 | 42.8 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/detr3d/detr3d_r101_gridmask.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/detr3d/detr3d_r101_gridmask.log) |
+   | [above, + CBGS, evaluation on val set](./configs/detr3d_r101_gridmask_cbgs.py) | 35.2 | 42.7 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/detr3d/detr3d_r101_gridmask_cbgs.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/detr3d/detr3d_r101_gridmask_cbgs.log) |
+   | [DETR3D, VoVNet on trainval, evaluation on test set](./configs/detr3d_vovnet_gridmask_trainval_cbgs.py) | 41.4 | 48.1 | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/detr3d/detr3d_vovnet_gridmask_trainval_cbgs.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/detr3d/detr3d_vovnet_gridmask_trainval_cbgs.log) |
+
+2. Testing
+
+   To test, use:
+
+   ```bash
+   bash tools/dist_test.sh projects/DETR3D/configs/detr3d_r101_gridmask.py ${CHECKPOINT_PATH} 8
+   ```
+
+## Converting old models (Optional)
+
+For old models, please refer to [Object DGCNN & DETR3D](https://github.com/WangYueFt/detr3d).
+
+From v0.17.3 to v1.0.0, mmdet3d changed its bbox representation: given a box (x, y, z, θ), we have x_new = y_old, y_new = x_old and θ_new = -θ_old - π/2.
+
+Old models were trained on v0.17.3. Our regression branch outputs (cx, cy, w, l, cz, h, sin(θ), cos(θ), vx, vy). For a previous model whose output is y = \[y0, y1, y2, y3, y4, y5, y6, y7, y8, y9\], the new output is y_new = \[..., y3, y2, ..., -y7, -y6, ...\], so the weights of the final Linear layer should be permuted accordingly, as sketched below.
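For illustration only, the permutation described above could be applied to a checkpoint roughly as follows. The `reg_branches` key pattern is an assumption about the checkpoint layout; the bundled converter script shown next is the supported way to do the conversion.

```python
import torch


def convert_old_reg_branch(ckpt_path: str, out_path: str) -> None:
    """Sketch of the output permutation for old (v0.17.3) DETR3D weights.

    Only the final Linear layer of each regression branch (10 outputs:
    cx, cy, w, l, cz, h, sin(θ), cos(θ), vx, vy) is touched:
    new = [y0, y1, y3, y2, y4, y5, -y7, -y6, y8, y9].
    The 'reg_branches' key pattern is an assumption about the checkpoint
    layout; use old_detr3d_converter.py for real conversions.
    """
    ckpt = torch.load(ckpt_path, map_location='cpu')
    state = ckpt.get('state_dict', ckpt)
    for key, value in state.items():
        # only the last layer of a regression branch has 10 output rows
        if 'reg_branches' in key and value.shape[0] == 10:
            new_value = value.clone()
            new_value[2], new_value[3] = value[3], value[2]    # swap w and l
            new_value[6], new_value[7] = -value[7], -value[6]  # yaw: θ -> -θ - π/2
            state[key] = new_value
    torch.save(ckpt, out_path)
```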
+ +To convert the old weights, please use + +```bash +python projects/DETR3D/detr3d/old_detr3d_converter.py ${CHECKPOINT_DIR}/detr3d_resnet101.pth ${CHECKPOINT_DIR}/detr3d_r101_v1.0.0.pth --code_size 10 +``` + +## Citation + +If you find this repo useful for your research, please consider citing the papers + +``` +@inproceedings{ + detr3d, + title={DETR3D: 3D Object Detection from Multi-view Images via 3D-to-2D Queries}, + author={Wang, Yue and Guizilini, Vitor and Zhang, Tianyuan and Wang, Yilun and Zhao, Hang and and Solomon, Justin M.}, + booktitle={The Conference on Robot Learning ({CoRL})}, + year={2021} +} +``` + +## Checklist + + + +- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [x] Finish the code + + + + - [x] Basic docstrings & proper citation + + + + - [x] Test-time correctness + + + + - [x] A full README + + + +- [x] Milestone 2: Indicates a successful model implementation. + + - [x] Training-time correctness + + + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Type hints and docstrings + + + + - [ ] Unit tests + + + + - [ ] Code polishing + + + + - [ ] Metafile.yml + + + +- [ ] Move your modules into the core package following the codebase's file hierarchy structure. + + + +- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. diff --git a/projects/DETR3D/configs/detr3d_r101_gridmask.py b/projects/DETR3D/configs/detr3d_r101_gridmask.py new file mode 100755 index 0000000..0dddcd8 --- /dev/null +++ b/projects/DETR3D/configs/detr3d_r101_gridmask.py @@ -0,0 +1,258 @@ +_base_ = [ + # 'mmdet3d::_base_/datasets/nus-3d.py', + 'mmdet3d::_base_/default_runtime.py' +] + +custom_imports = dict(imports=['projects.DETR3D.detr3d']) +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False) +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) +# this means type='DETR3D' will be processed as 'mmdet3d.DETR3D' +default_scope = 'mmdet3d' +model = dict( + type='DETR3D', + use_grid_mask=True, + data_preprocessor=dict( + type='Det3DDataPreprocessor', **img_norm_cfg, pad_size_divisor=32), + img_backbone=dict( + type='mmdet.ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + img_neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='DETR3DHead', + num_query=900, + num_classes=10, + in_channels=256, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='Detr3DTransformer', + decoder=dict( + type='Detr3DTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='mmdet.DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + 
type='MultiheadAttention', # mmcv. + embed_dims=256, + num_heads=8, + dropout=0.1), + dict( + type='Detr3DCrossAtten', + pc_range=point_cloud_range, + num_points=1, + embed_dims=256) + ], + feedforward_channels=512, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', + num_feats=128, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='mmdet.L1Loss', loss_weight=0.25), + loss_iou=dict(type='mmdet.GIoULoss', loss_weight=0.0)), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='mmdet.FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + # ↓ Fake cost. This is just to get compatible with DETR head + iou_cost=dict(type='mmdet.IoUCost', weight=0.0), + pc_range=point_cloud_range)))) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' + +test_transforms = [ + dict( + type='RandomResize3D', + scale=(1600, 900), + ratio_range=(1., 1.), + keep_ratio=True) +] +train_transforms = [dict(type='PhotoMetricDistortion3D')] + test_transforms + +backend_args = None +train_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + num_views=6, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False), + dict(type='MultiViewWrapper', transforms=train_transforms), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='Pack3DDetInputs', keys=['img', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +test_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + num_views=6, + backend_args=backend_args), + dict(type='MultiViewWrapper', transforms=test_transforms), + dict(type='Pack3DDetInputs', keys=['img']) +] + +metainfo = dict(classes=class_names) +data_prefix = dict( + pts='', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT') + +train_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_train.pkl', + pipeline=train_pipeline, + load_type='frame_based', + metainfo=metainfo, + modality=input_modality, + test_mode=False, + data_prefix=data_prefix, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR', + backend_args=backend_args)) + +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_val.pkl', + load_type='frame_based', + pipeline=test_pipeline, + metainfo=metainfo, + modality=input_modality, + test_mode=True, + data_prefix=data_prefix, + box_type_3d='LiDAR', + backend_args=backend_args)) + +test_dataloader = val_dataloader + +val_evaluator = dict( + type='NuScenesMetric', + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=2e-4, weight_decay=0.01), + paramwise_cfg=dict(custom_keys={'img_backbone': dict(lr_mult=0.1)}), + clip_grad=dict(max_norm=35, norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=24, + T_max=24, + eta_min_ratio=1e-3) +] + +total_epochs = 24 + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=total_epochs, val_interval=2) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', interval=1, max_keep_ckpts=1, save_last=True)) +load_from = 'ckpts/fcos3d.pth' + +# setuptools 65 downgrades to 58. +# In mmlab-node we use setuptools 61 but occurs NO errors +vis_backends = [dict(type='TensorboardVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/projects/DETR3D/configs/detr3d_r101_gridmask_cbgs.py b/projects/DETR3D/configs/detr3d_r101_gridmask_cbgs.py new file mode 100755 index 0000000..06618ee --- /dev/null +++ b/projects/DETR3D/configs/detr3d_r101_gridmask_cbgs.py @@ -0,0 +1,80 @@ +_base_ = ['./detr3d_r101_gridmask.py'] + +custom_imports = dict(imports=['projects.DETR3D.detr3d']) +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False) +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' + +test_transforms = [ + dict( + type='RandomResize3D', + scale=(1600, 900), + ratio_range=(1., 1.), + keep_ratio=True) +] +train_transforms = [dict(type='PhotoMetricDistortion3D')] + test_transforms + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True, num_views=6), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False), + dict(type='MultiViewWrapper', transforms=train_transforms), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='Pack3DDetInputs', keys=['img', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +metainfo = dict(classes=class_names) +data_prefix = dict( + 
pts='', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT') + +train_dataloader = dict( + _delete_=True, + batch_size=1, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='CBGSDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_train.pkl', + pipeline=train_pipeline, + load_type='frame_based', + metainfo=metainfo, + modality=input_modality, + test_mode=False, + data_prefix=data_prefix, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR'))) diff --git a/projects/DETR3D/configs/detr3d_r50_bert_gridmask_halfdata.py b/projects/DETR3D/configs/detr3d_r50_bert_gridmask_halfdata.py new file mode 100644 index 0000000..960c676 --- /dev/null +++ b/projects/DETR3D/configs/detr3d_r50_bert_gridmask_halfdata.py @@ -0,0 +1,293 @@ +_base_ = [ + # 'mmdet3d::_base_/datasets/nus-3d.py', + 'mmdet3d::_base_/default_runtime.py' +] + +custom_imports = dict(imports=['projects.DETR3D.detr3d']) +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False) +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] +lang_model_name = 'bert-base-uncased' +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) +# this means type='DETR3D' will be processed as 'mmdet3d.DETR3D' +default_scope = 'mmdet3d' +model = dict( + type='DETR3D', + use_grid_mask=True, + data_preprocessor=dict( + type='Det3DDataPreprocessor', **img_norm_cfg, pad_size_divisor=32), + language_model=dict( + type='BertModel', + name=lang_model_name, + pad_to_max=False, + use_sub_sentence_represent=True, + special_tokens_list=['[CLS]', '[SEP]', '.', '?'], + add_pooling_layer=False, + ), + encoder=dict( + num_layers=6, + num_cp=6, + # visual layer config + layer_cfg=dict( + self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)), + # text layer config + text_layer_cfg=dict( + self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0), + ffn_cfg=dict( + embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)), + # fusion layer config + fusion_layer_cfg=dict( + v_dim=256, + l_dim=256, + embed_dim=1024, + num_heads=4, + init_values=1e-4), + ), + positional_encoding_single=dict( + num_feats=128, + normalize=True, + temperature=20, + offset=0.0), + img_backbone=dict( + type='mmdet.ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + img_neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True), + 
pts_bbox_head=dict( + type='DETR3DHead', + num_query=900, + num_classes=10, + in_channels=256, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='Detr3DTransformer', + decoder=dict( + type='Detr3DTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='mmdet.DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', # mmcv. + embed_dims=256, + num_heads=8, + dropout=0.1), + dict( + type='Detr3DCrossAtten', + pc_range=point_cloud_range, + num_points=1, + embed_dims=256) + ], + feedforward_channels=512, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', + num_feats=128, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='mmdet.L1Loss', loss_weight=0.25), + loss_iou=dict(type='mmdet.GIoULoss', loss_weight=0.0)), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='mmdet.FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + # ↓ Fake cost. This is just to get compatible with DETR head + iou_cost=dict(type='mmdet.IoUCost', weight=0.0), + pc_range=point_cloud_range)))) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' + +test_transforms = [ + dict( + type='RandomResize3D', + scale=(1600, 900), + ratio_range=(1., 1.), + keep_ratio=True) +] +train_transforms = [dict(type='PhotoMetricDistortion3D')] + test_transforms + +backend_args = None +train_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + num_views=6, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False), + dict(type='MultiViewWrapper', transforms=train_transforms), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='Pack3DDetInputs', keys=['img', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +test_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + num_views=6, + backend_args=backend_args), + dict(type='MultiViewWrapper', transforms=test_transforms), + dict(type='Pack3DDetInputs', keys=['img']) +] + +metainfo = dict(classes=class_names) +data_prefix = dict( + pts='', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT') + +train_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_train.pkl', + pipeline=train_pipeline, + load_type='frame_based', + metainfo=metainfo, + modality=input_modality, + test_mode=False, + data_prefix=data_prefix, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # 
and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + load_interval = 2, + backend_args=backend_args)) + +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_val.pkl', + load_type='frame_based', + pipeline=test_pipeline, + metainfo=metainfo, + modality=input_modality, + test_mode=True, + data_prefix=data_prefix, + box_type_3d='LiDAR', + backend_args=backend_args)) + +test_dataloader = val_dataloader + +val_evaluator = dict( + type='NuScenesMetric', + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=2e-4, weight_decay=0.01), + paramwise_cfg=dict(custom_keys={'img_backbone': dict(lr_mult=0.1)}), + clip_grad=dict(max_norm=35, norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=24, + T_max=24, + eta_min_ratio=1e-3) +] + +total_epochs = 24 + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=total_epochs, val_interval=2) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', interval=1, max_keep_ckpts=1, save_last=True)) +load_from = 'pretrained/fcos3d.pth' + +# setuptools 65 downgrades to 58. +# In mmlab-node we use setuptools 61 but occurs NO errors +vis_backends = [dict(type='TensorboardVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/projects/DETR3D/configs/detr3d_r50_gridmask_halfdata.py b/projects/DETR3D/configs/detr3d_r50_gridmask_halfdata.py new file mode 100644 index 0000000..61bbab0 --- /dev/null +++ b/projects/DETR3D/configs/detr3d_r50_gridmask_halfdata.py @@ -0,0 +1,259 @@ +_base_ = [ + # 'mmdet3d::_base_/datasets/nus-3d.py', + 'mmdet3d::_base_/default_runtime.py' +] + +custom_imports = dict(imports=['projects.DETR3D.detr3d']) +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False) +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) +# this means type='DETR3D' will be processed as 'mmdet3d.DETR3D' +default_scope = 'mmdet3d' +model = dict( + type='DETR3D', + use_grid_mask=True, + data_preprocessor=dict( + type='Det3DDataPreprocessor', **img_norm_cfg, pad_size_divisor=32), + img_backbone=dict( + type='mmdet.ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + img_neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 1024, 2048], + 
out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='DETR3DHead', + num_query=900, + num_classes=10, + in_channels=256, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='Detr3DTransformer', + decoder=dict( + type='Detr3DTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='mmdet.DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', # mmcv. + embed_dims=256, + num_heads=8, + dropout=0.1), + dict( + type='Detr3DCrossAtten', + pc_range=point_cloud_range, + num_points=1, + embed_dims=256) + ], + feedforward_channels=512, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='mmdet.SinePositionalEncoding', + num_feats=128, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='mmdet.L1Loss', loss_weight=0.25), + loss_iou=dict(type='mmdet.GIoULoss', loss_weight=0.0)), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='mmdet.FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + # ↓ Fake cost. This is just to get compatible with DETR head + iou_cost=dict(type='mmdet.IoUCost', weight=0.0), + pc_range=point_cloud_range)))) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' + +test_transforms = [ + dict( + type='RandomResize3D', + scale=(1600, 900), + ratio_range=(1., 1.), + keep_ratio=True) +] +train_transforms = [dict(type='PhotoMetricDistortion3D')] + test_transforms + +backend_args = None +train_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + num_views=6, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False), + dict(type='MultiViewWrapper', transforms=train_transforms), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='Pack3DDetInputs', keys=['img', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +test_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + num_views=6, + backend_args=backend_args), + dict(type='MultiViewWrapper', transforms=test_transforms), + dict(type='Pack3DDetInputs', keys=['img']) +] + +metainfo = dict(classes=class_names) +data_prefix = dict( + pts='', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT') + +train_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_train.pkl', + pipeline=train_pipeline, + load_type='frame_based', + metainfo=metainfo, + 
modality=input_modality, + test_mode=False, + data_prefix=data_prefix, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + load_interval = 2, + backend_args=backend_args)) + +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='nuscenes_infos_val.pkl', + load_type='frame_based', + pipeline=test_pipeline, + metainfo=metainfo, + modality=input_modality, + test_mode=True, + data_prefix=data_prefix, + box_type_3d='LiDAR', + backend_args=backend_args)) + +test_dataloader = val_dataloader + +val_evaluator = dict( + type='NuScenesMetric', + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + metric='bbox', + backend_args=backend_args) +test_evaluator = val_evaluator + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=2e-4, weight_decay=0.01), + paramwise_cfg=dict(custom_keys={'img_backbone': dict(lr_mult=0.1)}), + clip_grad=dict(max_norm=35, norm_type=2), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + by_epoch=False, + begin=0, + end=500), + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=24, + T_max=24, + eta_min_ratio=1e-3) +] + +total_epochs = 24 + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=total_epochs, val_interval=2) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', interval=1, max_keep_ckpts=1, save_last=True)) +load_from = 'pretrained/fcos3d.pth' + +# setuptools 65 downgrades to 58. 
+# In mmlab-node we use setuptools 61 but occurs NO errors +vis_backends = [dict(type='TensorboardVisBackend')] +visualizer = dict( + type='Det3DLocalVisualizer', vis_backends=vis_backends, name='visualizer') diff --git a/projects/DETR3D/configs/detr3d_vovnet_gridmask_trainval_cbgs.py b/projects/DETR3D/configs/detr3d_vovnet_gridmask_trainval_cbgs.py new file mode 100755 index 0000000..1fb8cb8 --- /dev/null +++ b/projects/DETR3D/configs/detr3d_vovnet_gridmask_trainval_cbgs.py @@ -0,0 +1,52 @@ +_base_ = ['./detr3d_r101_gridmask_cbgs.py'] + +custom_imports = dict(imports=['projects.DETR3D.detr3d']) + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + bgr_to_rgb=False) + +# this means type='DETR3D' will be processed as 'mmdet3d.DETR3D' +default_scope = 'mmdet3d' +model = dict( + type='DETR3D', + use_grid_mask=True, + data_preprocessor=dict( + type='Det3DDataPreprocessor', **img_norm_cfg, pad_size_divisor=32), + img_backbone=dict( + _delete_=True, + type='VoVNet', + spec_name='V-99-eSE', + norm_eval=True, + frozen_stages=1, + input_ch=3, + out_features=['stage2', 'stage3', 'stage4', 'stage5']), + img_neck=dict( + type='mmdet.FPN', + in_channels=[256, 512, 768, 1024], + out_channels=256, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True)) + +train_dataloader = dict( + dataset=dict( + type='CBGSDataset', + dataset=dict(ann_file='nuscenes_infos_trainval.pkl'))) + +test_dataloader = dict( + dataset=dict( + data_root='data/nuscenes-test', ann_file='nuscenes_infos_test.pkl')) + +test_evaluator = dict( + type='NuScenesMetric', + data_root='data/nuscenes-test', + ann_file='data/nuscenes-test/nuscenes_infos_test.pkl', + jsonfile_prefix='work_dirs/detr3d_vovnet_results_test', + format_only=True, + metric=[]) + +load_from = 'ckpts/dd3d_det_final.pth' +find_unused_parameters = True diff --git a/projects/DETR3D/detr3d/__init__.py b/projects/DETR3D/detr3d/__init__.py new file mode 100755 index 0000000..91de36d --- /dev/null +++ b/projects/DETR3D/detr3d/__init__.py @@ -0,0 +1,13 @@ +from .detr3d import DETR3D +from .detr3d_head import DETR3DHead +from .detr3d_transformer import (Detr3DCrossAtten, Detr3DTransformer, + Detr3DTransformerDecoder) +from .hungarian_assigner_3d import HungarianAssigner3D +from .match_cost import BBox3DL1Cost +from .nms_free_coder import NMSFreeCoder +from .vovnet import VoVNet +__all__ = [ + 'VoVNet', 'DETR3D', 'DETR3DHead', 'Detr3DTransformer', + 'Detr3DTransformerDecoder', 'Detr3DCrossAtten', 'HungarianAssigner3D', + 'BBox3DL1Cost', 'NMSFreeCoder' +] diff --git a/projects/DETR3D/detr3d/base.py b/projects/DETR3D/detr3d/base.py new file mode 100644 index 0000000..1a193b0 --- /dev/null +++ b/projects/DETR3D/detr3d/base.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import Dict, List, Tuple, Union + +import torch +from mmengine.model import BaseModel +from torch import Tensor + +from mmdet.structures import DetDataSample, OptSampleList, SampleList +from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig +from ..utils import samplelist_boxtype2tensor + +ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample], + Tuple[torch.Tensor], torch.Tensor] + + +class BaseDetector(BaseModel, metaclass=ABCMeta): + """Base class for detectors. + + Args: + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`BaseDataPreprocessor`. 
it usually includes, + ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``. + init_cfg (dict or ConfigDict, optional): the config to control the + initialization. Defaults to None. + """ + + def __init__(self, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + @property + def with_neck(self) -> bool: + """bool: whether the detector has a neck""" + return hasattr(self, 'neck') and self.neck is not None + + # TODO: these properties need to be carefully handled + # for both single stage & two stage detectors + @property + def with_shared_head(self) -> bool: + """bool: whether the detector has a shared head in the RoI Head""" + return hasattr(self, 'roi_head') and self.roi_head.with_shared_head + + @property + def with_bbox(self) -> bool: + """bool: whether the detector has a bbox head""" + return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) + or (hasattr(self, 'bbox_head') and self.bbox_head is not None)) + + @property + def with_mask(self) -> bool: + """bool: whether the detector has a mask head""" + return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) + or (hasattr(self, 'mask_head') and self.mask_head is not None)) + + def forward(self, + inputs: torch.Tensor, + data_samples: OptSampleList = None, + mode: str = 'tensor') -> ForwardResults: + """The unified entry for a forward process in both training and test. + + The method should accept three modes: "tensor", "predict" and "loss": + + - "tensor": Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`DetDataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle either back propagation or + parameter update, which are supposed to be done in :meth:`train_step`. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (list[:obj:`DetDataSample`], optional): A batch of + data samples that contain annotations and predictions. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="predict"``, return a list of :obj:`DetDataSample`. + - If ``mode="loss"``, return a dict of tensor. + """ + if mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + elif mode == 'tensor': + return self._forward(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}". ' + 'Only supports loss, predict and tensor mode') + + @abstractmethod + def loss(self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> Union[dict, tuple]: + """Calculate losses from a batch of inputs and data samples.""" + pass + + @abstractmethod + def predict(self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing.""" + pass + + @abstractmethod + def _forward(self, + batch_inputs: Tensor, + batch_data_samples: OptSampleList = None): + """Network forward process. + + Usually includes backbone, neck and head forward without any post- + processing. 
+ """ + pass + + @abstractmethod + def extract_feat(self, batch_inputs: Tensor): + """Extract features from images.""" + pass + + def add_pred_to_datasample(self, data_samples: SampleList, + results_list: InstanceList) -> SampleList: + """Add predictions to `DetDataSample`. + + Args: + data_samples (list[:obj:`DetDataSample`], optional): A batch of + data samples that contain annotations and predictions. + results_list (list[:obj:`InstanceData`]): Detection results of + each image. + + Returns: + list[:obj:`DetDataSample`]: Detection results of the + input images. Each DetDataSample usually contain + 'pred_instances'. And the ``pred_instances`` usually + contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). + """ + for data_sample, pred_instances in zip(data_samples, results_list): + data_sample.pred_instances = pred_instances + samplelist_boxtype2tensor(data_samples) + return data_samples diff --git a/projects/DETR3D/detr3d/bert.py b/projects/DETR3D/detr3d/bert.py new file mode 100644 index 0000000..0e6e3d7 --- /dev/null +++ b/projects/DETR3D/detr3d/bert.py @@ -0,0 +1,232 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict +from typing import Sequence + +import torch +from mmengine.model import BaseModel +from torch import nn + +try: + from transformers import AutoTokenizer, BertConfig + from transformers import BertModel as HFBertModel +except ImportError: + AutoTokenizer = None + HFBertModel = None + +from mmdet.registry import MODELS + + +def generate_masks_with_special_tokens_and_transfer_map( + tokenized, special_tokens_list): + """Generate attention mask between each pair of special tokens. + + Only token pairs in between two special tokens are attended to + and thus the attention mask for these pairs is positive. + + Args: + input_ids (torch.Tensor): input ids. Shape: [bs, num_token] + special_tokens_mask (list): special tokens mask. + + Returns: + Tuple(Tensor, Tensor): + - attention_mask is the attention mask between each tokens. + Only token pairs in between two special tokens are positive. + Shape: [bs, num_token, num_token]. + - position_ids is the position id of tokens within each valid sentence. + The id starts from 0 whenenver a special token is encountered. + Shape: [bs, num_token] + """ + input_ids = tokenized['input_ids'] + bs, num_token = input_ids.shape + # special_tokens_mask: + # bs, num_token. 1 for special tokens. 
0 for normal tokens + special_tokens_mask = torch.zeros((bs, num_token), + device=input_ids.device).bool() + + for special_token in special_tokens_list: + special_tokens_mask |= input_ids == special_token + + # idxs: each row is a list of indices of special tokens + idxs = torch.nonzero(special_tokens_mask) + + # generate attention mask and positional ids + attention_mask = ( + torch.eye(num_token, + device=input_ids.device).bool().unsqueeze(0).repeat( + bs, 1, 1)) + position_ids = torch.zeros((bs, num_token), device=input_ids.device) + previous_col = 0 + for i in range(idxs.shape[0]): + row, col = idxs[i] + if (col == 0) or (col == num_token - 1): + attention_mask[row, col, col] = True + position_ids[row, col] = 0 + else: + attention_mask[row, previous_col + 1:col + 1, + previous_col + 1:col + 1] = True + position_ids[row, previous_col + 1:col + 1] = torch.arange( + 0, col - previous_col, device=input_ids.device) + previous_col = col + + return attention_mask, position_ids.to(torch.long) + + +@MODELS.register_module() +class BertModel(BaseModel): + """BERT model for language embedding only encoder. + + Args: + name (str, optional): name of the pretrained BERT model from + HuggingFace. Defaults to bert-base-uncased. + max_tokens (int, optional): maximum number of tokens to be + used for BERT. Defaults to 256. + pad_to_max (bool, optional): whether to pad the tokens to max_tokens. + Defaults to True. + use_sub_sentence_represent (bool, optional): whether to use sub + sentence represent introduced in `Grounding DINO + `. Defaults to False. + special_tokens_list (list, optional): special tokens used to split + subsentence. It cannot be None when `use_sub_sentence_represent` + is True. Defaults to None. + add_pooling_layer (bool, optional): whether to adding pooling + layer in bert encoder. Defaults to False. + num_layers_of_embedded (int, optional): number of layers of + the embedded model. Defaults to 1. + use_checkpoint (bool, optional): whether to use gradient checkpointing. + Defaults to False. 
+ """ + + def __init__(self, + name: str = 'bert-base-uncased', + max_tokens: int = 256, + pad_to_max: bool = True, + use_sub_sentence_represent: bool = False, + special_tokens_list: list = None, + add_pooling_layer: bool = False, + num_layers_of_embedded: int = 1, + use_checkpoint: bool = False, + **kwargs) -> None: + + super().__init__(**kwargs) + self.max_tokens = max_tokens + self.pad_to_max = pad_to_max + + if AutoTokenizer is None: + raise RuntimeError( + 'transformers is not installed, please install it by: ' + 'pip install transformers.') + text_encoder_type='./text' + self.tokenizer = AutoTokenizer.from_pretrained(text_encoder_type) + self.language_backbone = nn.Sequential( + OrderedDict([('body', + BertEncoder( + name, + add_pooling_layer=add_pooling_layer, + num_layers_of_embedded=num_layers_of_embedded, + use_checkpoint=use_checkpoint))])) + + self.use_sub_sentence_represent = use_sub_sentence_represent + if self.use_sub_sentence_represent: + assert special_tokens_list is not None, \ + 'special_tokens should not be None \ + if use_sub_sentence_represent is True' + + self.special_tokens = self.tokenizer.convert_tokens_to_ids( + special_tokens_list) + + def forward(self, captions: Sequence[str], **kwargs) -> dict: + """Forward function.""" + device = next(self.language_backbone.parameters()).device + tokenized = self.tokenizer.batch_encode_plus( + captions, + max_length=self.max_tokens, + padding='max_length' if self.pad_to_max else 'longest', + return_special_tokens_mask=True, + return_tensors='pt', + truncation=True).to(device) + input_ids = tokenized.input_ids + if self.use_sub_sentence_represent: + attention_mask, position_ids = \ + generate_masks_with_special_tokens_and_transfer_map( + tokenized, self.special_tokens) + token_type_ids = tokenized['token_type_ids'] + + else: + attention_mask = tokenized.attention_mask + position_ids = None + token_type_ids = None + + tokenizer_input = { + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'position_ids': position_ids, + 'token_type_ids': token_type_ids + } + language_dict_features = self.language_backbone(tokenizer_input) + if self.use_sub_sentence_represent: + language_dict_features['position_ids'] = position_ids + language_dict_features[ + 'text_token_mask'] = tokenized.attention_mask.bool() + return language_dict_features + + +class BertEncoder(nn.Module): + """BERT encoder for language embedding. + + Args: + name (str): name of the pretrained BERT model from HuggingFace. + Defaults to bert-base-uncased. + add_pooling_layer (bool): whether to add a pooling layer. + num_layers_of_embedded (int): number of layers of the embedded model. + Defaults to 1. + use_checkpoint (bool): whether to use gradient checkpointing. + Defaults to False. 
+ """ + + def __init__(self, + name: str, + add_pooling_layer: bool = False, + num_layers_of_embedded: int = 1, + use_checkpoint: bool = False): + super().__init__() + if BertConfig is None: + raise RuntimeError( + 'transformers is not installed, please install it by: ' + 'pip install transformers.') + text_encoder_type='./text' + config = BertConfig.from_pretrained(text_encoder_type) + config.gradient_checkpointing = use_checkpoint + # only encoder + self.model = HFBertModel.from_pretrained( + text_encoder_type, add_pooling_layer=add_pooling_layer, config=config) + self.language_dim = config.hidden_size + self.num_layers_of_embedded = num_layers_of_embedded + + def forward(self, x) -> dict: + mask = x['attention_mask'] + + outputs = self.model( + input_ids=x['input_ids'], + attention_mask=mask, + position_ids=x['position_ids'], + token_type_ids=x['token_type_ids'], + output_hidden_states=True, + ) + + # outputs has 13 layers, 1 input layer and 12 hidden layers + encoded_layers = outputs.hidden_states[1:] + features = torch.stack(encoded_layers[-self.num_layers_of_embedded:], + 1).mean(1) + # language embedding has shape [len(phrase), seq_len, language_dim] + features = features / self.num_layers_of_embedded + if mask.dim() == 2: + embedded = features * mask.unsqueeze(-1).float() + else: + embedded = features + + results = { + 'embedded': embedded, + 'masks': mask, + 'hidden': encoded_layers[-1] + } + return results diff --git a/projects/DETR3D/detr3d/detr3d.py b/projects/DETR3D/detr3d/detr3d.py new file mode 100755 index 0000000..c0a84a0 --- /dev/null +++ b/projects/DETR3D/detr3d/detr3d.py @@ -0,0 +1,571 @@ +from typing import Dict, List, Optional +from torch.nn.init import normal_ +import torch +from torch import Tensor +import torch.nn as nn +from typing import Dict, Tuple, Union +from .glip import (create_positive_map, create_positive_map_label_to_token, + run_ner) +from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector +from mmdet3d.registry import MODELS +from mmdet3d.structures import Det3DDataSample +from mmdet3d.structures.bbox_3d.utils import get_lidar2img +from .grid_mask import GridMask +from mmdet.structures import OptSampleList +import torch.nn.functional as F +from ..layers.transformer.grounding_dino_layers import ( + GroundingDinoTransformerDecoder, GroundingDinoTransformerEncoder) +from mmdet.models.layers.positional_encoding import SinePositionalEncoding + +@MODELS.register_module() +class DETR3D(MVXTwoStageDetector): + """DETR3D: 3D Object Detection from Multi-view Images via 3D-to-2D Queries + + Args: + data_preprocessor (dict or ConfigDict, optional): The pre-process + config of :class:`Det3DDataPreprocessor`. Defaults to None. + use_grid_mask (bool) : Data augmentation. Whether to mask out some + grids during extract_img_feat. Defaults to False. + img_backbone (dict, optional): Backbone of extracting + images feature. Defaults to None. + img_neck (dict, optional): Neck of extracting + image features. Defaults to None. + pts_bbox_head (dict, optional): Bboxes head of + detr3d. Defaults to None. + train_cfg (dict, optional): Train config of model. + Defaults to None. + test_cfg (dict, optional): Train config of model. + Defaults to None. + init_cfg (dict, optional): Initialize config of + model. Defaults to None. 
+ """ + + def __init__(self, + data_preprocessor=None, + use_grid_mask=False, + img_backbone=None, + img_neck=None, + pts_bbox_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + language_model=None, + encoder=None, + positional_encoding_single=None, + ): + super(DETR3D, self).__init__( + img_backbone=img_backbone, + img_neck=img_neck, + pts_bbox_head=pts_bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor) + self.grid_mask = GridMask( + True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) + self.use_grid_mask = use_grid_mask + self.language_cfg = language_model + self.language_model = MODELS.build(self.language_cfg) + self.text_feat_map = nn.Linear( + self.language_model.language_backbone.body.language_dim, + 256, + bias=True) + self._special_tokens = '. ' + self.positional_encoding = SinePositionalEncoding( + **positional_encoding_single) + self.encoder = GroundingDinoTransformerEncoder(**encoder) + # self.level_embed = nn.Parameter( + # torch.Tensor(4, 256)) + nn.init.constant_(self.text_feat_map.bias.data, 0) + # normal_(self.level_embed) + nn.init.xavier_uniform_(self.text_feat_map.weight.data) + def extract_img_feat(self, img: Tensor, + batch_input_metas: List[dict]) -> List[Tensor]: + """Extract features from images. + + Args: + img (tensor): Batched multi-view image tensor with + shape (B, N, C, H, W). + batch_input_metas (list[dict]): Meta information of multiple inputs + in a batch. + + Returns: + list[tensor]: multi-level image features. + """ + + B = img.size(0) + if img is not None: + input_shape = img.shape[-2:] # bs nchw + # update real input shape of each single img + for img_meta in batch_input_metas: + img_meta.update(input_shape=input_shape) + + if img.dim() == 5 and img.size(0) == 1: + img.squeeze_() + elif img.dim() == 5 and img.size(0) > 1: + B, N, C, H, W = img.size() + img = img.view(B * N, C, H, W) + if self.use_grid_mask: + img = self.grid_mask(img) # mask out some grids + img_feats = self.img_backbone(img) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) + else: + return None + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + + img_feats_reshaped = [] + for img_feat in img_feats: + BN, C, H, W = img_feat.size() + img_feats_reshaped.append(img_feat.view(B, int(BN / B), C, H, W)) + return img_feats_reshaped + + def extract_feat(self, batch_inputs_dict: Dict, + batch_input_metas: List[dict]) -> List[Tensor]: + """Extract features from images. + + Refer to self.extract_img_feat() + """ + imgs = batch_inputs_dict.get('imgs', None) + img_feats = self.extract_img_feat(imgs, batch_input_metas) + return img_feats + + def _forward(self): + raise NotImplementedError('tensor mode is yet to add') + + # original forward_train + def loss(self, batch_inputs_dict: Dict[List, Tensor], + batch_data_samples: List[Det3DDataSample], + **kwargs) -> List[Det3DDataSample]: + """ + Args: + batch_inputs_dict (dict): The model input dict which include + `imgs` keys. + - imgs (torch.Tensor): Tensor of batched multi-view images. + It has shape (B, N, C, H ,W) + batch_data_samples (List[obj:`Det3DDataSample`]): The Data Samples + It usually includes information such as `gt_instance_3d`. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+
+        """
+        batch_input_metas = [item.metainfo for item in batch_data_samples]
+        batch_input_metas = self.add_lidar2img(batch_input_metas)
+        img_feats = self.extract_feat(batch_inputs_dict, batch_input_metas)
+        bsz = len(batch_data_samples)
+        # text prompt preprocessing: build one caption from the class names
+        text_prompts = [
+            'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
+            'motorcycle', 'pedestrian', 'traffic_cone', 'barrier']
+        batch_gt_instances_3d = [
+            item.gt_instances_3d for item in batch_data_samples
+        ]
+        new_text_prompts = []
+        positive_maps = []
+        tokenized, caption_string, tokens_positive, _ = \
+            self.get_tokens_and_prompts(
+                text_prompts, True)
+        new_text_prompts = [caption_string] * len(batch_data_samples)
+        gt_labels = [
+            data_sample.labels_3d
+            for data_sample in batch_gt_instances_3d
+        ]
+        for gt_label in gt_labels:
+            new_tokens_positive = [
+                tokens_positive[label] for label in gt_label
+            ]
+            _, positive_map = self.get_positive_map(
+                tokenized, new_tokens_positive)
+            positive_maps.append(positive_map)
+
+        text_dict = self.language_model(new_text_prompts)
+        # repeat the text features once per camera view (6 views)
+        for key, value in text_dict.items():
+            text_dict[key] = torch.cat([value] * 6, dim=0)
+        if self.text_feat_map is not None:
+            text_dict['embedded'] = self.text_feat_map(text_dict['embedded'])
+        # fuse image and text features with the grounding encoder
+        encoder_inputs_dict = self.pre_transformer(
+            img_feats, batch_data_samples)
+
+        memory = self.forward_encoder(
+            **encoder_inputs_dict, text_dict=text_dict)
+        del img_feats
+        img_feats = self.restore_img_feats(
+            memory, encoder_inputs_dict['spatial_shapes'],
+            encoder_inputs_dict['level_start_index'])
+        outs = self.pts_bbox_head(img_feats, batch_input_metas, **kwargs)
+        loss_inputs = [batch_gt_instances_3d, outs]
+        losses_pts = self.pts_bbox_head.loss_by_feat(*loss_inputs)
+
+        return losses_pts
+
+    # original simple_test
+    def predict(self, batch_inputs_dict: Dict[str, Optional[Tensor]],
+                batch_data_samples: List[Det3DDataSample],
+                **kwargs) -> List[Det3DDataSample]:
+        """Forward of testing.
+
+        Args:
+            batch_inputs_dict (dict): The model input dict which includes
+                the `imgs` key.
+
+                - imgs (torch.Tensor): Tensor of batched multi-view images.
+                    It has shape (B, N, C, H, W)
+            batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
+                Samples. It usually includes information such as
+                `gt_instance_3d`.
+
+        Returns:
+            list[:obj:`Det3DDataSample`]: Detection results of the
+            input sample. Each Det3DDataSample usually contains
+            'pred_instances_3d'. And the ``pred_instances_3d`` usually
+            contains the following keys.
+
+            - scores_3d (Tensor): Classification scores, has a shape
+                (num_instances, )
+            - labels_3d (Tensor): Labels of bboxes, has a shape
+                (num_instances, ).
+            - bbox_3d (:obj:`BaseInstance3DBoxes`): Prediction of bboxes,
+                contains a tensor with shape (num_instances, 9).
+        """
+        batch_input_metas = [item.metainfo for item in batch_data_samples]
+        batch_input_metas = self.add_lidar2img(batch_input_metas)
+        img_feats = self.extract_feat(batch_inputs_dict, batch_input_metas)
+        outs = self.pts_bbox_head(img_feats, batch_input_metas)
+
+        results_list_3d = self.pts_bbox_head.predict_by_feat(
+            outs, batch_input_metas, **kwargs)
+
+        # change the bboxes' format
+        detsamples = self.add_pred_to_datasample(batch_data_samples,
+                                                 results_list_3d)
+        return detsamples
+
+    # may need speed-up
+    def add_lidar2img(self, batch_input_metas: List[Dict]) -> List[Dict]:
+        """Add the 'lidar2img' transformation matrix to batch_input_metas.
+
+        Args:
+            batch_input_metas (list[dict]): Meta information of multiple inputs
+                in a batch.
+
+        Returns:
+            batch_input_metas (list[dict]): Meta info with lidar2img added
+        """
+        for meta in batch_input_metas:
+            l2i = list()
+            for i in range(len(meta['cam2img'])):
+                c2i = torch.tensor(meta['cam2img'][i]).double()
+                l2c = torch.tensor(meta['lidar2cam'][i]).double()
+                l2i.append(get_lidar2img(c2i, l2c).float().numpy())
+            meta['lidar2img'] = l2i
+        return batch_input_metas
+
+    def get_tokens_and_prompts(
+            self,
+            original_caption: Union[str, list, tuple],
+            custom_entities: bool = False) -> Tuple[dict, str, list]:
+        """Get the tokens positive and prompts for the caption."""
+        if isinstance(original_caption, (list, tuple)) or custom_entities:
+            if custom_entities and isinstance(original_caption, str):
+                original_caption = original_caption.strip(self._special_tokens)
+                original_caption = original_caption.split(self._special_tokens)
+                original_caption = list(
+                    filter(lambda x: len(x) > 0, original_caption))
+
+            caption_string = ''
+            tokens_positive = []
+            for idx, word in enumerate(original_caption):
+                tokens_positive.append(
+                    [[len(caption_string),
+                      len(caption_string) + len(word)]])
+                caption_string += word
+                caption_string += self._special_tokens
+            # NOTE: Tokenizer in Grounding DINO is different from
+            # that in GLIP. The tokenizer in GLIP will pad the
+            # caption_string to max_length, while the tokenizer
+            # in Grounding DINO will not.
+            tokenized = self.language_model.tokenizer(
+                [caption_string],
+                padding='max_length'
+                if self.language_model.pad_to_max else 'longest',
+                return_tensors='pt')
+            entities = original_caption
+        else:
+            if not original_caption.endswith('.'):
+                original_caption = original_caption + self._special_tokens
+            # NOTE: Tokenizer in Grounding DINO is different from
+            # that in GLIP. The tokenizer in GLIP will pad the
+            # caption_string to max_length, while the tokenizer
+            # in Grounding DINO will not.
+            tokenized = self.language_model.tokenizer(
+                [original_caption],
+                padding='max_length'
+                if self.language_model.pad_to_max else 'longest',
+                return_tensors='pt')
+            tokens_positive, noun_phrases = run_ner(original_caption)
+            entities = noun_phrases
+            caption_string = original_caption
+
+        return tokenized, caption_string, tokens_positive, entities
+
+    def get_positive_map(self, tokenized, tokens_positive):
+        positive_map = create_positive_map(tokenized, tokens_positive)
+        positive_map_label_to_token = create_positive_map_label_to_token(
+            positive_map, plus=1)
+        return positive_map_label_to_token, positive_map
+
+    def pre_transformer(
+            self,
+            mlvl_feats: Tuple[Tensor],
+            batch_data_samples: OptSampleList = None) -> Dict:
+        """Process image features before feeding them to the transformer.
+
+        The forward procedure of the transformer is defined as:
+        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
+        More details can be found at `TransformerDetector.forward_transformer`
+        in `mmdet/detector/base_detr.py`.
+
+        Args:
+            mlvl_feats (tuple[Tensor]): Multi-level features that may have
+                different resolutions, output from neck. Each feature has
+                shape (bs, num_cams, dim, h_lvl, w_lvl), where 'lvl' means
+                'layer'.
+            batch_data_samples (list[:obj:`DetDataSample`], optional): The
+                batch data samples. It usually includes information such
+                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
+                Defaults to None.
+
+        Returns:
+            dict: The keyword args dictionary of `self.forward_encoder()`,
+            which includes 'feat', 'feat_mask', 'spatial_shapes',
+            'level_start_index' and 'valid_ratios'.
+        """
+        batch_size = mlvl_feats[0].size(0)
+        num_cams = mlvl_feats[0].size(1)
+        # construct binary masks for the transformer.
+        assert batch_data_samples is not None
+        batch_input_shape = batch_data_samples[0].batch_input_shape
+        input_img_h, input_img_w = batch_input_shape
+        img_shape_list = [sample.img_shape for sample in batch_data_samples]
+        same_shape_flag = all([
+            s[0] == input_img_h and s[1] == input_img_w for s in img_shape_list
+        ])
+        # support torch2onnx without feeding masks
+        if torch.onnx.is_in_onnx_export() or same_shape_flag:
+            mlvl_masks = []
+            mlvl_pos_embeds = []
+            for feat in mlvl_feats:
+                mlvl_masks.append(None)
+                mlvl_pos_embeds.append(
+                    self.positional_encoding(None, input=feat))
+        else:
+            masks = mlvl_feats[0].new_ones(
+                (batch_size, num_cams, input_img_h, input_img_w))
+            for img_id in range(batch_size):
+                for cam in range(num_cams):
+                    img_h, img_w = img_shape_list[img_id][cam]
+                    masks[img_id, cam, :img_h, :img_w] = 0
+            # NOTE following the official DETR repo, non-zero values
+            # represent ignored positions, while zero values mean valid
+            # positions.
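+            # NOTE unlike the 2D detector, the padding mask is built per
+            # camera view, since each view in a sample may have its own
+            # valid (img_h, img_w) region.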
+ + mlvl_masks = [] + mlvl_pos_embeds = [] + for feat in mlvl_feats: + mlvl_masks.append( + F.interpolate(masks, size=feat.shape[-2:]).to( + torch.bool)) + tmp=[] + for i in range(batch_size): + tmp.append(self.positional_encoding(mlvl_masks[-1][0]).unsqueeze(0)) + concatenated = torch.cat(tmp, dim=0) + mlvl_pos_embeds.append(concatenated) + + feat_flatten = [] + lvl_pos_embed_flatten = [] + mask_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + batch_size, n,c, h, w = feat.shape + spatial_shape = torch._shape_as_tensor(feat)[3:].to(feat.device) + # [bs, c, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl, c] + feat = feat.view(batch_size,n,c, -1).permute(0, 1, 3, 2) + pos_embed = pos_embed.view(batch_size,n, c, -1).permute(0, 1, 3, 2) + # lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, 1, -1) + # [bs, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl] + if mask is not None: + mask = mask.flatten(2) + + feat_flatten.append(feat) + # lvl_pos_embed_flatten.append(lvl_pos_embed) + mask_flatten.append(mask) + spatial_shapes.append(spatial_shape) + + # (bs, num_feat_points, dim) + feat_flatten = torch.cat(feat_flatten, 2) + # lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 2) + # (bs, num_feat_points), where num_feat_points = sum_lvl(h_lvl*w_lvl) + if mask_flatten[0] is not None: + mask_flatten = torch.cat(mask_flatten, 2) + else: + mask_flatten = None + + # (num_level, 2) + spatial_shapes = torch.cat(spatial_shapes).view(-1, 2) + level_start_index = torch.cat(( + spatial_shapes.new_zeros((1, )), # (num_level) + spatial_shapes.prod(1).cumsum(0)[:-1])) + if mlvl_masks[0] is not None: + tmp=[] + for i in range(batch_size): + for m in mlvl_masks: + tmp.append(self.get_valid_ratio(m[i])) + valid_ratios = torch.stack(tmp,1).view(batch_size,num_cams,4,2) + encoder_inputs_dict = dict( + feat=feat_flatten, + feat_mask=mask_flatten, + # feat_pos=lvl_pos_embed_flatten, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios) + + return encoder_inputs_dict + + def forward_encoder(self, feat: Tensor, feat_mask: Tensor, + spatial_shapes: Tensor, + level_start_index: Tensor, valid_ratios: Tensor, + text_dict: Dict) -> Dict: + text_token_mask = text_dict['text_token_mask'] + memory, _ = self.encoder( + query=feat, + # query_pos=feat_pos, + key_padding_mask=feat_mask, # for self_attn + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + # for text encoder + memory_text=text_dict['embedded'], + text_attention_mask=~text_token_mask, + position_ids=text_dict['position_ids'], + text_self_attention_masks=text_dict['masks']) + # encoder_outputs_dict = dict( + # memory=memory, + # memory_mask=feat_mask, + # spatial_shapes=spatial_shapes, + # memory_text=memory_text, + # text_token_mask=text_token_mask) + # return encoder_outputs_dict + return memory + @staticmethod + def get_valid_ratio(mask: Tensor) -> Tensor: + """Get the valid radios of feature map in a level. + + .. code:: text + + |---> valid_W <---| + ---+-----------------+-----+--- + A | | | A + | | | | | + | | | | | + valid_H | | | | + | | | | H + | | | | | + V | | | | + ---+-----------------+ | | + | | V + +-----------------------+--- + |---------> W <---------| + + The valid_ratios are defined as: + r_h = valid_H / H, r_w = valid_W / W + They are the factors to re-normalize the relative coordinates of the + image to the relative coordinates of the current level feature map. 
+
+        Args:
+            mask (Tensor): Binary mask of a feature map, has shape (bs, H, W).
+
+        Returns:
+            Tensor: valid ratios [r_w, r_h] of a feature map, has shape
+            (bs, 2).
+        """
+        _, H, W = mask.shape
+        valid_H = torch.sum(~mask[:, :, 0], 1)
+        valid_W = torch.sum(~mask[:, 0, :], 1)
+        valid_ratio_h = valid_H.float() / H
+        valid_ratio_w = valid_W.float() / W
+        valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
+        return valid_ratio
+
+    def restore_img_feats(self, memory, spatial_shapes, level_start_index):
+        """Recover per-level image features of shape (B, N, C, H_lvl, W_lvl)
+        from the flattened encoder output."""
+        img_feats = []
+        for i in range(len(spatial_shapes)):
+            # spatial size of the current level
+            spatial_shape = spatial_shapes[i]
+            height, width = spatial_shape
+
+            # start and end indices of the current level
+            start_index = level_start_index[i]
+            if i < len(level_start_index) - 1:
+                end_index = level_start_index[i + 1]
+            else:
+                end_index = memory.shape[2]
+
+            # slice memory to recover the image feature of the current level
+            img_feat = memory[:, :, start_index:end_index]
+            # NOTE batch size 1, 6 camera views and 256 channels are
+            # hardcoded here
+            img_feat = img_feat.reshape(1, 6, height, width,
+                                        256).permute(0, 1, 4, 2, 3)
+            img_feats.append(img_feat)
+
+        return img_feats
\ No newline at end of file
diff --git a/projects/DETR3D/detr3d/detr3d_head.py b/projects/DETR3D/detr3d/detr3d_head.py
new file mode 100755
index 0000000..d4143ad
--- /dev/null
+++ b/projects/DETR3D/detr3d/detr3d_head.py
@@ -0,0 +1,447 @@
+import copy
+from typing import Dict, List, Tuple
+
+import torch
+import torch.nn as nn
+from mmcv.cnn import Linear
+from mmdet.models.dense_heads import DETRHead
+from mmdet.models.layers import inverse_sigmoid
+from mmdet.models.utils import multi_apply
+from mmdet.utils import InstanceList, OptInstanceList, reduce_mean
+from mmengine.model import bias_init_with_prob
+from mmengine.structures import InstanceData
+from torch import Tensor
+
+from mmdet3d.registry import MODELS, TASK_UTILS
+from .util import normalize_bbox
+
+
+@MODELS.register_module()
+class DETR3DHead(DETRHead):
+    """Head of DETR3D.
+
+    Args:
+        with_box_refine (bool): Whether to refine the reference points
+            in the decoder. Defaults to False.
+        as_two_stage (bool): Whether to generate the proposal from
+            the outputs of encoder.
+        transformer (obj:`ConfigDict`): ConfigDict is used for building
+            the Encoder and Decoder.
+ bbox_coder (obj:`ConfigDict`): Configs to build the bbox coder + num_cls_fcs (int) : the number of layers in cls and reg branch + code_weights (List[double]) : loss weights of + (cx,cy,l,w,cz,h,sin(φ),cos(φ),v_x,v_y) + code_size (int) : size of code_weights + """ + + def __init__( + self, + *args, + with_box_refine=False, + as_two_stage=False, + transformer=None, + bbox_coder=None, + num_cls_fcs=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + code_size=10, + **kwargs): + self.with_box_refine = with_box_refine + self.as_two_stage = as_two_stage + if self.as_two_stage: + transformer['as_two_stage'] = self.as_two_stage + self.code_size = code_size + self.code_weights = code_weights + + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.pc_range = self.bbox_coder.pc_range + self.num_cls_fcs = num_cls_fcs - 1 + super(DETR3DHead, self).__init__( + *args, transformer=transformer, **kwargs) + # DETR sampling=False, so use PseudoSampler, format the result + sampler_cfg = dict(type='PseudoSampler') + self.sampler = TASK_UTILS.build(sampler_cfg) + + self.code_weights = nn.Parameter( + torch.tensor(self.code_weights, requires_grad=False), + requires_grad=False) + + # forward_train -> loss + def _init_layers(self): + """Initialize classification branch and regression branch of head.""" + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + cls_branch.append(Linear(self.embed_dims, self.cls_out_channels)) + fc_cls = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + # last reg_branch is used to generate proposal from + # encode feature map when as_two_stage is True. + num_pred = (self.transformer.decoder.num_layers + 1) if \ + self.as_two_stage else self.transformer.decoder.num_layers + + if self.with_box_refine: + self.cls_branches = _get_clones(fc_cls, num_pred) + self.reg_branches = _get_clones(reg_branch, num_pred) + else: + self.cls_branches = nn.ModuleList( + [fc_cls for _ in range(num_pred)]) + self.reg_branches = nn.ModuleList( + [reg_branch for _ in range(num_pred)]) + + if not self.as_two_stage: + self.query_embedding = nn.Embedding(self.num_query, + self.embed_dims * 2) + + def init_weights(self): + """Initialize weights of the DeformDETR head.""" + self.transformer.init_weights() + if self.loss_cls.use_sigmoid: + bias_init = bias_init_with_prob(0.01) + for m in self.cls_branches: + nn.init.constant_(m[-1].bias, bias_init) + + def forward(self, mlvl_feats: List[Tensor], img_metas: List[Dict], + **kwargs) -> Dict[str, Tensor]: + """Forward function. + + Args: + mlvl_feats (List[Tensor]): Features from the upstream + network, each is a 5D-tensor with shape + (B, N, C, H, W). + Returns: + all_cls_scores (Tensor): Outputs from the classification head, + shape [nb_dec, bs, num_query, cls_out_channels]. Note + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression + head with normalized coordinate format + (cx, cy, l, w, cz, h, sin(φ), cos(φ), vx, vy). + Shape [nb_dec, bs, num_query, 10]. 
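+            enc_cls_scores (None): Placeholder for two-stage proposal
+                scores; always ``None`` in this head's output dict.
+            enc_bbox_preds (None): Placeholder for two-stage proposal
+                boxes; always ``None`` in this head's output dict.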
+ """ + query_embeds = self.query_embedding.weight + hs, init_reference, inter_references = self.transformer( + mlvl_feats, + query_embeds, + reg_branches=self.reg_branches if self.with_box_refine else None, + img_metas=img_metas, + **kwargs) + hs = hs.permute(0, 2, 1, 3) + outputs_classes = [] + outputs_coords = [] + + for lvl in range(hs.shape[0]): + if lvl == 0: + reference = init_reference + else: + reference = inter_references[lvl - 1] + reference = inverse_sigmoid(reference) + outputs_class = self.cls_branches[lvl](hs[lvl]) + tmp = self.reg_branches[lvl](hs[lvl]) # shape: ([B, num_q, 10]) + # TODO: check the shape of reference + assert reference.shape[-1] == 3 + tmp[..., 0:2] += reference[..., 0:2] + tmp[..., 0:2] = tmp[..., 0:2].sigmoid() + tmp[..., 4:5] += reference[..., 2:3] + tmp[..., 4:5] = tmp[..., 4:5].sigmoid() + + tmp[..., 0:1] = \ + tmp[..., 0:1] * (self.pc_range[3] - self.pc_range[0]) \ + + self.pc_range[0] + tmp[..., 1:2] = \ + tmp[..., 1:2] * (self.pc_range[4] - self.pc_range[1]) \ + + self.pc_range[1] + tmp[..., 4:5] = \ + tmp[..., 4:5] * (self.pc_range[5] - self.pc_range[2]) \ + + self.pc_range[2] + + # TODO: check if using sigmoid + outputs_coord = tmp + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + + outputs_classes = torch.stack(outputs_classes) + outputs_coords = torch.stack(outputs_coords) + outs = { + 'all_cls_scores': outputs_classes, + 'all_bbox_preds': outputs_coords, + 'enc_cls_scores': None, + 'enc_bbox_preds': None, + } + return outs + + def _get_target_single( + self, + cls_score: Tensor, # [query, num_cls] + bbox_pred: Tensor, # [query, 10] + gt_instances_3d: InstanceList) -> Tuple[Tensor, ...]: + """Compute regression and classification targets for a single image.""" + # turn bottm center into gravity center + gt_bboxes = gt_instances_3d.bboxes_3d # [num_gt, 9] + gt_bboxes = torch.cat( + (gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), dim=1) + + gt_labels = gt_instances_3d.labels_3d # [num_gt, num_cls] + # assigner and sampler: PseudoSampler + assign_result = self.assigner.assign( + bbox_pred, cls_score, gt_bboxes, gt_labels, gt_bboxes_ignore=None) + sampling_result = self.sampler.sample( + assign_result, InstanceData(priors=bbox_pred), + InstanceData(bboxes_3d=gt_bboxes)) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label targets + num_bboxes = bbox_pred.size(0) + labels = gt_bboxes.new_full((num_bboxes, ), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_bboxes.new_ones(num_bboxes) + + # bbox targets + # theta in gt_bbox here is still a single scalar + bbox_targets = torch.zeros_like(bbox_pred)[..., :self.code_size - 1] + bbox_weights = torch.zeros_like(bbox_pred) + # only matched query will learn from bbox coord + bbox_weights[pos_inds] = 1.0 + + # fix empty gt bug in multi gpu training + if sampling_result.pos_gt_bboxes.shape[0] == 0: + sampling_result.pos_gt_bboxes = \ + sampling_result.pos_gt_bboxes.reshape(0, self.code_size - 1) + + bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds) + + def get_targets( + self, + batch_cls_scores: List[Tensor], # bs[num_q,num_cls] + batch_bbox_preds: List[Tensor], # bs[num_q,10] + batch_gt_instances_3d: InstanceList) -> tuple(): + """"Compute regression and classification targets for a batch image for + a single decoder layer. 
+ + Args: + batch_cls_scores (list[Tensor]): Box score logits from a single + decoder layer for each image with shape [num_query, + cls_out_channels]. + batch_bbox_preds (list[Tensor]): Sigmoid outputs from a single + decoder layer for each image, with normalized coordinate + (cx,cy,l,w,cz,h,sin(φ),cos(φ),v_x,v_y) and + shape [num_query, 10] + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes_3d``、``labels_3d``. + Returns: + tuple: a tuple containing the following targets. + - labels_list (list[Tensor]): Labels for all images. + - label_weights_list (list[Tensor]): Label weights for all \ + images. + - bbox_targets_list (list[Tensor]): BBox targets for all \ + images. + - bbox_weights_list (list[Tensor]): BBox weights for all \ + images. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + """ + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + pos_inds_list, neg_inds_list) = multi_apply(self._get_target_single, + batch_cls_scores, + batch_bbox_preds, + batch_gt_instances_3d) + + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + + def loss_by_feat_single( + self, + batch_cls_scores: Tensor, # bs,num_q,num_cls + batch_bbox_preds: Tensor, # bs,num_q,10 + batch_gt_instances_3d: InstanceList + ) -> Tuple[Tensor, Tensor]: + """"Loss function for outputs from a single decoder layer of a single + feature level. + + Args: + batch_cls_scores (Tensor): Box score logits from a single + decoder layer for batched images with shape [num_query, + cls_out_channels]. + batch_bbox_preds (Tensor): Sigmoid outputs from a single + decoder layer for batched images, with normalized coordinate + (cx,cy,l,w,cz,h,sin(φ),cos(φ),v_x,v_y) and + shape [num_query, 10] + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually has ``bboxes_3d``,``labels_3d``. + Returns: + tulple(Tensor, Tensor): cls and reg loss for outputs from + a single decoder layer. 
+ """ + batch_size = batch_cls_scores.size(0) # batch size + cls_scores_list = [batch_cls_scores[i] for i in range(batch_size)] + bbox_preds_list = [batch_bbox_preds[i] for i in range(batch_size)] + cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, + batch_gt_instances_3d) + + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + labels = torch.cat(labels_list, 0) + label_weights = torch.cat(label_weights_list, 0) + bbox_targets = torch.cat(bbox_targets_list, 0) + bbox_weights = torch.cat(bbox_weights_list, 0) + + # classification loss + batch_cls_scores = batch_cls_scores.reshape(-1, self.cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.bg_cls_weight + if self.sync_cls_avg_factor: + cls_avg_factor = reduce_mean( + batch_cls_scores.new_tensor([cls_avg_factor])) + + cls_avg_factor = max(cls_avg_factor, 1) + loss_cls = self.loss_cls( + batch_cls_scores, labels, label_weights, avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes across all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + # regression L1 loss + batch_bbox_preds = batch_bbox_preds.reshape(-1, + batch_bbox_preds.size(-1)) + normalized_bbox_targets = normalize_bbox(bbox_targets, self.pc_range) + # neg_query is all 0, log(0) is NaN + isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1) + bbox_weights = bbox_weights * self.code_weights + + loss_bbox = self.loss_bbox( + batch_bbox_preds[isnotnan, :self.code_size], + normalized_bbox_targets[isnotnan, :self.code_size], + bbox_weights[isnotnan, :self.code_size], + avg_factor=num_total_pos) + + loss_cls = torch.nan_to_num(loss_cls) + loss_bbox = torch.nan_to_num(loss_bbox) + return loss_cls, loss_bbox + + # original loss() + def loss_by_feat( + self, + batch_gt_instances_3d: InstanceList, + preds_dicts: Dict[str, Tensor], + batch_gt_instances_3d_ignore: OptInstanceList = None) -> Dict: + """Compute loss of the head. + + Args: + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、` + `labels_3d``、``depths``、``centers_2d`` and attributes. + gt_instance. It usually includes ``bboxes``、``labels``. + batch_gt_instances_3d_ignore (list[:obj:`InstanceData`], Optional): + NOT supported. + Defaults to None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert batch_gt_instances_3d_ignore is None, \ + f'{self.__class__.__name__} only supports ' \ + f'for batch_gt_instances_3d_ignore setting to None.' + all_cls_scores = preds_dicts[ + 'all_cls_scores'] # num_dec,bs,num_q,num_cls + all_bbox_preds = preds_dicts['all_bbox_preds'] # num_dec,bs,num_q,10 + enc_cls_scores = preds_dicts['enc_cls_scores'] + enc_bbox_preds = preds_dicts['enc_bbox_preds'] + + # calculate loss for each decoder layer + num_dec_layers = len(all_cls_scores) + batch_gt_instances_3d_list = [ + batch_gt_instances_3d for _ in range(num_dec_layers) + ] + losses_cls, losses_bbox = multi_apply(self.loss_by_feat_single, + all_cls_scores, all_bbox_preds, + batch_gt_instances_3d_list) + + loss_dict = dict() + # loss of proposal generated from encode feature map. 
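+        # The proposal (encoder) losses below are only computed when
+        # ``enc_cls_scores`` is not None, i.e. in a two-stage setup;
+        # ``forward`` above currently returns None for both entries, so the
+        # branch is skipped.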
+ if enc_cls_scores is not None: + enc_loss_cls, enc_losses_bbox = self.loss_by_feat_single( + enc_cls_scores, enc_bbox_preds, batch_gt_instances_3d_list) + loss_dict['enc_loss_cls'] = enc_loss_cls + loss_dict['enc_loss_bbox'] = enc_losses_bbox + + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_bbox'] = losses_bbox[-1] + + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1], losses_bbox[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + num_dec_layer += 1 + return loss_dict + + def predict_by_feat(self, + preds_dicts, + img_metas, + rescale=False) -> InstanceList: + """Transform network output for a batch into bbox predictions. + + Args: + preds_dicts (Dict[str, Tensor]): + -all_cls_scores (Tensor): Outputs from the classification head, + shape [nb_dec, bs, num_query, cls_out_channels]. Note + cls_out_channels should includes background. + -all_bbox_preds (Tensor): Sigmoid outputs from the regression + head with normalized coordinate format + (cx, cy, l, w, cz, h, rot_sine, rot_cosine, v_x, v_y). + Shape [nb_dec, bs, num_query, 10]. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + + Returns: + list[:obj:`InstanceData`]: Object detection results of each image + after the post process. Each item usually contains following keys. + + - scores_3d (Tensor): Classification scores, has a shape + (num_instance, ) + - labels_3d (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes_3d (Tensor): Contains a tensor with shape + (num_instances, C), where C >= 7. + """ + # sinθ & cosθ ---> θ + preds_dicts = self.bbox_coder.decode(preds_dicts) + num_samples = len(preds_dicts) # batch size + ret_list = [] + for i in range(num_samples): + results = InstanceData() + preds = preds_dicts[i] + bboxes = preds['bboxes'] + bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + bboxes = img_metas[i]['box_type_3d'](bboxes, self.code_size - 1) + + results.bboxes_3d = bboxes + results.scores_3d = preds['scores'] + results.labels_3d = preds['labels'] + ret_list.append(results) + return ret_list diff --git a/projects/DETR3D/detr3d/detr3d_transformer.py b/projects/DETR3D/detr3d/detr3d_transformer.py new file mode 100755 index 0000000..dfe0765 --- /dev/null +++ b/projects/DETR3D/detr3d/detr3d_transformer.py @@ -0,0 +1,447 @@ +import warnings + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks.transformer import (TransformerLayerSequence, + build_transformer_layer_sequence) +from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention +from mmengine.model import BaseModule, constant_init, xavier_init + +from mmdet3d.registry import MODELS + + +def inverse_sigmoid(x, eps=1e-5): + """Inverse function of sigmoid. + + Args: + x (Tensor): The tensor to do the + inverse. + eps (float): EPS avoid numerical + overflow. Defaults 1e-5. + Returns: + Tensor: The x has passed the inverse + function of sigmoid, has same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +@MODELS.register_module() +class Detr3DTransformer(BaseModule): + """Implements the DETR3D transformer. + + Args: + as_two_stage (bool): Generate query from encoder features. 
+ Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + num_cams (int): Number of cameras in the dataset. + Default: 6 in NuScenes Det. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. + """ + + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + decoder=None, + **kwargs): + super(Detr3DTransformer, self).__init__(**kwargs) + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = self.decoder.embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.two_stage_num_proposals = two_stage_num_proposals + self.init_layers() + + def init_layers(self): + """Initialize layers of the Detr3DTransformer.""" + self.reference_points = nn.Linear(self.embed_dims, 3) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MultiScaleDeformableAttention) or isinstance( + m, Detr3DCrossAtten): + m.init_weight() + xavier_init(self.reference_points, distribution='uniform', bias=0.) + + def forward(self, mlvl_feats, query_embed, reg_branches=None, **kwargs): + """Forward function for `Detr3DTransformer`. + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + (B, N, C, H_lvl, W_lvl). + query_embed (Tensor): The query positional and semantic embedding + for decoder, with shape [num_query, c+c]. + mlvl_pos_embeds (list(Tensor)): The positional encoding + of feats from different level, has the shape + [bs, N, embed_dims, h, w]. It is unused here. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when `with_box_refine` is True. Default to None. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape + (num_dec_layers, bs, num_query, embed_dims), else has + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference + points in decoder, has shape + (num_dec_layers, bs, num_query, embed_dims) + """ + assert query_embed is not None + bs = mlvl_feats[0].size(0) + query_pos, query = torch.split(query_embed, self.embed_dims, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) # [bs,num_q,c] + query = query.unsqueeze(0).expand(bs, -1, -1) # [bs,num_q,c] + reference_points = self.reference_points(query_pos) + reference_points = reference_points.sigmoid() + init_reference_out = reference_points + + # decoder + query = query.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=mlvl_feats, + query_pos=query_pos, + reference_points=reference_points, + reg_branches=reg_branches, + **kwargs) + + inter_references_out = inter_references + return inter_states, init_reference_out, inter_references_out + + +@MODELS.register_module() +class Detr3DTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR3D transformer. + + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. 
+ """ + + def __init__(self, *args, return_intermediate=False, **kwargs): + super(Detr3DTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + + def forward(self, + query, + *args, + reference_points=None, + reg_branches=None, + **kwargs): + """Forward function for `Detr3DTransformerDecoder`. + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape self.reference_points = + nn.Linear(self.embed_dims, 3) + reg_branch: (obj:`nn.ModuleList`): Used for + refining the regression results. Only would + be passed when with_box_refine is True, + otherwise would be passed a `None`. + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + output = query + intermediate = [] + intermediate_reference_points = [] + for lid, layer in enumerate(self.layers): # iterative refinement + reference_points_input = reference_points + output = layer( + output, + *args, + reference_points=reference_points_input, + **kwargs) + output = output.permute(1, 0, 2) + if reg_branches is not None: + tmp = reg_branches[lid](output) + + assert reference_points.shape[-1] == 3 + + new_reference_points = torch.zeros_like(reference_points) + new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid( + reference_points[..., :2]) + new_reference_points[..., + 2:3] = tmp[..., 4:5] + inverse_sigmoid( + reference_points[..., 2:3]) + new_reference_points = new_reference_points.sigmoid() + + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@MODELS.register_module() +class Detr3DCrossAtten(BaseModule): + """An attention module used in Detr3d. + + Args: + embed_dims (int): The embedding dimension of Attention. + Default: 256. + num_heads (int): Parallel attention heads. Default: 64. + num_levels (int): The number of feature map used in + Attention. Default: 4. + num_points (int): The number of sampling points for + each query in each head. Default: 4. + im2col_step (int): The step used in image_to_column. + Default: 64. + dropout (float): A Dropout layer on `inp_residual`. + Default: 0.. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
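+        num_cams (int): Number of camera views to sample image features
+            from. Default: 6.
+        pc_range (list[float]): Perception range used to de-normalize the
+            3D reference points. Default: None.
+        norm_cfg (dict): Config of the normalization layer. Default: None.
+        batch_first (bool): Whether the first dimension of the query is the
+            batch dimension. Default: False.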
+ """ + + def __init__( + self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=5, + num_cams=6, + im2col_step=64, + pc_range=None, + dropout=0.1, + norm_cfg=None, + init_cfg=None, + batch_first=False, + ): + super(Detr3DCrossAtten, self).__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.init_cfg = init_cfg + self.dropout = nn.Dropout(dropout) + self.pc_range = pc_range + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.num_cams = num_cams + self.attention_weights = nn.Linear(embed_dims, + num_cams * num_levels * num_points) + + self.output_proj = nn.Linear(embed_dims, embed_dims) + + self.position_encoder = nn.Sequential( + nn.Linear(3, self.embed_dims), + nn.LayerNorm(self.embed_dims), + nn.ReLU(inplace=True), + nn.Linear(self.embed_dims, self.embed_dims), + nn.LayerNorm(self.embed_dims), + nn.ReLU(inplace=True), + ) + self.batch_first = batch_first + self.init_weight() + + def init_weight(self): + """Default initialization for Parameters of Module.""" + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + + def forward(self, + query, + key, + value, + residual=None, + query_pos=None, + reference_points=None, + **kwargs): + """Forward Function of Detr3DCrossAtten. + + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (List[Tensor]): Image features from + different level. Each element has shape + (B, N, C, H_lvl, W_lvl). + residual (Tensor): The tensor used for addition, with the + same shape as `x`. Default None. If None, `x` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + reference_points (Tensor): The normalized 3D reference + points with shape (bs, num_query, 3) + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. 
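+
+        Note:
+            ``kwargs`` is expected to contain ``img_metas`` (list[dict])
+            carrying the ``lidar2img`` projection matrices consumed by
+            ``feature_sampling``.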
+ """ + if key is None: + key = query + if value is None: + value = key + + if residual is None: + inp_residual = query + if query_pos is not None: + query = query + query_pos + + query = query.permute(1, 0, 2) + + bs, num_query, _ = query.size() + + attention_weights = self.attention_weights(query).view( + bs, 1, num_query, self.num_cams, self.num_points, self.num_levels) + reference_points_3d, output, mask = feature_sampling( + value, reference_points, self.pc_range, kwargs['img_metas']) + output = torch.nan_to_num(output) + mask = torch.nan_to_num(mask) + attention_weights = attention_weights.sigmoid() * mask + output = output * attention_weights + output = output.sum(-1).sum(-1).sum(-1) + output = output.permute(2, 0, 1) + # (num_query, bs, embed_dims) + output = self.output_proj(output) + pos_feat = self.position_encoder( + inverse_sigmoid(reference_points_3d)).permute(1, 0, 2) + return self.dropout(output) + inp_residual + pos_feat + + +def feature_sampling(mlvl_feats, + ref_pt, + pc_range, + img_metas, + no_sampling=False): + """ sample multi-level features by projecting 3D reference points + to 2D image + Args: + mlvl_feats (List[Tensor]): Image features from + different level. Each element has shape + (B, N, C, H_lvl, W_lvl). + ref_pt (Tensor): The normalized 3D reference + points with shape (bs, num_query, 3) + pc_range: perception range of the detector + img_metas (list[dict]): Meta information of multiple inputs + in a batch, containing `lidar2img`. + no_sampling (bool): If set 'True', the function will return + 2D projected points and mask only. + Returns: + ref_pt_3d (Tensor): A copy of original ref_pt + sampled_feats (Tensor): sampled features with shape \ + (B C num_q N 1 fpn_lvl) + mask (Tensor): Determine whether the reference point \ + has projected outsied of images, with shape \ + (B 1 num_q N 1 1) + """ + lidar2img = [meta['lidar2img'] for meta in img_metas] + lidar2img = np.asarray(lidar2img) + lidar2img = ref_pt.new_tensor(lidar2img) + ref_pt = ref_pt.clone() + ref_pt_3d = ref_pt.clone() + + B, num_query = ref_pt.size()[:2] + num_cam = lidar2img.size(1) + eps = 1e-5 + + ref_pt[..., 0:1] = \ + ref_pt[..., 0:1] * (pc_range[3] - pc_range[0]) + pc_range[0] # x + ref_pt[..., 1:2] = \ + ref_pt[..., 1:2] * (pc_range[4] - pc_range[1]) + pc_range[1] # y + ref_pt[..., 2:3] = \ + ref_pt[..., 2:3] * (pc_range[5] - pc_range[2]) + pc_range[2] # z + + # (B num_q 3) -> (B num_q 4) -> (B 1 num_q 4) -> (B num_cam num_q 4 1) + ref_pt = torch.cat((ref_pt, torch.ones_like(ref_pt[..., :1])), -1) + ref_pt = ref_pt.view(B, 1, num_query, 4) + ref_pt = ref_pt.repeat(1, num_cam, 1, 1).unsqueeze(-1) + # (B num_cam 4 4) -> (B num_cam num_q 4 4) + lidar2img = lidar2img.view(B, num_cam, 1, 4, 4)\ + .repeat(1, 1, num_query, 1, 1) + # (... 4 4) * (... 
4 1) -> (B num_cam num_q 4) + pt_cam = torch.matmul(lidar2img, ref_pt).squeeze(-1) + + # (B num_cam num_q) + z = pt_cam[..., 2:3] + eps = eps * torch.ones_like(z) + mask = (z > eps) + pt_cam = pt_cam[..., 0:2] / torch.maximum(z, eps) # prevent zero-division + # padded nuscene image: 928*1600 + (h, w) = img_metas[0]['pad_shape'] + pt_cam[..., 0] /= w + pt_cam[..., 1] /= h + # else: + # (h,w,_) = img_metas[0]['ori_shape'][0] # waymo image + # pt_cam[..., 0] /= w # cam0~2: 1280*1920 + # pt_cam[..., 1] /= h # cam3~4: 886 *1920 padded to 1280*1920 + # mask[:, 3:5, :] &= (pt_cam[:, 3:5, :, 1:2] < 0.7) # filter pt_cam_y > 886 + + mask = ( + mask & (pt_cam[..., 0:1] > 0.0) + & (pt_cam[..., 0:1] < 1.0) + & (pt_cam[..., 1:2] > 0.0) + & (pt_cam[..., 1:2] < 1.0)) + + if no_sampling: + return pt_cam, mask + + # (B num_cam num_q) -> (B 1 num_q num_cam 1 1) + mask = mask.view(B, num_cam, 1, num_query, 1, 1).permute(0, 2, 3, 1, 4, 5) + mask = torch.nan_to_num(mask) + + pt_cam = (pt_cam - 0.5) * 2 # [0,1] to [-1,1] to do grid_sample + sampled_feats = [] + for lvl, feat in enumerate(mlvl_feats): + B, N, C, H, W = feat.size() + feat = feat.view(B * N, C, H, W) + pt_cam_lvl = pt_cam.view(B * N, num_query, 1, 2) + sampled_feat = F.grid_sample(feat, pt_cam_lvl) + # (B num_cam C num_query 1) -> List of (B C num_q num_cam 1) + sampled_feat = sampled_feat.view(B, N, C, num_query, 1) + sampled_feat = sampled_feat.permute(0, 2, 3, 1, 4) + sampled_feats.append(sampled_feat) + + sampled_feats = torch.stack(sampled_feats, -1) + # (B C num_q num_cam fpn_lvl) + sampled_feats = \ + sampled_feats.view(B, C, num_query, num_cam, 1, len(mlvl_feats)) + return ref_pt_3d, sampled_feats, mask diff --git a/projects/DETR3D/detr3d/glip.py b/projects/DETR3D/detr3d/glip.py new file mode 100644 index 0000000..0e58dc6 --- /dev/null +++ b/projects/DETR3D/detr3d/glip.py @@ -0,0 +1,168 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import re +import warnings +from typing import Tuple, Union + +import torch +from torch import Tensor + +from mmdet.registry import MODELS +from mmdet.structures import SampleList +from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig +# from .single_stage import SingleStageDetector + + +def find_noun_phrases(caption: str) -> list: + """Find noun phrases in a caption using nltk. + Args: + caption (str): The caption to analyze. + + Returns: + list: List of noun phrases found in the caption. + + Examples: + >>> caption = 'There is two cat and a remote in the picture' + >>> find_noun_phrases(caption) # ['cat', 'a remote', 'the picture'] + """ + try: + import nltk + nltk.download('punkt') + nltk.download('averaged_perceptron_tagger') + except ImportError: + raise RuntimeError('nltk is not installed, please install it by: ' + 'pip install nltk.') + + caption = caption.lower() + tokens = nltk.word_tokenize(caption) + pos_tags = nltk.pos_tag(tokens) + + grammar = 'NP: {
    ?*+}' + cp = nltk.RegexpParser(grammar) + result = cp.parse(pos_tags) + + noun_phrases = [] + for subtree in result.subtrees(): + if subtree.label() == 'NP': + noun_phrases.append(' '.join(t[0] for t in subtree.leaves())) + + return noun_phrases + + +def remove_punctuation(text: str) -> str: + """Remove punctuation from a text. + Args: + text (str): The input text. + + Returns: + str: The text with punctuation removed. + """ + punctuation = [ + '|', ':', ';', '@', '(', ')', '[', ']', '{', '}', '^', '\'', '\"', '’', + '`', '?', '$', '%', '#', '!', '&', '*', '+', ',', '.' + ] + for p in punctuation: + text = text.replace(p, '') + return text.strip() + + +def run_ner(caption: str) -> Tuple[list, list]: + """Run NER on a caption and return the tokens and noun phrases. + Args: + caption (str): The input caption. + + Returns: + Tuple[List, List]: A tuple containing the tokens and noun phrases. + - tokens_positive (List): A list of token positions. + - noun_phrases (List): A list of noun phrases. + """ + noun_phrases = find_noun_phrases(caption) + noun_phrases = [remove_punctuation(phrase) for phrase in noun_phrases] + noun_phrases = [phrase for phrase in noun_phrases if phrase != ''] + relevant_phrases = noun_phrases + labels = noun_phrases + + tokens_positive = [] + for entity, label in zip(relevant_phrases, labels): + try: + # search all occurrences and mark them as different entities + # TODO: Not Robust + for m in re.finditer(entity, caption.lower()): + tokens_positive.append([[m.start(), m.end()]]) + except Exception: + print('noun entities:', noun_phrases) + print('entity:', entity) + print('caption:', caption.lower()) + return tokens_positive, noun_phrases + + +def create_positive_map(tokenized, + tokens_positive: list, + max_num_entities: int = 256) -> Tensor: + """construct a map such that positive_map[i,j] = True + if box i is associated to token j + + Args: + tokenized: The tokenized input. + tokens_positive (list): A list of token ranges + associated with positive boxes. + max_num_entities (int, optional): The maximum number of entities. + Defaults to 256. + + Returns: + torch.Tensor: The positive map. + + Raises: + Exception: If an error occurs during token-to-char mapping. + """ + positive_map = torch.zeros((len(tokens_positive), max_num_entities), + dtype=torch.float) + + for j, tok_list in enumerate(tokens_positive): + for (beg, end) in tok_list: + try: + beg_pos = tokenized.char_to_token(beg) + end_pos = tokenized.char_to_token(end - 1) + except Exception as e: + print('beg:', beg, 'end:', end) + print('token_positive:', tokens_positive) + raise e + if beg_pos is None: + try: + beg_pos = tokenized.char_to_token(beg + 1) + if beg_pos is None: + beg_pos = tokenized.char_to_token(beg + 2) + except Exception: + beg_pos = None + if end_pos is None: + try: + end_pos = tokenized.char_to_token(end - 2) + if end_pos is None: + end_pos = tokenized.char_to_token(end - 3) + except Exception: + end_pos = None + if beg_pos is None or end_pos is None: + continue + + assert beg_pos is not None and end_pos is not None + positive_map[j, beg_pos:end_pos + 1].fill_(1) + return positive_map / (positive_map.sum(-1)[:, None] + 1e-6) + + +def create_positive_map_label_to_token(positive_map: Tensor, + plus: int = 0) -> dict: + """Create a dictionary mapping the label to the token. + Args: + positive_map (Tensor): The positive map tensor. + plus (int, optional): Value added to the label for indexing. + Defaults to 0. + + Returns: + dict: The dictionary mapping the label to the token. 
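+
+    Examples:
+        >>> import torch
+        >>> positive_map = torch.tensor([[0., 1., 1., 0.],
+        ...                              [0., 0., 0., 1.]])
+        >>> create_positive_map_label_to_token(positive_map, plus=1)
+        {1: [1, 2], 2: [3]}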
+ """ + positive_map_label_to_token = {} + for i in range(len(positive_map)): + positive_map_label_to_token[i + plus] = torch.nonzero( + positive_map[i], as_tuple=True)[0].tolist() + return positive_map_label_to_token + + diff --git a/projects/DETR3D/detr3d/grid_mask.py b/projects/DETR3D/detr3d/grid_mask.py new file mode 100755 index 0000000..33e2cce --- /dev/null +++ b/projects/DETR3D/detr3d/grid_mask.py @@ -0,0 +1,142 @@ +import numpy as np +import torch +import torch.nn as nn +from PIL import Image + + +class Grid(object): + + def __init__(self, + use_h, + use_w, + rotate=1, + offset=False, + ratio=0.5, + mode=0, + prob=1.): + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch + + def __call__(self, img, label): + if np.random.rand() > self.prob: + return img, label + h = img.size(1) + w = img.size(2) + self.d1 = 2 + self.d2 = min(h, w) + hh = int(1.5 * h) + ww = int(1.5 * w) + d = np.random.randint(self.d1, self.d2) + if self.ratio == 1: + self.L = np.random.randint(1, d) + else: + self.L = min(max(int(d * self.ratio + 0.5), 1), d - 1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh // d): + s = d * i + st_h + t = min(s + self.L, hh) + mask[s:t, :] *= 0 + if self.use_w: + for i in range(ww // d): + s = d * i + st_w + t = min(s + self.L, ww) + mask[:, s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh - h) // 2:(hh - h) // 2 + h, + (ww - w) // 2:(ww - w) // 2 + w] + + mask = torch.from_numpy(mask).float() + if self.mode == 1: + mask = 1 - mask + + mask = mask.expand_as(img) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h, w) - 0.5)).float() + offset = (1 - mask) * offset + img = img * mask + offset + else: + img = img * mask + + return img, label + + +class GridMask(nn.Module): + + def __init__(self, + use_h, + use_w, + rotate=1, + offset=False, + ratio=0.5, + mode=0, + prob=1.): + super(GridMask, self).__init__() + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch # + 1.# 0.5 + + def forward(self, x): + if np.random.rand() > self.prob or not self.training: + return x + n, c, h, w = x.size() + x = x.view(-1, h, w) + hh = int(1.5 * h) + ww = int(1.5 * w) + d = np.random.randint(2, h) + self.L = min(max(int(d * self.ratio + 0.5), 1), d - 1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh // d): + s = d * i + st_h + t = min(s + self.L, hh) + mask[s:t, :] *= 0 + if self.use_w: + for i in range(ww // d): + s = d * i + st_w + t = min(s + self.L, ww) + mask[:, s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh - h) // 2:(hh - h) // 2 + h, + (ww - w) // 2:(ww - w) // 2 + w] + + mask = torch.from_numpy(mask).to(x) + if self.mode == 1: + mask = 1 - mask + mask = mask.expand_as(x) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h, w) - 0.5)).to(x) + x = x * mask + offset * 
(1 - mask) + else: + x = x * mask + + return x.view(n, c, h, w) diff --git a/projects/DETR3D/detr3d/hungarian_assigner_3d.py b/projects/DETR3D/detr3d/hungarian_assigner_3d.py new file mode 100755 index 0000000..ab9c47d --- /dev/null +++ b/projects/DETR3D/detr3d/hungarian_assigner_3d.py @@ -0,0 +1,135 @@ +from typing import List + +import torch +from mmdet.models.task_modules.assigners import AssignResult # check +from mmdet.models.task_modules.assigners import BaseAssigner +from mmengine.structures import InstanceData +from torch import Tensor + +from mmdet3d.registry import TASK_UTILS +from .util import normalize_bbox + +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + + +@TASK_UTILS.register_module() +class HungarianAssigner3D(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. + + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of some components. + For DETR3D the costs are weighted sum of classification cost, regression L1 + cost and regression iou cost. The targets don't include the no_object, so + generally there are more predictions than targets. After the one-to-one + matching, the un-matched are treated as backgrounds. Thus each query + prediction will be assigned with `0` or a positive integer indicating the + ground truth index: + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + cls_cost (obj:`ConfigDict`) : Match cost configs. + reg_cost. + iou_cost. + pc_range: perception range of the detector + """ + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=1.0), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range: List = None): + self.cls_cost = TASK_UTILS.build(cls_cost) + self.reg_cost = TASK_UTILS.build(reg_cost) + self.iou_cost = TASK_UTILS.build(iou_cost) + self.pc_range = pc_range + + def assign(self, + bbox_pred: Tensor, + cls_pred: Tensor, + gt_bboxes: Tensor, + gt_labels: Tensor, + gt_bboxes_ignore=None, + eps=1e-7) -> AssignResult: + """Computes one-to-one matching based on the weighted costs. + This method assign each query prediction to a ground truth or + background. The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx,cy,l,w,cz,h,sin(φ),cos(φ),v_x,v_y) which are all in + range [0, 1] and shape [num_query, 10]. + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (cx,cy,cz,l,w,h,φ,v_x,v_y). Shape [num_gt, 9]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`. Default None. + eps (int | float, optional): unused parameter + Returns: + :obj:`AssignResult`: The assigned result. 
+ """ + assert gt_bboxes_ignore is None, \ + 'Only case when gt_bboxes_ignore is None is supported.' + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) # 9, 900 + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + + # 2. compute the weighted costs + # classification and bboxcost. + # # dev1.x interface alignment + pred_instances = InstanceData(scores=cls_pred) + gt_instances = InstanceData(labels=gt_labels) + cls_cost = self.cls_cost(pred_instances, gt_instances) + # regression L1 cost + normalized_gt_bboxes = normalize_bbox(gt_bboxes, self.pc_range) + reg_cost = self.reg_cost(bbox_pred[:, :8], normalized_gt_bboxes[:, :8]) + + # weighted sum of above two costs + cost = cls_cost + reg_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/projects/DETR3D/detr3d/match_cost.py b/projects/DETR3D/detr3d/match_cost.py new file mode 100755 index 0000000..420ff2f --- /dev/null +++ b/projects/DETR3D/detr3d/match_cost.py @@ -0,0 +1,34 @@ +from typing import Union + +import torch +from torch import Tensor + +from mmdet3d.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class BBox3DL1Cost(object): + """BBox3DL1Cost. + + Args: + weight (Union[float, int]): Cost weight. Defaults to 1. + """ + + def __init__(self, weight: Union[float, int] = 1.): + self.weight = weight + + def __call__(self, bbox_pred: Tensor, gt_bboxes: Tensor) -> Tensor: + """Compute match cost. + + Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx,cy,l,w,cz,h,sin(φ),cos(φ),v_x,v_y) + which are all in range [0, 1] and shape [num_query, 10]. + gt_bboxes (Tensor): Ground truth boxes with `normalized` + coordinates (cx,cy,l,w,cz,h,sin(φ),cos(φ),v_x,v_y). + Shape [num_gt, 10]. + Returns: + Tensor: Match Cost matrix of shape (num_preds, num_gts). + """ + bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) + return bbox_cost * self.weight diff --git a/projects/DETR3D/detr3d/nms_free_coder.py b/projects/DETR3D/detr3d/nms_free_coder.py new file mode 100755 index 0000000..bdc36cd --- /dev/null +++ b/projects/DETR3D/detr3d/nms_free_coder.py @@ -0,0 +1,118 @@ +import torch +from mmdet.models.task_modules import BaseBBoxCoder + +from mmdet3d.registry import TASK_UTILS +from .util import denormalize_bbox + + +@TASK_UTILS.register_module() +class NMSFreeCoder(BaseBBoxCoder): + """Bbox coder for NMS-free detector. 
+ + Args: + pc_range (list[float]): Range of point cloud. + post_center_range (list[float]): Limit of the center. + Default: None. + max_num (int): Max number to be kept. Default: 100. + score_threshold (float): Threshold to filter boxes based on score. + Default: None. + """ + + def __init__(self, + pc_range=None, + voxel_size=None, + post_center_range=None, + max_num=100, + score_threshold=None, + num_classes=10): + + self.pc_range = pc_range + self.voxel_size = voxel_size + self.post_center_range = post_center_range + self.max_num = max_num + self.score_threshold = score_threshold + self.num_classes = num_classes + + def encode(self): + pass + + def decode_single(self, cls_scores, bbox_preds): + """Decode bboxes. + + Args: + cls_scores (Tensor): Outputs from the classification head, + shape [num_query, cls_out_channels]. Note that + cls_out_channels should includes background. + bbox_preds (Tensor): Outputs from the regression + head with normalized coordinate + (cx, cy, l, w, cz, h, rot_sine, rot_cosine, vx, vy). + Shape [num_query, 10]. + Returns: + list[dict]: Decoded boxes. + """ + max_num = self.max_num + + cls_scores = cls_scores.sigmoid() + scores, indexes = cls_scores.view(-1).topk(max_num) + labels = indexes % self.num_classes + bbox_index = indexes // self.num_classes + bbox_preds = bbox_preds[bbox_index] + + # [[cx, cy, cz, l, w, h, rot, vx, vy]] + final_box_preds = denormalize_bbox(bbox_preds, None) + final_scores = scores + final_preds = labels + + # use score threshold + if self.score_threshold is not None: + thresh_mask = final_scores > self.score_threshold + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=scores.device) + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(1) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(1) + + if self.score_threshold: + mask &= thresh_mask + + boxes3d = final_box_preds[mask] + scores = final_scores[mask] + labels = final_preds[mask] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + return predictions_dict + + def decode(self, preds_dicts): + """Decode bboxes. + + Args: + all_cls_scores (Tensor): Outputs from the classification head, + shape [nb_dec, bs, num_query, cls_out_channels]. Note + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression + head with normalized coordinate format + (cx, cy, l, w, cz, h, rot_sine, rot_cosine, vx, vy). + Shape [nb_dec, bs, num_query, 10]. + Returns: + list[dict]: Decoded boxes. + """ + # cls & reg target of last decoder layer + all_cls_scores = preds_dicts['all_cls_scores'][-1] + all_bbox_preds = preds_dicts['all_bbox_preds'][-1] + + batch_size = all_cls_scores.size()[0] + predictions_list = [] + for i in range(batch_size): + predictions_list.append( + self.decode_single(all_cls_scores[i], all_bbox_preds[i])) + return predictions_list diff --git a/projects/DETR3D/detr3d/single_stage.py b/projects/DETR3D/detr3d/single_stage.py new file mode 100644 index 0000000..06c0740 --- /dev/null +++ b/projects/DETR3D/detr3d/single_stage.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Tuple, Union + +from torch import Tensor + +from mmdet.registry import MODELS +from mmdet.structures import OptSampleList, SampleList +from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig +from .base import BaseDetector + + +@MODELS.register_module() +class SingleStageDetector(BaseDetector): + """Base class for single-stage detectors. + + Single-stage detectors directly and densely predict bounding boxes on the + output features of the backbone+neck. + """ + + def __init__(self, + backbone: ConfigType, + neck: OptConfigType = None, + bbox_head: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None) -> None: + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.backbone = MODELS.build(backbone) + if neck is not None: + self.neck = MODELS.build(neck) + bbox_head.update(train_cfg=train_cfg) + bbox_head.update(test_cfg=test_cfg) + self.bbox_head = MODELS.build(bbox_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def _load_from_state_dict(self, state_dict: dict, prefix: str, + local_metadata: dict, strict: bool, + missing_keys: Union[List[str], str], + unexpected_keys: Union[List[str], str], + error_msgs: Union[List[str], str]) -> None: + """Exchange bbox_head key to rpn_head key when loading two-stage + weights into single-stage model.""" + bbox_head_prefix = prefix + '.bbox_head' if prefix else 'bbox_head' + bbox_head_keys = [ + k for k in state_dict.keys() if k.startswith(bbox_head_prefix) + ] + rpn_head_prefix = prefix + '.rpn_head' if prefix else 'rpn_head' + rpn_head_keys = [ + k for k in state_dict.keys() if k.startswith(rpn_head_prefix) + ] + if len(bbox_head_keys) == 0 and len(rpn_head_keys) != 0: + for rpn_head_key in rpn_head_keys: + bbox_head_key = bbox_head_prefix + \ + rpn_head_key[len(rpn_head_prefix):] + state_dict[bbox_head_key] = state_dict.pop(rpn_head_key) + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, + error_msgs) + + def loss(self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> Union[dict, list]: + """Calculate losses from a batch of inputs and data samples. + + Args: + batch_inputs (Tensor): Input images of shape (N, C, H, W). + These should usually be mean centered and std scaled. + batch_data_samples (list[:obj:`DetDataSample`]): The batch + data samples. It usually includes information such + as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. + + Returns: + dict: A dictionary of loss components. + """ + x = self.extract_feat(batch_inputs) + losses = self.bbox_head.loss(x, batch_data_samples) + return losses + + def predict(self, + batch_inputs: Tensor, + batch_data_samples: SampleList, + rescale: bool = True) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + batch_inputs (Tensor): Inputs with shape (N, C, H, W). + batch_data_samples (List[:obj:`DetDataSample`]): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + rescale (bool): Whether to rescale the results. + Defaults to True. + + Returns: + list[:obj:`DetDataSample`]: Detection results of the + input images. Each DetDataSample usually contain + 'pred_instances'. And the ``pred_instances`` usually + contains following keys. 
+ + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). + """ + x = self.extract_feat(batch_inputs) + results_list = self.bbox_head.predict( + x, batch_data_samples, rescale=rescale) + batch_data_samples = self.add_pred_to_datasample( + batch_data_samples, results_list) + return batch_data_samples + + def _forward( + self, + batch_inputs: Tensor, + batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + batch_inputs (Tensor): Inputs with shape (N, C, H, W). + batch_data_samples (list[:obj:`DetDataSample`]): Each item contains + the meta information of each image and corresponding + annotations. + + Returns: + tuple[list]: A tuple of features from ``bbox_head`` forward. + """ + x = self.extract_feat(batch_inputs) + results = self.bbox_head.forward(x) + return results + + def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]: + """Extract features. + + Args: + batch_inputs (Tensor): Image tensor with shape (N, C, H ,W). + + Returns: + tuple[Tensor]: Multi-level features that may have + different resolutions. + """ + x = self.backbone(batch_inputs) + if self.with_neck: + x = self.neck(x) + return x diff --git a/projects/DETR3D/detr3d/util.py b/projects/DETR3D/detr3d/util.py new file mode 100755 index 0000000..c9f22d8 --- /dev/null +++ b/projects/DETR3D/detr3d/util.py @@ -0,0 +1,76 @@ +from typing import List + +import torch +from torch import Tensor + + +def normalize_bbox(bboxes: Tensor, pc_range: List) -> Tensor: + """ normalize bboxes + Args: + bboxes (Tensor): boxes with unnormalized + coordinates (cx,cy,cz,L,W,H,φ,v_x,v_y). Shape [num_gt, 9]. + pc_range (List): Perception range of the detector + Returns: + normalized_bboxes (Tensor): boxes with normalized coordinate + (cx,cy,L,W,cz,H,sin(φ),cos(φ),v_x,v_y). + All in range [0, 1] and shape [num_query, 10]. + """ + + cx = bboxes[..., 0:1] + cy = bboxes[..., 1:2] + cz = bboxes[..., 2:3] + L = bboxes[..., 3:4].log() + W = bboxes[..., 4:5].log() + H = bboxes[..., 5:6].log() + + rot = bboxes[..., 6:7] + if bboxes.size(-1) > 7: + vx = bboxes[..., 7:8] + vy = bboxes[..., 8:9] + normalized_bboxes = torch.cat( + (cx, cy, L, W, cz, H, rot.sin(), rot.cos(), vx, vy), dim=-1) + else: + normalized_bboxes = torch.cat( + (cx, cy, L, W, cz, H, rot.sin(), rot.cos()), dim=-1) + return normalized_bboxes + + +def denormalize_bbox(normalized_bboxes, pc_range): + """ denormalize bboxes + Args: + normalized_bboxes (Tensor): boxes with normalized coordinate + (cx,cy,L,W,cz,H,sin(φ),cos(φ),v_x,v_y). + All in range [0, 1] and shape [num_query, 10]. + pc_range (List): Perception range of the detector + Returns: + denormalized_bboxes (Tensor): boxes with unnormalized + coordinates (cx,cy,cz,L,W,H,φ,v_x,v_y). Shape [num_gt, 9]. 
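+
+    Example (a minimal sketch with arbitrary values, showing that this
+    function inverts :func:`normalize_bbox` up to floating-point error;
+    ``pc_range`` is unused by both helpers):
+        >>> import torch
+        >>> box = torch.tensor([[10., 5., -1., 4., 2., 1.5, 0.3, 1., 0.]])
+        >>> out = denormalize_bbox(normalize_bbox(box, None), None)
+        >>> torch.allclose(out, box)
+        True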
+ """ + # rotation + rot_sine = normalized_bboxes[..., 6:7] + + rot_cosine = normalized_bboxes[..., 7:8] + rot = torch.atan2(rot_sine, rot_cosine) + + # center in the bev + cx = normalized_bboxes[..., 0:1] + cy = normalized_bboxes[..., 1:2] + cz = normalized_bboxes[..., 4:5] + + # size, the meaning of L,W may alter in different version of mmdet3d + L = normalized_bboxes[..., 2:3] + W = normalized_bboxes[..., 3:4] + H = normalized_bboxes[..., 5:6] + + L = L.exp() + W = W.exp() + H = H.exp() + if normalized_bboxes.size(-1) > 8: + # velocity + vx = normalized_bboxes[:, 8:9] + vy = normalized_bboxes[:, 9:10] + denormalized_bboxes = torch.cat([cx, cy, cz, L, W, H, rot, vx, vy], + dim=-1) + else: + denormalized_bboxes = torch.cat([cx, cy, cz, L, W, H, rot], dim=-1) + return denormalized_bboxes diff --git a/projects/DETR3D/detr3d/vovnet.py b/projects/DETR3D/detr3d/vovnet.py new file mode 100755 index 0000000..63b5773 --- /dev/null +++ b/projects/DETR3D/detr3d/vovnet.py @@ -0,0 +1,442 @@ +import warnings +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmdet3d.registry import MODELS + +VoVNet19_slim_dw_eSE = { + 'stem': [64, 64, 64], + 'stage_conv_ch': [64, 80, 96, 112], + 'stage_out_ch': [112, 256, 384, 512], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + 'dw': True +} + +VoVNet19_dw_eSE = { + 'stem': [64, 64, 64], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + 'dw': True +} + +VoVNet19_slim_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [64, 80, 96, 112], + 'stage_out_ch': [112, 256, 384, 512], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + 'dw': False +} + +VoVNet19_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + 'dw': False +} + +VoVNet39_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 5, + 'block_per_stage': [1, 1, 2, 2], + 'eSE': True, + 'dw': False +} + +VoVNet57_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 5, + 'block_per_stage': [1, 1, 4, 3], + 'eSE': True, + 'dw': False +} + +VoVNet99_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 5, + 'block_per_stage': [1, 3, 9, 3], + 'eSE': True, + 'dw': False +} + +_STAGE_SPECS = { + 'V-19-slim-dw-eSE': VoVNet19_slim_dw_eSE, + 'V-19-dw-eSE': VoVNet19_dw_eSE, + 'V-19-slim-eSE': VoVNet19_slim_eSE, + 'V-19-eSE': VoVNet19_eSE, + 'V-39-eSE': VoVNet39_eSE, + 'V-57-eSE': VoVNet57_eSE, + 'V-99-eSE': VoVNet99_eSE, +} + + +def dw_conv3x3(in_channels, + out_channels, + module_name, + postfix, + stride=1, + kernel_size=3, + padding=1): + """3x3 convolution with padding.""" + return [ + ('{}_{}/dw_conv3x3'.format(module_name, postfix), + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=out_channels, + bias=False)), + ('{}_{}/pw_conv1x1'.format(module_name, postfix), + nn.Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + groups=1, + 
bias=False)), + ('{}_{}/pw_norm'.format(module_name, + postfix), nn.BatchNorm2d(out_channels)), + ('{}_{}/pw_relu'.format(module_name, postfix), nn.ReLU(inplace=True)), + ] + + +def conv3x3(in_channels, + out_channels, + module_name, + postfix, + stride=1, + groups=1, + kernel_size=3, + padding=1): + """3x3 convolution with padding.""" + return [ + ( + f'{module_name}_{postfix}/conv', + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False, + ), + ), + (f'{module_name}_{postfix}/norm', nn.BatchNorm2d(out_channels)), + (f'{module_name}_{postfix}/relu', nn.ReLU(inplace=True)), + ] + + +def conv1x1(in_channels, + out_channels, + module_name, + postfix, + stride=1, + groups=1, + kernel_size=1, + padding=0): + """1x1 convolution with padding.""" + return [ + ( + f'{module_name}_{postfix}/conv', + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False, + ), + ), + (f'{module_name}_{postfix}/norm', nn.BatchNorm2d(out_channels)), + (f'{module_name}_{postfix}/relu', nn.ReLU(inplace=True)), + ] + + +class Hsigmoid(nn.Module): + + def __init__(self, inplace=True): + super(Hsigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return F.relu6(x + 3.0, inplace=self.inplace) / 6.0 + + +class eSEModule(nn.Module): + + def __init__(self, channel, reduction=4): + super(eSEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0) + self.hsigmoid = Hsigmoid() + + def forward(self, x): + input = x + x = self.avg_pool(x) + x = self.fc(x) + x = self.hsigmoid(x) + return input * x + + +class _OSA_module(nn.Module): + + def __init__(self, + in_ch, + stage_ch, + concat_ch, + layer_per_block, + module_name, + SE=False, + identity=False, + depthwise=False): + + super(_OSA_module, self).__init__() + + self.identity = identity + self.depthwise = depthwise + self.isReduced = False + self.layers = nn.ModuleList() + in_channel = in_ch + if self.depthwise and in_channel != stage_ch: + self.isReduced = True + self.conv_reduction = nn.Sequential( + OrderedDict( + conv1x1(in_channel, stage_ch, + '{}_reduction'.format(module_name), '0'))) + for i in range(layer_per_block): + if self.depthwise: + self.layers.append( + nn.Sequential( + OrderedDict( + dw_conv3x3(stage_ch, stage_ch, module_name, i)))) + else: + self.layers.append( + nn.Sequential( + OrderedDict( + conv3x3(in_channel, stage_ch, module_name, i)))) + in_channel = stage_ch + + # feature aggregation + in_channel = in_ch + layer_per_block * stage_ch + self.concat = nn.Sequential( + OrderedDict(conv1x1(in_channel, concat_ch, module_name, 'concat'))) + + self.ese = eSEModule(concat_ch) + + def forward(self, x): + + identity_feat = x + + output = [] + output.append(x) + if self.depthwise and self.isReduced: + x = self.conv_reduction(x) + for layer in self.layers: + x = layer(x) + output.append(x) + + x = torch.cat(output, dim=1) + xt = self.concat(x) + + xt = self.ese(xt) + + if self.identity: + xt = xt + identity_feat + + return xt + + +class _OSA_stage(nn.Sequential): + + def __init__(self, + in_ch, + stage_ch, + concat_ch, + block_per_stage, + layer_per_block, + stage_num, + SE=False, + depthwise=False): + + super(_OSA_stage, self).__init__() + + if not stage_num == 2: + self.add_module( + 'Pooling', + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)) + + if block_per_stage != 1: + SE = False + module_name = 
f'OSA{stage_num}_1' + self.add_module( + module_name, + _OSA_module( + in_ch, + stage_ch, + concat_ch, + layer_per_block, + module_name, + SE, + depthwise=depthwise)) + for i in range(block_per_stage - 1): + if i != block_per_stage - 2: # last block + SE = False + module_name = f'OSA{stage_num}_{i + 2}' + self.add_module( + module_name, + _OSA_module( + concat_ch, + stage_ch, + concat_ch, + layer_per_block, + module_name, + SE, + identity=True, + depthwise=depthwise), + ) + + +@MODELS.register_module() +class VoVNet(BaseModule): + + def __init__(self, + spec_name, + input_ch=3, + out_features=None, + frozen_stages=-1, + norm_eval=True, + pretrained=None, + init_cfg=None): + """ + Args: + input_ch(int) : the number of input channel + out_features (list[str]): name of the layers whose outputs should + be returned in forward. Can be anything in "stem", "stage2" ... + """ + super(VoVNet, self).__init__(init_cfg) + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + stage_specs = _STAGE_SPECS[spec_name] + + stem_ch = stage_specs['stem'] + config_stage_ch = stage_specs['stage_conv_ch'] + config_concat_ch = stage_specs['stage_out_ch'] + block_per_stage = stage_specs['block_per_stage'] + layer_per_block = stage_specs['layer_per_block'] + SE = stage_specs['eSE'] + depthwise = stage_specs['dw'] + + self._out_features = out_features + + # Stem module + conv_type = dw_conv3x3 if depthwise else conv3x3 + stem = conv3x3(input_ch, stem_ch[0], 'stem', '1', 2) + stem += conv_type(stem_ch[0], stem_ch[1], 'stem', '2', 1) + stem += conv_type(stem_ch[1], stem_ch[2], 'stem', '3', 2) + self.add_module('stem', nn.Sequential((OrderedDict(stem)))) + current_stirde = 4 + self._out_feature_strides = { + 'stem': current_stirde, + 'stage2': current_stirde + } + self._out_feature_channels = {'stem': stem_ch[2]} + + stem_out_ch = [stem_ch[2]] + in_ch_list = stem_out_ch + config_concat_ch[:-1] + # OSA stages + self.stage_names = [] + for i in range(4): # num_stages + name = 'stage%d' % (i + 2) # stage 2 ... 
stage 5 + self.stage_names.append(name) + self.add_module( + name, + _OSA_stage( + in_ch_list[i], + config_stage_ch[i], + config_concat_ch[i], + block_per_stage[i], + layer_per_block, + i + 2, + SE, + depthwise, + ), + ) + + self._out_feature_channels[name] = config_concat_ch[i] + if not i == 0: + self._out_feature_strides[name] = current_stirde = int( + current_stirde * 2) + + # initialize weights + # self._initialize_weights() + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + + def forward(self, x): + outputs = {} + x = self.stem(x) + if 'stem' in self._out_features: + outputs['stem'] = x + for name in self.stage_names: + x = getattr(self, name)(x) + if name in self._out_features: + outputs[name] = x + + return outputs + + def _freeze_stages(self): + if self.frozen_stages >= 0: + m = getattr(self, 'stem') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'stage{i+1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(VoVNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/projects/DETR3D/layers/transformer/__init__.py b/projects/DETR3D/layers/transformer/__init__.py new file mode 100755 index 0000000..a86091e --- /dev/null +++ b/projects/DETR3D/layers/transformer/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .conditional_detr_layers import (ConditionalDetrTransformerDecoder, + ConditionalDetrTransformerDecoderLayer) +from .dab_detr_layers import (DABDetrTransformerDecoder, + DABDetrTransformerDecoderLayer, + DABDetrTransformerEncoder) +from .ddq_detr_layers import DDQTransformerDecoder +from .deformable_detr_layers import (DeformableDetrTransformerDecoder, + DeformableDetrTransformerDecoderLayer, + DeformableDetrTransformerEncoder, + DeformableDetrTransformerEncoderLayer) +from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer, + DetrTransformerEncoder, DetrTransformerEncoderLayer,DetrTransformerEncoderLayer2) +from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder +from .grounding_dino_layers import (GroundingDinoTransformerDecoder, + GroundingDinoTransformerDecoderLayer, + GroundingDinoTransformerEncoder) +from .mask2former_layers import (Mask2FormerTransformerDecoder, + Mask2FormerTransformerDecoderLayer, + Mask2FormerTransformerEncoder) +from .utils import (MLP, AdaptivePadding, ConditionalAttention, + PatchEmbed, PatchMerging, coordinate_to_encoding, + inverse_sigmoid, nchw_to_nlc, nlc_to_nchw) + +__all__ = [ + 'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed', + 'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP', + 'DetrTransformerEncoder', 'DetrTransformerDecoder', + 'DetrTransformerEncoderLayer','DetrTransformerEncoderLayer2' 'DetrTransformerDecoderLayer', + 'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder', + 'DeformableDetrTransformerEncoderLayer', + 'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding', + 'ConditionalAttention', 'DABDetrTransformerDecoderLayer', + 'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder', + 'DDQTransformerDecoder', 'ConditionalDetrTransformerDecoder', + 
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder', + 'CdnQueryGenerator', 'Mask2FormerTransformerEncoder', + 'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder', + 'GroundingDinoTransformerDecoderLayer', 'GroundingDinoTransformerEncoder', + 'GroundingDinoTransformerDecoder' +] diff --git a/projects/DETR3D/layers/transformer/conditional_detr_layers.py b/projects/DETR3D/layers/transformer/conditional_detr_layers.py new file mode 100755 index 0000000..6db12a1 --- /dev/null +++ b/projects/DETR3D/layers/transformer/conditional_detr_layers.py @@ -0,0 +1,170 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN +from torch import Tensor +from torch.nn import ModuleList + +from .detr_layers import DetrTransformerDecoder, DetrTransformerDecoderLayer +from .utils import MLP, ConditionalAttention, coordinate_to_encoding + + +class ConditionalDetrTransformerDecoder(DetrTransformerDecoder): + """Decoder of Conditional DETR.""" + + def _init_layers(self) -> None: + """Initialize decoder layers and other layers.""" + self.layers = ModuleList([ + ConditionalDetrTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + self.post_norm = build_norm_layer(self.post_norm_cfg, + self.embed_dims)[1] + # conditional detr affline + self.query_scale = MLP(self.embed_dims, self.embed_dims, + self.embed_dims, 2) + self.ref_point_head = MLP(self.embed_dims, self.embed_dims, 2, 2) + # we have substitute 'qpos_proj' with 'qpos_sine_proj' except for + # the first decoder layer), so 'qpos_proj' should be deleted + # in other layers. + for layer_id in range(self.num_layers - 1): + self.layers[layer_id + 1].cross_attn.qpos_proj = None + + def forward(self, + query: Tensor, + key: Tensor = None, + query_pos: Tensor = None, + key_pos: Tensor = None, + key_padding_mask: Tensor = None): + """Forward function of decoder. + + Args: + query (Tensor): The input query with shape + (bs, num_queries, dim). + key (Tensor): The input key with shape (bs, num_keys, dim) If + `None`, the `query` will be used. Defaults to `None`. + query_pos (Tensor): The positional encoding for `query`, with the + same shape as `query`. If not `None`, it will be added to + `query` before forward function. Defaults to `None`. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. If not `None`, it will be added to + `key` before forward function. If `None`, and `query_pos` + has the same shape as `key`, then `query_pos` will be used + as `key_pos`. Defaults to `None`. + key_padding_mask (Tensor): ByteTensor with shape (bs, num_keys). + Defaults to `None`. + Returns: + List[Tensor]: forwarded results with shape (num_decoder_layers, + bs, num_queries, dim) if `return_intermediate` is True, otherwise + with shape (1, bs, num_queries, dim). References with shape + (bs, num_queries, 2). 
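+
+        Note: the returned references are predicted once from ``query_pos``
+        by ``ref_point_head`` before the layer loop and are then shared,
+        unchanged, by all decoder layers.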
+ """ + reference_unsigmoid = self.ref_point_head( + query_pos) # [bs, num_queries, 2] + reference = reference_unsigmoid.sigmoid() + reference_xy = reference[..., :2] + intermediate = [] + for layer_id, layer in enumerate(self.layers): + if layer_id == 0: + pos_transformation = 1 + else: + pos_transformation = self.query_scale(query) + # get sine embedding for the query reference + ref_sine_embed = coordinate_to_encoding(coord_tensor=reference_xy) + # apply transformation + ref_sine_embed = ref_sine_embed * pos_transformation + query = layer( + query, + key=key, + query_pos=query_pos, + key_pos=key_pos, + key_padding_mask=key_padding_mask, + ref_sine_embed=ref_sine_embed, + is_first=(layer_id == 0)) + if self.return_intermediate: + intermediate.append(self.post_norm(query)) + + if self.return_intermediate: + return torch.stack(intermediate), reference + + query = self.post_norm(query) + return query.unsqueeze(0), reference + + +class ConditionalDetrTransformerDecoderLayer(DetrTransformerDecoderLayer): + """Implements decoder layer in Conditional DETR transformer.""" + + def _init_layers(self): + """Initialize self-attention, cross-attention, FFN, and + normalization.""" + self.self_attn = ConditionalAttention(**self.self_attn_cfg) + self.cross_attn = ConditionalAttention(**self.cross_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(3) + ] + self.norms = ModuleList(norms_list) + + def forward(self, + query: Tensor, + key: Tensor = None, + query_pos: Tensor = None, + key_pos: Tensor = None, + self_attn_masks: Tensor = None, + cross_attn_masks: Tensor = None, + key_padding_mask: Tensor = None, + ref_sine_embed: Tensor = None, + is_first: bool = False): + """ + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim) + key (Tensor, optional): The input key, has shape (bs, num_keys, + dim). If `None`, the `query` will be used. Defaults to `None`. + query_pos (Tensor, optional): The positional encoding for `query`, + has the same shape as `query`. If not `None`, it will be + added to `query` before forward function. Defaults to `None`. + ref_sine_embed (Tensor): The positional encoding for query in + cross attention, with the same shape as `x`. Defaults to None. + key_pos (Tensor, optional): The positional encoding for `key`, has + the same shape as `key`. If not None, it will be added to + `key` before forward function. If None, and `query_pos` has + the same shape as `key`, then `query_pos` will be used for + `key_pos`. Defaults to None. + self_attn_masks (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), Same in `nn.MultiheadAttention. + forward`. Defaults to None. + cross_attn_masks (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), Same in `nn.MultiheadAttention. + forward`. Defaults to None. + key_padding_mask (Tensor, optional): ByteTensor, has shape + (bs, num_keys). Defaults to None. + is_first (bool): A indicator to tell whether the current layer + is the first layer of the decoder. Defaults to False. + + Returns: + Tensor: Forwarded results, has shape (bs, num_queries, dim). 
+ """ + query = self.self_attn( + query=query, + key=query, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=self_attn_masks) + query = self.norms[0](query) + query = self.cross_attn( + query=query, + key=key, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=cross_attn_masks, + key_padding_mask=key_padding_mask, + ref_sine_embed=ref_sine_embed, + is_first=is_first) + query = self.norms[1](query) + query = self.ffn(query) + query = self.norms[2](query) + + return query diff --git a/projects/DETR3D/layers/transformer/dab_detr_layers.py b/projects/DETR3D/layers/transformer/dab_detr_layers.py new file mode 100755 index 0000000..b8a6e77 --- /dev/null +++ b/projects/DETR3D/layers/transformer/dab_detr_layers.py @@ -0,0 +1,298 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN +from mmengine.model import ModuleList +from torch import Tensor + +from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer, + DetrTransformerEncoder, DetrTransformerEncoderLayer) +from .utils import (MLP, ConditionalAttention, coordinate_to_encoding, + inverse_sigmoid) + + +class DABDetrTransformerDecoderLayer(DetrTransformerDecoderLayer): + """Implements decoder layer in DAB-DETR transformer.""" + + def _init_layers(self): + """Initialize self-attention, cross-attention, FFN, normalization and + others.""" + self.self_attn = ConditionalAttention(**self.self_attn_cfg) + self.cross_attn = ConditionalAttention(**self.cross_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(3) + ] + self.norms = ModuleList(norms_list) + self.keep_query_pos = self.cross_attn.keep_query_pos + + def forward(self, + query: Tensor, + key: Tensor, + query_pos: Tensor, + key_pos: Tensor, + ref_sine_embed: Tensor = None, + self_attn_masks: Tensor = None, + cross_attn_masks: Tensor = None, + key_padding_mask: Tensor = None, + is_first: bool = False, + **kwargs) -> Tensor: + """ + Args: + query (Tensor): The input query with shape [bs, num_queries, + dim]. + key (Tensor): The key tensor with shape [bs, num_keys, + dim]. + query_pos (Tensor): The positional encoding for query in self + attention, with the same shape as `x`. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. + ref_sine_embed (Tensor): The positional encoding for query in + cross attention, with the same shape as `x`. + Defaults to None. + self_attn_masks (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + cross_attn_masks (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + is_first (bool): A indicator to tell whether the current layer + is the first layer of the decoder. + Defaults to False. + + Returns: + Tensor: forwarded results with shape + [bs, num_queries, dim]. 
+ """ + + query = self.self_attn( + query=query, + key=query, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=self_attn_masks, + **kwargs) + query = self.norms[0](query) + query = self.cross_attn( + query=query, + key=key, + query_pos=query_pos, + key_pos=key_pos, + ref_sine_embed=ref_sine_embed, + attn_mask=cross_attn_masks, + key_padding_mask=key_padding_mask, + is_first=is_first, + **kwargs) + query = self.norms[1](query) + query = self.ffn(query) + query = self.norms[2](query) + + return query + + +class DABDetrTransformerDecoder(DetrTransformerDecoder): + """Decoder of DAB-DETR. + + Args: + query_dim (int): The last dimension of query pos, + 4 for anchor format, 2 for point format. + Defaults to 4. + query_scale_type (str): Type of transformation applied + to content query. Defaults to `cond_elewise`. + with_modulated_hw_attn (bool): Whether to inject h&w info + during cross conditional attention. Defaults to True. + """ + + def __init__(self, + *args, + query_dim: int = 4, + query_scale_type: str = 'cond_elewise', + with_modulated_hw_attn: bool = True, + **kwargs): + + self.query_dim = query_dim + self.query_scale_type = query_scale_type + self.with_modulated_hw_attn = with_modulated_hw_attn + + super().__init__(*args, **kwargs) + + def _init_layers(self): + """Initialize decoder layers and other layers.""" + assert self.query_dim in [2, 4], \ + f'{"dab-detr only supports anchor prior or reference point prior"}' + assert self.query_scale_type in [ + 'cond_elewise', 'cond_scalar', 'fix_elewise' + ] + + self.layers = ModuleList([ + DABDetrTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + + embed_dims = self.layers[0].embed_dims + self.embed_dims = embed_dims + + self.post_norm = build_norm_layer(self.post_norm_cfg, embed_dims)[1] + if self.query_scale_type == 'cond_elewise': + self.query_scale = MLP(embed_dims, embed_dims, embed_dims, 2) + elif self.query_scale_type == 'cond_scalar': + self.query_scale = MLP(embed_dims, embed_dims, 1, 2) + elif self.query_scale_type == 'fix_elewise': + self.query_scale = nn.Embedding(self.num_layers, embed_dims) + else: + raise NotImplementedError('Unknown query_scale_type: {}'.format( + self.query_scale_type)) + + self.ref_point_head = MLP(self.query_dim // 2 * embed_dims, embed_dims, + embed_dims, 2) + + if self.with_modulated_hw_attn and self.query_dim == 4: + self.ref_anchor_head = MLP(embed_dims, embed_dims, 2, 2) + + self.keep_query_pos = self.layers[0].keep_query_pos + if not self.keep_query_pos: + for layer_id in range(self.num_layers - 1): + self.layers[layer_id + 1].cross_attn.qpos_proj = None + + def forward(self, + query: Tensor, + key: Tensor, + query_pos: Tensor, + key_pos: Tensor, + reg_branches: nn.Module, + key_padding_mask: Tensor = None, + **kwargs) -> List[Tensor]: + """Forward function of decoder. + + Args: + query (Tensor): The input query with shape (bs, num_queries, dim). + key (Tensor): The input key with shape (bs, num_keys, dim). + query_pos (Tensor): The positional encoding for `query`, with the + same shape as `query`. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. + reg_branches (nn.Module): The regression branch for dynamically + updating references in each layer. + key_padding_mask (Tensor): ByteTensor with shape (bs, num_keys). + Defaults to `None`. + + Returns: + List[Tensor]: forwarded results with shape (num_decoder_layers, + bs, num_queries, dim) if `return_intermediate` is True, otherwise + with shape (1, bs, num_queries, dim). 
references with shape + (num_decoder_layers, bs, num_queries, 2/4). + """ + output = query + unsigmoid_references = query_pos + + reference_points = unsigmoid_references.sigmoid() + intermediate_reference_points = [reference_points] + + intermediate = [] + for layer_id, layer in enumerate(self.layers): + obj_center = reference_points[..., :self.query_dim] + ref_sine_embed = coordinate_to_encoding( + coord_tensor=obj_center, num_feats=self.embed_dims // 2) + query_pos = self.ref_point_head( + ref_sine_embed) # [bs, nq, 2c] -> [bs, nq, c] + # For the first decoder layer, do not apply transformation + if self.query_scale_type != 'fix_elewise': + if layer_id == 0: + pos_transformation = 1 + else: + pos_transformation = self.query_scale(output) + else: + pos_transformation = self.query_scale.weight[layer_id] + # apply transformation + ref_sine_embed = ref_sine_embed[ + ..., :self.embed_dims] * pos_transformation + # modulated height and weight attention + if self.with_modulated_hw_attn: + assert obj_center.size(-1) == 4 + ref_hw = self.ref_anchor_head(output).sigmoid() + ref_sine_embed[..., self.embed_dims // 2:] *= \ + (ref_hw[..., 0] / obj_center[..., 2]).unsqueeze(-1) + ref_sine_embed[..., : self.embed_dims // 2] *= \ + (ref_hw[..., 1] / obj_center[..., 3]).unsqueeze(-1) + + output = layer( + output, + key, + query_pos=query_pos, + ref_sine_embed=ref_sine_embed, + key_pos=key_pos, + key_padding_mask=key_padding_mask, + is_first=(layer_id == 0), + **kwargs) + # iter update + tmp_reg_preds = reg_branches(output) + tmp_reg_preds[..., :self.query_dim] += inverse_sigmoid( + reference_points) + new_reference_points = tmp_reg_preds[ + ..., :self.query_dim].sigmoid() + if layer_id != self.num_layers - 1: + intermediate_reference_points.append(new_reference_points) + reference_points = new_reference_points.detach() + + if self.return_intermediate: + intermediate.append(self.post_norm(output)) + + output = self.post_norm(output) + + if self.return_intermediate: + return [ + torch.stack(intermediate), + torch.stack(intermediate_reference_points), + ] + else: + return [ + output.unsqueeze(0), + torch.stack(intermediate_reference_points) + ] + + +class DABDetrTransformerEncoder(DetrTransformerEncoder): + """Encoder of DAB-DETR.""" + + def _init_layers(self): + """Initialize encoder layers.""" + self.layers = ModuleList([ + DetrTransformerEncoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + embed_dims = self.layers[0].embed_dims + self.embed_dims = embed_dims + self.query_scale = MLP(embed_dims, embed_dims, embed_dims, 2) + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, **kwargs): + """Forward function of encoder. + + Args: + query (Tensor): Input queries of encoder, has shape + (bs, num_queries, dim). + query_pos (Tensor): The positional embeddings of the queries, has + shape (bs, num_feat_points, dim). + key_padding_mask (Tensor): ByteTensor, the key padding mask + of the queries, has shape (bs, num_feat_points). + + Returns: + Tensor: With shape (num_queries, bs, dim). + """ + + for layer in self.layers: + pos_scales = self.query_scale(query) + query = layer( + query, + query_pos=query_pos * pos_scales, + key_padding_mask=key_padding_mask, + **kwargs) + + return query diff --git a/projects/DETR3D/layers/transformer/ddq_detr_layers.py b/projects/DETR3D/layers/transformer/ddq_detr_layers.py new file mode 100755 index 0000000..57664c7 --- /dev/null +++ b/projects/DETR3D/layers/transformer/ddq_detr_layers.py @@ -0,0 +1,223 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import copy + +import torch +from mmcv.ops import batched_nms +from torch import Tensor, nn + +from mmdet.structures.bbox import bbox_cxcywh_to_xyxy +from .deformable_detr_layers import DeformableDetrTransformerDecoder +from .utils import MLP, coordinate_to_encoding, inverse_sigmoid + + +class DDQTransformerDecoder(DeformableDetrTransformerDecoder): + """Transformer decoder of DDQ.""" + + def _init_layers(self) -> None: + """Initialize encoder layers.""" + super()._init_layers() + self.ref_point_head = MLP(self.embed_dims * 2, self.embed_dims, + self.embed_dims, 2) + self.norm = nn.LayerNorm(self.embed_dims) + + def select_distinct_queries(self, reference_points: Tensor, query: Tensor, + self_attn_mask: Tensor, layer_index): + """Get updated `self_attn_mask` for distinct queries selection, it is + used in self attention layers of decoder. + + Args: + reference_points (Tensor): The input reference of decoder, + has shape (bs, num_queries, 4) with the last dimension + arranged as (cx, cy, w, h). + query (Tensor): The input query of decoder, has shape + (bs, num_queries, dims). + self_attn_mask (Tensor): The input self attention mask of + last decoder layer, has shape (bs, num_queries_total, + num_queries_total). + layer_index (int): Last decoder layer index, used to get + classification score of last layer output, for + distinct queries selection. + + Returns: + Tensor: `self_attn_mask` used in self attention layers + of decoder, has shape (bs, num_queries_total, + num_queries_total). + """ + num_imgs = len(reference_points) + dis_start, num_dis = self.cache_dict['dis_query_info'] + # shape of self_attn_mask + # (batch⋅num_heads, num_queries, embed_dims) + dis_mask = self_attn_mask[:, dis_start:dis_start + num_dis, + dis_start:dis_start + num_dis] + # cls_branches from DDQDETRHead + scores = self.cache_dict['cls_branches'][layer_index]( + query[:, dis_start:dis_start + num_dis]).sigmoid().max(-1).values + proposals = reference_points[:, dis_start:dis_start + num_dis] + proposals = bbox_cxcywh_to_xyxy(proposals) + + attn_mask_list = [] + for img_id in range(num_imgs): + single_proposals = proposals[img_id] + single_scores = scores[img_id] + attn_mask = ~dis_mask[img_id * self.cache_dict['num_heads']][0] + # distinct query inds in this layer + ori_index = attn_mask.nonzero().view(-1) + _, keep_idxs = batched_nms(single_proposals[ori_index], + single_scores[ori_index], + torch.ones(len(ori_index)), + self.cache_dict['dqs_cfg']) + + real_keep_index = ori_index[keep_idxs] + + attn_mask = torch.ones_like(dis_mask[0]).bool() + # such a attn_mask give best result + # If it requires to keep index i, then all cells in row or column + # i should be kept in `attn_mask` . For example, if + # `real_keep_index` = [1, 4], and `attn_mask` size = [8, 8], + # then all cells at rows or columns [1, 4] should be kept, and + # all the other cells should be masked out. 
So the value of + # `attn_mask` should be: + # + # target\source 0 1 2 3 4 5 6 7 + # 0 [ 0 1 0 0 1 0 0 0 ] + # 1 [ 1 1 1 1 1 1 1 1 ] + # 2 [ 0 1 0 0 1 0 0 0 ] + # 3 [ 0 1 0 0 1 0 0 0 ] + # 4 [ 1 1 1 1 1 1 1 1 ] + # 5 [ 0 1 0 0 1 0 0 0 ] + # 6 [ 0 1 0 0 1 0 0 0 ] + # 7 [ 0 1 0 0 1 0 0 0 ] + attn_mask[real_keep_index] = False + attn_mask[:, real_keep_index] = False + + attn_mask = attn_mask[None].repeat(self.cache_dict['num_heads'], 1, + 1) + attn_mask_list.append(attn_mask) + attn_mask = torch.cat(attn_mask_list) + self_attn_mask = copy.deepcopy(self_attn_mask) + self_attn_mask[:, dis_start:dis_start + num_dis, + dis_start:dis_start + num_dis] = attn_mask + # will be used in loss and inference + self.cache_dict['distinct_query_mask'].append(~attn_mask) + return self_attn_mask + + def forward(self, query: Tensor, value: Tensor, key_padding_mask: Tensor, + self_attn_mask: Tensor, reference_points: Tensor, + spatial_shapes: Tensor, level_start_index: Tensor, + valid_ratios: Tensor, reg_branches: nn.ModuleList, + **kwargs) -> Tensor: + """Forward function of Transformer decoder. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, + dims). + value (Tensor): The input values, has shape (bs, num_value, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn` + input. ByteTensor, has shape (bs, num_value). + self_attn_mask (Tensor): The attention mask to prevent information + leakage from different denoising groups, distinct queries and + dense queries, has shape (num_queries_total, + num_queries_total). It will be updated for distinct queries + selection in this forward function. It is `None` when + `self.training` is `False`. + reference_points (Tensor): The initial reference, has shape + (bs, num_queries, 4) with the last dimension arranged as + (cx, cy, w, h). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + reg_branches: (obj:`nn.ModuleList`): Used for refining the + regression results. + + Returns: + tuple[Tensor]: Output queries and references of Transformer + decoder + + - query (Tensor): Output embeddings of the last decoder, has + shape (bs, num_queries, embed_dims) when `return_intermediate` + is `False`. Otherwise, Intermediate output embeddings of all + decoder layers, has shape (num_decoder_layers, bs, num_queries, + embed_dims). + - reference_points (Tensor): The reference of the last decoder + layer, has shape (bs, num_queries, 4) when `return_intermediate` + is `False`. Otherwise, Intermediate references of all decoder + layers, has shape (1 + num_decoder_layers, bs, num_queries, 4). + The coordinates are arranged as (cx, cy, w, h). 
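+
+        Note: between layers, ``select_distinct_queries`` runs
+        class-agnostic NMS (``batched_nms`` with all-ones class indices)
+        over the current distinct-query boxes and updates
+        ``self_attn_mask`` so that attention between two distinct queries
+        is only allowed when at least one of them was kept, following the
+        pattern illustrated in that method.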
+ """ + intermediate = [] + intermediate_reference_points = [reference_points] + self.cache_dict['distinct_query_mask'] = [] + if self_attn_mask is None: + self_attn_mask = torch.zeros((query.size(1), query.size(1)), + device=query.device).bool() + # shape is (batch*number_heads, num_queries, num_queries) + self_attn_mask = self_attn_mask[None].repeat( + len(query) * self.cache_dict['num_heads'], 1, 1) + for layer_index, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = \ + reference_points[:, :, None] * torch.cat( + [valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = \ + reference_points[:, :, None] * valid_ratios[:, None] + + query_sine_embed = coordinate_to_encoding( + reference_points_input[:, :, 0, :], + num_feats=self.embed_dims // 2) + query_pos = self.ref_point_head(query_sine_embed) + + query = layer( + query, + query_pos=query_pos, + value=value, + key_padding_mask=key_padding_mask, + self_attn_mask=self_attn_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reference_points=reference_points_input, + **kwargs) + + if not self.training: + tmp = reg_branches[layer_index](query) + assert reference_points.shape[-1] == 4 + new_reference_points = tmp + inverse_sigmoid( + reference_points, eps=1e-3) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + if layer_index < (len(self.layers) - 1): + self_attn_mask = self.select_distinct_queries( + reference_points, query, self_attn_mask, layer_index) + + else: + num_dense = self.cache_dict['num_dense_queries'] + tmp = reg_branches[layer_index](query[:, :-num_dense]) + tmp_dense = self.aux_reg_branches[layer_index]( + query[:, -num_dense:]) + + tmp = torch.cat([tmp, tmp_dense], dim=1) + assert reference_points.shape[-1] == 4 + new_reference_points = tmp + inverse_sigmoid( + reference_points, eps=1e-3) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + if layer_index < (len(self.layers) - 1): + self_attn_mask = self.select_distinct_queries( + reference_points, query, self_attn_mask, layer_index) + + if self.return_intermediate: + intermediate.append(self.norm(query)) + intermediate_reference_points.append(new_reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return query, reference_points diff --git a/projects/DETR3D/layers/transformer/deformable_detr_layers.py b/projects/DETR3D/layers/transformer/deformable_detr_layers.py new file mode 100755 index 0000000..200367d --- /dev/null +++ b/projects/DETR3D/layers/transformer/deformable_detr_layers.py @@ -0,0 +1,265 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Optional, Tuple, Union + +import torch +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmcv.ops import MultiScaleDeformableAttention +from mmengine.model import ModuleList +from torch import Tensor, nn + +from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer, + DetrTransformerEncoder, DetrTransformerEncoderLayer,DetrTransformerEncoderLayer2) +from .utils import inverse_sigmoid + +try: + from fairscale.nn.checkpoint import checkpoint_wrapper +except Exception: + checkpoint_wrapper = None + + +class DeformableDetrTransformerEncoder(DetrTransformerEncoder): + """Transformer encoder of Deformable DETR.""" + + def _init_layers(self) -> None: + """Initialize encoder layers.""" + self.layers = ModuleList([ + DeformableDetrTransformerEncoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + + if self.num_cp > 0: + if checkpoint_wrapper is None: + raise NotImplementedError( + 'If you want to reduce GPU memory usage, \ + please install fairscale by executing the \ + following command: pip install fairscale.') + for i in range(self.num_cp): + self.layers[i] = checkpoint_wrapper(self.layers[i]) + + self.embed_dims = self.layers[0].embed_dims + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, spatial_shapes: Tensor, + level_start_index: Tensor, valid_ratios: Tensor, + **kwargs) -> Tensor: + """Forward function of Transformer encoder. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + query_pos (Tensor): The positional encoding for query, has shape + (bs, num_queries, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor, has shape (bs, num_queries). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + + Returns: + Tensor: Output queries of Transformer encoder, which is also + called 'encoder output embeddings' or 'memory', has shape + (bs, num_queries, dim) + """ + reference_points = self.get_encoder_reference_points( + spatial_shapes, valid_ratios, device=query.device) + for layer in self.layers: + query = layer( + query=query, + query_pos=query_pos, + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reference_points=reference_points, + **kwargs) + return query + + @staticmethod + def get_encoder_reference_points( + spatial_shapes: Tensor, valid_ratios: Tensor, + device: Union[torch.device, str]) -> Tensor: + """Get the reference points used in encoder. + + Args: + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + device (obj:`device` or str): The device acquired by the + `reference_points`. + + Returns: + Tensor: Reference points used in decoder, has shape (bs, length, + num_levels, 2). 
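+
+        Note: per level, the reference points are the feature-cell centres
+        (``0.5`` to ``H - 0.5`` / ``W - 0.5``) normalized by the level's
+        valid height/width, concatenated over levels and finally scaled by
+        ``valid_ratios``.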
+ """ + + reference_points_list = [] + for lvl, (H, W) in enumerate(spatial_shapes): + ref_y, ref_x = torch.meshgrid( + torch.linspace( + 0.5, H - 0.5, H, dtype=torch.float32, device=device), + torch.linspace( + 0.5, W - 0.5, W, dtype=torch.float32, device=device)) + ref_y = ref_y.reshape(-1)[None] / ( + valid_ratios[:, :,None, lvl, 1] * H) + ref_x = ref_x.reshape(-1)[None] / ( + valid_ratios[:, :,None, lvl, 0] * W) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 2) + # [bs, sum(hw), num_level, 2] + reference_points = reference_points[:, :, :, None] * valid_ratios[:, :, None] + return reference_points + + +class DeformableDetrTransformerDecoder(DetrTransformerDecoder): + """Transformer Decoder of Deformable DETR.""" + + def _init_layers(self) -> None: + """Initialize decoder layers.""" + self.layers = ModuleList([ + DeformableDetrTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + if self.post_norm_cfg is not None: + raise ValueError('There is not post_norm in ' + f'{self._get_name()}') + + def forward(self, + query: Tensor, + query_pos: Tensor, + value: Tensor, + key_padding_mask: Tensor, + reference_points: Tensor, + spatial_shapes: Tensor, + level_start_index: Tensor, + valid_ratios: Tensor, + reg_branches: Optional[nn.Module] = None, + **kwargs) -> Tuple[Tensor]: + """Forward function of Transformer decoder. + + Args: + query (Tensor): The input queries, has shape (bs, num_queries, + dim). + query_pos (Tensor): The input positional query, has shape + (bs, num_queries, dim). It will be added to `query` before + forward function. + value (Tensor): The input values, has shape (bs, num_value, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn` + input. ByteTensor, has shape (bs, num_value). + reference_points (Tensor): The initial reference, has shape + (bs, num_queries, 4) with the last dimension arranged as + (cx, cy, w, h) when `as_two_stage` is `True`, otherwise has + shape (bs, num_queries, 2) with the last dimension arranged + as (cx, cy). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + reg_branches: (obj:`nn.ModuleList`, optional): Used for refining + the regression results. Only would be passed when + `with_box_refine` is `True`, otherwise would be `None`. + + Returns: + tuple[Tensor]: Outputs of Deformable Transformer Decoder. + + - output (Tensor): Output embeddings of the last decoder, has + shape (num_queries, bs, embed_dims) when `return_intermediate` + is `False`. Otherwise, Intermediate output embeddings of all + decoder layers, has shape (num_decoder_layers, num_queries, bs, + embed_dims). + - reference_points (Tensor): The reference of the last decoder + layer, has shape (bs, num_queries, 4) when `return_intermediate` + is `False`. Otherwise, Intermediate references of all decoder + layers, has shape (num_decoder_layers, bs, num_queries, 4). 
The + coordinates are arranged as (cx, cy, w, h) + """ + output = query + intermediate = [] + intermediate_reference_points = [] + for layer_id, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = \ + reference_points[:, :, None] * \ + torch.cat([valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = \ + reference_points[:, :, None] * \ + valid_ratios[:, None] + output = layer( + output, + query_pos=query_pos, + value=value, + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reference_points=reference_points_input, + **kwargs) + + if reg_branches is not None: + tmp_reg_preds = reg_branches[layer_id](output) + if reference_points.shape[-1] == 4: + new_reference_points = tmp_reg_preds + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + assert reference_points.shape[-1] == 2 + new_reference_points = tmp_reg_preds + new_reference_points[..., :2] = tmp_reg_preds[ + ..., :2] + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +class DeformableDetrTransformerEncoderLayer(DetrTransformerEncoderLayer2): + """Encoder layer of Deformable DETR.""" + + def _init_layers(self) -> None: + """Initialize self_attn, ffn, and norms.""" + self.self_attn = MultiScaleDeformableAttention(**self.self_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(2) + ] + self.norms = ModuleList(norms_list) + + +class DeformableDetrTransformerDecoderLayer(DetrTransformerDecoderLayer): + """Decoder layer of Deformable DETR.""" + + def _init_layers(self) -> None: + """Initialize self_attn, cross-attn, ffn, and norms.""" + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.cross_attn = MultiScaleDeformableAttention(**self.cross_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(3) + ] + self.norms = ModuleList(norms_list) diff --git a/projects/DETR3D/layers/transformer/detr_layers.py b/projects/DETR3D/layers/transformer/detr_layers.py new file mode 100755 index 0000000..36a5ebc --- /dev/null +++ b/projects/DETR3D/layers/transformer/detr_layers.py @@ -0,0 +1,459 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +import torch +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmengine import ConfigDict +from mmengine.model import BaseModule, ModuleList +from torch import Tensor + +from mmdet.utils import ConfigType, OptConfigType + +try: + from fairscale.nn.checkpoint import checkpoint_wrapper +except Exception: + checkpoint_wrapper = None + + +class DetrTransformerEncoder(BaseModule): + """Encoder of DETR. + + Args: + num_layers (int): Number of encoder layers. + layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder + layer. 
All the layers will share the same config. + num_cp (int): Number of checkpointing blocks in encoder layer. + Default to -1. + init_cfg (:obj:`ConfigDict` or dict, optional): the config to control + the initialization. Defaults to None. + """ + + def __init__(self, + num_layers: int, + layer_cfg: ConfigType, + num_cp: int = -1, + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + self.num_layers = num_layers + self.layer_cfg = layer_cfg + self.num_cp = num_cp + assert self.num_cp <= self.num_layers + self._init_layers() + + def _init_layers(self) -> None: + """Initialize encoder layers.""" + self.layers = ModuleList([ + DetrTransformerEncoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + + if self.num_cp > 0: + if checkpoint_wrapper is None: + raise NotImplementedError( + 'If you want to reduce GPU memory usage, \ + please install fairscale by executing the \ + following command: pip install fairscale.') + for i in range(self.num_cp): + self.layers[i] = checkpoint_wrapper(self.layers[i]) + + self.embed_dims = self.layers[0].embed_dims + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, **kwargs) -> Tensor: + """Forward function of encoder. + + Args: + query (Tensor): Input queries of encoder, has shape + (bs, num_queries, dim). + query_pos (Tensor): The positional embeddings of the queries, has + shape (bs, num_queries, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor, has shape (bs, num_queries). + + Returns: + Tensor: Has shape (bs, num_queries, dim) if `batch_first` is + `True`, otherwise (num_queries, bs, dim). + """ + for layer in self.layers: + query = layer(query, query_pos, key_padding_mask, **kwargs) + return query + + +class DetrTransformerDecoder(BaseModule): + """Decoder of DETR. + + Args: + num_layers (int): Number of decoder layers. + layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder + layer. All the layers will share the same config. + post_norm_cfg (:obj:`ConfigDict` or dict, optional): Config of the + post normalization layer. Defaults to `LN`. + return_intermediate (bool, optional): Whether to return outputs of + intermediate layers. Defaults to `True`, + init_cfg (:obj:`ConfigDict` or dict, optional): the config to control + the initialization. Defaults to None. + """ + + def __init__(self, + num_layers: int, + layer_cfg: ConfigType, + post_norm_cfg: OptConfigType = dict(type='LN'), + return_intermediate: bool = True, + init_cfg: Union[dict, ConfigDict] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.layer_cfg = layer_cfg + self.num_layers = num_layers + self.post_norm_cfg = post_norm_cfg + self.return_intermediate = return_intermediate + self._init_layers() + + def _init_layers(self) -> None: + """Initialize decoder layers.""" + self.layers = ModuleList([ + DetrTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + self.post_norm = build_norm_layer(self.post_norm_cfg, + self.embed_dims)[1] + + def forward(self, query: Tensor, key: Tensor, value: Tensor, + query_pos: Tensor, key_pos: Tensor, key_padding_mask: Tensor, + **kwargs) -> Tensor: + """Forward function of decoder + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + key (Tensor): The input key, has shape (bs, num_keys, dim). + value (Tensor): The input value with the same shape as `key`. 
+ query_pos (Tensor): The positional encoding for `query`, with the + same shape as `query`. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. + key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn` + input. ByteTensor, has shape (bs, num_value). + + Returns: + Tensor: The forwarded results will have shape + (num_decoder_layers, bs, num_queries, dim) if + `return_intermediate` is `True` else (1, bs, num_queries, dim). + """ + intermediate = [] + for layer in self.layers: + query = layer( + query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + key_padding_mask=key_padding_mask, + **kwargs) + if self.return_intermediate: + intermediate.append(self.post_norm(query)) + query = self.post_norm(query) + + if self.return_intermediate: + return torch.stack(intermediate) + + return query.unsqueeze(0) + + +class DetrTransformerEncoderLayer(BaseModule): + """Implements encoder layer in DETR transformer. + + Args: + self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self + attention. + ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN. + norm_cfg (:obj:`ConfigDict` or dict, optional): Config for + normalization layers. All the layers will share the same + config. Defaults to `LN`. + init_cfg (:obj:`ConfigDict` or dict, optional): Config to control + the initialization. Defaults to None. + """ + + def __init__(self, + self_attn_cfg: OptConfigType = dict( + embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg: OptConfigType = dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True)), + norm_cfg: OptConfigType = dict(type='LN'), + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + + self.self_attn_cfg = self_attn_cfg + if 'batch_first' not in self.self_attn_cfg: + self.self_attn_cfg['batch_first'] = True + else: + assert self.self_attn_cfg['batch_first'] is True, 'First \ + dimension of all DETRs in mmdet is `batch`, \ + please set `batch_first` flag.' + + self.ffn_cfg = ffn_cfg + self.norm_cfg = norm_cfg + self._init_layers() + + def _init_layers(self) -> None: + """Initialize self-attention, FFN, and normalization.""" + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(2) + ] + self.norms = ModuleList(norms_list) + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, **kwargs) -> Tensor: + """Forward function of an encoder layer. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + query_pos (Tensor): The positional encoding for query, with + the same shape as `query`. + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor. has shape (bs, num_queries). + Returns: + Tensor: forwarded results, has shape (bs, num_queries, dim). + """ + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + key_padding_mask=key_padding_mask, + **kwargs) + query = self.norms[0](query) + query = self.ffn(query) + query = self.norms[1](query) + + return query + + +class DetrTransformerEncoderLayer2(BaseModule): + """Implements encoder layer in DETR transformer. + + Args: + self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self + attention. + ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN. 
+        norm_cfg (:obj:`ConfigDict` or dict, optional): Config for
+            normalization layers. All the layers will share the same
+            config. Defaults to `LN`.
+        init_cfg (:obj:`ConfigDict` or dict, optional): Config to control
+            the initialization. Defaults to None.
+    """
+
+    def __init__(self,
+                 self_attn_cfg: OptConfigType = dict(
+                     embed_dims=256, num_heads=8, dropout=0.0),
+                 ffn_cfg: OptConfigType = dict(
+                     embed_dims=256,
+                     feedforward_channels=1024,
+                     num_fcs=2,
+                     ffn_drop=0.,
+                     act_cfg=dict(type='ReLU', inplace=True)),
+                 norm_cfg: OptConfigType = dict(type='LN'),
+                 init_cfg: OptConfigType = None) -> None:
+
+        super().__init__(init_cfg=init_cfg)
+
+        self.self_attn_cfg = self_attn_cfg
+        if 'batch_first' not in self.self_attn_cfg:
+            self.self_attn_cfg['batch_first'] = True
+        else:
+            assert self.self_attn_cfg['batch_first'] is True, 'First \
+            dimension of all DETRs in mmdet is `batch`, \
+            please set `batch_first` flag.'
+
+        self.ffn_cfg = ffn_cfg
+        self.norm_cfg = norm_cfg
+        self._init_layers()
+
+    def _init_layers(self) -> None:
+        """Initialize self-attention, FFN, and normalization."""
+        self.self_attn = MultiheadAttention(**self.self_attn_cfg)
+        self.embed_dims = self.self_attn.embed_dims
+        self.ffn = FFN(**self.ffn_cfg)
+        norms_list = [
+            build_norm_layer(self.norm_cfg, self.embed_dims)[1]
+            for _ in range(2)
+        ]
+        self.norms = ModuleList(norms_list)
+
+    def forward(self, query: Tensor, query_pos: Tensor,
+                key_padding_mask: Tensor, reference_points: Tensor,
+                **kwargs) -> Tensor:
+        """Forward function of an encoder layer that handles multi-view
+        (per-camera) inputs.
+
+        Args:
+            query (Tensor): The input query, has shape
+                (bs, num_cam, num_queries, dim).
+            query_pos (Tensor): The positional encoding for query, with the
+                same shape as `query`.
+            key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`
+                input. ByteTensor, has shape (bs, num_cam, num_queries).
+            reference_points (Tensor): The normalized reference points,
+                passed through to the attention module together with the
+                other keyword arguments, indexed per sample.
+
+        Returns:
+            Tensor: forwarded results, has shape
+                (bs, num_cam, num_queries, dim).
+        """
+        # Attention is run sample by sample, treating the camera dimension
+        # as the batch dimension of the underlying attention module.
+        bsz = query.shape[0]
+        num_cam = 6  # number of camera views; hard-coded in this implementation
+        tmp = []
+        for i in range(bsz):
+            out = self.self_attn(
+                query=query[i],
+                key=query[i],
+                value=query[i],
+                query_pos=query_pos[i],
+                key_pos=query_pos[i],
+                key_padding_mask=key_padding_mask[i],
+                reference_points=reference_points[i],
+                **kwargs)
+            out = self.norms[0](out)
+            out = self.ffn(out)
+            out = self.norms[1](out)
+            tmp.append(out)
+        tmp = torch.cat(tmp, dim=0).view(bsz, num_cam, -1, self.embed_dims)
+        return tmp
+
+
+class DetrTransformerDecoderLayer(BaseModule):
+    """Implements decoder layer in DETR transformer.
+
+    Args:
+        self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self
+            attention.
+        cross_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for cross
+            attention.
+        ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN.
+        norm_cfg (:obj:`ConfigDict` or dict, optional): Config for
+            normalization layers. All the layers will share the same
+            config. Defaults to `LN`.
+        init_cfg (:obj:`ConfigDict` or dict, optional): Config to control
+            the initialization. Defaults to None.
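+
+    Examples:
+        A minimal usage sketch (illustrative only, relying on the default
+        sub-module configs with ``embed_dims=256``):
+
+        >>> import torch
+        >>> layer = DetrTransformerDecoderLayer()
+        >>> query = torch.randn(2, 100, 256)
+        >>> memory = torch.randn(2, 1350, 256)
+        >>> layer(query, key=memory, value=memory).shape
+        torch.Size([2, 100, 256])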
+ """ + + def __init__(self, + self_attn_cfg: OptConfigType = dict( + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + cross_attn_cfg: OptConfigType = dict( + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + ffn_cfg: OptConfigType = dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + norm_cfg: OptConfigType = dict(type='LN'), + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + + self.self_attn_cfg = self_attn_cfg + self.cross_attn_cfg = cross_attn_cfg + if 'batch_first' not in self.self_attn_cfg: + self.self_attn_cfg['batch_first'] = True + else: + assert self.self_attn_cfg['batch_first'] is True, 'First \ + dimension of all DETRs in mmdet is `batch`, \ + please set `batch_first` flag.' + + if 'batch_first' not in self.cross_attn_cfg: + self.cross_attn_cfg['batch_first'] = True + else: + assert self.cross_attn_cfg['batch_first'] is True, 'First \ + dimension of all DETRs in mmdet is `batch`, \ + please set `batch_first` flag.' + + self.ffn_cfg = ffn_cfg + self.norm_cfg = norm_cfg + self._init_layers() + + def _init_layers(self) -> None: + """Initialize self-attention, FFN, and normalization.""" + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.cross_attn = MultiheadAttention(**self.cross_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(3) + ] + self.norms = ModuleList(norms_list) + + def forward(self, + query: Tensor, + key: Tensor = None, + value: Tensor = None, + query_pos: Tensor = None, + key_pos: Tensor = None, + self_attn_mask: Tensor = None, + cross_attn_mask: Tensor = None, + key_padding_mask: Tensor = None, + **kwargs) -> Tensor: + """ + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + key (Tensor, optional): The input key, has shape (bs, num_keys, + dim). If `None`, the `query` will be used. Defaults to `None`. + value (Tensor, optional): The input value, has the same shape as + `key`, as in `nn.MultiheadAttention.forward`. If `None`, the + `key` will be used. Defaults to `None`. + query_pos (Tensor, optional): The positional encoding for `query`, + has the same shape as `query`. If not `None`, it will be added + to `query` before forward function. Defaults to `None`. + key_pos (Tensor, optional): The positional encoding for `key`, has + the same shape as `key`. If not `None`, it will be added to + `key` before forward function. If None, and `query_pos` has the + same shape as `key`, then `query_pos` will be used for + `key_pos`. Defaults to None. + self_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + cross_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor, optional): The `key_padding_mask` of + `self_attn` input. ByteTensor, has shape (bs, num_value). + Defaults to None. + + Returns: + Tensor: forwarded results, has shape (bs, num_queries, dim). 
+ """ + + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=self_attn_mask, + **kwargs) + query = self.norms[0](query) + query = self.cross_attn( + query=query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=cross_attn_mask, + key_padding_mask=key_padding_mask, + **kwargs) + query = self.norms[1](query) + query = self.ffn(query) + query = self.norms[2](query) + + return query diff --git a/projects/DETR3D/layers/transformer/dino_layers.py b/projects/DETR3D/layers/transformer/dino_layers.py new file mode 100755 index 0000000..64610d0 --- /dev/null +++ b/projects/DETR3D/layers/transformer/dino_layers.py @@ -0,0 +1,562 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Tuple, Union + +import torch +from mmengine.model import BaseModule +from torch import Tensor, nn + +from mmdet.structures import SampleList +from mmdet.structures.bbox import bbox_xyxy_to_cxcywh +from mmdet.utils import OptConfigType +from .deformable_detr_layers import DeformableDetrTransformerDecoder +from .utils import MLP, coordinate_to_encoding, inverse_sigmoid + + +class DinoTransformerDecoder(DeformableDetrTransformerDecoder): + """Transformer decoder of DINO.""" + + def _init_layers(self) -> None: + """Initialize decoder layers.""" + super()._init_layers() + self.ref_point_head = MLP(self.embed_dims * 2, self.embed_dims, + self.embed_dims, 2) + self.norm = nn.LayerNorm(self.embed_dims) + + def forward(self, query: Tensor, value: Tensor, key_padding_mask: Tensor, + self_attn_mask: Tensor, reference_points: Tensor, + spatial_shapes: Tensor, level_start_index: Tensor, + valid_ratios: Tensor, reg_branches: nn.ModuleList, + **kwargs) -> Tuple[Tensor]: + """Forward function of Transformer decoder. + + Args: + query (Tensor): The input query, has shape (num_queries, bs, dim). + value (Tensor): The input values, has shape (num_value, bs, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor, has shape (num_queries, bs). + self_attn_mask (Tensor): The attention mask to prevent information + leakage from different denoising groups and matching parts, has + shape (num_queries_total, num_queries_total). It is `None` when + `self.training` is `False`. + reference_points (Tensor): The initial reference, has shape + (bs, num_queries, 4) with the last dimension arranged as + (cx, cy, w, h). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + reg_branches: (obj:`nn.ModuleList`): Used for refining the + regression results. + + Returns: + tuple[Tensor]: Output queries and references of Transformer + decoder + + - query (Tensor): Output embeddings of the last decoder, has + shape (num_queries, bs, embed_dims) when `return_intermediate` + is `False`. Otherwise, Intermediate output embeddings of all + decoder layers, has shape (num_decoder_layers, num_queries, bs, + embed_dims). + - reference_points (Tensor): The reference of the last decoder + layer, has shape (bs, num_queries, 4) when `return_intermediate` + is `False`. 
Otherwise, Intermediate references of all decoder + layers, has shape (num_decoder_layers, bs, num_queries, 4). The + coordinates are arranged as (cx, cy, w, h) + """ + intermediate = [] + intermediate_reference_points = [reference_points] + for lid, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = \ + reference_points[:, :, None] * torch.cat( + [valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = \ + reference_points[:, :, None] * valid_ratios[:, None] + + query_sine_embed = coordinate_to_encoding( + reference_points_input[:, :, 0, :]) + query_pos = self.ref_point_head(query_sine_embed) + + query = layer( + query, + query_pos=query_pos, + value=value, + key_padding_mask=key_padding_mask, + self_attn_mask=self_attn_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reference_points=reference_points_input, + **kwargs) + + if reg_branches is not None: + tmp = reg_branches[lid](query) + assert reference_points.shape[-1] == 4 + new_reference_points = tmp + inverse_sigmoid( + reference_points, eps=1e-3) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + if self.return_intermediate: + intermediate.append(self.norm(query)) + intermediate_reference_points.append(new_reference_points) + # NOTE this is for the "Look Forward Twice" module, + # in the DeformDETR, reference_points was appended. + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return query, reference_points + + +class CdnQueryGenerator(BaseModule): + """Implement query generator of the Contrastive denoising (CDN) proposed in + `DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object + Detection `_ + + Code is modified from the `official github repo + `_. + + Args: + num_classes (int): Number of object classes. + embed_dims (int): The embedding dimensions of the generated queries. + num_matching_queries (int): The queries number of the matching part. + Used for generating dn_mask. + label_noise_scale (float): The scale of label noise, defaults to 0.5. + box_noise_scale (float): The scale of box noise, defaults to 1.0. + group_cfg (:obj:`ConfigDict` or dict, optional): The config of the + denoising queries grouping, includes `dynamic`, `num_dn_queries`, + and `num_groups`. Two grouping strategies, 'static dn groups' and + 'dynamic dn groups', are supported. When `dynamic` is `False`, + the `num_groups` should be set, and the number of denoising query + groups will always be `num_groups`. When `dynamic` is `True`, the + `num_dn_queries` should be set, and the group number will be + dynamic to ensure that the denoising queries number will not exceed + `num_dn_queries` to prevent large fluctuations of memory. Defaults + to `None`. 
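+
+    Examples:
+        Construction sketch only (the values below are illustrative and not
+        taken from any config in this patch):
+
+        >>> dn_query_generator = CdnQueryGenerator(
+        ...     num_classes=80,
+        ...     embed_dims=256,
+        ...     num_matching_queries=900,
+        ...     label_noise_scale=0.5,
+        ...     box_noise_scale=1.0,
+        ...     group_cfg=dict(dynamic=True, num_dn_queries=100))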
+ """ + + def __init__(self, + num_classes: int, + embed_dims: int, + num_matching_queries: int, + label_noise_scale: float = 0.5, + box_noise_scale: float = 1.0, + group_cfg: OptConfigType = None) -> None: + super().__init__() + self.num_classes = num_classes + self.embed_dims = embed_dims + self.num_matching_queries = num_matching_queries + self.label_noise_scale = label_noise_scale + self.box_noise_scale = box_noise_scale + + # prepare grouping strategy + group_cfg = {} if group_cfg is None else group_cfg + self.dynamic_dn_groups = group_cfg.get('dynamic', True) + if self.dynamic_dn_groups: + if 'num_dn_queries' not in group_cfg: + warnings.warn("'num_dn_queries' should be set when using " + 'dynamic dn groups, use 100 as default.') + self.num_dn_queries = group_cfg.get('num_dn_queries', 100) + assert isinstance(self.num_dn_queries, int), \ + f'Expected the num_dn_queries to have type int, but got ' \ + f'{self.num_dn_queries}({type(self.num_dn_queries)}). ' + else: + assert 'num_groups' in group_cfg, \ + 'num_groups should be set when using static dn groups' + self.num_groups = group_cfg['num_groups'] + assert isinstance(self.num_groups, int), \ + f'Expected the num_groups to have type int, but got ' \ + f'{self.num_groups}({type(self.num_groups)}). ' + + # NOTE The original repo of DINO set the num_embeddings 92 for coco, + # 91 (0~90) of which represents target classes and the 92 (91) + # indicates `Unknown` class. However, the embedding of `unknown` class + # is not used in the original DINO. + # TODO: num_classes + 1 or num_classes ? + self.label_embedding = nn.Embedding(self.num_classes, self.embed_dims) + + def __call__(self, batch_data_samples: SampleList) -> tuple: + """Generate contrastive denoising (cdn) queries with ground truth. + + Descriptions of the Number Values in code and comments: + - num_target_total: the total target number of the input batch + samples. + - max_num_target: the max target number of the input batch samples. + - num_noisy_targets: the total targets number after adding noise, + i.e., num_target_total * num_groups * 2. + - num_denoising_queries: the length of the output batched queries, + i.e., max_num_target * num_groups * 2. + + NOTE The format of input bboxes in batch_data_samples is unnormalized + (x, y, x, y), and the output bbox queries are embedded by normalized + (cx, cy, w, h) format bboxes going through inverse_sigmoid. + + Args: + batch_data_samples (list[:obj:`DetDataSample`]): List of the batch + data samples, each includes `gt_instance` which has attributes + `bboxes` and `labels`. The `bboxes` has unnormalized coordinate + format (x, y, x, y). + + Returns: + tuple: The outputs of the dn query generator. + + - dn_label_query (Tensor): The output content queries for denoising + part, has shape (bs, num_denoising_queries, dim), where + `num_denoising_queries = max_num_target * num_groups * 2`. + - dn_bbox_query (Tensor): The output reference bboxes as positions + of queries for denoising part, which are embedded by normalized + (cx, cy, w, h) format bboxes going through inverse_sigmoid, has + shape (bs, num_denoising_queries, 4) with the last dimension + arranged as (cx, cy, w, h). + - attn_mask (Tensor): The attention mask to prevent information + leakage from different denoising groups and matching parts, + will be used as `self_attn_mask` of the `decoder`, has shape + (num_queries_total, num_queries_total), where `num_queries_total` + is the sum of `num_denoising_queries` and `num_matching_queries`. 
+ - dn_meta (Dict[str, int]): The dictionary saves information about + group collation, including 'num_denoising_queries' and + 'num_denoising_groups'. It will be used for split outputs of + denoising and matching parts and loss calculation. + """ + # normalize bbox and collate ground truth (gt) + gt_labels_list = [] + gt_bboxes_list = [] + for sample in batch_data_samples: + img_h, img_w = sample.img_shape + bboxes = sample.gt_instances.bboxes + factor = bboxes.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + bboxes_normalized = bboxes / factor + gt_bboxes_list.append(bboxes_normalized) + gt_labels_list.append(sample.gt_instances.labels) + gt_labels = torch.cat(gt_labels_list) # (num_target_total, 4) + gt_bboxes = torch.cat(gt_bboxes_list) + + num_target_list = [len(bboxes) for bboxes in gt_bboxes_list] + max_num_target = max(num_target_list) + num_groups = self.get_num_groups(max_num_target) + + dn_label_query = self.generate_dn_label_query(gt_labels, num_groups) + dn_bbox_query = self.generate_dn_bbox_query(gt_bboxes, num_groups) + + # The `batch_idx` saves the batch index of the corresponding sample + # for each target, has shape (num_target_total). + batch_idx = torch.cat([ + torch.full_like(t.long(), i) for i, t in enumerate(gt_labels_list) + ]) + dn_label_query, dn_bbox_query = self.collate_dn_queries( + dn_label_query, dn_bbox_query, batch_idx, len(batch_data_samples), + num_groups) + + attn_mask = self.generate_dn_mask( + max_num_target, num_groups, device=dn_label_query.device) + + dn_meta = dict( + num_denoising_queries=int(max_num_target * 2 * num_groups), + num_denoising_groups=num_groups) + + return dn_label_query, dn_bbox_query, attn_mask, dn_meta + + def get_num_groups(self, max_num_target: int = None) -> int: + """Calculate denoising query groups number. + + Two grouping strategies, 'static dn groups' and 'dynamic dn groups', + are supported. When `self.dynamic_dn_groups` is `False`, the number + of denoising query groups will always be `self.num_groups`. When + `self.dynamic_dn_groups` is `True`, the group number will be dynamic, + ensuring the denoising queries number will not exceed + `self.num_dn_queries` to prevent large fluctuations of memory. + + NOTE The `num_group` is shared for different samples in a batch. When + the target numbers in the samples varies, the denoising queries of the + samples containing fewer targets are padded to the max length. + + Args: + max_num_target (int, optional): The max target number of the batch + samples. It will only be used when `self.dynamic_dn_groups` is + `True`. Defaults to `None`. + + Returns: + int: The denoising group number of the current batch. + """ + if self.dynamic_dn_groups: + assert max_num_target is not None, \ + 'group_queries should be provided when using ' \ + 'dynamic dn groups' + if max_num_target == 0: + num_groups = 1 + else: + num_groups = self.num_dn_queries // max_num_target + else: + num_groups = self.num_groups + if num_groups < 1: + num_groups = 1 + return int(num_groups) + + def generate_dn_label_query(self, gt_labels: Tensor, + num_groups: int) -> Tensor: + """Generate noisy labels and their query embeddings. + + The strategy for generating noisy labels is: Randomly choose labels of + `self.label_noise_scale * 0.5` proportion and override each of them + with a random object category label. + + NOTE Not add noise to all labels. 
Besides, the `self.label_noise_scale + * 0.5` arg is the ratio of the chosen positions, which is higher than + the actual proportion of noisy labels, because the labels to override + may be correct. And the gap becomes larger as the number of target + categories decreases. The users should notice this and modify the scale + arg or the corresponding logic according to specific dataset. + + Args: + gt_labels (Tensor): The concatenated gt labels of all samples + in the batch, has shape (num_target_total, ) where + `num_target_total = sum(num_target_list)`. + num_groups (int): The number of denoising query groups. + + Returns: + Tensor: The query embeddings of noisy labels, has shape + (num_noisy_targets, embed_dims), where `num_noisy_targets = + num_target_total * num_groups * 2`. + """ + assert self.label_noise_scale > 0 + gt_labels_expand = gt_labels.repeat(2 * num_groups, + 1).view(-1) # Note `* 2` # noqa + p = torch.rand_like(gt_labels_expand.float()) + chosen_indice = torch.nonzero(p < (self.label_noise_scale * 0.5)).view( + -1) # Note `* 0.5` + new_labels = torch.randint_like(chosen_indice, 0, self.num_classes) + noisy_labels_expand = gt_labels_expand.scatter(0, chosen_indice, + new_labels) + dn_label_query = self.label_embedding(noisy_labels_expand) + return dn_label_query + + def generate_dn_bbox_query(self, gt_bboxes: Tensor, + num_groups: int) -> Tensor: + """Generate noisy bboxes and their query embeddings. + + The strategy for generating noisy bboxes is as follow: + + .. code:: text + + +--------------------+ + | negative | + | +----------+ | + | | positive | | + | | +-----|----+------------+ + | | | | | | + | +----+-----+ | | + | | | | + +---------+----------+ | + | | + | gt bbox | + | | + | +---------+----------+ + | | | | + | | +----+-----+ | + | | | | | | + +-------------|--- +----+ | | + | | positive | | + | +----------+ | + | negative | + +--------------------+ + + The random noise is added to the top-left and down-right point + positions, hence, normalized (x, y, x, y) format of bboxes are + required. The noisy bboxes of positive queries have the points + both within the inner square, while those of negative queries + have the points both between the inner and outer squares. + + Besides, the length of outer square is twice as long as that of + the inner square, i.e., self.box_noise_scale * w_or_h / 2. + NOTE The noise is added to all the bboxes. Moreover, there is still + unconsidered case when one point is within the positive square and + the others is between the inner and outer squares. + + Args: + gt_bboxes (Tensor): The concatenated gt bboxes of all samples + in the batch, has shape (num_target_total, 4) with the last + dimension arranged as (cx, cy, w, h) where + `num_target_total = sum(num_target_list)`. + num_groups (int): The number of denoising query groups. + + Returns: + Tensor: The output noisy bboxes, which are embedded by normalized + (cx, cy, w, h) format bboxes going through inverse_sigmoid, has + shape (num_noisy_targets, 4) with the last dimension arranged as + (cx, cy, w, h), where + `num_noisy_targets = num_target_total * num_groups * 2`. 
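+
+        Examples:
+            Illustrative sketch of the noise pattern only (not part of the
+            original module): positive queries receive offsets in (-1, 1)
+            and negative queries in (-2, -1] U [1, 2), in units of
+            ``box_noise_scale * w_or_h / 2``.
+
+            >>> rand_sign = torch.randint(0, 2, (4,)).float() * 2.0 - 1.0
+            >>> rand_part = torch.rand(4)
+            >>> rand_part[2:] += 1.0  # the negative half
+            >>> offsets = rand_part * rand_sign
+            >>> bool(offsets[:2].abs().max() < 1), bool(offsets[2:].abs().min() >= 1)
+            (True, True)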
+ """ + assert self.box_noise_scale > 0 + device = gt_bboxes.device + + # expand gt_bboxes as groups + gt_bboxes_expand = gt_bboxes.repeat(2 * num_groups, 1) # xyxy + + # obtain index of negative queries in gt_bboxes_expand + positive_idx = torch.arange( + len(gt_bboxes), dtype=torch.long, device=device) + positive_idx = positive_idx.unsqueeze(0).repeat(num_groups, 1) + positive_idx += 2 * len(gt_bboxes) * torch.arange( + num_groups, dtype=torch.long, device=device)[:, None] + positive_idx = positive_idx.flatten() + negative_idx = positive_idx + len(gt_bboxes) + + # determine the sign of each element in the random part of the added + # noise to be positive or negative randomly. + rand_sign = torch.randint_like( + gt_bboxes_expand, low=0, high=2, + dtype=torch.float32) * 2.0 - 1.0 # [low, high), 1 or -1, randomly + + # calculate the random part of the added noise + rand_part = torch.rand_like(gt_bboxes_expand) # [0, 1) + rand_part[negative_idx] += 1.0 # pos: [0, 1); neg: [1, 2) + rand_part *= rand_sign # pos: (-1, 1); neg: (-2, -1] U [1, 2) + + # add noise to the bboxes + bboxes_whwh = bbox_xyxy_to_cxcywh(gt_bboxes_expand)[:, 2:].repeat(1, 2) + noisy_bboxes_expand = gt_bboxes_expand + torch.mul( + rand_part, bboxes_whwh) * self.box_noise_scale / 2 # xyxy + noisy_bboxes_expand = noisy_bboxes_expand.clamp(min=0.0, max=1.0) + noisy_bboxes_expand = bbox_xyxy_to_cxcywh(noisy_bboxes_expand) + + dn_bbox_query = inverse_sigmoid(noisy_bboxes_expand, eps=1e-3) + return dn_bbox_query + + def collate_dn_queries(self, input_label_query: Tensor, + input_bbox_query: Tensor, batch_idx: Tensor, + batch_size: int, num_groups: int) -> Tuple[Tensor]: + """Collate generated queries to obtain batched dn queries. + + The strategy for query collation is as follow: + + .. code:: text + + input_queries (num_target_total, query_dim) + P_A1 P_B1 P_B2 N_A1 N_B1 N_B2 P'A1 P'B1 P'B2 N'A1 N'B1 N'B2 + |________ group1 ________| |________ group2 ________| + | + V + P_A1 Pad0 N_A1 Pad0 P'A1 Pad0 N'A1 Pad0 + P_B1 P_B2 N_B1 N_B2 P'B1 P'B2 N'B1 N'B2 + |____ group1 ____| |____ group2 ____| + batched_queries (batch_size, max_num_target, query_dim) + + where query_dim is 4 for bbox and self.embed_dims for label. + Notation: _-group 1; '-group 2; + A-Sample1(has 1 target); B-sample2(has 2 targets) + + Args: + input_label_query (Tensor): The generated label queries of all + targets, has shape (num_target_total, embed_dims) where + `num_target_total = sum(num_target_list)`. + input_bbox_query (Tensor): The generated bbox queries of all + targets, has shape (num_target_total, 4) with the last + dimension arranged as (cx, cy, w, h). + batch_idx (Tensor): The batch index of the corresponding sample + for each target, has shape (num_target_total). + batch_size (int): The size of the input batch. + num_groups (int): The number of denoising query groups. + + Returns: + tuple[Tensor]: Output batched label and bbox queries. + - batched_label_query (Tensor): The output batched label queries, + has shape (batch_size, max_num_target, embed_dims). + - batched_bbox_query (Tensor): The output batched bbox queries, + has shape (batch_size, max_num_target, 4) with the last dimension + arranged as (cx, cy, w, h). 
+ """ + device = input_label_query.device + num_target_list = [ + torch.sum(batch_idx == idx) for idx in range(batch_size) + ] + max_num_target = max(num_target_list) + num_denoising_queries = int(max_num_target * 2 * num_groups) + + map_query_index = torch.cat([ + torch.arange(num_target, device=device) + for num_target in num_target_list + ]) + map_query_index = torch.cat([ + map_query_index + max_num_target * i for i in range(2 * num_groups) + ]).long() + batch_idx_expand = batch_idx.repeat(2 * num_groups, 1).view(-1) + mapper = (batch_idx_expand, map_query_index) + + batched_label_query = torch.zeros( + batch_size, num_denoising_queries, self.embed_dims, device=device) + batched_bbox_query = torch.zeros( + batch_size, num_denoising_queries, 4, device=device) + + batched_label_query[mapper] = input_label_query + batched_bbox_query[mapper] = input_bbox_query + return batched_label_query, batched_bbox_query + + def generate_dn_mask(self, max_num_target: int, num_groups: int, + device: Union[torch.device, str]) -> Tensor: + """Generate attention mask to prevent information leakage from + different denoising groups and matching parts. + + .. code:: text + + 0 0 0 0 1 1 1 1 0 0 0 0 0 + 0 0 0 0 1 1 1 1 0 0 0 0 0 + 0 0 0 0 1 1 1 1 0 0 0 0 0 + 0 0 0 0 1 1 1 1 0 0 0 0 0 + 1 1 1 1 0 0 0 0 0 0 0 0 0 + 1 1 1 1 0 0 0 0 0 0 0 0 0 + 1 1 1 1 0 0 0 0 0 0 0 0 0 + 1 1 1 1 0 0 0 0 0 0 0 0 0 + 1 1 1 1 1 1 1 1 0 0 0 0 0 + 1 1 1 1 1 1 1 1 0 0 0 0 0 + 1 1 1 1 1 1 1 1 0 0 0 0 0 + 1 1 1 1 1 1 1 1 0 0 0 0 0 + 1 1 1 1 1 1 1 1 0 0 0 0 0 + max_num_target |_| |_________| num_matching_queries + |_____________| num_denoising_queries + + 1 -> True (Masked), means 'can not see'. + 0 -> False (UnMasked), means 'can see'. + + Args: + max_num_target (int): The max target number of the input batch + samples. + num_groups (int): The number of denoising query groups. + device (obj:`device` or str): The device of generated mask. + + Returns: + Tensor: The attention mask to prevent information leakage from + different denoising groups and matching parts, will be used as + `self_attn_mask` of the `decoder`, has shape (num_queries_total, + num_queries_total), where `num_queries_total` is the sum of + `num_denoising_queries` and `num_matching_queries`. + """ + num_denoising_queries = int(max_num_target * 2 * num_groups) + num_queries_total = num_denoising_queries + self.num_matching_queries + attn_mask = torch.zeros( + num_queries_total, + num_queries_total, + device=device, + dtype=torch.bool) + # Make the matching part cannot see the denoising groups + attn_mask[num_denoising_queries:, :num_denoising_queries] = True + # Make the denoising groups cannot see each other + for i in range(num_groups): + # Mask rows of one group per step. + row_scope = slice(max_num_target * 2 * i, + max_num_target * 2 * (i + 1)) + left_scope = slice(max_num_target * 2 * i) + right_scope = slice(max_num_target * 2 * (i + 1), + num_denoising_queries) + attn_mask[row_scope, right_scope] = True + attn_mask[row_scope, left_scope] = True + return attn_mask diff --git a/projects/DETR3D/layers/transformer/grounding_dino_layers.py b/projects/DETR3D/layers/transformer/grounding_dino_layers.py new file mode 100755 index 0000000..e559b8e --- /dev/null +++ b/projects/DETR3D/layers/transformer/grounding_dino_layers.py @@ -0,0 +1,271 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmcv.ops import MultiScaleDeformableAttention +from mmengine.model import ModuleList +from torch import Tensor + +from mmdet.models.utils.vlfuse_helper import SingleScaleBiAttentionBlock +from mmdet.utils import ConfigType, OptConfigType +from .deformable_detr_layers import (DeformableDetrTransformerDecoderLayer, + DeformableDetrTransformerEncoder, + DeformableDetrTransformerEncoderLayer) +from .detr_layers import DetrTransformerEncoderLayer +from .dino_layers import DinoTransformerDecoder +from .utils import MLP, get_text_sine_pos_embed + +try: + from fairscale.nn.checkpoint import checkpoint_wrapper +except Exception: + checkpoint_wrapper = None + + +class GroundingDinoTransformerDecoderLayer( + DeformableDetrTransformerDecoderLayer): + + def __init__(self, + cross_attn_text_cfg: OptConfigType = dict( + embed_dims=256, + num_heads=8, + dropout=0.0, + batch_first=True), + **kwargs) -> None: + """Decoder layer of Deformable DETR.""" + self.cross_attn_text_cfg = cross_attn_text_cfg + if 'batch_first' not in self.cross_attn_text_cfg: + self.cross_attn_text_cfg['batch_first'] = True + super().__init__(**kwargs) + + def _init_layers(self) -> None: + """Initialize self_attn, cross-attn, ffn, and norms.""" + self.self_attn = MultiheadAttention(**self.self_attn_cfg) + self.cross_attn_text = MultiheadAttention(**self.cross_attn_text_cfg) + self.cross_attn = MultiScaleDeformableAttention(**self.cross_attn_cfg) + self.embed_dims = self.self_attn.embed_dims + self.ffn = FFN(**self.ffn_cfg) + norms_list = [ + build_norm_layer(self.norm_cfg, self.embed_dims)[1] + for _ in range(4) + ] + self.norms = ModuleList(norms_list) + + def forward(self, + query: Tensor, + key: Tensor = None, + value: Tensor = None, + query_pos: Tensor = None, + key_pos: Tensor = None, + self_attn_mask: Tensor = None, + cross_attn_mask: Tensor = None, + key_padding_mask: Tensor = None, + memory_text: Tensor = None, + text_attention_mask: Tensor = None, + **kwargs) -> Tensor: + """Implements decoder layer in Grounding DINO transformer. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + key (Tensor, optional): The input key, has shape (bs, num_keys, + dim). If `None`, the `query` will be used. Defaults to `None`. + value (Tensor, optional): The input value, has the same shape as + `key`, as in `nn.MultiheadAttention.forward`. If `None`, the + `key` will be used. Defaults to `None`. + query_pos (Tensor, optional): The positional encoding for `query`, + has the same shape as `query`. If not `None`, it will be added + to `query` before forward function. Defaults to `None`. + key_pos (Tensor, optional): The positional encoding for `key`, has + the same shape as `key`. If not `None`, it will be added to + `key` before forward function. If None, and `query_pos` has the + same shape as `key`, then `query_pos` will be used for + `key_pos`. Defaults to None. + self_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + cross_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor, optional): The `key_padding_mask` of + `self_attn` input. ByteTensor, has shape (bs, num_value). + Defaults to None. + memory_text (Tensor): Memory text. 
It has shape (bs, len_text, + text_embed_dims). + text_attention_mask (Tensor): Text token mask. It has shape (bs, + len_text). + + Returns: + Tensor: forwarded results, has shape (bs, num_queries, dim). + """ + # self attention + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=self_attn_mask, + **kwargs) + query = self.norms[0](query) + # cross attention between query and text + query = self.cross_attn_text( + query=query, + query_pos=query_pos, + key=memory_text, + value=memory_text, + key_padding_mask=text_attention_mask) + query = self.norms[1](query) + # cross attention between query and image + query = self.cross_attn( + query=query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=cross_attn_mask, + key_padding_mask=key_padding_mask, + **kwargs) + query = self.norms[2](query) + query = self.ffn(query) + query = self.norms[3](query) + + return query + + +class GroundingDinoTransformerEncoder(DeformableDetrTransformerEncoder): + + def __init__(self, text_layer_cfg: ConfigType, + fusion_layer_cfg: ConfigType, **kwargs) -> None: + self.text_layer_cfg = text_layer_cfg + self.fusion_layer_cfg = fusion_layer_cfg + super().__init__(**kwargs) + + def _init_layers(self) -> None: + """Initialize encoder layers.""" + # self.layers = ModuleList([ + # DeformableDetrTransformerEncoderLayer(**self.layer_cfg) + # for _ in range(self.num_layers) + # ]) + # self.text_layers = ModuleList([ + # DetrTransformerEncoderLayer(**self.text_layer_cfg) + # for _ in range(self.num_layers) + # ]) + self.fusion_layers = ModuleList([ + SingleScaleBiAttentionBlock(**self.fusion_layer_cfg) + for _ in range(self.num_layers) + ]) + # self.embed_dims = self.layers[0].embed_dims + if self.num_cp > 0: + if checkpoint_wrapper is None: + raise NotImplementedError( + 'If you want to reduce GPU memory usage, \ + please install fairscale by executing the \ + following command: pip install fairscale.') + for i in range(self.num_cp): + # self.layers[i] = checkpoint_wrapper(self.layers[i]) + self.fusion_layers[i] = checkpoint_wrapper( + self.fusion_layers[i]) + + def forward(self, + query: Tensor, + # query_pos: Tensor, + key_padding_mask: Tensor, + spatial_shapes: Tensor, + level_start_index: Tensor, + valid_ratios: Tensor, + memory_text: Tensor = None, + text_attention_mask: Tensor = None, + pos_text: Tensor = None, + text_self_attention_masks: Tensor = None, + position_ids: Tensor = None): + """Forward function of Transformer encoder. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + query_pos (Tensor): The positional encoding for query, has shape + (bs, num_queries, dim). + key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor, has shape (bs, num_queries). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + memory_text (Tensor, optional): Memory text. It has shape (bs, + len_text, text_embed_dims). + text_attention_mask (Tensor, optional): Text token mask. It has + shape (bs,len_text). + pos_text (Tensor, optional): The positional encoding for text. 
+ Defaults to None. + text_self_attention_masks (Tensor, optional): Text self attention + mask. Defaults to None. + position_ids (Tensor, optional): Text position ids. + Defaults to None. + """ + output = query + # reference_points = self.get_encoder_reference_points( + # spatial_shapes, valid_ratios, device=query.device) + # if self.text_layers: + # # generate pos_text + # bs, n_text, _ = memory_text.shape + # if pos_text is None and position_ids is None: + # pos_text = ( + # torch.arange(n_text, + # device=memory_text.device).float().unsqueeze( + # 0).unsqueeze(-1).repeat(bs, 1, 1)) + # pos_text = get_text_sine_pos_embed( + # pos_text, num_pos_feats=256, exchange_xy=False) + # if position_ids is not None: + # pos_text = get_text_sine_pos_embed( + # position_ids[..., None], + # num_pos_feats=256, + # exchange_xy=False) + + # main process + # for layer_id, layer in enumerate(self.layers): + for layer_id in range(6): + if self.fusion_layers: + output, memory_text = self.fusion_layers[layer_id]( + visual_feature=output, + lang_feature=memory_text, + attention_mask_v=key_padding_mask, + attention_mask_l=text_attention_mask, + ) + # if self.text_layers: + # text_num_heads = self.text_layers[ + # layer_id].self_attn_cfg.num_heads + # memory_text = self.text_layers[layer_id]( + # query=memory_text[0], + # query_pos=(pos_text if pos_text is not None else None), + # attn_mask=~text_self_attention_masks.repeat( + # text_num_heads, 1, 1), # note we use ~ for mask here + # key_padding_mask=None, + # ) + # output = layer( + # query=output, + # query_pos=query_pos, + # reference_points=reference_points, + # spatial_shapes=spatial_shapes, + # level_start_index=level_start_index, + # key_padding_mask=key_padding_mask) + return output, memory_text + + +class GroundingDinoTransformerDecoder(DinoTransformerDecoder): + + def _init_layers(self) -> None: + """Initialize decoder layers.""" + self.layers = ModuleList([ + GroundingDinoTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + if self.post_norm_cfg is not None: + raise ValueError('There is not post_norm in ' + f'{self._get_name()}') + self.ref_point_head = MLP(self.embed_dims * 2, self.embed_dims, + self.embed_dims, 2) + self.norm = nn.LayerNorm(self.embed_dims) diff --git a/projects/DETR3D/layers/transformer/mask2former_layers.py b/projects/DETR3D/layers/transformer/mask2former_layers.py new file mode 100755 index 0000000..dcc604e --- /dev/null +++ b/projects/DETR3D/layers/transformer/mask2former_layers.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_norm_layer +from mmengine.model import ModuleList +from torch import Tensor + +from .deformable_detr_layers import DeformableDetrTransformerEncoder +from .detr_layers import DetrTransformerDecoder, DetrTransformerDecoderLayer + + +class Mask2FormerTransformerEncoder(DeformableDetrTransformerEncoder): + """Encoder in PixelDecoder of Mask2Former.""" + + def forward(self, query: Tensor, query_pos: Tensor, + key_padding_mask: Tensor, spatial_shapes: Tensor, + level_start_index: Tensor, valid_ratios: Tensor, + reference_points: Tensor, **kwargs) -> Tensor: + """Forward function of Transformer encoder. + + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + query_pos (Tensor): The positional encoding for query, has shape + (bs, num_queries, dim). If not None, it will be added to the + `query` before forward function. Defaults to None. 
+ key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` + input. ByteTensor, has shape (bs, num_queries). + spatial_shapes (Tensor): Spatial shapes of features in all levels, + has shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels, ) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + valid_ratios (Tensor): The ratios of the valid width and the valid + height relative to the width and the height of features in all + levels, has shape (bs, num_levels, 2). + reference_points (Tensor): The initial reference, has shape + (bs, num_queries, 2) with the last dimension arranged + as (cx, cy). + + Returns: + Tensor: Output queries of Transformer encoder, which is also + called 'encoder output embeddings' or 'memory', has shape + (bs, num_queries, dim) + """ + for layer in self.layers: + query = layer( + query=query, + query_pos=query_pos, + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reference_points=reference_points, + **kwargs) + return query + + +class Mask2FormerTransformerDecoder(DetrTransformerDecoder): + """Decoder of Mask2Former.""" + + def _init_layers(self) -> None: + """Initialize decoder layers.""" + self.layers = ModuleList([ + Mask2FormerTransformerDecoderLayer(**self.layer_cfg) + for _ in range(self.num_layers) + ]) + self.embed_dims = self.layers[0].embed_dims + self.post_norm = build_norm_layer(self.post_norm_cfg, + self.embed_dims)[1] + + +class Mask2FormerTransformerDecoderLayer(DetrTransformerDecoderLayer): + """Implements decoder layer in Mask2Former transformer.""" + + def forward(self, + query: Tensor, + key: Tensor = None, + value: Tensor = None, + query_pos: Tensor = None, + key_pos: Tensor = None, + self_attn_mask: Tensor = None, + cross_attn_mask: Tensor = None, + key_padding_mask: Tensor = None, + **kwargs) -> Tensor: + """ + Args: + query (Tensor): The input query, has shape (bs, num_queries, dim). + key (Tensor, optional): The input key, has shape (bs, num_keys, + dim). If `None`, the `query` will be used. Defaults to `None`. + value (Tensor, optional): The input value, has the same shape as + `key`, as in `nn.MultiheadAttention.forward`. If `None`, the + `key` will be used. Defaults to `None`. + query_pos (Tensor, optional): The positional encoding for `query`, + has the same shape as `query`. If not `None`, it will be added + to `query` before forward function. Defaults to `None`. + key_pos (Tensor, optional): The positional encoding for `key`, has + the same shape as `key`. If not `None`, it will be added to + `key` before forward function. If None, and `query_pos` has the + same shape as `key`, then `query_pos` will be used for + `key_pos`. Defaults to None. + self_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + cross_attn_mask (Tensor, optional): ByteTensor mask, has shape + (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor, optional): The `key_padding_mask` of + `self_attn` input. ByteTensor, has shape (bs, num_value). + Defaults to None. + + Returns: + Tensor: forwarded results, has shape (bs, num_queries, dim). 
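+
+        Note:
+            Unlike the vanilla DETR decoder layer, this layer runs
+            cross-attention before self-attention, following the
+            Mask2Former design of letting the queries attend to the image
+            features first.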
+ """ + + query = self.cross_attn( + query=query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=cross_attn_mask, + key_padding_mask=key_padding_mask, + **kwargs) + query = self.norms[0](query) + query = self.self_attn( + query=query, + key=query, + value=query, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=self_attn_mask, + **kwargs) + query = self.norms[1](query) + query = self.ffn(query) + query = self.norms[2](query) + + return query diff --git a/projects/DETR3D/layers/transformer/positional_encoding.py b/projects/DETR3D/layers/transformer/positional_encoding.py new file mode 100644 index 0000000..c2eed97 --- /dev/null +++ b/projects/DETR3D/layers/transformer/positional_encoding.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Optional + +import torch +import torch.nn as nn +from mmengine.model import BaseModule +from torch import Tensor + +from mmdet.registry import MODELS +from mmdet.utils import MultiConfig, OptMultiConfig + + +@MODELS.register_module() +class SinePositionalEncodingSingle(BaseModule): + """Position encoding with sine and cosine functions. + + See `End-to-End Object Detection with Transformers + `_ for details. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. Note the final returned dimension + for each position is 2 times of this value. + temperature (int, optional): The temperature used for scaling + the position embedding. Defaults to 10000. + normalize (bool, optional): Whether to normalize the position + embedding. Defaults to False. + scale (float, optional): A scale factor that scales the position + embedding. The scale will be used only when `normalize` is True. + Defaults to 2*pi. + eps (float, optional): A value added to the denominator for + numerical stability. Defaults to 1e-6. + offset (float): offset add to embed when do the normalization. + Defaults to 0. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None + """ + + def __init__(self, + num_feats: int, + temperature: int = 10000, + normalize: bool = False, + scale: float = 2 * math.pi, + eps: float = 1e-6, + offset: float = 0., + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) + if normalize: + assert isinstance(scale, (float, int)), 'when normalize is set,' \ + 'scale should be provided and in float or int type, ' \ + f'found {type(scale)}' + self.num_feats = num_feats + self.temperature = temperature + self.normalize = normalize + self.scale = scale + self.eps = eps + self.offset = offset + + def forward(self, mask: Tensor, input: Optional[Tensor] = None) -> Tensor: + """Forward function for `SinePositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + input (Tensor, optional): Input image/feature Tensor. + Shape [bs, c, h, w] + + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. + """ + assert not (mask is None and input is None) + + if mask is not None: + B, H, W = mask.size() + device = mask.device + # For convenience of exporting to ONNX, + # it's required to convert + # `masks` from bool to int. 
+ mask = mask.to(torch.int) + not_mask = 1 - mask # logical_not + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + else: + # single image or batch image with no padding + B, _, H, W = input.shape + device = input.device + x_embed = torch.arange( + 1, W + 1, dtype=torch.float32, device=device) + x_embed = x_embed.view(1, 1, -1).repeat(B, H, 1) + y_embed = torch.arange( + 1, H + 1, dtype=torch.float32, device=device) + y_embed = y_embed.view(1, -1, 1).repeat(B, 1, W) + if self.normalize: + y_embed = (y_embed + self.offset) / \ + (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = (x_embed + self.offset) / \ + (x_embed[:, :, -1:] + self.eps) * self.scale + dim_t = torch.arange( + self.num_feats, dtype=torch.float32, device=device) + dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + # use `view` instead of `flatten` for dynamically exporting to ONNX + + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), + dim=4).view(B, H, W, -1) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), + dim=4).view(B, H, W, -1) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + def __repr__(self) -> str: + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'temperature={self.temperature}, ' + repr_str += f'normalize={self.normalize}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'eps={self.eps})' + return repr_str \ No newline at end of file diff --git a/projects/DETR3D/layers/transformer/utils.py b/projects/DETR3D/layers/transformer/utils.py new file mode 100755 index 0000000..0ca5b90 --- /dev/null +++ b/projects/DETR3D/layers/transformer/utils.py @@ -0,0 +1,810 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings +from typing import Optional, Sequence, Tuple, Union + +import torch +import torch.nn.functional as F +from mmcv.cnn import (Linear, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmcv.cnn.bricks.drop import Dropout +from mmengine.model import BaseModule, ModuleList +from mmengine.utils import to_2tuple +from torch import Tensor, nn + +from mmdet.registry import MODELS +from mmdet.utils import OptConfigType, OptMultiConfig + + +def nlc_to_nchw(x: Tensor, hw_shape: Sequence[int]) -> Tensor: + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. + + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len does not match H, W' + return x.transpose(1, 2).reshape(B, C, H, W).contiguous() + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +def coordinate_to_encoding(coord_tensor: Tensor, + num_feats: int = 128, + temperature: int = 10000, + scale: float = 2 * math.pi): + """Convert coordinate tensor to positional encoding. 
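`SinePositionalEncodingSingle` above and `coordinate_to_encoding` below share the same sine/cosine frequency schedule. A minimal sketch of that schedule for a single 1-D row of positions, assuming plain PyTorch (toy sizes, not the registered module):

```python
import torch

def sine_embed_1d(pos, num_feats=128, temperature=10000):
    """Embed a (N,) tensor of positions into (N, num_feats) sine/cosine features."""
    dim_t = torch.arange(num_feats, dtype=torch.float32, device=pos.device)
    # pairs of channels share one frequency: temperature ** (2 * (i // 2) / num_feats)
    dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_feats)
    pos = pos[:, None] / dim_t                      # (N, num_feats)
    # even channels -> sin, odd channels -> cos, then interleave back
    return torch.stack((pos[:, 0::2].sin(), pos[:, 1::2].cos()), dim=2).flatten(1)

emb = sine_embed_1d(torch.arange(5, dtype=torch.float32))
print(emb.shape)  # torch.Size([5, 128])
```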
+ + Args: + coord_tensor (Tensor): Coordinate tensor to be converted to + positional encoding. With the last dimension as 2 or 4. + num_feats (int, optional): The feature dimension for each position + along x-axis or y-axis. Note the final returned dimension + for each position is 2 times of this value. Defaults to 128. + temperature (int, optional): The temperature used for scaling + the position embedding. Defaults to 10000. + scale (float, optional): A scale factor that scales the position + embedding. The scale will be used only when `normalize` is True. + Defaults to 2*pi. + Returns: + Tensor: Returned encoded positional tensor. + """ + dim_t = torch.arange( + num_feats, dtype=torch.float32, device=coord_tensor.device) + dim_t = temperature**(2 * (dim_t // 2) / num_feats) + x_embed = coord_tensor[..., 0] * scale + y_embed = coord_tensor[..., 1] * scale + pos_x = x_embed[..., None] / dim_t + pos_y = y_embed[..., None] / dim_t + pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), + dim=-1).flatten(2) + pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), + dim=-1).flatten(2) + if coord_tensor.size(-1) == 2: + pos = torch.cat((pos_y, pos_x), dim=-1) + elif coord_tensor.size(-1) == 4: + w_embed = coord_tensor[..., 2] * scale + pos_w = w_embed[..., None] / dim_t + pos_w = torch.stack((pos_w[..., 0::2].sin(), pos_w[..., 1::2].cos()), + dim=-1).flatten(2) + + h_embed = coord_tensor[..., 3] * scale + pos_h = h_embed[..., None] / dim_t + pos_h = torch.stack((pos_h[..., 0::2].sin(), pos_h[..., 1::2].cos()), + dim=-1).flatten(2) + + pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=-1) + else: + raise ValueError('Unknown pos_tensor shape(-1):{}'.format( + coord_tensor.size(-1))) + return pos + + +def inverse_sigmoid(x: Tensor, eps: float = 1e-5) -> Tensor: + """Inverse function of sigmoid. + + Args: + x (Tensor): The tensor to do the inverse. + eps (float): EPS avoid numerical overflow. Defaults 1e-5. + Returns: + Tensor: The x has passed the inverse function of sigmoid, has the same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +class AdaptivePadding(nn.Module): + """Applies padding to input (if needed) so that input can get fully covered + by filter you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around + input. The "corner" mode would pad zero to bottom right. + + Args: + kernel_size (int | tuple): Size of the kernel: + stride (int | tuple): Stride of the filter. Default: 1: + dilation (int | tuple): Spacing between kernel elements. + Default: 1 + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. Default: "corner". 
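`inverse_sigmoid` above is the clamped inverse of `torch.sigmoid`; DETR-style heads typically apply predicted offsets in this unbounded space and squash back with `sigmoid`, e.g. when refining reference points. A small sketch of that round trip (hypothetical offset, same clamping logic as the helper above):

```python
import torch

def inverse_sigmoid(x, eps=1e-5):
    # same logic as the helper above
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)

ref = torch.tensor([[0.25, 0.75]])       # normalized reference point (cx, cy)
delta = torch.tensor([[0.1, -0.2]])      # hypothetical predicted offset in logit space
new_ref = (inverse_sigmoid(ref) + delta).sigmoid()

assert torch.allclose(inverse_sigmoid(ref).sigmoid(), ref, atol=1e-4)
print(new_ref)
```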
+ Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + + super(AdaptivePadding, self).__init__() + + assert padding in ('same', 'corner') + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + dilation = to_2tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + input_h, input_w = input_shape + kernel_h, kernel_w = self.kernel_size + stride_h, stride_w = self.stride + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) + return pad_h, pad_w + + def forward(self, x): + pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ]) + return x + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The config dict for embedding + conv layer type selection. Default: "Conv2d. + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: None (Would be set as `kernel_size`). + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only work when `dynamic_size` + is False. Default: None. + init_cfg (`mmengine.ConfigDict`, optional): The Config for + initialization. Default: None. 
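`AdaptivePadding.get_pad_shape` above uses the standard SAME-padding formula; a quick sketch that recomputes the padding for the docstring's 15x17 example and applies it in "corner" mode with `F.pad` (plain PyTorch, dilation fixed to 1):

```python
import math

import torch
import torch.nn.functional as F

def same_pad_shape(input_hw, kernel=16, stride=16, dilation=1):
    # pad so that ceil(input / stride) output positions are fully covered
    h, w = input_hw
    out_h, out_w = math.ceil(h / stride), math.ceil(w / stride)
    pad_h = max((out_h - 1) * stride + (kernel - 1) * dilation + 1 - h, 0)
    pad_w = max((out_w - 1) * stride + (kernel - 1) * dilation + 1 - w, 0)
    return pad_h, pad_w

x = torch.rand(1, 1, 15, 17)
pad_h, pad_w = same_pad_shape(x.shape[-2:])
x = F.pad(x, [0, pad_w, 0, pad_h])   # "corner" mode: pad bottom-right only
print(x.shape)                       # torch.Size([1, 1, 16, 32])
```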
+ """ + + def __init__(self, + in_channels: int = 3, + embed_dims: int = 768, + conv_type: str = 'Conv2d', + kernel_size: int = 16, + stride: int = 16, + padding: Union[int, tuple, str] = 'corner', + dilation: int = 1, + bias: bool = True, + norm_cfg: OptConfigType = None, + input_size: Union[int, tuple] = None, + init_cfg: OptConfigType = None) -> None: + super(PatchEmbed, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + if stride is None: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adap_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adap_padding: + pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) + input_h, input_w = input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x: Tensor) -> Tuple[Tensor, Tuple[int]]: + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adap_padding: + x = self.adap_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +class PatchMerging(BaseModule): + """Merge patch feature map. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map. Our implementation uses `nn.Unfold` to + merge patch, which is about 25% faster than original implementation. + Instead, we need to modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. + to gets fully covered by filter and stride you specified.. + Default: True. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Default: None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". 
+ dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults: False. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: Optional[Union[int, tuple]] = 2, + stride: Optional[Union[int, tuple]] = None, + padding: Union[int, tuple, str] = 'corner', + dilation: Optional[Union[int, tuple]] = 1, + bias: Optional[bool] = False, + norm_cfg: OptConfigType = dict(type='LN'), + init_cfg: OptConfigType = None) -> None: + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adap_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + def forward(self, x: Tensor, + input_size: Tuple[int]) -> Tuple[Tensor, Tuple[int]]: + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + + if self.adap_padding: + x = self.adap_padding(x) + H, W = x.shape[-2:] + + x = self.sampler(x) + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + x = self.norm(x) if self.norm else x + x = self.reduction(x) + return x, output_size + + +class ConditionalAttention(BaseModule): + """A wrapper of conditional attention, dropout and residual connection. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop: A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. 
+ cross_attn (bool): Whether the attention module is for cross attention. + Default: False + keep_query_pos (bool): Whether to transform query_pos before cross + attention. + Default: False. + batch_first (bool): When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default: True. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + attn_drop: float = 0., + proj_drop: float = 0., + cross_attn: bool = False, + keep_query_pos: bool = False, + batch_first: bool = True, + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + + assert batch_first is True, 'Set `batch_first`\ + to False is NOT supported in ConditionalAttention. \ + First dimension of all DETRs in mmdet is `batch`, \ + please set `batch_first` to True.' + + self.cross_attn = cross_attn + self.keep_query_pos = keep_query_pos + self.embed_dims = embed_dims + self.num_heads = num_heads + self.attn_drop = Dropout(attn_drop) + self.proj_drop = Dropout(proj_drop) + + self._init_layers() + + def _init_layers(self): + """Initialize layers for qkv projection.""" + embed_dims = self.embed_dims + self.qcontent_proj = Linear(embed_dims, embed_dims) + self.qpos_proj = Linear(embed_dims, embed_dims) + self.kcontent_proj = Linear(embed_dims, embed_dims) + self.kpos_proj = Linear(embed_dims, embed_dims) + self.v_proj = Linear(embed_dims, embed_dims) + if self.cross_attn: + self.qpos_sine_proj = Linear(embed_dims, embed_dims) + self.out_proj = Linear(embed_dims, embed_dims) + + nn.init.constant_(self.out_proj.bias, 0.) + + def forward_attn(self, + query: Tensor, + key: Tensor, + value: Tensor, + attn_mask: Tensor = None, + key_padding_mask: Tensor = None) -> Tuple[Tensor]: + """Forward process for `ConditionalAttention`. + + Args: + query (Tensor): The input query with shape [bs, num_queries, + embed_dims]. + key (Tensor): The key tensor with shape [bs, num_keys, + embed_dims]. + If None, the `query` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + Returns: + Tuple[Tensor]: Attention outputs of shape :math:`(N, L, E)`, + where :math:`N` is the batch size, :math:`L` is the target + sequence length , and :math:`E` is the embedding dimension + `embed_dim`. Attention weights per head of shape :math:` + (num_heads, L, S)`. where :math:`N` is batch size, :math:`L` + is target sequence length, and :math:`S` is the source sequence + length. 
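`forward_attn` described above boils down to standard masked scaled dot-product attention over `num_heads` flattened heads; a compact sketch of that core computation with plain tensor ops (toy sizes, no input/output projections):

```python
import torch
import torch.nn.functional as F

bs, num_heads, tgt_len, src_len, head_dims = 2, 8, 300, 1000, 32

q = torch.rand(bs * num_heads, tgt_len, head_dims) * head_dims ** -0.5  # pre-scaled query
k = torch.rand(bs * num_heads, src_len, head_dims)
v = torch.rand(bs * num_heads, src_len, head_dims)
key_padding_mask = torch.zeros(bs, src_len, dtype=torch.bool)           # True = padded key

weights = torch.bmm(q, k.transpose(1, 2))                               # (bs*heads, tgt, src)
# mask out padded keys before the softmax
weights = weights.view(bs, num_heads, tgt_len, src_len).masked_fill(
    key_padding_mask[:, None, None, :], float('-inf')).view(
        bs * num_heads, tgt_len, src_len)
weights = F.softmax(weights, dim=-1)
out = torch.bmm(weights, v)                                             # (bs*heads, tgt, head_dims)
print(out.shape)
```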
+ """ + assert key.size(1) == value.size(1), \ + f'{"key, value must have the same sequence length"}' + assert query.size(0) == key.size(0) == value.size(0), \ + f'{"batch size must be equal for query, key, value"}' + assert query.size(2) == key.size(2), \ + f'{"q_dims, k_dims must be equal"}' + assert value.size(2) == self.embed_dims, \ + f'{"v_dims must be equal to embed_dims"}' + + bs, tgt_len, hidden_dims = query.size() + _, src_len, _ = key.size() + head_dims = hidden_dims // self.num_heads + v_head_dims = self.embed_dims // self.num_heads + assert head_dims * self.num_heads == hidden_dims, \ + f'{"hidden_dims must be divisible by num_heads"}' + scaling = float(head_dims)**-0.5 + + q = query * scaling + k = key + v = value + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or \ + attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or \ + attn_mask.dtype == torch.uint8 or \ + attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for \ + attn_mask' + + if attn_mask.dtype == torch.uint8: + warnings.warn('Byte tensor for attn_mask is deprecated.\ + Use bool tensor instead.') + attn_mask = attn_mask.to(torch.bool) + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(1), key.size(1)]: + raise RuntimeError( + 'The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [ + bs * self.num_heads, + query.size(1), + key.size(1) + ]: + raise RuntimeError( + 'The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError( + "attn_mask's dimension {} is not supported".format( + attn_mask.dim())) + # attn_mask's dim is 3 now. + + if key_padding_mask is not None and key_padding_mask.dtype == int: + key_padding_mask = key_padding_mask.to(torch.bool) + + q = q.contiguous().view(bs, tgt_len, self.num_heads, + head_dims).permute(0, 2, 1, 3).flatten(0, 1) + if k is not None: + k = k.contiguous().view(bs, src_len, self.num_heads, + head_dims).permute(0, 2, 1, + 3).flatten(0, 1) + if v is not None: + v = v.contiguous().view(bs, src_len, self.num_heads, + v_head_dims).permute(0, 2, 1, + 3).flatten(0, 1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bs + assert key_padding_mask.size(1) == src_len + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [ + bs * self.num_heads, tgt_len, src_len + ] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view( + bs, self.num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view( + bs * self.num_heads, tgt_len, src_len) + + attn_output_weights = F.softmax( + attn_output_weights - + attn_output_weights.max(dim=-1, keepdim=True)[0], + dim=-1) + attn_output_weights = self.attn_drop(attn_output_weights) + + attn_output = torch.bmm(attn_output_weights, v) + assert list( + attn_output.size()) == [bs * self.num_heads, tgt_len, v_head_dims] + attn_output = attn_output.view(bs, self.num_heads, tgt_len, + v_head_dims).permute(0, 2, 1, + 3).flatten(2) + attn_output = self.out_proj(attn_output) + + # average attention weights over heads + attn_output_weights = 
attn_output_weights.view(bs, self.num_heads, + tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / self.num_heads + + def forward(self, + query: Tensor, + key: Tensor, + query_pos: Tensor = None, + ref_sine_embed: Tensor = None, + key_pos: Tensor = None, + attn_mask: Tensor = None, + key_padding_mask: Tensor = None, + is_first: bool = False) -> Tensor: + """Forward function for `ConditionalAttention`. + Args: + query (Tensor): The input query with shape [bs, num_queries, + embed_dims]. + key (Tensor): The key tensor with shape [bs, num_keys, + embed_dims]. + If None, the `query` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query in self + attention, with the same shape as `x`. If not None, it will + be added to `x` before forward function. + Defaults to None. + query_sine_embed (Tensor): The positional encoding for query in + cross attention, with the same shape as `x`. If not None, it + will be added to `x` before forward function. + Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + is_first (bool): A indicator to tell whether the current layer + is the first layer of the decoder. + Defaults to False. + Returns: + Tensor: forwarded results with shape + [bs, num_queries, embed_dims]. + """ + + if self.cross_attn: + q_content = self.qcontent_proj(query) + k_content = self.kcontent_proj(key) + v = self.v_proj(key) + + bs, nq, c = q_content.size() + _, hw, _ = k_content.size() + + k_pos = self.kpos_proj(key_pos) + if is_first or self.keep_query_pos: + q_pos = self.qpos_proj(query_pos) + q = q_content + q_pos + k = k_content + k_pos + else: + q = q_content + k = k_content + q = q.view(bs, nq, self.num_heads, c // self.num_heads) + query_sine_embed = self.qpos_sine_proj(ref_sine_embed) + query_sine_embed = query_sine_embed.view(bs, nq, self.num_heads, + c // self.num_heads) + q = torch.cat([q, query_sine_embed], dim=3).view(bs, nq, 2 * c) + k = k.view(bs, hw, self.num_heads, c // self.num_heads) + k_pos = k_pos.view(bs, hw, self.num_heads, c // self.num_heads) + k = torch.cat([k, k_pos], dim=3).view(bs, hw, 2 * c) + ca_output = self.forward_attn( + query=q, + key=k, + value=v, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask)[0] + query = query + self.proj_drop(ca_output) + else: + q_content = self.qcontent_proj(query) + q_pos = self.qpos_proj(query_pos) + k_content = self.kcontent_proj(query) + k_pos = self.kpos_proj(query_pos) + v = self.v_proj(query) + q = q_content if q_pos is None else q_content + q_pos + k = k_content if k_pos is None else k_content + k_pos + sa_output = self.forward_attn( + query=q, + key=k, + value=v, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask)[0] + query = query + self.proj_drop(sa_output) + + return query + + +class MLP(BaseModule): + """Very simple multi-layer perceptron (also called FFN) with relu. Mostly + used in DETR series detectors. + + Args: + input_dim (int): Feature dim of the input tensor. + hidden_dim (int): Feature dim of the hidden layer. + output_dim (int): Feature dim of the output tensor. 
+ num_layers (int): Number of FFN layers. As the last + layer of MLP only contains FFN (Linear). + """ + + def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, + num_layers: int) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = ModuleList( + Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x: Tensor) -> Tensor: + """Forward function of MLP. + + Args: + x (Tensor): The input feature, has shape + (num_queries, bs, input_dim). + Returns: + Tensor: The output feature, has shape + (num_queries, bs, output_dim). + """ + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + + + + +def get_text_sine_pos_embed( + pos_tensor: torch.Tensor, + num_pos_feats: int = 128, + temperature: int = 10000, + exchange_xy: bool = True, +): + """generate sine position embedding from a position tensor + Args: + pos_tensor (torch.Tensor): shape: [..., n]. + num_pos_feats (int): projected shape for each float in the tensor. + temperature (int): temperature in the sine/cosine function. + exchange_xy (bool, optional): exchange pos x and pos y. For example, + input tensor is [x,y], the results will be [pos(y), pos(x)]. + Defaults to True. + Returns: + pos_embed (torch.Tensor): shape: [..., n*num_pos_feats]. + """ + scale = 2 * math.pi + dim_t = torch.arange( + num_pos_feats, dtype=torch.float32, device=pos_tensor.device) + dim_t = temperature**(2 * torch.div(dim_t, 2, rounding_mode='floor') / + num_pos_feats) + + def sine_func(x: torch.Tensor): + sin_x = x * scale / dim_t + sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), + dim=3).flatten(2) + return sin_x + + pos_res = [ + sine_func(x) + for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1) + ] + if exchange_xy: + pos_res[0], pos_res[1] = pos_res[1], pos_res[0] + pos_res = torch.cat(pos_res, dim=-1) + return pos_res diff --git a/projects/DETR3D/old_detr3d_converter.py b/projects/DETR3D/old_detr3d_converter.py new file mode 100755 index 0000000..9913ab6 --- /dev/null +++ b/projects/DETR3D/old_detr3d_converter.py @@ -0,0 +1,25 @@ +from argparse import ArgumentParser + +import torch + +parser = ArgumentParser() +parser.add_argument('src', default='old.pth') +parser.add_argument('dst', default='new.pth') # ('training','validation') +parser.add_argument('--code_size', type=int, default='10') +args = parser.parse_args() +model = torch.load(args.src) +code_size = args.code_size +if model['meta'].get('detr3d_convert_tag') is not None: + print('this model has already converted!') +else: + print('converting...') + # (cx, cy, w, l, cz, h, sin(φ), cos(φ), vx, vy) + for key in model['state_dict']: + tsr = model['state_dict'][key] + if 'reg_branches' in key and tsr.shape[0] == code_size: + print(key, ' with ', tsr.shape, 'has changed') + tsr[[2, 3], ...] = tsr[[3, 2], ...] + tsr[[6, 7], ...] = -tsr[[7, 6], ...] + model['meta']['detr3d_convert_tag'] = True + torch.save(model, args.dst) + print('done...') diff --git a/projects/PETR/README.md b/projects/PETR/README.md new file mode 100755 index 0000000..0e106cc --- /dev/null +++ b/projects/PETR/README.md @@ -0,0 +1,63 @@ +# PETR + +This is an README for `PETR`. + +## Description + +Author: @SekiroRong. +This is an implementation of *PETR*. 
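The converter script above relies on the fact that permuting output rows of an `nn.Linear` weight permutes the corresponding output channels, which is how the `reg_branches` parameters are remapped between box encodings. A tiny sketch of that principle on a toy layer (not the real checkpoint):

```python
import torch
import torch.nn as nn

layer = nn.Linear(16, 10, bias=False)        # toy 10-dim box code per query
x = torch.rand(4, 16)
before = layer(x)

with torch.no_grad():
    w = layer.weight                         # shape (out_features, in_features)
    w[[2, 3], :] = w[[3, 2], :]              # swap two output channels in place

after = layer(x)
assert torch.allclose(before[:, 2], after[:, 3])
assert torch.allclose(before[:, 3], after[:, 2])
```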
+ +## Usage + + + +### Training commands + +In MMDet3D's root directory, run the following command to train the model: + +```bash +python tools/train.py projects/PETR/config/petr/petr_vovnet_gridmask_p4_800x320.py +``` + +### Testing commands + +In MMDet3D's root directory, run the following command to test the model: + +```bash +python tools/test.py projects/PETR/config/petr/petr_vovnet_gridmask_p4_800x320.py ${CHECKPOINT_PATH} +``` + +## Results + + + +This Result is trained by petr_vovnet_gridmask_p4_800x320.py and use [weights](https://drive.google.com/file/d/1ABI5BoQCkCkP4B0pO5KBJ3Ni0tei0gZi/view?usp=sharing) as pretrain weight. + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | NDS | Download | +| :---------------------------------------------------------------------------: | :-----: | :------: | :------------: | :--: | :--: | :----------------------: | +| [petr_vovnet_gridmask_p4_800x320](configs/petr_vovnet_gridmask_p4_800x320.py) | 1x | 7.62 | 18.7 | 38.3 | 43.5 | [model](<>) \| [log](<>) | + +``` +mAP: 0.3830 +mATE: 0.7547 +mASE: 0.2683 +mAOE: 0.4948 +mAVE: 0.8331 +mAAE: 0.2056 +NDS: 0.4358 +Eval time: 118.7s + +Per-class results: +Object Class AP ATE ASE AOE AVE AAE +car 0.567 0.538 0.151 0.086 0.873 0.212 +truck 0.341 0.785 0.213 0.113 0.821 0.234 +bus 0.426 0.766 0.201 0.128 1.813 0.343 +trailer 0.216 1.116 0.227 0.649 0.640 0.122 +construction_vehicle 0.093 1.118 0.483 1.292 0.217 0.330 +pedestrian 0.453 0.685 0.293 0.644 0.535 0.238 +motorcycle 0.374 0.700 0.253 0.624 1.291 0.154 +bicycle 0.345 0.622 0.262 0.775 0.475 0.011 +traffic_cone 0.539 0.557 0.319 nan nan nan +barrier 0.476 0.661 0.279 0.142 nan nan +``` diff --git a/projects/PETR/configs/petr_vovnet_gridmask_p4_800x320.py b/projects/PETR/configs/petr_vovnet_gridmask_p4_800x320.py new file mode 100755 index 0000000..5cec194 --- /dev/null +++ b/projects/PETR/configs/petr_vovnet_gridmask_p4_800x320.py @@ -0,0 +1,368 @@ +_base_ = [ + 'mmdet3d::_base_/datasets/nus-3d.py', 'mmdet3d::_base_/default_runtime.py', + 'mmdet3d::_base_/schedules/cyclic-20e.py' +] +backbone_norm_cfg = dict(type='LN', requires_grad=True) +custom_imports = dict(imports=['projects.PETR.petr']) + +randomness = dict(seed=1, deterministic=False, diff_rank_seed=False) +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + to_rgb=False) +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] +metainfo = dict(classes=class_names) + +input_modality = dict(use_camera=True) +model = dict( + type='PETR', + data_preprocessor=dict( + type='Det3DDataPreprocessor', + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + bgr_to_rgb=False, + pad_size_divisor=32), + use_grid_mask=True, + img_backbone=dict( + type='VoVNetCP', + spec_name='V-99-eSE', + norm_eval=True, + frozen_stages=-1, + input_ch=3, + out_features=( + 'stage4', + 'stage5', + )), + img_neck=dict( + type='CPFPN', in_channels=[768, 1024], out_channels=256, num_outs=2), + pts_bbox_head=dict( + type='PETRHead', + num_classes=10, + in_channels=256, + num_query=900, + LID=True, + with_position=True, + with_multiview=True, + position_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + normedlinear=False, + transformer=dict( + 
type='PETRTransformer', + decoder=dict( + type='PETRTransformerDecoder', + return_intermediate=True, + num_layers=6, + transformerlayers=dict( + type='PETRTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.1, + dropout_layer=dict(type='Dropout', drop_prob=0.1)), + dict( + type='PETRMultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.1, + dropout_layer=dict(type='Dropout', drop_prob=0.1)), + ], + feedforward_channels=2048, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')), + )), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='SinePositionalEncoding3D', num_feats=128, normalize=True), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='mmdet.L1Loss', loss_weight=0.25), + loss_iou=dict(type='mmdet.GIoULoss', loss_weight=0.0)), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict( + type='IoUCost', weight=0.0 + ), # Fake cost. Just to be compatible with DETR head. + pc_range=point_cloud_range)))) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +backend_args = None + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'nuscenes_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + car=5, + truck=5, + bus=5, + trailer=5, + construction_vehicle=5, + traffic_cone=5, + barrier=5, + motorcycle=5, + bicycle=5, + pedestrian=5)), + classes=class_names, + sample_groups=dict( + car=2, + truck=3, + construction_vehicle=7, + bus=4, + trailer=6, + barrier=2, + motorcycle=6, + bicycle=6, + pedestrian=2, + traffic_cone=2), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + backend_args=backend_args), + backend_args=backend_args) +ida_aug_conf = { + 'resize_lim': (0.47, 0.625), + 'final_dim': (320, 800), + 'bot_pct_lim': (0.0, 0.0), + 'rot_lim': (0.0, 0.0), + 'H': 900, + 'W': 1600, + 'rand_flip': True, +} + +train_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict( + type='ResizeCropFlipImage', data_aug_conf=ida_aug_conf, training=True), + dict( + type='GlobalRotScaleTransImage', + rot_range=[-0.3925, 0.3925], + translation_std=[0, 0, 0], + scale_ratio_range=[0.95, 1.05], + reverse_angle=False, + training=True), + dict( + type='Pack3DDetInputs', + keys=[ + 'img', 'gt_bboxes', 'gt_bboxes_labels', 'attr_labels', + 'gt_bboxes_3d', 'gt_labels_3d', 'centers_2d', 'depths' + ]) +] +test_pipeline = [ + dict( + type='LoadMultiViewImageFromFiles', + to_float32=True, + backend_args=backend_args), + dict( + type='ResizeCropFlipImage', data_aug_conf=ida_aug_conf, + training=False), + 
dict(type='Pack3DDetInputs', keys=['img']) +] + +train_dataloader = dict( + batch_size=1, + num_workers=4, + dataset=dict( + type=dataset_type, + data_prefix=dict( + pts='samples/LIDAR_TOP', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT'), + pipeline=train_pipeline, + box_type_3d='LiDAR', + metainfo=metainfo, + test_mode=False, + modality=input_modality, + use_valid_flag=True, + backend_args=backend_args)) +test_dataloader = dict( + dataset=dict( + type=dataset_type, + data_prefix=dict( + pts='samples/LIDAR_TOP', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT'), + pipeline=test_pipeline, + box_type_3d='LiDAR', + metainfo=metainfo, + test_mode=True, + modality=input_modality, + use_valid_flag=True, + backend_args=backend_args)) +val_dataloader = dict( + dataset=dict( + type=dataset_type, + data_prefix=dict( + pts='samples/LIDAR_TOP', + CAM_FRONT='samples/CAM_FRONT', + CAM_FRONT_LEFT='samples/CAM_FRONT_LEFT', + CAM_FRONT_RIGHT='samples/CAM_FRONT_RIGHT', + CAM_BACK='samples/CAM_BACK', + CAM_BACK_RIGHT='samples/CAM_BACK_RIGHT', + CAM_BACK_LEFT='samples/CAM_BACK_LEFT'), + pipeline=test_pipeline, + box_type_3d='LiDAR', + metainfo=metainfo, + test_mode=True, + modality=input_modality, + use_valid_flag=True, + backend_args=backend_args)) + +# Different from original PETR: +# We don't use special lr for image_backbone +# This seems won't affect model performance +optim_wrapper = dict( + # TODO Add Amp + # type='AmpOptimWrapper', + # loss_scale='dynamic', + optimizer=dict(type='AdamW', lr=2e-4, weight_decay=0.01), + paramwise_cfg=dict(custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + }), + clip_grad=dict(max_norm=35, norm_type=2)) + +num_epochs = 24 + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.0 / 3, + begin=0, + end=500, + by_epoch=False), + dict( + type='CosineAnnealingLR', + # TODO Figure out what T_max + T_max=num_epochs, + by_epoch=True, + ) +] + +train_cfg = dict(max_epochs=num_epochs, val_interval=num_epochs) + +find_unused_parameters = False + +# pretrain_path can be found here: +# https://drive.google.com/file/d/1ABI5BoQCkCkP4B0pO5KBJ3Ni0tei0gZi/view +load_from = '/mnt/d/fcos3d_vovnet_imgbackbone-remapped.pth' +resume = False + +# --------------Original--------------- +# mAP: 0.3778 +# mATE: 0.7463 +# mASE: 0.2718 +# mAOE: 0.4883 +# mAVE: 0.9062 +# mAAE: 0.2123 +# NDS: 0.4264 +# Eval time: 242.1s + +# Per-class results: +# Object Class AP ATE ASE AOE AVE AAE +# car 0.556 0.555 0.153 0.091 0.917 0.216 +# truck 0.330 0.805 0.218 0.119 0.859 0.250 +# bus 0.412 0.789 0.205 0.162 2.067 0.337 +# trailer 0.221 0.976 0.233 0.663 0.797 0.146 +# construction_vehicle 0.094 1.096 0.493 1.145 0.190 0.349 +# pedestrian 0.453 0.688 0.289 0.636 0.549 0.235 +# motorcycle 0.368 0.690 0.256 0.622 1.417 0.149 +# bicycle 0.341 0.609 0.270 0.812 0.455 0.017 +# traffic_cone 0.531 0.582 0.320 nan nan nan +# barrier 0.472 0.673 0.281 0.145 nan nan + +# --------------Refactored in mmdet3d v1.0--------------- +# mAP: 0.3827 +# mATE: 0.7375 +# mASE: 0.2703 +# mAOE: 0.4799 +# mAVE: 0.8699 +# mAAE: 0.2038 +# NDS: 0.4352 +# Eval time: 124.8s + +# Per-class results: +# Object Class AP ATE ASE AOE AVE AAE +# car 0.574 0.519 0.150 
0.087 0.865 0.206 +# truck 0.349 0.773 0.213 0.117 0.855 0.220 +# bus 0.423 0.781 0.204 0.122 1.902 0.319 +# trailer 0.219 1.034 0.231 0.608 0.830 0.149 +# construction_vehicle 0.084 1.062 0.486 1.245 0.172 0.360 +# pedestrian 0.452 0.681 0.293 0.646 0.529 0.231 +# motorcycle 0.378 0.670 0.250 0.567 1.334 0.130 +# bicycle 0.347 0.639 0.264 0.788 0.472 0.016 +# traffic_cone 0.538 0.553 0.325 nan nan nan +# barrier 0.464 0.662 0.287 0.137 nan nan + +# --------------Refactored in mmdet3d v1.1--------------- +# mAP: 0.3830 +# mATE: 0.7547 +# mASE: 0.2683 +# mAOE: 0.4948 +# mAVE: 0.8331 +# mAAE: 0.2056 +# NDS: 0.4358 +# Eval time: 118.7s + +# Per-class results: +# Object Class AP ATE ASE AOE AVE AAE +# car 0.567 0.538 0.151 0.086 0.873 0.212 +# truck 0.341 0.785 0.213 0.113 0.821 0.234 +# bus 0.426 0.766 0.201 0.128 1.813 0.343 +# trailer 0.216 1.116 0.227 0.649 0.640 0.122 +# construction_vehicle 0.093 1.118 0.483 1.292 0.217 0.330 +# pedestrian 0.453 0.685 0.293 0.644 0.535 0.238 +# motorcycle 0.374 0.700 0.253 0.624 1.291 0.154 +# bicycle 0.345 0.622 0.262 0.775 0.475 0.011 +# traffic_cone 0.539 0.557 0.319 nan nan nan +# barrier 0.476 0.661 0.279 0.142 nan nan diff --git a/projects/PETR/petr/__init__.py b/projects/PETR/petr/__init__.py new file mode 100755 index 0000000..2ed2ecc --- /dev/null +++ b/projects/PETR/petr/__init__.py @@ -0,0 +1,24 @@ +from .cp_fpn import CPFPN +from .hungarian_assigner_3d import HungarianAssigner3D +from .match_cost import BBox3DL1Cost +from .nms_free_coder import NMSFreeCoder +from .petr import PETR +from .petr_head import PETRHead +from .petr_transformer import (PETRDNTransformer, PETRMultiheadAttention, + PETRTransformer, PETRTransformerDecoder, + PETRTransformerDecoderLayer, + PETRTransformerEncoder) +from .positional_encoding import (LearnedPositionalEncoding3D, + SinePositionalEncoding3D) +from .transforms_3d import GlobalRotScaleTransImage, ResizeCropFlipImage +from .utils import denormalize_bbox, normalize_bbox +from .vovnetcp import VoVNetCP + +__all__ = [ + 'GlobalRotScaleTransImage', 'ResizeCropFlipImage', 'VoVNetCP', 'PETRHead', + 'CPFPN', 'HungarianAssigner3D', 'NMSFreeCoder', 'BBox3DL1Cost', + 'LearnedPositionalEncoding3D', 'PETRDNTransformer', + 'PETRMultiheadAttention', 'PETRTransformer', 'PETRTransformerDecoder', + 'PETRTransformerDecoderLayer', 'PETRTransformerEncoder', 'PETR', + 'SinePositionalEncoding3D', 'denormalize_bbox', 'normalize_bbox' +] diff --git a/projects/PETR/petr/cp_fpn.py b/projects/PETR/petr/cp_fpn.py new file mode 100755 index 0000000..02c9024 --- /dev/null +++ b/projects/PETR/petr/cp_fpn.py @@ -0,0 +1,211 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from mmdetection (https://github.com/open-mmlab/mmdetection) +# Copyright (c) OpenMMLab. All rights reserved. +# ------------------------------------------------------------------------ +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS + + +# This FPN remove unused parameters which can used with checkpoint +# (with_cp = True) +@MODELS.register_module() +class CPFPN(BaseModule): + r"""Feature Pyramid Network. + + This is an implementation of paper `Feature Pyramid Networks for Object + Detection `_. + + Args: + in_channels (List[int]): Number of input channels per scale. 
+ out_channels (int): Number of output channels (used at each scale) + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, it is equivalent to `add_extra_convs='on_input'`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (str): Config dict for activation layer in ConvModule. + Default: None. + upsample_cfg (dict): Config dict for interpolate layer. + Default: `dict(mode='nearest')` + init_cfg (dict or list[dict], optional): Initialization config dict. + + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = FPN(in_channels, 11, len(in_channels)).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest'), + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(CPFPN, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + self.add_extra_convs = 'on_input' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, 
self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + self.lateral_convs.append(l_conv) + if i == 0: + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + # @auto_fp16() + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + laterals[i - 1] += F.interpolate(laterals[i], + **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] += F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) if i == 0 else laterals[i] + for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/projects/PETR/petr/grid_mask.py b/projects/PETR/petr/grid_mask.py new file mode 100755 index 0000000..279d6b2 --- /dev/null +++ b/projects/PETR/petr/grid_mask.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
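The top-down pathway in `CPFPN.forward` above upsamples each coarser lateral to the size of the next finer one and adds them; a minimal two-level sketch of that merge (plain PyTorch, hypothetical shapes):

```python
import torch
import torch.nn.functional as F

# Hypothetical lateral features after the 1x1 lateral convs: fine and coarse level
laterals = [torch.rand(1, 256, 64, 64), torch.rand(1, 256, 32, 32)]

# top-down: upsample the coarser map to the finer resolution and add
for i in range(len(laterals) - 1, 0, -1):
    prev_shape = laterals[i - 1].shape[2:]
    laterals[i - 1] = laterals[i - 1] + F.interpolate(
        laterals[i], size=prev_shape, mode='nearest')

print([lat.shape for lat in laterals])
# [torch.Size([1, 256, 64, 64]), torch.Size([1, 256, 32, 32])]
```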
+import numpy as np +import torch +import torch.nn as nn +from PIL import Image + + +class Grid(object): + + def __init__(self, + use_h, + use_w, + rotate=1, + offset=False, + ratio=0.5, + mode=0, + prob=1., + length=1): + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + self.length = length + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch + + def __call__(self, img, label): + if np.random.rand() > self.prob: + return img, label + h = img.size(1) + w = img.size(2) + self.d1 = 2 + self.d2 = min(h, w) + hh = int(1.5 * h) + ww = int(1.5 * w) + d = np.random.randint(self.d1, self.d2) + if self.ratio == 1: + self.length = np.random.randint(1, d) + else: + self.length = min(max(int(d * self.ratio + 0.5), 1), d - 1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh // d): + s = d * i + st_h + t = min(s + self.length, hh) + mask[s:t, :] *= 0 + if self.use_w: + for i in range(ww // d): + s = d * i + st_w + t = min(s + self.length, ww) + mask[:, s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh - h) // 2:(hh - h) // 2 + h, + (ww - w) // 2:(ww - w) // 2 + w] + + mask = torch.from_numpy(mask).float() + if self.mode == 1: + mask = 1 - mask + + mask = mask.expand_as(img) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h, w) - 0.5)).float() + offset = (1 - mask) * offset + img = img * mask + offset + else: + img = img * mask + + return img, label + + +class GridMask(nn.Module): + + def __init__(self, + use_h, + use_w, + rotate=1, + offset=False, + ratio=0.5, + mode=0, + prob=1.): + super(GridMask, self).__init__() + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch # + 1.#0.5 + + def forward(self, x): + if np.random.rand() > self.prob or not self.training: + return x + n, c, h, w = x.size() + x = x.view(-1, h, w) + hh = int(1.5 * h) + ww = int(1.5 * w) + d = np.random.randint(2, h) + self.length = min(max(int(d * self.ratio + 0.5), 1), d - 1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh // d): + s = d * i + st_h + t = min(s + self.length, hh) + mask[s:t, :] *= 0 + if self.use_w: + for i in range(ww // d): + s = d * i + st_w + t = min(s + self.length, ww) + mask[:, s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh - h) // 2:(hh - h) // 2 + h, + (ww - w) // 2:(ww - w) // 2 + w] + + mask = torch.from_numpy(mask).float().cuda() + if self.mode == 1: + mask = 1 - mask + mask = mask.expand_as(x) + if self.offset: + offset = torch.from_numpy( + 2 * (np.random.rand(h, w) - 0.5)).float().cuda() + x = x * mask + offset * (1 - mask) + else: + x = x * mask + + return x.view(n, c, h, w) diff --git a/projects/PETR/petr/hungarian_assigner_3d.py b/projects/PETR/petr/hungarian_assigner_3d.py new file mode 100755 index 0000000..8600323 --- /dev/null +++ b/projects/PETR/petr/hungarian_assigner_3d.py @@ -0,0 +1,142 @@ +# 
------------------------------------------------------------------------ +# Copyright (c) 2021 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from DETR3D (https://github.com/WangYueFt/detr3d) +# Copyright (c) 2021 Wang, Yue +# ------------------------------------------------------------------------ +# Modified from mmdetection (https://github.com/open-mmlab/mmdetection) +# Copyright (c) OpenMMLab. All rights reserved. +# ------------------------------------------------------------------------ +import torch +from mmdet.models.task_modules import AssignResult, BaseAssigner + +from mmdet3d.registry import TASK_UTILS +from projects.PETR.petr.utils import normalize_bbox + +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + + +@TASK_UTILS.register_module() +class HungarianAssigner3D(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. This + class computes an assignment between the targets and the predictions based + on the costs. The costs are weighted sum of three components: + classification cost, regression L1 cost and regression iou cost. The + targets don't include the no_object, so generally there are more + predictions than targets. After the one-to-one matching, the un-matched are + treated as backgrounds. Thus each query prediction will be assigned with + `0` or a positive integer indicating the ground truth index: + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + Args: + cls_weight (int | float, optional): The scale factor for classification + cost. Default 1.0. + bbox_weight (int | float, optional): The scale factor for regression + L1 cost. Default 1.0. + iou_weight (int | float, optional): The scale factor for regression + iou cost. Default 1.0. + iou_calculator (dict | optional): The config for the iou calculation. + Default type `BboxOverlaps2D`. + iou_mode (str | optional): "iou" (intersection over union), "iof" + (intersection over foreground), or "giou" (generalized + intersection over union). Default "giou". + """ + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=1.0), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range=None): + self.cls_cost = TASK_UTILS.build(cls_cost) + self.reg_cost = TASK_UTILS.build(reg_cost) + self.iou_cost = TASK_UTILS.build(iou_cost) + self.pc_range = pc_range + + def assign(self, + bbox_pred, + cls_pred, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + eps=1e-7): + """Computes one-to-one matching based on the weighted costs. + This method assign each query prediction to a ground truth or + background. The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + [num_query, 4]. + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. 
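`HungarianAssigner3D.assign` follows the four steps listed in its docstring: default everything to unassigned, build a weighted cost, run Hungarian matching on CPU, then write 0 for background and 1-based gt indices for matched queries. A toy sketch of steps 2-4 with random stand-in costs (hypothetical sizes, not the real cost modules):

```python
import torch
from scipy.optimize import linear_sum_assignment

num_query, num_gt = 6, 3
cls_cost = torch.rand(num_query, num_gt)      # stand-ins for the real cost terms
reg_cost = torch.rand(num_query, num_gt)
cost = cls_cost + reg_cost                    # step 2: weighted sum of costs

# step 3: Hungarian matching on CPU
row_inds, col_inds = linear_sum_assignment(cost.numpy())

# step 4: 0 = background, positive = 1-based index of the assigned gt
assigned_gt_inds = torch.zeros(num_query, dtype=torch.long)
assigned_gt_inds[torch.from_numpy(row_inds)] = torch.from_numpy(col_inds) + 1
print(assigned_gt_inds)
```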
+ gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`. Default None. + eps (int | float, optional): A value added to the denominator for + numerical stability. Default 1e-7. + Returns: + :obj:`AssignResult`: The assigned result. + """ + assert gt_bboxes_ignore is None, \ + 'Only case when gt_bboxes_ignore is None is supported.' + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + + # 2. compute the weighted costs + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + normalized_gt_bboxes = normalize_bbox(gt_bboxes, self.pc_range) + reg_cost = self.reg_cost(bbox_pred[:, :8], normalized_gt_bboxes[:, :8]) + + # weighted sum of above two costs + cost = cls_cost + reg_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + cost = torch.nan_to_num(cost, nan=100.0, posinf=100.0, neginf=-100.0) + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/projects/PETR/petr/match_cost.py b/projects/PETR/petr/match_cost.py new file mode 100755 index 0000000..ee48d4b --- /dev/null +++ b/projects/PETR/petr/match_cost.py @@ -0,0 +1,338 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet3d.registry import TASK_UTILS + + +def fp16_clamp(x, min=None, max=None): + if not x.is_cuda and x.dtype == torch.float16: + # clamp for cpu float16, tensor fp16 has no clamp implementation + return x.float().clamp(min, max).half() + + return x.clamp(min, max) + + +def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6): + """Calculate overlap between two set of bboxes. + FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889 + Note: + Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou', + there are some new generated variable when calculating IOU + using bbox_overlaps function: + 1) is_aligned is False + area1: M x 1 + area2: N x 1 + lt: M x N x 2 + rb: M x N x 2 + wh: M x N x 2 + overlap: M x N x 1 + union: M x N x 1 + ious: M x N x 1 + Total memory: + S = (9 x N x M + N + M) * 4 Byte, + When using FP16, we can reduce: + R = (9 x N x M + N + M) * 4 / 2 Byte + R large than (N + M) * 4 * 2 is always true when N and M >= 1. 
+ Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2, + N + 1 < 3 * N, when N or M is 1. + Given M = 40 (ground truth), N = 400000 (three anchor boxes + in per grid, FPN, R-CNNs), + R = 275 MB (one times) + A special case (dense detection), M = 512 (ground truth), + R = 3516 MB = 3.43 GB + When the batch size is B, reduce: + B x R + Therefore, CUDA memory runs out frequently. + Experiments on GeForce RTX 2080Ti (11019 MiB): + | dtype | M | N | Use | Real | Ideal | + |:----:|:----:|:----:|:----:|:----:|:----:| + | FP32 | 512 | 400000 | 8020 MiB | -- | -- | + | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | + | FP32 | 40 | 400000 | 1540 MiB | -- | -- | + | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB | + 2) is_aligned is True + area1: N x 1 + area2: N x 1 + lt: N x 2 + rb: N x 2 + wh: N x 2 + overlap: N x 1 + union: N x 1 + ious: N x 1 + Total memory: + S = 11 x N * 4 Byte + When using FP16, we can reduce: + R = 11 x N * 4 / 2 Byte + So do the 'giou' (large than 'iou'). + Time-wise, FP16 is generally faster than FP32. + When gpu_assign_thr is not -1, it takes more time on cpu + but not reduce memory. + There, we can reduce half the memory and keep the speed. + If ``is_aligned`` is ``False``, then calculate the overlaps between each + bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned + pair of bboxes1 and bboxes2. + Args: + bboxes1 (Tensor): shape (B, m, 4) in format or empty. + bboxes2 (Tensor): shape (B, n, 4) in format or empty. + B indicates the batch dim, in shape (B1, B2, ..., Bn). + If ``is_aligned`` is ``True``, then m and n must be equal. + mode (str): "iou" (intersection over union), "iof" (intersection over + foreground) or "giou" (generalized intersection over union). + Default "iou". + is_aligned (bool, optional): If True, then m and n must be equal. + Default False. + eps (float, optional): A value added to the denominator for numerical + stability. Default 1e-6. + Returns: + Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 10, 10], + >>> [10, 10, 20, 20], + >>> [32, 32, 38, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 10, 20], + >>> [0, 10, 10, 19], + >>> [10, 10, 20, 20], + >>> ]) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2) + >>> assert overlaps.shape == (3, 3) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) + >>> assert overlaps.shape == (3, ) + Example: + >>> empty = torch.empty(0, 4) + >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) + >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) + >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) + >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) + """ + + assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' + # Either the boxes are empty or the length of boxes' last dimension is 4 + assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) + + # Batch dim must be the same + # Batch dim: (B1, B2, ... 
Bn) + assert bboxes1.shape[:-2] == bboxes2.shape[:-2] + batch_shape = bboxes1.shape[:-2] + + rows = bboxes1.size(-2) + cols = bboxes2.size(-2) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + if is_aligned: + return bboxes1.new(batch_shape + (rows, )) + else: + return bboxes1.new(batch_shape + (rows, cols)) + + area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( + bboxes1[..., 3] - bboxes1[..., 1]) + area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( + bboxes2[..., 3] - bboxes2[..., 1]) + + if is_aligned: + lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] + rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1 + area2 - overlap + else: + union = area1 + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) + enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) + else: + lt = torch.max(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) # [B, rows, cols, 2] + rb = torch.min(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1[..., None] + area2[..., None, :] - overlap + else: + union = area1[..., None] + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) + enclosed_rb = torch.max(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) + + eps = union.new_tensor([eps]) + union = torch.max(union, eps) + ious = overlap / union + if mode in ['iou', 'iof']: + return ious + # calculate gious + enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) + enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] + enclose_area = torch.max(enclose_area, eps) + gious = ious - (enclose_area - union) / enclose_area + return gious + + +@TASK_UTILS.register_module() +class BBox3DL1Cost(object): + """BBox3DL1Cost. + + Args: + weight (int | float, optional): loss_weight + """ + + def __init__(self, weight=1.): + self.weight = weight + + def __call__(self, bbox_pred, gt_bboxes): + """ + Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + [num_query, 4]. + gt_bboxes (Tensor): Ground truth boxes with normalized + coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. + Returns: + torch.Tensor: bbox_cost value with weight + """ + bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) + return bbox_cost * self.weight + + +@TASK_UTILS.register_module() +class FocalLossCost: + """FocalLossCost. + Args: + weight (int | float, optional): loss_weight + alpha (int | float, optional): focal_loss alpha + gamma (int | float, optional): focal_loss gamma + eps (float, optional): default 1e-12 + binary_input (bool, optional): Whether the input is binary, + default False. 
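+    Note:
+        With sigmoid probability ``p`` for the ground-truth class, the cost
+        computed below is
+        ``-alpha * (1 - p)**gamma * log(p) + (1 - alpha) * p**gamma * log(1 - p)``
+        (``eps`` terms omitted), i.e. the focal-loss term for treating the
+        prediction as a positive of that class minus the term for treating
+        it as a negative.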
+ Examples: + >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost + >>> import torch + >>> self = FocalLossCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3236, -0.3364, -0.2699], + [-0.3439, -0.3209, -0.4807], + [-0.4099, -0.3795, -0.2929], + [-0.1950, -0.1207, -0.2626]]) + """ + + def __init__(self, + weight=1., + alpha=0.25, + gamma=2, + eps=1e-12, + binary_input=False): + self.weight = weight + self.alpha = alpha + self.gamma = gamma + self.eps = eps + self.binary_input = binary_input + + def _focal_loss_cost(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + (num_query, num_class). + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + Returns: + torch.Tensor: cls_cost value with weight + """ + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + + cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] + return cls_cost * self.weight + + def _mask_focal_loss_cost(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classfication logits + in shape (num_query, d1, ..., dn), dtype=torch.float32. + gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn), + dtype=torch.long. Labels should be binary. + Returns: + Tensor: Focal cost matrix with weight in shape\ + (num_query, num_gt). + """ + cls_pred = cls_pred.flatten(1) + gt_labels = gt_labels.flatten(1).float() + n = cls_pred.shape[1] + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + + cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ + torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) + return cls_cost / n * self.weight + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classfication logits. + gt_labels (Tensor)): Labels. + Returns: + Tensor: Focal cost matrix with weight in shape\ + (num_query, num_gt). + """ + if self.binary_input: + return self._mask_focal_loss_cost(cls_pred, gt_labels) + else: + return self._focal_loss_cost(cls_pred, gt_labels) + + +@TASK_UTILS.register_module() +class IoUCost: + """IoUCost. + Args: + iou_mode (str, optional): iou mode such as 'iou' | 'giou' + weight (int | float, optional): loss weight + Examples: + >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost + >>> import torch + >>> self = IoUCost() + >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) + >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) + >>> self(bboxes, gt_bboxes) + tensor([[-0.1250, 0.1667], + [ 0.1667, -0.5000]]) + """ + + def __init__(self, iou_mode='giou', weight=1.): + self.weight = weight + self.iou_mode = iou_mode + + def __call__(self, bboxes, gt_bboxes): + """ + Args: + bboxes (Tensor): Predicted boxes with unnormalized coordinates + (x1, y1, x2, y2). Shape (num_query, 4). + gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape (num_gt, 4). 
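+            Note:
+                In ``HungarianAssigner3D`` above the default ``iou_cost``
+                weight is 0.0, so this 2D IoU term is effectively unused for
+                the 3D matching unless a config overrides it.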
+ Returns: + torch.Tensor: iou_cost value with weight + """ + # overlaps: [num_bboxes, num_gt] + overlaps = bbox_overlaps( + bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False) + # The 1 is a constant that doesn't change the matching, so omitted. + iou_cost = -overlaps + return iou_cost * self.weight diff --git a/projects/PETR/petr/nms_free_coder.py b/projects/PETR/petr/nms_free_coder.py new file mode 100755 index 0000000..d1415d4 --- /dev/null +++ b/projects/PETR/petr/nms_free_coder.py @@ -0,0 +1,246 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2021 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from DETR3D (https://github.com/WangYueFt/detr3d) +# Copyright (c) 2021 Wang, Yue +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. +# ------------------------------------------------------------------------ +import torch +import torch.nn.functional as F +from mmdet.models.task_modules import BaseBBoxCoder + +from mmdet3d.registry import TASK_UTILS +from projects.PETR.petr.utils import denormalize_bbox + + +@TASK_UTILS.register_module() +class NMSFreeCoder(BaseBBoxCoder): + """Bbox coder for NMS-free detector. + + Args: + pc_range (list[float]): Range of point cloud. + post_center_range (list[float]): Limit of the center. + Default: None. + max_num (int): Max number to be kept. Default: 100. + score_threshold (float): Threshold to filter boxes based on score. + Default: None. + code_size (int): Code size of bboxes. Default: 9 + """ + + def __init__(self, + pc_range, + voxel_size=None, + post_center_range=None, + max_num=100, + score_threshold=None, + num_classes=10): + + self.pc_range = pc_range + self.voxel_size = voxel_size + self.post_center_range = post_center_range + self.max_num = max_num + self.score_threshold = score_threshold + self.num_classes = num_classes + + def encode(self): + pass + + def decode_single(self, cls_scores, bbox_preds): + """Decode bboxes. + + Args: + cls_scores (Tensor): Outputs from the classification head, \ + shape [num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + bbox_preds (Tensor): Outputs from the regression \ + head with normalized coordinate format \ + (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \ + Shape [num_query, 9]. + Returns: + list[dict]: Decoded boxes. 
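+            Note:
+                The top-k below is taken over the flattened
+                ``num_query * num_classes`` sigmoid scores, so
+                ``labels = indexes % num_classes`` and
+                ``bbox_index = indexes // num_classes`` recover the class and
+                the query index of every kept box.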
+ """ + max_num = self.max_num + + cls_scores = cls_scores.sigmoid() + scores, indexes = cls_scores.view(-1).topk(max_num) + labels = indexes % self.num_classes + bbox_index = indexes // self.num_classes + bbox_preds = bbox_preds[bbox_index] + + final_box_preds = denormalize_bbox(bbox_preds, self.pc_range) + final_scores = scores + final_preds = labels + + # use score threshold + if self.score_threshold is not None: + thresh_mask = final_scores > self.score_threshold + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=scores.device) + + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(1) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(1) + + if self.score_threshold: + mask &= thresh_mask + + boxes3d = final_box_preds[mask] + scores = final_scores[mask] + labels = final_preds[mask] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + return predictions_dict + + def decode(self, preds_dicts): + """Decode bboxes. + + Args: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format \ + (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \ + Shape [nb_dec, bs, num_query, 9]. + Returns: + list[dict]: Decoded boxes. + """ + all_cls_scores = preds_dicts['all_cls_scores'][-1] + all_bbox_preds = preds_dicts['all_bbox_preds'][-1] + + batch_size = all_cls_scores.size()[0] + predictions_list = [] + for i in range(batch_size): + predictions_list.append( + self.decode_single(all_cls_scores[i], all_bbox_preds[i])) + return predictions_list + + +@TASK_UTILS.register_module() +class NMSFreeClsCoder(BaseBBoxCoder): + """Bbox coder for NMS-free detector. + + Args: + pc_range (list[float]): Range of point cloud. + post_center_range (list[float]): Limit of the center. + Default: None. + max_num (int): Max number to be kept. Default: 100. + score_threshold (float): Threshold to filter boxes based on score. + Default: None. + code_size (int): Code size of bboxes. Default: 9 + """ + + def __init__(self, + pc_range, + voxel_size=None, + post_center_range=None, + max_num=100, + score_threshold=None, + num_classes=10): + + self.pc_range = pc_range + self.voxel_size = voxel_size + self.post_center_range = post_center_range + self.max_num = max_num + self.score_threshold = score_threshold + self.num_classes = num_classes + + def encode(self): + pass + + def decode_single(self, cls_scores, bbox_preds): + """Decode bboxes. + + Args: + cls_scores (Tensor): Outputs from the classification head, \ + shape [num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + bbox_preds (Tensor): Outputs from the regression \ + head with normalized coordinate format \ + (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \ + Shape [num_query, 9]. + Returns: + list[dict]: Decoded boxes. 
+ """ + max_num = self.max_num + + # cls_scores = cls_scores.sigmoid() + # scores, indexes = cls_scores.view(-1).topk(max_num) + # labels = indexes % self.num_classes + # bbox_index = indexes // self.num_classes + # bbox_preds = bbox_preds[bbox_index] + + cls_scores, labels = F.softmax(cls_scores, dim=-1)[..., :-1].max(-1) + scores, indexes = cls_scores.view(-1).topk(max_num) + labels = labels[indexes] + bbox_preds = bbox_preds[indexes] + + final_box_preds = denormalize_bbox(bbox_preds, self.pc_range) + final_scores = scores + final_preds = labels + + # use score threshold + if self.score_threshold is not None: + thresh_mask = final_scores > self.score_threshold + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=scores.device) + + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(1) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(1) + + if self.score_threshold: + mask &= thresh_mask + + boxes3d = final_box_preds[mask] + scores = final_scores[mask] + labels = final_preds[mask] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + return predictions_dict + + def decode(self, preds_dicts): + """Decode bboxes. + + Args: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format \ + (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \ + Shape [nb_dec, bs, num_query, 9]. + Returns: + list[dict]: Decoded boxes. + """ + all_cls_scores = preds_dicts['all_cls_scores'][-1] + all_bbox_preds = preds_dicts['all_bbox_preds'][-1] + + batch_size = all_cls_scores.size()[0] + predictions_list = [] + for i in range(batch_size): + predictions_list.append( + self.decode_single(all_cls_scores[i], all_bbox_preds[i])) + return predictions_list diff --git a/projects/PETR/petr/petr.py b/projects/PETR/petr/petr.py new file mode 100755 index 0000000..d4acff6 --- /dev/null +++ b/projects/PETR/petr/petr.py @@ -0,0 +1,282 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from DETR3D (https://github.com/WangYueFt/detr3d) +# Copyright (c) 2021 Wang, Yue +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. 
+# ------------------------------------------------------------------------ + +import torch +from mmengine.structures import InstanceData + +from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector +from mmdet3d.registry import MODELS +from mmdet3d.structures.ops import bbox3d2result +from .grid_mask import GridMask + + +@MODELS.register_module() +class PETR(MVXTwoStageDetector): + """PETR.""" + + def __init__(self, + use_grid_mask=False, + pts_voxel_layer=None, + pts_middle_encoder=None, + pts_fusion_layer=None, + img_backbone=None, + pts_backbone=None, + img_neck=None, + pts_neck=None, + pts_bbox_head=None, + img_roi_head=None, + img_rpn_head=None, + train_cfg=None, + test_cfg=None, + init_cfg=None, + data_preprocessor=None, + **kwargs): + super(PETR, + self).__init__(pts_voxel_layer, pts_middle_encoder, + pts_fusion_layer, img_backbone, pts_backbone, + img_neck, pts_neck, pts_bbox_head, img_roi_head, + img_rpn_head, train_cfg, test_cfg, init_cfg, + data_preprocessor) + self.grid_mask = GridMask( + True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) + self.use_grid_mask = use_grid_mask + + def extract_img_feat(self, img, img_metas): + """Extract features of images.""" + if isinstance(img, list): + img = torch.stack(img, dim=0) + + B = img.size(0) + if img is not None: + input_shape = img.shape[-2:] + # update real input shape of each single img + for img_meta in img_metas: + img_meta.update(input_shape=input_shape) + if img.dim() == 5: + if img.size(0) == 1 and img.size(1) != 1: + img.squeeze_() + else: + B, N, C, H, W = img.size() + img = img.view(B * N, C, H, W) + if self.use_grid_mask: + img = self.grid_mask(img) + img_feats = self.img_backbone(img) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) + else: + return None + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + img_feats_reshaped = [] + for img_feat in img_feats: + BN, C, H, W = img_feat.size() + img_feats_reshaped.append(img_feat.view(B, int(BN / B), C, H, W)) + return img_feats_reshaped + + # @auto_fp16(apply_to=('img'), out_fp32=True) + def extract_feat(self, img, img_metas): + """Extract features from images and points.""" + img_feats = self.extract_img_feat(img, img_metas) + return img_feats + + def forward_pts_train(self, + pts_feats, + gt_bboxes_3d, + gt_labels_3d, + img_metas, + gt_bboxes_ignore=None): + """Forward function for point cloud branch. + + Args: + pts_feats (list[torch.Tensor]): Features of point cloud branch + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth + boxes for each sample. + gt_labels_3d (list[torch.Tensor]): Ground truth labels for + boxes of each sampole + img_metas (list[dict]): Meta information of samples. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + boxes to be ignored. Defaults to None. + Returns: + dict: Losses of each branch. + """ + outs = self.pts_bbox_head(pts_feats, img_metas) + loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs] + losses = self.pts_bbox_head.loss_by_feat(*loss_inputs) + + return losses + + def _forward(self, mode='loss', **kwargs): + """Calls either forward_train or forward_test depending on whether + return_loss=True. + + Note this setting will change the expected inputs. When + `return_loss=True`, img and img_metas are single-nested (i.e. + torch.Tensor and list[dict]), and when `resturn_loss=False`, img and + img_metas should be double nested (i.e. list[torch.Tensor], + list[list[dict]]), with the outer list indicating test time + augmentations. 
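+
+        Note: in this port the plain tensor mode is not implemented and the
+        call below raises ``NotImplementedError``; use :meth:`loss` for
+        training and :meth:`predict` for inference instead.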
+ """ + raise NotImplementedError('tensor mode is yet to add') + + def loss(self, + inputs=None, + data_samples=None, + mode=None, + points=None, + img_metas=None, + gt_bboxes_3d=None, + gt_labels_3d=None, + gt_labels=None, + gt_bboxes=None, + img=None, + proposals=None, + gt_bboxes_ignore=None, + img_depth=None, + img_mask=None): + """Forward training function. + + Args: + points (list[torch.Tensor], optional): Points of each sample. + Defaults to None. + img_metas (list[dict], optional): Meta information of each sample. + Defaults to None. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): + Ground truth 3D boxes. Defaults to None. + gt_labels_3d (list[torch.Tensor], optional): Ground truth labels + of 3D boxes. Defaults to None. + gt_labels (list[torch.Tensor], optional): Ground truth labels + of 2D boxes in images. Defaults to None. + gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in + images. Defaults to None. + img (torch.Tensor optional): Images of each sample with shape + (N, C, H, W). Defaults to None. + proposals ([list[torch.Tensor], optional): Predicted proposals + used for training Fast RCNN. Defaults to None. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + 2D boxes in images to be ignored. Defaults to None. + Returns: + dict: Losses of different branches. + """ + img = inputs['imgs'] + batch_img_metas = [ds.metainfo for ds in data_samples] + batch_gt_instances_3d = [ds.gt_instances_3d for ds in data_samples] + gt_bboxes_3d = [gt.bboxes_3d for gt in batch_gt_instances_3d] + gt_labels_3d = [gt.labels_3d for gt in batch_gt_instances_3d] + gt_bboxes_ignore = None + + batch_img_metas = self.add_lidar2img(img, batch_img_metas) + + img_feats = self.extract_feat(img=img, img_metas=batch_img_metas) + + losses = dict() + losses_pts = self.forward_pts_train(img_feats, gt_bboxes_3d, + gt_labels_3d, batch_img_metas, + gt_bboxes_ignore) + losses.update(losses_pts) + return losses + + def predict(self, inputs=None, data_samples=None, mode=None, **kwargs): + img = inputs['imgs'] + batch_img_metas = [ds.metainfo for ds in data_samples] + for var, name in [(batch_img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError('{} must be a list, but got {}'.format( + name, type(var))) + img = [img] if img is None else img + + batch_img_metas = self.add_lidar2img(img, batch_img_metas) + + results_list_3d = self.simple_test(batch_img_metas, img, **kwargs) + + for i, data_sample in enumerate(data_samples): + results_list_3d_i = InstanceData( + metainfo=results_list_3d[i]['pts_bbox']) + data_sample.pred_instances_3d = results_list_3d_i + data_sample.pred_instances = InstanceData() + + return data_samples + + def simple_test_pts(self, x, img_metas, rescale=False): + """Test function of point cloud branch.""" + outs = self.pts_bbox_head(x, img_metas) + bbox_list = self.pts_bbox_head.get_bboxes( + outs, img_metas, rescale=rescale) + bbox_results = [ + bbox3d2result(bboxes, scores, labels) + for bboxes, scores, labels in bbox_list + ] + return bbox_results + + def simple_test(self, img_metas, img=None, rescale=False): + """Test function without augmentaiton.""" + img_feats = self.extract_feat(img=img, img_metas=img_metas) + + bbox_list = [dict() for i in range(len(img_metas))] + bbox_pts = self.simple_test_pts(img_feats, img_metas, rescale=rescale) + for result_dict, pts_bbox in zip(bbox_list, bbox_pts): + result_dict['pts_bbox'] = pts_bbox + return bbox_list + + def aug_test_pts(self, feats, img_metas, rescale=False): + feats_list = [] + for j 
in range(len(feats[0])): + feats_list_level = [] + for i in range(len(feats)): + feats_list_level.append(feats[i][j]) + feats_list.append(torch.stack(feats_list_level, -1).mean(-1)) + outs = self.pts_bbox_head(feats_list, img_metas) + bbox_list = self.pts_bbox_head.get_bboxes( + outs, img_metas, rescale=rescale) + bbox_results = [ + bbox3d2result(bboxes, scores, labels) + for bboxes, scores, labels in bbox_list + ] + return bbox_results + + def aug_test(self, img_metas, imgs=None, rescale=False): + """Test function with augmentaiton.""" + img_feats = self.extract_feats(img_metas, imgs) + img_metas = img_metas[0] + bbox_list = [dict() for i in range(len(img_metas))] + bbox_pts = self.aug_test_pts(img_feats, img_metas, rescale) + for result_dict, pts_bbox in zip(bbox_list, bbox_pts): + result_dict['pts_bbox'] = pts_bbox + return bbox_list + + # may need speed-up + def add_lidar2img(self, img, batch_input_metas): + """add 'lidar2img' transformation matrix into batch_input_metas. + + Args: + batch_input_metas (list[dict]): Meta information of multiple inputs + in a batch. + Returns: + batch_input_metas (list[dict]): Meta info with lidar2img added + """ + for meta in batch_input_metas: + lidar2img_rts = [] + # obtain lidar to image transformation matrix + for i in range(len(meta['cam2img'])): + lidar2cam_rt = torch.tensor(meta['lidar2cam'][i]).double() + intrinsic = torch.tensor(meta['cam2img'][i]).double() + viewpad = torch.eye(4).double() + viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + lidar2img_rt = (viewpad @ lidar2cam_rt) + # The extrinsics mean the transformation from lidar to camera. + # If anyone want to use the extrinsics as sensor to lidar, + # please use np.linalg.inv(lidar2cam_rt.T) + # and modify the ResizeCropFlipImage + # and LoadMultiViewImageFromMultiSweepsFiles. + lidar2img_rts.append(lidar2img_rt) + meta['lidar2img'] = lidar2img_rts + img_shape = meta['img_shape'][:3] + meta['img_shape'] = [img_shape] * len(img[0]) + + return batch_input_metas diff --git a/projects/PETR/petr/petr_head.py b/projects/PETR/petr/petr_head.py new file mode 100755 index 0000000..2b6e088 --- /dev/null +++ b/projects/PETR/petr/petr_head.py @@ -0,0 +1,825 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from DETR3D (https://github.com/WangYueFt/detr3d) +# Copyright (c) 2021 Wang, Yue +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. 
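+#
+# Shape sketch for the positional encoding defined below (illustrative
+# values): ``pos2posemb3d`` maps normalized reference points of shape
+# (num_query, 3) to sinusoidal embeddings of shape
+# (num_query, 3 * num_pos_feats), e.g.
+#
+#     ref = torch.rand(900, 3)    # 900 queries with coords in [0, 1]
+#     emb = pos2posemb3d(ref)     # -> (900, 384) with the default
+#                                 #    num_pos_feats=128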
+# ------------------------------------------------------------------------ +import math + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, Linear +from mmdet.models.dense_heads.anchor_free_head import AnchorFreeHead +from mmdet.models.layers import NormedLinear +from mmdet.models.layers.transformer import inverse_sigmoid +from mmdet.models.utils import multi_apply +from mmengine.model.weight_init import bias_init_with_prob +from mmengine.structures import InstanceData + +from mmdet3d.registry import MODELS, TASK_UTILS +from projects.PETR.petr.utils import normalize_bbox + + +def pos2posemb3d(pos, num_pos_feats=128, temperature=10000): + scale = 2 * math.pi + pos = pos * scale + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device) + dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) + pos_x = pos[..., 0, None] / dim_t + pos_y = pos[..., 1, None] / dim_t + pos_z = pos[..., 2, None] / dim_t + pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), + dim=-1).flatten(-2) + pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), + dim=-1).flatten(-2) + pos_z = torch.stack((pos_z[..., 0::2].sin(), pos_z[..., 1::2].cos()), + dim=-1).flatten(-2) + posemb = torch.cat((pos_y, pos_x, pos_z), dim=-1) + return posemb + + +@MODELS.register_module() +class PETRHead(AnchorFreeHead): + """Implements the DETR transformer head. See `paper: End-to-End Object + Detection with Transformers. + + `_ for details. + Args: + num_classes (int): Number of categories excluding the background. + in_channels (int): Number of channels in the input feature map. + num_query (int): Number of query in Transformer. + num_reg_fcs (int, optional): Number of fully-connected layers used in + `FFN`, which is then used for the regression head. Default 2. + transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer. + Default: None. + sync_cls_avg_factor (bool): Whether to sync the avg_factor of + all ranks. Default to False. + positional_encoding (obj:`mmcv.ConfigDict`|dict): + Config for position encoding. + loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the + classification loss. Default `CrossEntropyLoss`. + loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the + regression loss. Default `L1Loss`. + loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the + regression iou loss. Default `GIoULoss`. + tran_cfg (obj:`mmcv.ConfigDict`|dict): Training config of + transformer head. + test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of + transformer head. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + _version = 2 + + def __init__(self, + num_classes, + in_channels, + num_query=100, + num_reg_fcs=2, + transformer=None, + sync_cls_avg_factor=False, + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True), + code_weights=None, + bbox_coder=None, + loss_cls=dict( + type='CrossEntropyLoss', + bg_cls_weight=0.1, + use_sigmoid=False, + loss_weight=1.0, + class_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=5.0), + iou_cost=dict( + type='IoUCost', iou_mode='giou', weight=2.0))), + test_cfg=dict(max_per_img=100), + with_position=True, + with_multiview=False, + depth_step=0.8, + depth_num=64, + LID=False, + depth_start=1, + position_range=[-65, -65, -8.0, 65, 65, 8.0], + init_cfg=None, + normedlinear=False, + **kwargs): + # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, + # since it brings inconvenience when the initialization of + # `AnchorFreeHead` is called. + if 'code_size' in kwargs: + self.code_size = kwargs['code_size'] + else: + self.code_size = 10 + if code_weights is not None: + self.code_weights = code_weights + else: + self.code_weights = [ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2 + ] + self.code_weights = self.code_weights[:self.code_size] + self.bg_cls_weight = 0 + self.sync_cls_avg_factor = sync_cls_avg_factor + class_weight = loss_cls.get('class_weight', None) + if class_weight is not None and (self.__class__ is PETRHead): + assert isinstance(class_weight, float), 'Expected ' \ + 'class_weight to have type float. Found ' \ + f'{type(class_weight)}.' + # NOTE following the official DETR rep0, bg_cls_weight means + # relative classification weight of the no-object class. + bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) + assert isinstance(bg_cls_weight, float), 'Expected ' \ + 'bg_cls_weight to have type float. Found ' \ + f'{type(bg_cls_weight)}.' + class_weight = torch.ones(num_classes + 1) * class_weight + # set background class as the last indice + class_weight[num_classes] = bg_cls_weight + loss_cls.update({'class_weight': class_weight}) + if 'bg_cls_weight' in loss_cls: + loss_cls.pop('bg_cls_weight') + self.bg_cls_weight = bg_cls_weight + + if train_cfg: + assert 'assigner' in train_cfg, 'assigner should be provided '\ + 'when train_cfg is set.' + assigner = train_cfg['assigner'] + assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ + 'The classification weight for loss and matcher should be' \ + 'exactly the same.' + assert loss_bbox['loss_weight'] == assigner['reg_cost'][ + 'weight'], 'The regression L1 weight for loss and matcher ' \ + 'should be exactly the same.' + # assert loss_iou['loss_weight'] == assigner['iou_cost'][ + # 'weight'], \ + # 'The regression iou weight for loss and matcher should be' \ + # 'exactly the same.' 
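+            # Illustrative shape of the assigner config consumed here (the
+            # cost weights are placeholders; they must mirror the loss
+            # weights, as asserted above):
+            #
+            #     assigner=dict(
+            #         type='HungarianAssigner3D',
+            #         cls_cost=dict(type='FocalLossCost', weight=2.0),
+            #         reg_cost=dict(type='BBox3DL1Cost', weight=0.25),
+            #         iou_cost=dict(type='IoUCost', weight=0.0),
+            #         pc_range=point_cloud_range)  # point_cloud_range: list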
+ self.assigner = TASK_UTILS.build(assigner) + # DETR sampling=False, so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.sampler = TASK_UTILS.build(sampler_cfg) + + self.num_query = num_query + self.num_classes = num_classes + self.in_channels = in_channels + self.num_reg_fcs = num_reg_fcs + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.fp16_enabled = False + self.embed_dims = 256 + self.depth_step = depth_step + self.depth_num = depth_num + self.position_dim = 3 * self.depth_num + self.position_range = position_range + self.LID = LID + self.depth_start = depth_start + self.position_level = 0 + self.with_position = with_position + self.with_multiview = with_multiview + assert 'num_feats' in positional_encoding + num_feats = positional_encoding['num_feats'] + assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ + f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ + f' and {num_feats}.' + self.act_cfg = transformer.get('act_cfg', + dict(type='ReLU', inplace=True)) + self.num_pred = 6 + self.normedlinear = normedlinear + super(PETRHead, self).__init__( + num_classes=num_classes, + in_channels=in_channels, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + bbox_coder=bbox_coder, + init_cfg=init_cfg) + + self.loss_cls = MODELS.build(loss_cls) + self.loss_bbox = MODELS.build(loss_bbox) + self.loss_iou = MODELS.build(loss_iou) + + if self.loss_cls.use_sigmoid: + self.cls_out_channels = num_classes + else: + self.cls_out_channels = num_classes + 1 + # self.activate = build_activation_layer(self.act_cfg) + # if self.with_multiview or not self.with_position: + # self.positional_encoding = build_positional_encoding( + # positional_encoding) + self.positional_encoding = TASK_UTILS.build(positional_encoding) + self.transformer = MODELS.build(transformer) + self.code_weights = nn.Parameter( + torch.tensor(self.code_weights, requires_grad=False), + requires_grad=False) + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.pc_range = self.bbox_coder.pc_range + self._init_layers() + + def _init_layers(self): + """Initialize layers of the transformer head.""" + if self.with_position: + self.input_proj = Conv2d( + self.in_channels, self.embed_dims, kernel_size=1) + else: + self.input_proj = Conv2d( + self.in_channels, self.embed_dims, kernel_size=1) + + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + if self.normedlinear: + cls_branch.append( + NormedLinear(self.embed_dims, self.cls_out_channels)) + else: + cls_branch.append(Linear(self.embed_dims, self.cls_out_channels)) + fc_cls = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + self.cls_branches = nn.ModuleList( + [fc_cls for _ in range(self.num_pred)]) + self.reg_branches = nn.ModuleList( + [reg_branch for _ in range(self.num_pred)]) + + if self.with_multiview: + self.adapt_pos3d = nn.Sequential( + nn.Conv2d( + self.embed_dims * 3 // 2, + self.embed_dims * 4, + kernel_size=1, + stride=1, + padding=0), + nn.ReLU(), + nn.Conv2d( + self.embed_dims * 4, + self.embed_dims, + kernel_size=1, + stride=1, + padding=0), + ) + else: + self.adapt_pos3d = nn.Sequential( + nn.Conv2d( + self.embed_dims, + self.embed_dims, + 
kernel_size=1, + stride=1, + padding=0), + nn.ReLU(), + nn.Conv2d( + self.embed_dims, + self.embed_dims, + kernel_size=1, + stride=1, + padding=0), + ) + + if self.with_position: + self.position_encoder = nn.Sequential( + nn.Conv2d( + self.position_dim, + self.embed_dims * 4, + kernel_size=1, + stride=1, + padding=0), + nn.ReLU(), + nn.Conv2d( + self.embed_dims * 4, + self.embed_dims, + kernel_size=1, + stride=1, + padding=0), + ) + + self.reference_points = nn.Embedding(self.num_query, 3) + self.query_embedding = nn.Sequential( + nn.Linear(self.embed_dims * 3 // 2, self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims), + ) + + def init_weights(self): + """Initialize weights of the transformer head.""" + # The initialization for transformer is important + self.transformer.init_weights() + nn.init.uniform_(self.reference_points.weight.data, 0, 1) + if self.loss_cls.use_sigmoid: + bias_init = bias_init_with_prob(0.01) + for m in self.cls_branches: + nn.init.constant_(m[-1].bias, bias_init) + + def position_embeding(self, img_feats, img_metas, masks=None): + eps = 1e-5 + pad_h, pad_w = img_metas[0]['pad_shape'] + B, N, C, H, W = img_feats[self.position_level].shape + coords_h = torch.arange( + H, device=img_feats[0].device).float() * pad_h / H + coords_w = torch.arange( + W, device=img_feats[0].device).float() * pad_w / W + + if self.LID: + index = torch.arange( + start=0, + end=self.depth_num, + step=1, + device=img_feats[0].device).float() + index_1 = index + 1 + bin_size = (self.position_range[3] - self.depth_start) / ( + self.depth_num * (1 + self.depth_num)) + coords_d = self.depth_start + bin_size * index * index_1 + else: + index = torch.arange( + start=0, + end=self.depth_num, + step=1, + device=img_feats[0].device).float() + bin_size = (self.position_range[3] - + self.depth_start) / self.depth_num + coords_d = self.depth_start + bin_size * index + + D = coords_d.shape[0] + coords = torch.stack(torch.meshgrid([coords_w, coords_h, coords_d + ])).permute(1, 2, 3, + 0) # W, H, D, 3 + coords = torch.cat((coords, torch.ones_like(coords[..., :1])), -1) + coords[..., :2] = coords[..., :2] * torch.maximum( + coords[..., 2:3], + torch.ones_like(coords[..., 2:3]) * eps) + + img2lidars = [] + for img_meta in img_metas: + img2lidar = [] + for i in range(len(img_meta['lidar2img'])): + img2lidar.append(np.linalg.inv(img_meta['lidar2img'][i])) + img2lidars.append(np.asarray(img2lidar)) + img2lidars = np.asarray(img2lidars) + img2lidars = coords.new_tensor(img2lidars) # (B, N, 4, 4) + + coords = coords.view(1, 1, W, H, D, 4, 1).repeat(B, N, 1, 1, 1, 1, 1) + img2lidars = img2lidars.view(B, N, 1, 1, 1, 4, + 4).repeat(1, 1, W, H, D, 1, 1) + coords3d = torch.matmul(img2lidars, coords).squeeze(-1)[..., :3] + coords3d[..., 0:1] = (coords3d[..., 0:1] - self.position_range[0]) / ( + self.position_range[3] - self.position_range[0]) + coords3d[..., 1:2] = (coords3d[..., 1:2] - self.position_range[1]) / ( + self.position_range[4] - self.position_range[1]) + coords3d[..., 2:3] = (coords3d[..., 2:3] - self.position_range[2]) / ( + self.position_range[5] - self.position_range[2]) + + coords_mask = (coords3d > 1.0) | (coords3d < 0.0) + coords_mask = coords_mask.flatten(-2).sum(-1) > (D * 0.5) + coords_mask = masks | coords_mask.permute(0, 1, 3, 2) + coords3d = coords3d.permute(0, 1, 4, 5, 3, + 2).contiguous().view(B * N, -1, H, W) + coords3d = inverse_sigmoid(coords3d) + coords_position_embeding = self.position_encoder(coords3d) + + return coords_position_embeding.view(B, N, self.embed_dims, 
H, + W), coords_mask + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """load checkpoints.""" + # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, + # since `AnchorFreeHead._load_from_state_dict` should not be + # called here. Invoking the default `Module._load_from_state_dict` + # is enough. + + # Names of some parameters in has been changed. + version = local_metadata.get('version', None) + if (version is None or version < 2) and self.__class__ is PETRHead: + convert_dict = { + '.self_attn.': '.attentions.0.', + # '.ffn.': '.ffns.0.', + '.multihead_attn.': '.attentions.1.', + '.decoder.norm.': '.decoder.post_norm.' + } + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + for ori_key, convert_key in convert_dict.items(): + if ori_key in k: + convert_key = k.replace(ori_key, convert_key) + state_dict[convert_key] = state_dict[k] + del state_dict[k] + + super(AnchorFreeHead, + self)._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, + unexpected_keys, error_msgs) + + def forward(self, mlvl_feats, img_metas): + """Forward function. + + Args: + mlvl_feats (tuple[Tensor]): Features from the upstream + network, each is a 5D-tensor with shape + (B, N, C, H, W). + Returns: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format \ + (cx, cy, w, l, cz, h, theta, vx, vy). \ + Shape [nb_dec, bs, num_query, 9]. + """ + + x = mlvl_feats[0] + batch_size, num_cams = x.size(0), x.size(1) + input_img_h, input_img_w = img_metas[0]['pad_shape'] + masks = x.new_ones((batch_size, num_cams, input_img_h, input_img_w)) + for img_id in range(batch_size): + for cam_id in range(num_cams): + img_h, img_w, _ = img_metas[img_id]['img_shape'][cam_id] + masks[img_id, cam_id, :img_h, :img_w] = 0 + x = self.input_proj(x.flatten(0, 1)) + x = x.view(batch_size, num_cams, *x.shape[-3:]) + # interpolate masks to have the same spatial shape with x + masks = F.interpolate(masks, size=x.shape[-2:]).to(torch.bool) + + if self.with_position: + coords_position_embeding, _ = self.position_embeding( + mlvl_feats, img_metas, masks) + pos_embed = coords_position_embeding + if self.with_multiview: + sin_embed = self.positional_encoding(masks) + sin_embed = self.adapt_pos3d(sin_embed.flatten(0, 1)).view( + x.size()) + pos_embed = pos_embed + sin_embed + else: + pos_embeds = [] + for i in range(num_cams): + xy_embed = self.positional_encoding(masks[:, i, :, :]) + pos_embeds.append(xy_embed.unsqueeze(1)) + sin_embed = torch.cat(pos_embeds, 1) + sin_embed = self.adapt_pos3d(sin_embed.flatten(0, 1)).view( + x.size()) + pos_embed = pos_embed + sin_embed + else: + if self.with_multiview: + pos_embed = self.positional_encoding(masks) + pos_embed = self.adapt_pos3d(pos_embed.flatten(0, 1)).view( + x.size()) + else: + pos_embeds = [] + for i in range(num_cams): + pos_embed = self.positional_encoding(masks[:, i, :, :]) + pos_embeds.append(pos_embed.unsqueeze(1)) + pos_embed = torch.cat(pos_embeds, 1) + + reference_points = self.reference_points.weight + query_embeds = self.query_embedding(pos2posemb3d(reference_points)) + reference_points = reference_points.unsqueeze(0).repeat( + batch_size, 1, 1) # .sigmoid() + + outs_dec, _ = self.transformer(x, masks, query_embeds, pos_embed, + 
self.reg_branches) + outs_dec = torch.nan_to_num(outs_dec) + outputs_classes = [] + outputs_coords = [] + for lvl in range(outs_dec.shape[0]): + reference = inverse_sigmoid(reference_points.clone()) + assert reference.shape[-1] == 3 + outputs_class = self.cls_branches[lvl](outs_dec[lvl]).to( + torch.float32) + tmp = self.reg_branches[lvl](outs_dec[lvl]).to(torch.float32) + + tmp[..., 0:2] += reference[..., 0:2] + tmp[..., 0:2] = tmp[..., 0:2].sigmoid() + tmp[..., 4:5] += reference[..., 2:3] + tmp[..., 4:5] = tmp[..., 4:5].sigmoid() + + outputs_coord = tmp + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + + all_cls_scores = torch.stack(outputs_classes) + all_bbox_preds = torch.stack(outputs_coords) + + all_bbox_preds[..., 0:1] = ( + all_bbox_preds[..., 0:1] * (self.pc_range[3] - self.pc_range[0]) + + self.pc_range[0]) + all_bbox_preds[..., 1:2] = ( + all_bbox_preds[..., 1:2] * (self.pc_range[4] - self.pc_range[1]) + + self.pc_range[1]) + all_bbox_preds[..., 4:5] = ( + all_bbox_preds[..., 4:5] * (self.pc_range[5] - self.pc_range[2]) + + self.pc_range[2]) + + outs = { + 'all_cls_scores': all_cls_scores, + 'all_bbox_preds': all_bbox_preds, + 'enc_cls_scores': None, + 'enc_bbox_preds': None, + } + return outs + + def _get_target_single(self, + cls_score, + bbox_pred, + gt_labels, + gt_bboxes, + gt_bboxes_ignore=None): + """"Compute regression and classification targets for one image. + Outputs from a single decoder layer of a single feature level are used. + Args: + cls_score (Tensor): Box score logits from a single decoder layer + for one image. Shape [num_query, cls_out_channels]. + bbox_pred (Tensor): Sigmoid outputs from a single decoder layer + for one image, with normalized coordinate (cx, cy, w, h) and + shape [num_query, 4]. + gt_bboxes (Tensor): Ground truth bboxes for one image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth class indices for one image + with shape (num_gts, ). + gt_bboxes_ignore (Tensor, optional): Bounding boxes + which can be ignored. Default None. + Returns: + tuple[Tensor]: a tuple containing the following for one image. + - labels (Tensor): Labels of each image. + - label_weights (Tensor]): Label weights of each image. + - bbox_targets (Tensor): BBox targets of each image. + - bbox_weights (Tensor): BBox weights of each image. + - pos_inds (Tensor): Sampled positive indices for each image. + - neg_inds (Tensor): Sampled negative indices for each image. 
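+
+            Note:
+                A ``PseudoSampler`` is used, so every query matched by the
+                Hungarian assigner becomes a positive sample and all remaining
+                queries are treated as negatives.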
+ """ + + num_bboxes = bbox_pred.size(0) + # assigner and sampler + assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, + gt_labels, gt_bboxes_ignore) + pred_instance_3d = InstanceData(priors=bbox_pred) + gt_instances_3d = InstanceData(bboxes_3d=gt_bboxes) + sampling_result = self.sampler.sample(assign_result, pred_instance_3d, + gt_instances_3d) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label targets + labels = gt_bboxes.new_full((num_bboxes, ), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_bboxes.new_ones(num_bboxes) + + # bbox targets + code_size = gt_bboxes.size(1) + bbox_targets = torch.zeros_like(bbox_pred)[..., :code_size] + bbox_weights = torch.zeros_like(bbox_pred) + bbox_weights[pos_inds] = 1.0 + # DETR + bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds) + + def get_targets(self, + cls_scores_list, + bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + gt_bboxes_ignore_list=None): + """"Compute regression and classification targets for a batch image. + Outputs from a single decoder layer of a single feature level are used. + Args: + cls_scores_list (list[Tensor]): Box score logits from a single + decoder layer for each image with shape [num_query, + cls_out_channels]. + bbox_preds_list (list[Tensor]): Sigmoid outputs from a single + decoder layer for each image, with normalized coordinate + (cx, cy, w, h) and shape [num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + Returns: + tuple: a tuple containing the following targets. + - labels_list (list[Tensor]): Labels for all images. + - label_weights_list (list[Tensor]): Label weights for all \ + images. + - bbox_targets_list (list[Tensor]): BBox targets for all \ + images. + - bbox_weights_list (list[Tensor]): BBox weights for all \ + images. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + """ + assert gt_bboxes_ignore_list is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' + num_imgs = len(cls_scores_list) + gt_bboxes_ignore_list = [ + gt_bboxes_ignore_list for _ in range(num_imgs) + ] + gt_labels_list = gt_labels_list[0] + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + pos_inds_list, + neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list, + bbox_preds_list, gt_labels_list, + gt_bboxes_list, gt_bboxes_ignore_list) + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + + def loss_by_feat_single(self, + cls_scores, + bbox_preds, + gt_bboxes_list, + gt_labels_list, + gt_bboxes_ignore_list=None): + """"Loss function for outputs from a single decoder layer of a single + feature level. + + Args: + cls_scores (Tensor): Box score logits from a single decoder layer + for all images. Shape [bs, num_query, cls_out_channels]. 
+ bbox_preds (Tensor): Sigmoid outputs from a single decoder layer + for all images, with normalized coordinate (cx, cy, w, h) and + shape [bs, num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in + [tl_x, tl_y, br_x,loss_by_feat_single br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + Returns: + dict[str, Tensor]: A dictionary of loss components for outputs + from a single decoder layer. + """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] + cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, + gt_bboxes_ignore_list) + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + labels = torch.cat(labels_list, 0) + label_weights = torch.cat(label_weights_list, 0) + bbox_targets = torch.cat(bbox_targets_list, 0) + + bbox_weights = torch.cat(bbox_weights_list, 0) + + # classification loss + cls_scores = cls_scores.reshape(-1, self.cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.bg_cls_weight + # if self.sync_cls_avg_factor: + # cls_avg_factor = reduce_mean( + # cls_scores.new_tensor([cls_avg_factor])) + + cls_avg_factor = max(cls_avg_factor, 1) + loss_cls = self.loss_cls( + cls_scores, labels, label_weights, avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes across all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + # num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + num_total_pos = torch.clamp(num_total_pos, min=1).item() + + # regression L1 loss + bbox_preds = bbox_preds.reshape(-1, bbox_preds.size(-1)) + normalized_bbox_targets = normalize_bbox(bbox_targets, self.pc_range) + isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1) + bbox_weights = bbox_weights * self.code_weights + + loss_bbox = self.loss_bbox( + bbox_preds[isnotnan, :10], + normalized_bbox_targets[isnotnan, :10], + bbox_weights[isnotnan, :10], + avg_factor=num_total_pos) + + loss_cls = torch.nan_to_num(loss_cls) + loss_bbox = torch.nan_to_num(loss_bbox) + return loss_cls, loss_bbox + + def loss_by_feat(self, + gt_bboxes_list, + gt_labels_list, + preds_dicts, + gt_bboxes_ignore=None): + """"Loss function. + Args: + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + preds_dicts: + all_cls_scores (Tensor): Classification score of all + decoder layers, has shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds (Tensor): Sigmoid regression + outputs of all decode layers. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + enc_cls_scores (Tensor): Classification scores of + points on encode feature map , has shape + (N, h*w, num_classes). Only be passed when as_two_stage is + True, otherwise is None. + enc_bbox_preds (Tensor): Regression results of each points + on the encode feature map, has shape (N, h*w, 4). 
Only be + passed when as_two_stage is True, otherwise is None. + gt_bboxes_ignore (list[Tensor], optional): Bounding boxes + which can be ignored for each image. Default None. + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert gt_bboxes_ignore is None, \ + f'{self.__class__.__name__} only supports ' \ + f'for gt_bboxes_ignore setting to None.' + + all_cls_scores = preds_dicts['all_cls_scores'] + all_bbox_preds = preds_dicts['all_bbox_preds'] + enc_cls_scores = preds_dicts['enc_cls_scores'] + enc_bbox_preds = preds_dicts['enc_bbox_preds'] + + num_dec_layers = len(all_cls_scores) + device = gt_labels_list[0].device + + gt_bboxes_list = [ + torch.cat((gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), + dim=1).to(device) for gt_bboxes in gt_bboxes_list + ] + + all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] + all_gt_labels_list = [[gt_labels_list] for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + + losses_cls, losses_bbox = multi_apply(self.loss_by_feat_single, + all_cls_scores, all_bbox_preds, + all_gt_bboxes_list, + all_gt_labels_list, + all_gt_bboxes_ignore_list) + + loss_dict = dict() + # loss of proposal generated from encode feature map. + if enc_cls_scores is not None: + binary_labels_list = [ + torch.zeros_like(gt_labels_list[i]) + for i in range(len(all_gt_labels_list)) + ] + enc_loss_cls, enc_losses_bbox = \ + self.loss_single(enc_cls_scores, enc_bbox_preds, + gt_bboxes_list, binary_labels_list, + gt_bboxes_ignore) + loss_dict['enc_loss_cls'] = enc_loss_cls + loss_dict['enc_loss_bbox'] = enc_losses_bbox + + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_bbox'] = losses_bbox[-1] + + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1], losses_bbox[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + num_dec_layer += 1 + return loss_dict + + def get_bboxes(self, preds_dicts, img_metas, rescale=False): + """Generate bboxes from bbox head predictions. + + Args: + preds_dicts (tuple[list[dict]]): Prediction results. + img_metas (list[dict]): Point cloud and image's meta info. + Returns: + list[dict]: Decoded bbox, scores and labels after nms. + """ + preds_dicts = self.bbox_coder.decode(preds_dicts) + num_samples = len(preds_dicts) + + ret_list = [] + for i in range(num_samples): + preds = preds_dicts[i] + bboxes = preds['bboxes'] + bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + bboxes = img_metas[i]['box_type_3d'](bboxes, bboxes.size(-1)) + scores = preds['scores'] + labels = preds['labels'] + ret_list.append([bboxes, scores, labels]) + return ret_list diff --git a/projects/PETR/petr/petr_transformer.py b/projects/PETR/petr/petr_transformer.py new file mode 100755 index 0000000..dbb4cc3 --- /dev/null +++ b/projects/PETR/petr/petr_transformer.py @@ -0,0 +1,540 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from DETR3D (https://github.com/WangYueFt/detr3d) +# Copyright (c) 2021 Wang, Yue +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. 
+# ------------------------------------------------------------------------ + +import warnings + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import (BaseTransformerLayer, + TransformerLayerSequence) +from mmengine.model import BaseModule +from mmengine.model.weight_init import xavier_init + +# from mmcv.utils import deprecated_api_warning +from mmdet3d.registry import MODELS, TASK_UTILS + + +@MODELS.register_module() +class PETRTransformer(BaseModule): + """Implements the DETR transformer. Following the official DETR + implementation, this module copy-paste from torch.nn.Transformer with + modifications: + + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + `_ for details. + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. + """ + + def __init__(self, encoder=None, decoder=None, init_cfg=None, cross=False): + super(PETRTransformer, self).__init__(init_cfg=init_cfg) + if encoder is not None: + self.encoder = MODELS.build(encoder) + else: + self.encoder = None + self.decoder = MODELS.build(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, x, mask, query_embed, pos_embed, reg_branch=None): + """Forward function for `Transformer`. + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. + """ + bs, n, c, h, w = x.shape + memory = x.permute(1, 3, 4, 0, + 2).reshape(-1, bs, + c) # [bs, n, c, h, w] -> [n*h*w, bs, c] + pos_embed = pos_embed.permute(1, 3, 4, 0, 2).reshape( + -1, bs, c) # [bs, n, c, h, w] -> [n*h*w, bs, c] + query_embed = query_embed.unsqueeze(1).repeat( + 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = mask.view(bs, -1) # [bs, n, h, w] -> [bs, n*h*w] + target = torch.zeros_like(query_embed) + + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask, + reg_branch=reg_branch, + ) + out_dec = out_dec.transpose(1, 2) + memory = memory.reshape(n, h, w, bs, c).permute(3, 0, 4, 1, 2) + return out_dec, memory + + +@MODELS.register_module() +class PETRDNTransformer(BaseModule): + """Implements the DETR transformer. 
Following the official DETR + implementation, this module copy-paste from torch.nn.Transformer with + modifications: + + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + `_ for details. + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. + """ + + def __init__(self, encoder=None, decoder=None, init_cfg=None, cross=False): + super(PETRDNTransformer, self).__init__(init_cfg=init_cfg) + if encoder is not None: + self.encoder = MODELS.build(encoder) + else: + self.encoder = None + self.decoder = MODELS.build(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, + x, + mask, + query_embed, + pos_embed, + attn_masks=None, + reg_branch=None): + """Forward function for `Transformer`. + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. + """ + bs, n, c, h, w = x.shape + memory = x.permute(1, 3, 4, 0, + 2).reshape(-1, bs, + c) # [bs, n, c, h, w] -> [n*h*w, bs, c] + pos_embed = pos_embed.permute(1, 3, 4, 0, 2).reshape( + -1, bs, c) # [bs, n, c, h, w] -> [n*h*w, bs, c] + query_embed = query_embed.transpose( + 0, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = mask.view(bs, -1) # [bs, n, h, w] -> [bs, n*h*w] + target = torch.zeros_like(query_embed) + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask, + attn_masks=[attn_masks, None], + reg_branch=reg_branch, + ) + out_dec = out_dec.transpose(1, 2) + memory = memory.reshape(n, h, w, bs, c).permute(3, 0, 4, 1, 2) + return out_dec, memory + + +@MODELS.register_module() +class PETRTransformerDecoderLayer(BaseTransformerLayer): + """Implements decoder layer in DETR transformer. + + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). 
+ Default:None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default:2. + """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + with_cp=True, + **kwargs): + super(PETRTransformerDecoderLayer, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) + self.use_checkpoint = with_cp + + def _forward( + self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + ): + """Forward function for `TransformerCoder`. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + x = super(PETRTransformerDecoderLayer, self).forward( + query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask, + ) + + return x + + def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerCoder`. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if self.use_checkpoint and self.training: + x = cp.checkpoint( + self._forward, + query, + key, + value, + query_pos, + key_pos, + attn_masks, + query_key_padding_mask, + key_padding_mask, + ) + else: + x = self._forward( + query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask) + return x + + +@MODELS.register_module() +class PETRMultiheadAttention(BaseModule): + """A wrapper for ``torch.nn.MultiheadAttention``. + + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + init_cfg=None, + batch_first=False, + **kwargs): + super(PETRMultiheadAttention, self).__init__(init_cfg) + if 'dropout' in kwargs: + warnings.warn( + 'The arguments `dropout` in MultiheadAttention ' + 'has been deprecated, now you can separately ' + 'set `attn_drop`(float), proj_drop(float), ' + 'and `dropout_layer`(dict) ', DeprecationWarning) + attn_drop = kwargs['dropout'] + dropout_layer['drop_prob'] = kwargs.pop('dropout') + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.batch_first = batch_first + + self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, + **kwargs) + + self.proj_drop = nn.Dropout(proj_drop) + self.dropout_layer = MODELS.build( + dropout_layer) if dropout_layer else nn.Identity() + + # @deprecated_api_warning({'residual': 'identity'}, + # cls_name='MultiheadAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `MultiheadAttention`. + + **kwargs allow passing a more general data flow when combining + with other operations in `transformerlayer`. + Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + If None, the ``query`` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. + identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. + If None, `x` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query, with + the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + Returns: + Tensor: forwarded results with shape + [num_queries, bs, embed_dims] + if self.batch_first is False, else + [bs, num_queries embed_dims]. 
+ """ + + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + out = self.attn( + query=query, + key=key, + value=value, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + +@MODELS.register_module() +class PETRTransformerEncoder(TransformerLayerSequence): + """TransformerEncoder of DETR. + + Args: + post_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. Only used when `self.pre_norm` is `True` + """ + + def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs): + super(PETRTransformerEncoder, self).__init__(*args, **kwargs) + if post_norm_cfg is not None: + self.post_norm = TASK_UTILS.build( + post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None + else: + assert not self.pre_norm, f'Use prenorm in ' \ + f'{self.__class__.__name__},' \ + f'Please specify post_norm_cfg' + self.post_norm = None + + def forward(self, *args, **kwargs): + """Forward function for `TransformerCoder`. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + x = super(PETRTransformerEncoder, self).forward(*args, **kwargs) + if self.post_norm is not None: + x = self.post_norm(x) + return x + + +@MODELS.register_module() +class PETRTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. + + Args: + return_intermediate (bool): Whether to return intermediate outputs. + post_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, + *args, + post_norm_cfg=dict(type='LN'), + return_intermediate=False, + **kwargs): + + super(PETRTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + if post_norm_cfg is not None: + self.post_norm = build_norm_layer(post_norm_cfg, + self.embed_dims)[1] + else: + self.post_norm = None + + def forward(self, query, *args, **kwargs): + """Forward function for `TransformerDecoder`. + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. 
+ """ + if not self.return_intermediate: + x = super().forward(query, *args, **kwargs) + if self.post_norm: + x = self.post_norm(x)[None] + return x + + intermediate = [] + for layer in self.layers: + query = layer(query, *args, **kwargs) + if self.return_intermediate: + if self.post_norm is not None: + intermediate.append(self.post_norm(query)) + else: + intermediate.append(query) + return torch.stack(intermediate) diff --git a/projects/PETR/petr/positional_encoding.py b/projects/PETR/petr/positional_encoding.py new file mode 100755 index 0000000..2fb0a00 --- /dev/null +++ b/projects/PETR/petr/positional_encoding.py @@ -0,0 +1,171 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from mmdetection (https://github.com/open-mmlab/mmdetection) +# Copyright (c) OpenMMLab. All rights reserved. +# ------------------------------------------------------------------------ +import math + +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmdet3d.registry import MODELS, TASK_UTILS + + +@TASK_UTILS.register_module() +class SinePositionalEncoding3D(BaseModule): + """Position encoding with sine and cosine functions. See `End-to-End Object + Detection with Transformers. + + `_ for details. + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. Note the final returned dimension + for each position is 2 times of this value. + temperature (int, optional): The temperature used for scaling + the position embedding. Defaults to 10000. + normalize (bool, optional): Whether to normalize the position + embedding. Defaults to False. + scale (float, optional): A scale factor that scales the position + embedding. The scale will be used only when `normalize` is True. + Defaults to 2*pi. + eps (float, optional): A value added to the denominator for + numerical stability. Defaults to 1e-6. + offset (float): offset add to embed when do the normalization. + Defaults to 0. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + num_feats, + temperature=10000, + normalize=False, + scale=2 * math.pi, + eps=1e-6, + offset=0., + init_cfg=None): + super(SinePositionalEncoding3D, self).__init__(init_cfg) + if normalize: + assert isinstance(scale, (float, int)), 'when normalize is set,' \ + 'scale should be provided and in float or int type, ' \ + f'found {type(scale)}' + self.num_feats = num_feats + self.temperature = temperature + self.normalize = normalize + self.scale = scale + self.eps = eps + self.offset = offset + + def forward(self, mask): + """Forward function for `SinePositionalEncoding`. + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. + """ + # For convenience of exporting to ONNX, it's required to convert + # `masks` from bool to int. 
+ mask = mask.to(torch.int) + not_mask = 1 - mask # logical_not + n_embed = not_mask.cumsum(1, dtype=torch.float32) + y_embed = not_mask.cumsum(2, dtype=torch.float32) + x_embed = not_mask.cumsum(3, dtype=torch.float32) + if self.normalize: + n_embed = (n_embed + self.offset) / \ + (n_embed[:, -1:, :, :] + self.eps) * self.scale + y_embed = (y_embed + self.offset) / \ + (y_embed[:, :, -1:, :] + self.eps) * self.scale + x_embed = (x_embed + self.offset) / \ + (x_embed[:, :, :, -1:] + self.eps) * self.scale + dim_t = torch.arange( + self.num_feats, dtype=torch.float32, device=mask.device) + dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) + pos_n = n_embed[:, :, :, :, None] / dim_t + pos_x = x_embed[:, :, :, :, None] / dim_t + pos_y = y_embed[:, :, :, :, None] / dim_t + # use `view` instead of `flatten` for dynamically exporting to ONNX + B, N, H, W = mask.size() + pos_n = torch.stack( + (pos_n[:, :, :, :, 0::2].sin(), pos_n[:, :, :, :, 1::2].cos()), + dim=4).view(B, N, H, W, -1) + pos_x = torch.stack( + (pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), + dim=4).view(B, N, H, W, -1) + pos_y = torch.stack( + (pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), + dim=4).view(B, N, H, W, -1) + pos = torch.cat((pos_n, pos_y, pos_x), dim=4).permute(0, 1, 4, 2, 3) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'temperature={self.temperature}, ' + repr_str += f'normalize={self.normalize}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'eps={self.eps})' + return repr_str + + +@MODELS.register_module() +class LearnedPositionalEncoding3D(BaseModule): + """Position embedding with learnable embedding weights. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. The final returned dimension for + each position is 2 times of this value. + row_num_embed (int, optional): The dictionary size of row embeddings. + Default 50. + col_num_embed (int, optional): The dictionary size of col embeddings. + Default 50. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_feats, + row_num_embed=50, + col_num_embed=50, + init_cfg=dict(type='Uniform', layer='Embedding')): + super(LearnedPositionalEncoding3D, self).__init__(init_cfg) + self.row_embed = nn.Embedding(row_num_embed, num_feats) + self.col_embed = nn.Embedding(col_num_embed, num_feats) + self.num_feats = num_feats + self.row_num_embed = row_num_embed + self.col_num_embed = col_num_embed + + def forward(self, mask): + """Forward function for `LearnedPositionalEncoding`. + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. 
+ """ + h, w = mask.shape[-2:] + x = torch.arange(w, device=mask.device) + y = torch.arange(h, device=mask.device) + x_embed = self.col_embed(x) + y_embed = self.row_embed(y) + pos = torch.cat( + (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( + 1, w, 1)), + dim=-1).permute(2, 0, + 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'row_num_embed={self.row_num_embed}, ' + repr_str += f'col_num_embed={self.col_num_embed})' + return repr_str diff --git a/projects/PETR/petr/transforms_3d.py b/projects/PETR/petr/transforms_3d.py new file mode 100755 index 0000000..0c5770e --- /dev/null +++ b/projects/PETR/petr/transforms_3d.py @@ -0,0 +1,209 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmcv.transforms import BaseTransform +from PIL import Image + +from mmdet3d.registry import TRANSFORMS +from mmdet3d.structures.bbox_3d import LiDARInstance3DBoxes + + +@TRANSFORMS.register_module() +class ResizeCropFlipImage(BaseTransform): + """Random resize, Crop and flip the image + Args: + size (tuple, optional): Fixed padding size. + """ + + def __init__(self, data_aug_conf=None, training=True): + self.data_aug_conf = data_aug_conf + self.training = training + + def transform(self, results): + """Call function to pad images, masks, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Updated result dict. + """ + + imgs = results['img'] + N = len(imgs) + new_imgs = [] + resize, resize_dims, crop, flip, rotate = self._sample_augmentation() + results['lidar2cam'] = np.array(results['lidar2cam']) + for i in range(N): + intrinsic = np.array(results['cam2img'][i]) + viewpad = np.eye(4) + viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + results['cam2img'][i] = viewpad + img = Image.fromarray(np.uint8(imgs[i])) + # augmentation (resize, crop, horizontal flip, rotate) + # different view use different aug (BEV Det) + img, ida_mat = self._img_transform( + img, + resize=resize, + resize_dims=resize_dims, + crop=crop, + flip=flip, + rotate=rotate, + ) + new_imgs.append(np.array(img).astype(np.float32)) + results['cam2img'][ + i][:3, :3] = ida_mat @ results['cam2img'][i][:3, :3] + + results['img'] = new_imgs + + return results + + def _get_rot(self, h): + + return torch.Tensor([ + [np.cos(h), np.sin(h)], + [-np.sin(h), np.cos(h)], + ]) + + def _img_transform(self, img, resize, resize_dims, crop, flip, rotate): + ida_rot = torch.eye(2) + ida_tran = torch.zeros(2) + # adjust image + img = img.resize(resize_dims) + img = img.crop(crop) + if flip: + img = img.transpose(method=Image.FLIP_LEFT_RIGHT) + img = img.rotate(rotate) + + # post-homography transformation + ida_rot *= resize + ida_tran -= torch.Tensor(crop[:2]) + if flip: + A = torch.Tensor([[-1, 0], [0, 1]]) + b = torch.Tensor([crop[2] - crop[0], 0]) + ida_rot = A.matmul(ida_rot) + ida_tran = A.matmul(ida_tran) + b + A = self._get_rot(rotate / 180 * np.pi) + b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2 + b = A.matmul(-b) + b + ida_rot = A.matmul(ida_rot) + ida_tran = A.matmul(ida_tran) + b + ida_mat = torch.eye(3) + ida_mat[:2, :2] = ida_rot + ida_mat[:2, 2] = ida_tran + return img, ida_mat + + def _sample_augmentation(self): + H, W = self.data_aug_conf['H'], self.data_aug_conf['W'] + fH, fW = self.data_aug_conf['final_dim'] + if 
self.training: + resize = np.random.uniform(*self.data_aug_conf['resize_lim']) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int( + (1 - np.random.uniform(*self.data_aug_conf['bot_pct_lim'])) * + newH) - fH + crop_w = int(np.random.uniform(0, max(0, newW - fW))) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + if self.data_aug_conf['rand_flip'] and np.random.choice([0, 1]): + flip = True + rotate = np.random.uniform(*self.data_aug_conf['rot_lim']) + else: + resize = max(fH / H, fW / W) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int( + (1 - np.mean(self.data_aug_conf['bot_pct_lim'])) * newH) - fH + crop_w = int(max(0, newW - fW) / 2) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + rotate = 0 + return resize, resize_dims, crop, flip, rotate + + +@TRANSFORMS.register_module() +class GlobalRotScaleTransImage(BaseTransform): + """Random resize, Crop and flip the image + Args: + size (tuple, optional): Fixed padding size. + """ + + def __init__( + self, + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + reverse_angle=False, + training=True, + ): + + self.rot_range = rot_range + self.scale_ratio_range = scale_ratio_range + self.translation_std = translation_std + + self.reverse_angle = reverse_angle + self.training = training + + def transform(self, results): + """Call function to pad images, masks, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Updated result dict. + """ + # random rotate + rot_angle = np.random.uniform(*self.rot_range) + + self.rotate_bev_along_z(results, rot_angle) + if self.reverse_angle: + rot_angle *= -1 + results['gt_bboxes_3d'].rotate(np.array(rot_angle)) + + # random scale + scale_ratio = np.random.uniform(*self.scale_ratio_range) + self.scale_xyz(results, scale_ratio) + results['gt_bboxes_3d'].scale(scale_ratio) + + # TODO: support translation + if not self.reverse_angle: + gt_bboxes_3d = results['gt_bboxes_3d'].tensor.numpy() + gt_bboxes_3d[:, 6] -= 2 * rot_angle + results['gt_bboxes_3d'] = LiDARInstance3DBoxes( + gt_bboxes_3d, box_dim=9) + + return results + + def rotate_bev_along_z(self, results, angle): + rot_cos = torch.cos(torch.tensor(angle)) + rot_sin = torch.sin(torch.tensor(angle)) + + rot_mat = torch.tensor([[rot_cos, -rot_sin, 0, 0], + [rot_sin, rot_cos, 0, 0], [0, 0, 1, 0], + [0, 0, 0, 1]]) + rot_mat_inv = torch.inverse(rot_mat) + num_view = len(results['lidar2cam']) + for view in range(num_view): + results['lidar2cam'][view] = ( + torch.tensor(np.array(results['lidar2cam'][view]).T).float() + @ rot_mat_inv).T.numpy() + + return + + def scale_xyz(self, results, scale_ratio): + rot_mat = torch.tensor([ + [scale_ratio, 0, 0, 0], + [0, scale_ratio, 0, 0], + [0, 0, scale_ratio, 0], + [0, 0, 0, 1], + ]) + + rot_mat_inv = torch.inverse(rot_mat) + + num_view = len(results['lidar2cam']) + for view in range(num_view): + results['lidar2cam'][view] = (torch.tensor( + rot_mat_inv.T + @ results['lidar2cam'][view].T).float()).T.numpy() + + return diff --git a/projects/PETR/petr/utils.py b/projects/PETR/petr/utils.py new file mode 100755 index 0000000..edf2b76 --- /dev/null +++ b/projects/PETR/petr/utils.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
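The two helpers defined just below convert LiDAR boxes `(cx, cy, cz, l, w, h, yaw[, vx, vy])` to and from the 10-dimensional regression format used by the PETR head, `(cx, cy, log l, log w, cz, log h, sin r, cos r[, vx, vy])` with `r = -yaw - pi/2`. A minimal round-trip check (a sketch, assuming mmdet3d is installed and the snippet is run from the repository root so that `projects.PETR.petr` is importable):

```python
import torch

from projects.PETR.petr.utils import denormalize_bbox, normalize_bbox

# One box: (cx, cy, cz, l, w, h, yaw, vx, vy).
boxes = torch.tensor([[10.0, -4.0, -1.0, 4.2, 1.9, 1.6, 0.3, 1.0, 0.5]])

encoded = normalize_bbox(boxes, pc_range=None)     # pc_range is unused by these helpers
decoded = denormalize_bbox(encoded, pc_range=None)

print(encoded.shape)   # torch.Size([1, 10])
print(decoded[0, :6])  # center and size come back (up to float rounding)
print(decoded[0, 6])   # yaw comes back wrapped to an equivalent angle
```

This 10-dimensional layout is what the head's `loss_by_feat_single` slices with `[..., :10]` before applying the L1 regression loss.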
+import numpy as np +import torch + +from mmdet3d.structures.bbox_3d.utils import limit_period + + +def normalize_bbox(bboxes, pc_range): + + cx = bboxes[..., 0:1] + cy = bboxes[..., 1:2] + cz = bboxes[..., 2:3] + length = bboxes[..., 3:4].log() + width = bboxes[..., 4:5].log() + height = bboxes[..., 5:6].log() + + rot = -bboxes[..., 6:7] - np.pi / 2 + rot = limit_period(rot, period=np.pi * 2) + if bboxes.size(-1) > 7: + vx = bboxes[..., 7:8] + vy = bboxes[..., 8:9] + normalized_bboxes = torch.cat( + (cx, cy, length, width, cz, height, rot.sin(), rot.cos(), vx, vy), + dim=-1) + else: + normalized_bboxes = torch.cat( + (cx, cy, length, width, cz, height, rot.sin(), rot.cos()), dim=-1) + return normalized_bboxes + + +def denormalize_bbox(normalized_bboxes, pc_range): + # rotation + rot_sine = normalized_bboxes[..., 6:7] + + rot_cosine = normalized_bboxes[..., 7:8] + rot = torch.atan2(rot_sine, rot_cosine) + rot = -rot - np.pi / 2 + rot = limit_period(rot, period=np.pi * 2) + + # center in the bev + cx = normalized_bboxes[..., 0:1] + cy = normalized_bboxes[..., 1:2] + cz = normalized_bboxes[..., 4:5] + + # size + length = normalized_bboxes[..., 2:3] + width = normalized_bboxes[..., 3:4] + height = normalized_bboxes[..., 5:6] + + width = width.exp() + length = length.exp() + height = height.exp() + if normalized_bboxes.size(-1) > 8: + # velocity + vx = normalized_bboxes[:, 8:9] + vy = normalized_bboxes[:, 9:10] + denormalized_bboxes = torch.cat( + [cx, cy, cz, length, width, height, rot, vx, vy], dim=-1) + else: + denormalized_bboxes = torch.cat( + [cx, cy, cz, length, width, height, rot], dim=-1) + + return denormalized_bboxes diff --git a/projects/PETR/petr/vovnetcp.py b/projects/PETR/petr/vovnetcp.py new file mode 100755 index 0000000..62f0fde --- /dev/null +++ b/projects/PETR/petr/vovnetcp.py @@ -0,0 +1,475 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from DETR3D (https://github.com/WangYueFt/detr3d) +# Copyright (c) 2021 Wang, Yue +# ------------------------------------------------------------------------ +# Copyright (c) Youngwan Lee (ETRI) All Rights Reserved. +# Copyright 2021 Toyota Research Institute. All rights reserved. 
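+# VoVNetCP is a VoVNet backbone (OSA blocks with eSE channel attention) whose
+# OSA blocks can run under torch.utils.checkpoint (the `with_cp` flag, hence
+# the "CP" suffix), trading extra compute for activation memory on the
+# multi-camera inputs used by PETR.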
+# ------------------------------------------------------------------------ +import warnings +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmdet3d.registry import MODELS + +VoVNet19_slim_dw_eSE = { + 'stem': [64, 64, 64], + 'stage_conv_ch': [64, 80, 96, 112], + 'stage_out_ch': [112, 256, 384, 512], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + 'dw': True +} + +VoVNet19_dw_eSE = { + 'stem': [64, 64, 64], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + 'dw': True +} + +VoVNet19_slim_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [64, 80, 96, 112], + 'stage_out_ch': [112, 256, 384, 512], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + 'dw': False +} + +VoVNet19_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + 'dw': False +} + +VoVNet39_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 5, + 'block_per_stage': [1, 1, 2, 2], + 'eSE': True, + 'dw': False +} + +VoVNet57_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 5, + 'block_per_stage': [1, 1, 4, 3], + 'eSE': True, + 'dw': False +} + +VoVNet99_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [128, 160, 192, 224], + 'stage_out_ch': [256, 512, 768, 1024], + 'layer_per_block': 5, + 'block_per_stage': [1, 3, 9, 3], + 'eSE': True, + 'dw': False +} + +_STAGE_SPECS = { + 'V-19-slim-dw-eSE': VoVNet19_slim_dw_eSE, + 'V-19-dw-eSE': VoVNet19_dw_eSE, + 'V-19-slim-eSE': VoVNet19_slim_eSE, + 'V-19-eSE': VoVNet19_eSE, + 'V-39-eSE': VoVNet39_eSE, + 'V-57-eSE': VoVNet57_eSE, + 'V-99-eSE': VoVNet99_eSE, +} + + +def dw_conv3x3(in_channels, + out_channels, + module_name, + postfix, + stride=1, + kernel_size=3, + padding=1): + """3x3 convolution with padding.""" + return [ + ('{}_{}/dw_conv3x3'.format(module_name, postfix), + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=out_channels, + bias=False)), + ('{}_{}/pw_conv1x1'.format(module_name, postfix), + nn.Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=False)), + ('{}_{}/pw_norm'.format(module_name, + postfix), nn.BatchNorm2d(out_channels)), + ('{}_{}/pw_relu'.format(module_name, postfix), nn.ReLU(inplace=True)), + ] + + +def conv3x3(in_channels, + out_channels, + module_name, + postfix, + stride=1, + groups=1, + kernel_size=3, + padding=1): + """3x3 convolution with padding.""" + return [ + ( + f'{module_name}_{postfix}/conv', + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False, + ), + ), + (f'{module_name}_{postfix}/norm', nn.BatchNorm2d(out_channels)), + (f'{module_name}_{postfix}/relu', nn.ReLU(inplace=True)), + ] + + +def conv1x1(in_channels, + out_channels, + module_name, + postfix, + stride=1, + groups=1, + kernel_size=1, + padding=0): + """1x1 convolution with padding.""" + return [ + ( + f'{module_name}_{postfix}/conv', + 
nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False, + ), + ), + (f'{module_name}_{postfix}/norm', nn.BatchNorm2d(out_channels)), + (f'{module_name}_{postfix}/relu', nn.ReLU(inplace=True)), + ] + + +class Hsigmoid(nn.Module): + + def __init__(self, inplace=True): + super(Hsigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return F.relu6(x + 3.0, inplace=self.inplace) / 6.0 + + +class eSEModule(nn.Module): + + def __init__(self, channel, reduction=4): + super(eSEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0) + self.hsigmoid = Hsigmoid() + + def forward(self, x): + input = x + x = self.avg_pool(x) + x = self.fc(x) + x = self.hsigmoid(x) + return input * x + + +class _OSA_module(nn.Module): + + def __init__(self, + in_ch, + stage_ch, + concat_ch, + layer_per_block, + module_name, + SE=False, + identity=False, + depthwise=False, + with_cp=True): + + super(_OSA_module, self).__init__() + + self.identity = identity + self.depthwise = depthwise + self.isReduced = False + self.use_checkpoint = with_cp + self.layers = nn.ModuleList() + in_channel = in_ch + if self.depthwise and in_channel != stage_ch: + self.isReduced = True + self.conv_reduction = nn.Sequential( + OrderedDict( + conv1x1(in_channel, stage_ch, + '{}_reduction'.format(module_name), '0'))) + for i in range(layer_per_block): + if self.depthwise: + self.layers.append( + nn.Sequential( + OrderedDict( + dw_conv3x3(stage_ch, stage_ch, module_name, i)))) + else: + self.layers.append( + nn.Sequential( + OrderedDict( + conv3x3(in_channel, stage_ch, module_name, i)))) + in_channel = stage_ch + + # feature aggregation + in_channel = in_ch + layer_per_block * stage_ch + self.concat = nn.Sequential( + OrderedDict(conv1x1(in_channel, concat_ch, module_name, 'concat'))) + + self.ese = eSEModule(concat_ch) + + def _forward(self, x): + + identity_feat = x + + output = [] + output.append(x) + if self.depthwise and self.isReduced: + x = self.conv_reduction(x) + for layer in self.layers: + x = layer(x) + output.append(x) + + x = torch.cat(output, dim=1) + xt = self.concat(x) + + xt = self.ese(xt) + + if self.identity: + xt = xt + identity_feat + + return xt + + def forward(self, x): + + if self.use_checkpoint and self.training: + xt = cp.checkpoint(self._forward, x) + else: + xt = self._forward(x) + + return xt + + +class _OSA_stage(nn.Sequential): + + def __init__(self, + in_ch, + stage_ch, + concat_ch, + block_per_stage, + layer_per_block, + stage_num, + SE=False, + depthwise=False): + + super(_OSA_stage, self).__init__() + + if not stage_num == 2: + self.add_module( + 'Pooling', + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)) + + if block_per_stage != 1: + SE = False + module_name = f'OSA{stage_num}_1' + self.add_module( + module_name, + _OSA_module( + in_ch, + stage_ch, + concat_ch, + layer_per_block, + module_name, + SE, + depthwise=depthwise)) + for i in range(block_per_stage - 1): + if i != block_per_stage - 2: # last block + SE = False + module_name = f'OSA{stage_num}_{i + 2}' + self.add_module( + module_name, + _OSA_module( + concat_ch, + stage_ch, + concat_ch, + layer_per_block, + module_name, + SE, + identity=True, + depthwise=depthwise), + ) + + +@MODELS.register_module() +class VoVNetCP(BaseModule): + + def __init__(self, + spec_name, + input_ch=3, + out_features=None, + frozen_stages=-1, + norm_eval=True, + pretrained=None, + 
init_cfg=None): + """ + Args: + input_ch(int) : the number of input channel + out_features (list[str]): name of the layers whose outputs should + be returned in forward. Can be anything in "stem", "stage2" ... + """ + super(VoVNetCP, self).__init__(init_cfg) + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + stage_specs = _STAGE_SPECS[spec_name] + + stem_ch = stage_specs['stem'] + config_stage_ch = stage_specs['stage_conv_ch'] + config_concat_ch = stage_specs['stage_out_ch'] + block_per_stage = stage_specs['block_per_stage'] + layer_per_block = stage_specs['layer_per_block'] + SE = stage_specs['eSE'] + depthwise = stage_specs['dw'] + + self._out_features = out_features + + # Stem module + conv_type = dw_conv3x3 if depthwise else conv3x3 + stem = conv3x3(input_ch, stem_ch[0], 'stem', '1', 2) + stem += conv_type(stem_ch[0], stem_ch[1], 'stem', '2', 1) + stem += conv_type(stem_ch[1], stem_ch[2], 'stem', '3', 2) + self.add_module('stem', nn.Sequential((OrderedDict(stem)))) + current_stirde = 4 + self._out_feature_strides = { + 'stem': current_stirde, + 'stage2': current_stirde + } + self._out_feature_channels = {'stem': stem_ch[2]} + + stem_out_ch = [stem_ch[2]] + in_ch_list = stem_out_ch + config_concat_ch[:-1] + # OSA stages + self.stage_names = [] + for i in range(4): # num_stages + name = 'stage%d' % (i + 2) # stage 2 ... stage 5 + self.stage_names.append(name) + self.add_module( + name, + _OSA_stage( + in_ch_list[i], + config_stage_ch[i], + config_concat_ch[i], + block_per_stage[i], + layer_per_block, + i + 2, + SE, + depthwise, + ), + ) + + self._out_feature_channels[name] = config_concat_ch[i] + if not i == 0: + self._out_feature_strides[name] = current_stirde = int( + current_stirde * 2) + + # initialize weights + # self._initialize_weights() + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + + # def forward(self, x): + # outputs = {} + # x = self.stem(x) + # if "stem" in self._out_features: + # outputs["stem"] = x + # for name in self.stage_names: + # x = getattr(self, name)(x) + # if name in self._out_features: + # outputs[name] = x + + # return outputs + + def forward(self, x): + outputs = [] + x = self.stem(x) + if 'stem' in self._out_features: + outputs.append(x) + for name in self.stage_names: + x = getattr(self, name)(x) + if name in self._out_features: + outputs.append(x) + + return outputs + + def _freeze_stages(self): + if self.frozen_stages >= 0: + m = getattr(self, 'stem') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'stage{i+1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(VoVNetCP, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/projects/TR3D/README.md b/projects/TR3D/README.md new file mode 100755 index 0000000..2b8e20d --- /dev/null +++ b/projects/TR3D/README.md @@ -0,0 +1,97 @@ +# TR3D: Towards Real-Time Indoor 3D Object Detection + +> [TR3D: Towards Real-Time Indoor 3D Object 
Detection](https://arxiv.org/abs/2302.02858) + +## Abstract + +Recently, sparse 3D convolutions have changed 3D object detection. Performing on par with the voting-based approaches, 3D CNNs are memory-efficient and scale to large scenes better. However, there is still room for improvement. With a conscious, practice-oriented approach to problem-solving, we analyze the performance of such methods and localize the weaknesses. Applying modifications that resolve the found issues one by one, we end up with TR3D: a fast fully-convolutional 3D object detection model trained end-to-end, that achieves state-of-the-art results on the standard benchmarks, ScanNet v2, SUN RGB-D, and S3DIS. Moreover, to take advantage of both point cloud and RGB inputs, we introduce an early fusion of 2D and 3D features. We employ our fusion module to make conventional 3D object detection methods multimodal and demonstrate an impressive boost in performance. Our model with early feature fusion, which we refer to as TR3D+FF, outperforms existing 3D object detection approaches on the SUN RGB-D dataset. Overall, besides being accurate, both TR3D and TR3D+FF models are lightweight, memory-efficient, and fast, thereby marking another milestone on the way toward real-time 3D object detection. + +
    + +## Usage + +Training and inference in this project were tested with `mmdet3d==1.1.0rc3`. + +### Training commands + +In MMDet3D's root directory, run the following command to train the model: + +```bash +python tools/train.py projects/TR3D/configs/tr3d_1xb16_scannet-3d-18class.py +``` + +### Testing commands + +In MMDet3D's root directory, run the following command to test the model: + +```bash +python tools/test.py projects/TR3D/configs/tr3d_1xb16_scannet-3d-18class.py ${CHECKPOINT_PATH} +``` + +## Results and models + +### ScanNet + +| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :--------------------------------------------------------: | :------: | :------------: | :---------: | :---------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [MinkResNet34](./configs/tr3d_1xb16_scannet-3d-18class.py) | 8.6 | 23.7 | 72.9 (72.0) | 59.3 (57.4) | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/tr3d/tr3d_1xb16_scannet-3d-18class/tr3d_1xb16_scannet-3d-18class.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/tr3d/tr3d_1xb16_scannet-3d-18class/tr3d_1xb16_scannet-3d-18class.log.json) | + +### SUN RGB-D + +| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :--------------------------------------------------------: | :------: | :------------: | :---------: | :---------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [MinkResNet34](./configs/tr3d_1xb16_sunrgbd-3d-10class.py) | 3.8 | 27.5 | 67.1 (66.3) | 50.4 (49.6) | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/tr3d/tr3d_1xb16_sunrgbd-3d-10class/tr3d_1xb16_sunrgbd-3d-10class.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/tr3d/tr3d_1xb16_sunrgbd-3d-10class/tr3d_1xb16_sunrgbd-3d-10class.log.json) | + +### S3DIS + +| Backbone | Mem (GB) | Inf time (fps) | AP@0.25 | AP@0.5 | Download | +| :-----------------------------------------------------: | :------: | :------------: | :---------: | :---------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [MinkResNet34](./configs/tr3d_1xb16_s3dis-3d-5class.py) | 15.2 | 21.0 | 74.5 (72.1) | 51.7 (47.6) | [model](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/tr3d/tr3d_1xb16_s3dis-3d-5class/tr3d_1xb16_s3dis-3d-5class.pth) \| [log](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/tr3d/tr3d_1xb16_s3dis-3d-5class/tr3d_1xb16_s3dis-3d-5class.log.json) | + +**Note** + +- We report the results across 5 train runs followed by 5 test runs. Median values are in round brackets. +- Inference time is given for a single NVidia GeForce RTX 4090 GPU. 
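For quick programmatic experiments, the config files used in the commands above are ordinary MMEngine configs and can be loaded directly. A minimal sketch, assuming mmdet3d and its dependencies are installed and the snippet is run from the mmdetection3d root:

```python
from mmengine.config import Config

# `fromfile` resolves the `mmdet3d::` base configs referenced by the project
# config; recent MMEngine versions also import the project's `custom_imports`.
cfg = Config.fromfile('projects/TR3D/configs/tr3d_1xb16_scannet-3d-18class.py')

print(cfg.model.type)                   # MinkSingleStage3DDetector
print(cfg.model.bbox_head.voxel_size)   # 0.01
print(cfg.train_dataloader.batch_size)  # 16
```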
+ +## Citation + +```latex +@article{rukhovich2023tr3d, + title={TR3D: Towards Real-Time Indoor 3D Object Detection}, + author={Rukhovich, Danila and Vorontsova, Anna and Konushin, Anton}, + journal={arXiv preprint arXiv:2302.02858}, + year={2023} +} +``` + +## Checklist + +- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [x] Finish the code + + - [x] Basic docstrings & proper citation + + - [x] Test-time correctness + + - [x] A full README + +- [x] Milestone 2: Indicates a successful model implementation. + + - [x] Training-time correctness + +- [ ] Milestone 3: Good to be a part of our core package! + + - [x] Type hints and docstrings + + - [ ] Unit tests + + - [ ] Code polishing + + - [ ] Metafile.yml + +- [ ] Move your modules into the core package following the codebase's file hierarchy structure. + +- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. diff --git a/projects/TR3D/configs/tr3d.py b/projects/TR3D/configs/tr3d.py new file mode 100755 index 0000000..3352857 --- /dev/null +++ b/projects/TR3D/configs/tr3d.py @@ -0,0 +1,43 @@ +_base_ = ['mmdet3d::_base_/default_runtime.py'] +custom_imports = dict(imports=['projects.TR3D.tr3d']) + +model = dict( + type='MinkSingleStage3DDetector', + data_preprocessor=dict(type='Det3DDataPreprocessor'), + backbone=dict( + type='TR3DMinkResNet', + in_channels=3, + depth=34, + norm='batch', + num_planes=(64, 128, 128, 128)), + neck=dict( + type='TR3DNeck', in_channels=(64, 128, 128, 128), out_channels=128), + bbox_head=dict( + type='TR3DHead', + in_channels=128, + voxel_size=0.01, + pts_center_threshold=6, + num_reg_outs=6), + train_cfg=dict(), + test_cfg=dict(nms_pre=1000, iou_thr=0.5, score_thr=0.01)) + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning rate +param_scheduler = dict( + type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) + +custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)] + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/projects/TR3D/configs/tr3d_1xb16_s3dis-3d-5class.py b/projects/TR3D/configs/tr3d_1xb16_s3dis-3d-5class.py new file mode 100755 index 0000000..8b7869f --- /dev/null +++ b/projects/TR3D/configs/tr3d_1xb16_s3dis-3d-5class.py @@ -0,0 +1,51 @@ +_base_ = ['./tr3d.py', 'mmdet3d::_base_/datasets/s3dis-3d.py'] +custom_imports = dict(imports=['projects.TR3D.tr3d']) + +dataset_type = 'S3DISDataset' +data_root = 'data/s3dis/' +metainfo = dict(classes=('table', 'chair', 'sofa', 'bookcase', 'board')) +train_area = [1, 2, 3, 4, 6] + +model = dict(bbox_head=dict(label2level=[1, 0, 1, 1, 0])) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='LoadAnnotations3D'), + dict(type='PointSample', num_points=100000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[0.95, 1.05], + translation_std=[0.1, 0.1, 0.1], + shift_height=False), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +train_dataloader 
= dict( + batch_size=16, + num_workers=8, + dataset=dict( + dataset=dict(datasets=[ + dict( + type=dataset_type, + data_root=data_root, + ann_file=f's3dis_infos_Area_{i}.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + metainfo=metainfo, + box_type_3d='Depth') for i in train_area + ]))) diff --git a/projects/TR3D/configs/tr3d_1xb16_scannet-3d-18class.py b/projects/TR3D/configs/tr3d_1xb16_scannet-3d-18class.py new file mode 100755 index 0000000..e022e47 --- /dev/null +++ b/projects/TR3D/configs/tr3d_1xb16_scannet-3d-18class.py @@ -0,0 +1,68 @@ +_base_ = ['./tr3d.py', 'mmdet3d::_base_/datasets/scannet-3d.py'] +custom_imports = dict(imports=['projects.TR3D.tr3d']) + +model = dict( + bbox_head=dict( + label2level=[0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0])) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='LoadAnnotations3D'), + dict(type='GlobalAlignment', rotation_axis=2), + # We do not sample 100k points for ScanNet, as very few scenes have + # significantly more then 100k points. So we sample 33 to 100% of them. + dict(type='TR3DPointSample', num_points=0.33), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.02, 0.02], + scale_ratio_range=[0.9, 1.1], + translation_std=[0.1, 0.1, 0.1], + shift_height=False), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + # We do not sample 100k points for ScanNet, as very few scenes have + # significantly more then 100k points. So it doesn't affect + # inference time and we can accept all points. 
+ # dict(type='PointSample', num_points=100000), + dict(type='NormalizePointsColor', color_mean=None), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='RepeatDataset', + times=15, + dataset=dict(pipeline=train_pipeline, filter_empty_gt=False))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/projects/TR3D/configs/tr3d_1xb16_sunrgbd-3d-10class.py b/projects/TR3D/configs/tr3d_1xb16_sunrgbd-3d-10class.py new file mode 100755 index 0000000..9cd5579 --- /dev/null +++ b/projects/TR3D/configs/tr3d_1xb16_sunrgbd-3d-10class.py @@ -0,0 +1,62 @@ +_base_ = ['./tr3d.py', 'mmdet3d::_base_/datasets/sunrgbd-3d.py'] +custom_imports = dict(imports=['projects.TR3D.tr3d']) + +model = dict( + bbox_head=dict( + num_reg_outs=8, + label2level=[1, 1, 1, 0, 0, 1, 0, 0, 1, 0], + bbox_loss=dict( + type='TR3DRotatedIoU3DLoss', mode='diou', reduction='none'))) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='LoadAnnotations3D'), + dict(type='PointSample', num_points=100000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[.85, 1.15], + translation_std=[.1, .1, .1], + shift_height=False), + dict( + type='Pack3DDetInputs', + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict(type='PointSample', num_points=100000), + ]), + dict(type='Pack3DDetInputs', keys=['points']) +] +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict(pipeline=train_pipeline, filter_empty_gt=False))) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/projects/TR3D/tr3d/__init__.py b/projects/TR3D/tr3d/__init__.py new file mode 100755 index 0000000..812b9e8 --- /dev/null +++ b/projects/TR3D/tr3d/__init__.py @@ -0,0 +1,11 @@ +from .axis_aligned_iou_loss import TR3DAxisAlignedIoULoss +from .mink_resnet import TR3DMinkResNet +from .rotated_iou_loss import TR3DRotatedIoU3DLoss +from .tr3d_head import TR3DHead +from .tr3d_neck import TR3DNeck +from .transforms_3d import TR3DPointSample + +__all__ = [ + 'TR3DAxisAlignedIoULoss', 'TR3DMinkResNet', 'TR3DRotatedIoU3DLoss', + 'TR3DHead', 'TR3DNeck', 'TR3DPointSample' +] diff --git a/projects/TR3D/tr3d/axis_aligned_iou_loss.py b/projects/TR3D/tr3d/axis_aligned_iou_loss.py new file mode 100755 index 0000000..b56f802 --- /dev/null +++ b/projects/TR3D/tr3d/axis_aligned_iou_loss.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
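As a quick intuition check for the axis-aligned DIoU term implemented just below (illustrative only, plain Python arithmetic rather than the project's tensor code):

```python
# Unit cube vs. the same cube shifted by 0.5 along x,
# boxes given in (x1, y1, z1, x2, y2, z2) form.
pred = (0.0, 0.0, 0.0, 1.0, 1.0, 1.0)
target = (0.5, 0.0, 0.0, 1.5, 1.0, 1.0)

inter = (min(pred[3], target[3]) - max(pred[0], target[0])) * \
        (min(pred[4], target[4]) - max(pred[1], target[1])) * \
        (min(pred[5], target[5]) - max(pred[2], target[2]))  # 0.5
union = 1.0 + 1.0 - inter                                    # 1.5
iou = inter / union                                          # 1/3
r2 = 0.5 ** 2                    # squared distance between box centers
c2 = 1.5 ** 2 + 1.0 + 1.0        # squared diagonal of the enclosing box
print((1.0 - iou) + r2 / c2)     # ~0.725, the per-box DIoU loss value
```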
+from typing import Optional + +import torch +from mmdet.models.losses.utils import weighted_loss +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models import axis_aligned_iou_loss +from mmdet3d.registry import MODELS +from mmdet3d.structures import AxisAlignedBboxOverlaps3D + + +@weighted_loss +def axis_aligned_diou_loss(pred: Tensor, target: Tensor) -> Tensor: + """Calculate the DIoU loss (1-DIoU) of two sets of axis aligned bounding + boxes. Note that predictions and targets are one-to-one corresponded. + + Args: + pred (torch.Tensor): Bbox predictions with shape [..., 6] + (x1, y1, z1, x2, y2, z2). + target (torch.Tensor): Bbox targets (gt) with shape [..., 6] + (x1, y1, z1, x2, y2, z2). + + Returns: + torch.Tensor: DIoU loss between predictions and targets. + """ + axis_aligned_iou = AxisAlignedBboxOverlaps3D()( + pred, target, is_aligned=True) + iou_loss = 1 - axis_aligned_iou + + xp1, yp1, zp1, xp2, yp2, zp2 = pred.split(1, dim=-1) + xt1, yt1, zt1, xt2, yt2, zt2 = target.split(1, dim=-1) + + xpc = (xp1 + xp2) / 2 + ypc = (yp1 + yp2) / 2 + zpc = (zp1 + zp2) / 2 + xtc = (xt1 + xt2) / 2 + ytc = (yt1 + yt2) / 2 + ztc = (zt1 + zt2) / 2 + r2 = (xpc - xtc)**2 + (ypc - ytc)**2 + (zpc - ztc)**2 + + x_min = torch.minimum(xp1, xt1) + x_max = torch.maximum(xp2, xt2) + y_min = torch.minimum(yp1, yt1) + y_max = torch.maximum(yp2, yt2) + z_min = torch.minimum(zp1, zt1) + z_max = torch.maximum(zp2, zt2) + c2 = (x_min - x_max)**2 + (y_min - y_max)**2 + (z_min - z_max)**2 + + diou_loss = iou_loss + (r2 / c2)[:, 0] + + return diou_loss + + +@MODELS.register_module() +class TR3DAxisAlignedIoULoss(nn.Module): + """Calculate the IoU loss (1-IoU) of axis aligned bounding boxes. The only + difference with original AxisAlignedIoULoss is the addition of DIoU mode. + These classes should be merged in the future. + + Args: + mode (str): 'iou' for intersection over union or 'diou' for + distance-iou loss. Defaults to 'iou'. + reduction (str): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. + """ + + def __init__(self, + mode: str = 'iou', + reduction: str = 'mean', + loss_weight: float = 1.0) -> None: + super(TR3DAxisAlignedIoULoss, self).__init__() + assert mode in ['iou', 'diou'] + self.loss = axis_aligned_iou_loss if mode == 'iou' \ + else axis_aligned_diou_loss + assert reduction in ['none', 'sum', 'mean'] + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + avg_factor: Optional[float] = None, + reduction_override: Optional[str] = None, + **kwargs) -> Tensor: + """Forward function of loss calculation. + + Args: + pred (Tensor): Bbox predictions with shape [..., 3]. + target (Tensor): Bbox targets (gt) with shape [..., 3]. + weight (Tensor, optional): Weight of loss. + Defaults to None. + avg_factor (float, optional): Average factor that is used to + average the loss. Defaults to None. + reduction_override (str, optional): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to None. + + Returns: + Tensor: IoU loss between predictions and targets. 
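+
+        Example:
+            A minimal illustrative call; the box values below are made up
+            and the tensors hold axis-aligned corners
+            ``(x1, y1, z1, x2, y2, z2)``::
+
+                import torch
+                loss = TR3DAxisAlignedIoULoss(mode='diou', reduction='mean')
+                pred = torch.tensor([[0., 0., 0., 1., 1., 1.]])
+                target = torch.tensor([[0., 0., 0., 1., 1., 2.]])
+                value = loss(pred, target)  # scalar DIoU loss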
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if (weight is not None) and (not torch.any(weight > 0)) and ( + reduction != 'none'): + return (pred * weight).sum() + return self.loss( + pred, + target, + weight=weight, + avg_factor=avg_factor, + reduction=reduction) * self.loss_weight diff --git a/projects/TR3D/tr3d/mink_resnet.py b/projects/TR3D/tr3d/mink_resnet.py new file mode 100755 index 0000000..85e0543 --- /dev/null +++ b/projects/TR3D/tr3d/mink_resnet.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +try: + import MinkowskiEngine as ME +except ImportError: + # Please follow getting_started.md to install MinkowskiEngine. + ME = SparseTensor = None + pass + +from mmdet3d.models.backbones import MinkResNet +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class TR3DMinkResNet(MinkResNet): + r"""Minkowski ResNet backbone. See `4D Spatio-Temporal ConvNets + `_ for more details. The onle difference + with MinkResNet is the `norm` and `num_planes` parameters. These classes + should be merged in the future. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input channels, 3 for RGB. + num_stages (int): Resnet stages. Defaults to 4. + pool (bool): Whether to add max pooling after first conv. + Defaults to True. + norm (str): Norm type ('instance' or 'batch') for stem layer. + Usually ResNet implies BatchNorm but for some reason + original MinkResNet implies InstanceNorm. Defaults to 'instance'. + num_planes (tuple[int]): Number of planes per block before + block.expansion. Defaults to (64, 128, 256, 512). + """ + + def __init__(self, + depth: int, + in_channels: int, + num_stages: int = 4, + pool: bool = True, + norm: str = 'instance', + num_planes: Tuple[int] = (64, 128, 256, 512)): + super(TR3DMinkResNet, self).__init__(depth, in_channels, num_stages, + pool) + block, stage_blocks = self.arch_settings[depth] + self.inplanes = 64 + norm_layer = ME.MinkowskiInstanceNorm if norm == 'instance' else \ + ME.MinkowskiBatchNorm + self.norm1 = norm_layer(self.inplanes) + + for i in range(len(stage_blocks)): + setattr( + self, f'layer{i + 1}', + self._make_layer( + block, num_planes[i], stage_blocks[i], stride=2)) diff --git a/projects/TR3D/tr3d/rotated_iou_loss.py b/projects/TR3D/tr3d/rotated_iou_loss.py new file mode 100755 index 0000000..bef7b84 --- /dev/null +++ b/projects/TR3D/tr3d/rotated_iou_loss.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +from mmcv.ops.diff_iou_rotated import box2corners, oriented_box_intersection_2d +from mmdet.models.losses.utils import weighted_loss +from torch import Tensor +from torch import nn as nn + +from mmdet3d.models import rotated_iou_3d_loss +from mmdet3d.registry import MODELS + + +def diff_diou_rotated_3d(box3d1: Tensor, box3d2: Tensor) -> Tensor: + """Calculate differentiable DIoU of rotated 3d boxes. + + Args: + box3d1 (Tensor): (B, N, 3+3+1) First box (x,y,z,w,h,l,alpha). + box3d2 (Tensor): (B, N, 3+3+1) Second box (x,y,z,w,h,l,alpha). + Returns: + Tensor: (B, N) DIoU. 
+ """ + box1 = box3d1[..., [0, 1, 3, 4, 6]] + box2 = box3d2[..., [0, 1, 3, 4, 6]] + corners1 = box2corners(box1) + corners2 = box2corners(box2) + intersection, _ = oriented_box_intersection_2d(corners1, corners2) + zmax1 = box3d1[..., 2] + box3d1[..., 5] * 0.5 + zmin1 = box3d1[..., 2] - box3d1[..., 5] * 0.5 + zmax2 = box3d2[..., 2] + box3d2[..., 5] * 0.5 + zmin2 = box3d2[..., 2] - box3d2[..., 5] * 0.5 + z_overlap = (torch.min(zmax1, zmax2) - + torch.max(zmin1, zmin2)).clamp_(min=0.) + intersection_3d = intersection * z_overlap + volume1 = box3d1[..., 3] * box3d1[..., 4] * box3d1[..., 5] + volume2 = box3d2[..., 3] * box3d2[..., 4] * box3d2[..., 5] + union_3d = volume1 + volume2 - intersection_3d + + x1_max = torch.max(corners1[..., 0], dim=2)[0] + x1_min = torch.min(corners1[..., 0], dim=2)[0] + y1_max = torch.max(corners1[..., 1], dim=2)[0] + y1_min = torch.min(corners1[..., 1], dim=2)[0] + + x2_max = torch.max(corners2[..., 0], dim=2)[0] + x2_min = torch.min(corners2[..., 0], dim=2)[0] + y2_max = torch.max(corners2[..., 1], dim=2)[0] + y2_min = torch.min(corners2[..., 1], dim=2)[0] + + x_max = torch.max(x1_max, x2_max) + x_min = torch.min(x1_min, x2_min) + y_max = torch.max(y1_max, y2_max) + y_min = torch.min(y1_min, y2_min) + + z_max = torch.max(zmax1, zmax2) + z_min = torch.min(zmin1, zmin2) + + r2 = ((box1[..., :3] - box2[..., :3])**2).sum(dim=-1) + c2 = (x_min - x_max)**2 + (y_min - y_max)**2 + (z_min - z_max)**2 + + return intersection_3d / union_3d - r2 / c2 + + +@weighted_loss +def rotated_diou_3d_loss(pred: Tensor, target: Tensor) -> Tensor: + """Calculate the DIoU loss (1-DIoU) of two sets of rotated bounding boxes. + Note that predictions and targets are one-to-one corresponded. + + Args: + pred (torch.Tensor): Bbox predictions with shape [N, 7] + (x, y, z, w, l, h, alpha). + target (torch.Tensor): Bbox targets (gt) with shape [N, 7] + (x, y, z, w, l, h, alpha). + + Returns: + torch.Tensor: IoU loss between predictions and targets. + """ + diou_loss = 1 - diff_diou_rotated_3d( + pred.unsqueeze(0), target.unsqueeze(0))[0] + return diou_loss + + +@MODELS.register_module() +class TR3DRotatedIoU3DLoss(nn.Module): + """Calculate the IoU loss (1-IoU) of rotated bounding boxes. The only + difference with original RotatedIoU3DLoss is the addition of DIoU mode. + These classes should be merged in the future. + + Args: + mode (str): 'iou' for intersection over union or 'diou' for + distance-iou loss. Defaults to 'iou'. + reduction (str): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. + """ + + def __init__(self, + mode: str = 'iou', + reduction: str = 'mean', + loss_weight: float = 1.0) -> None: + super(TR3DRotatedIoU3DLoss, self).__init__() + assert mode in ['iou', 'diou'] + self.loss = rotated_iou_3d_loss if mode == 'iou' \ + else rotated_diou_3d_loss + assert reduction in ['none', 'sum', 'mean'] + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + avg_factor: Optional[float] = None, + reduction_override: Optional[str] = None, + **kwargs) -> Tensor: + """Forward function of loss calculation. + + Args: + pred (Tensor): Bbox predictions with shape [..., 7] + (x, y, z, w, l, h, alpha). + target (Tensor): Bbox targets (gt) with shape [..., 7] + (x, y, z, w, l, h, alpha). + weight (Tensor, optional): Weight of loss. + Defaults to None. 
+ avg_factor (float, optional): Average factor that is used to + average the loss. Defaults to None. + reduction_override (str, optional): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to None. + + Returns: + Tensor: IoU loss between predictions and targets. + """ + if weight is not None and not torch.any(weight > 0): + return pred.sum() * weight.sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if weight is not None and weight.dim() > 1: + weight = weight.mean(-1) + loss = self.loss_weight * self.loss( + pred, + target, + weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + + return loss diff --git a/projects/TR3D/tr3d/tr3d_head.py b/projects/TR3D/tr3d/tr3d_head.py new file mode 100755 index 0000000..48d3b33 --- /dev/null +++ b/projects/TR3D/tr3d/tr3d_head.py @@ -0,0 +1,472 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from https://github.com/SamsungLabs/tr3d/blob/master/mmdet3d/models/dense_heads/tr3d_head.py # noqa +from typing import List, Optional, Tuple + +try: + import MinkowskiEngine as ME + from MinkowskiEngine import SparseTensor +except ImportError: + # Please follow getting_started.md to install MinkowskiEngine. + ME = SparseTensor = None + pass + +import torch +from mmcv.ops import nms3d, nms3d_normal +from mmengine.model import bias_init_with_prob +from mmengine.structures import InstanceData +from torch import Tensor, nn + +from mmdet3d.models import Base3DDenseHead +from mmdet3d.registry import MODELS +from mmdet3d.structures import BaseInstance3DBoxes +from mmdet3d.utils import InstanceList, OptInstanceList + + +@MODELS.register_module() +class TR3DHead(Base3DDenseHead): + r"""Bbox head of `TR3D `_. + + Args: + in_channels (int): Number of channels in input tensors. + num_reg_outs (int): Number of regression layer channels. + voxel_size (float): Voxel size in meters. + pts_center_threshold (int): Box to location assigner parameter. + After feature level for the box is determined, assigner selects + pts_center_threshold locations closest to the box center. + bbox_loss (dict): Config of bbox loss. Defaults to + dict(type='AxisAlignedIoULoss', mode='diou', reduction=None). + cls_loss (dict): Config of classification loss. Defaults to + dict = dict(type='mmdet.FocalLoss', reduction=None). + train_cfg (dict, optional): Config for train stage. Defaults to None. + test_cfg (dict, optional): Config for test stage. Defaults to None. + init_cfg (dict, optional): Config for weight initialization. + Defaults to None. 
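+
+    Example:
+        An illustrative config for building the head through the MODELS
+        registry. The ``label2level`` list below is the ScanNet one from
+        this project's configs; the remaining numbers are placeholders and
+        not necessarily the values used by the base ``tr3d.py`` config::
+
+            bbox_head = dict(
+                type='TR3DHead',
+                in_channels=128,
+                num_reg_outs=6,
+                voxel_size=.01,
+                pts_center_threshold=6,
+                label2level=[0, 1, 0, 1, 1, 0, 1, 1, 0,
+                             1, 1, 1, 0, 0, 0, 0, 1, 0])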
+ """ + + def __init__(self, + in_channels: int, + num_reg_outs: int, + voxel_size: int, + pts_center_threshold: int, + label2level: Tuple[int], + bbox_loss: dict = dict( + type='TR3DAxisAlignedIoULoss', + mode='diou', + reduction='none'), + cls_loss: dict = dict( + type='mmdet.FocalLoss', reduction='none'), + train_cfg: Optional[dict] = None, + test_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super(TR3DHead, self).__init__(init_cfg) + if ME is None: + raise ImportError( + 'Please follow `getting_started.md` to install MinkowskiEngine.`' # noqa: E501 + ) + self.voxel_size = voxel_size + self.pts_center_threshold = pts_center_threshold + self.label2level = label2level + self.bbox_loss = MODELS.build(bbox_loss) + self.cls_loss = MODELS.build(cls_loss) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self._init_layers(len(self.label2level), in_channels, num_reg_outs) + + def _init_layers(self, num_classes: int, in_channels: int, + num_reg_outs: int): + """Initialize layers. + + Args: + in_channels (int): Number of channels in input tensors. + num_reg_outs (int): Number of regression layer channels. + num_classes (int): Number of classes. + """ + self.conv_reg = ME.MinkowskiConvolution( + in_channels, num_reg_outs, kernel_size=1, bias=True, dimension=3) + self.conv_cls = ME.MinkowskiConvolution( + in_channels, num_classes, kernel_size=1, bias=True, dimension=3) + + def init_weights(self): + """Initialize weights.""" + nn.init.normal_(self.conv_reg.kernel, std=.01) + nn.init.normal_(self.conv_cls.kernel, std=.01) + nn.init.constant_(self.conv_cls.bias, bias_init_with_prob(.01)) + + def _forward_single(self, x: SparseTensor) -> Tuple[Tensor, ...]: + """Forward pass per level. + + Args: + x (SparseTensor): Per level neck output tensor. + + Returns: + tuple[Tensor]: Per level head predictions. + """ + reg_final = self.conv_reg(x).features + reg_distance = torch.exp(reg_final[:, 3:6]) + reg_angle = reg_final[:, 6:] + bbox_pred = torch.cat((reg_final[:, :3], reg_distance, reg_angle), + dim=1) + cls_pred = self.conv_cls(x).features + + bbox_preds, cls_preds, points = [], [], [] + for permutation in x.decomposition_permutations: + bbox_preds.append(bbox_pred[permutation]) + cls_preds.append(cls_pred[permutation]) + points.append(x.coordinates[permutation][:, 1:] * self.voxel_size) + + return bbox_preds, cls_preds, points + + def forward(self, x: List[Tensor]) -> Tuple[List[Tensor], ...]: + """Forward pass. + + Args: + x (list[Tensor]): Features from the backbone. + + Returns: + Tuple[List[Tensor], ...]: Predictions of the head. + """ + bbox_preds, cls_preds, points = [], [], [] + for i in range(len(x)): + bbox_pred, cls_pred, point = self._forward_single(x[i]) + bbox_preds.append(bbox_pred) + cls_preds.append(cls_pred) + points.append(point) + return bbox_preds, cls_preds, points + + def _loss_by_feat_single(self, bbox_preds: List[Tensor], + cls_preds: List[Tensor], points: List[Tensor], + gt_bboxes: BaseInstance3DBoxes, gt_labels: Tensor, + input_meta: dict) -> Tuple[Tensor, ...]: + """Loss function of single sample. + + Args: + bbox_preds (list[Tensor]): Bbox predictions for all levels. + cls_preds (list[Tensor]): Classification predictions for all + levels. + points (list[Tensor]): Final location coordinates for all levels. + gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes. + gt_labels (Tensor): Ground truth labels. + input_meta (dict): Scene meta info. 
+ + Returns: + tuple[Tensor, ...]: Bbox and classification loss + values and a boolean mask of assigned points. + """ + num_classes = cls_preds[0].shape[1] + bbox_targets, cls_targets = self.get_targets(points, gt_bboxes, + gt_labels, num_classes) + bbox_preds = torch.cat(bbox_preds) + cls_preds = torch.cat(cls_preds) + points = torch.cat(points) + + # cls loss + cls_loss = self.cls_loss(cls_preds, cls_targets) + + # bbox loss + pos_mask = cls_targets < num_classes + pos_bbox_preds = bbox_preds[pos_mask] + if pos_mask.sum() > 0: + pos_points = points[pos_mask] + pos_bbox_preds = bbox_preds[pos_mask] + pos_bbox_targets = bbox_targets[pos_mask] + bbox_loss = self.bbox_loss( + self._bbox_to_loss( + self._bbox_pred_to_bbox(pos_points, pos_bbox_preds)), + self._bbox_to_loss(pos_bbox_targets)) + else: + bbox_loss = pos_bbox_preds + return bbox_loss, cls_loss, pos_mask + + def loss_by_feat(self, + bbox_preds: List[List[Tensor]], + cls_preds: List[List[Tensor]], + points: List[List[Tensor]], + batch_gt_instances_3d: InstanceList, + batch_input_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None, + **kwargs) -> dict: + """Loss function about feature. + + Args: + bbox_preds (list[list[Tensor]]): Bbox predictions for all scenes. + The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + cls_preds (list[list[Tensor]]): Classification predictions for all + scenes. The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + points (list[list[Tensor]]): Final location coordinates for all + scenes. The first list contains predictions from different + levels. The second list contains predictions in a mini-batch. + batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of + gt_instance_3d. It usually includes ``bboxes_3d``、` + `labels_3d``、``depths``、``centers_2d`` and attributes. + batch_input_metas (list[dict]): Meta information of each image, + e.g., image size, scaling factor, etc. + + Returns: + dict: Bbox, and classification losses. + """ + bbox_losses, cls_losses, pos_masks = [], [], [] + for i in range(len(batch_input_metas)): + bbox_loss, cls_loss, pos_mask = self._loss_by_feat_single( + bbox_preds=[x[i] for x in bbox_preds], + cls_preds=[x[i] for x in cls_preds], + points=[x[i] for x in points], + input_meta=batch_input_metas[i], + gt_bboxes=batch_gt_instances_3d[i].bboxes_3d, + gt_labels=batch_gt_instances_3d[i].labels_3d) + if len(bbox_loss) > 0: + bbox_losses.append(bbox_loss) + cls_losses.append(cls_loss) + pos_masks.append(pos_mask) + return dict( + bbox_loss=torch.mean(torch.cat(bbox_losses)), + cls_loss=torch.sum(torch.cat(cls_losses)) / + torch.sum(torch.cat(pos_masks))) + + def _predict_by_feat_single(self, bbox_preds: List[Tensor], + cls_preds: List[Tensor], points: List[Tensor], + input_meta: dict) -> InstanceData: + """Generate boxes for single sample. + + Args: + center_preds (list[Tensor]): Centerness predictions for all levels. + bbox_preds (list[Tensor]): Bbox predictions for all levels. + cls_preds (list[Tensor]): Classification predictions for all + levels. + points (list[Tensor]): Final location coordinates for all levels. + input_meta (dict): Scene meta info. + + Returns: + InstanceData: Predicted bounding boxes, scores and labels. 
+ """ + scores = torch.cat(cls_preds).sigmoid() + bbox_preds = torch.cat(bbox_preds) + points = torch.cat(points) + max_scores, _ = scores.max(dim=1) + + if len(scores) > self.test_cfg.nms_pre > 0: + _, ids = max_scores.topk(self.test_cfg.nms_pre) + bbox_preds = bbox_preds[ids] + scores = scores[ids] + points = points[ids] + + bboxes = self._bbox_pred_to_bbox(points, bbox_preds) + bboxes, scores, labels = self._single_scene_multiclass_nms( + bboxes, scores, input_meta) + + bboxes = input_meta['box_type_3d']( + bboxes, + box_dim=bboxes.shape[1], + with_yaw=bboxes.shape[1] == 7, + origin=(.5, .5, .5)) + + results = InstanceData() + results.bboxes_3d = bboxes + results.scores_3d = scores + results.labels_3d = labels + return results + + def predict_by_feat(self, bbox_preds: List[List[Tensor]], cls_preds, + points: List[List[Tensor]], + batch_input_metas: List[dict], + **kwargs) -> List[InstanceData]: + """Generate boxes for all scenes. + + Args: + bbox_preds (list[list[Tensor]]): Bbox predictions for all scenes. + cls_preds (list[list[Tensor]]): Classification predictions for all + scenes. + points (list[list[Tensor]]): Final location coordinates for all + scenes. + batch_input_metas (list[dict]): Meta infos for all scenes. + + Returns: + list[InstanceData]: Predicted bboxes, scores, and labels for + all scenes. + """ + results = [] + for i in range(len(batch_input_metas)): + result = self._predict_by_feat_single( + bbox_preds=[x[i] for x in bbox_preds], + cls_preds=[x[i] for x in cls_preds], + points=[x[i] for x in points], + input_meta=batch_input_metas[i]) + results.append(result) + return results + + @staticmethod + def _bbox_to_loss(bbox): + """Transform box to the axis-aligned or rotated iou loss format. + + Args: + bbox (Tensor): 3D box of shape (N, 6) or (N, 7). + + Returns: + Tensor: Transformed 3D box of shape (N, 6) or (N, 7). + """ + # rotated iou loss accepts (x, y, z, w, h, l, heading) + if bbox.shape[-1] != 6: + return bbox + + # axis-aligned case: x, y, z, w, h, l -> x1, y1, z1, x2, y2, z2 + return torch.stack( + (bbox[..., 0] - bbox[..., 3] / 2, bbox[..., 1] - bbox[..., 4] / 2, + bbox[..., 2] - bbox[..., 5] / 2, bbox[..., 0] + bbox[..., 3] / 2, + bbox[..., 1] + bbox[..., 4] / 2, bbox[..., 2] + bbox[..., 5] / 2), + dim=-1) + + @staticmethod + def _bbox_pred_to_bbox(points, bbox_pred): + """Transform predicted bbox parameters to bbox. + + Args: + points (Tensor): Final locations of shape (N, 3) + bbox_pred (Tensor): Predicted bbox parameters of shape (N, 6) + or (N, 8). + Returns: + Tensor: Transformed 3D box of shape (N, 6) or (N, 7). 
+ """ + if bbox_pred.shape[0] == 0: + return bbox_pred + + x_center = points[:, 0] + bbox_pred[:, 0] + y_center = points[:, 1] + bbox_pred[:, 1] + z_center = points[:, 2] + bbox_pred[:, 2] + base_bbox = torch.stack([ + x_center, y_center, z_center, bbox_pred[:, 3], bbox_pred[:, 4], + bbox_pred[:, 5] + ], -1) + + # axis-aligned case + if bbox_pred.shape[1] == 6: + return base_bbox + + # rotated case: ..., sin(2a)ln(q), cos(2a)ln(q) + scale = bbox_pred[:, 3] + bbox_pred[:, 4] + q = torch.exp( + torch.sqrt( + torch.pow(bbox_pred[:, 6], 2) + torch.pow(bbox_pred[:, 7], 2))) + alpha = 0.5 * torch.atan2(bbox_pred[:, 6], bbox_pred[:, 7]) + return torch.stack( + (x_center, y_center, z_center, scale / (1 + q), scale / + (1 + q) * q, bbox_pred[:, 5] + bbox_pred[:, 4], alpha), + dim=-1) + + @torch.no_grad() + def get_targets(self, points: Tensor, gt_bboxes: BaseInstance3DBoxes, + gt_labels: Tensor, num_classes: int) -> Tuple[Tensor, ...]: + """Compute targets for final locations for a single scene. + + Args: + points (list[Tensor]): Final locations for all levels. + gt_bboxes (BaseInstance3DBoxes): Ground truth boxes. + gt_labels (Tensor): Ground truth labels. + num_classes (int): Number of classes. + + Returns: + tuple[Tensor, ...]: Bbox and classification targets for all + locations. + """ + float_max = points[0].new_tensor(1e8) + levels = torch.cat([ + points[i].new_tensor(i, dtype=torch.long).expand(len(points[i])) + for i in range(len(points)) + ]) + points = torch.cat(points) + n_points = len(points) + n_boxes = len(gt_bboxes) + + if len(gt_labels) == 0: + return points.new_tensor([]), \ + gt_labels.new_full((n_points,), num_classes) + + boxes = torch.cat((gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), + dim=1) + boxes = boxes.to(points.device).expand(n_points, n_boxes, 7) + points = points.unsqueeze(1).expand(n_points, n_boxes, 3) + + # condition 1: fix level for label + label2level = gt_labels.new_tensor(self.label2level) + label_levels = label2level[gt_labels].unsqueeze(0).expand( + n_points, n_boxes) + point_levels = torch.unsqueeze(levels, 1).expand(n_points, n_boxes) + level_condition = label_levels == point_levels + + # condition 2: keep topk location per box by center distance + center = boxes[..., :3] + center_distances = torch.sum(torch.pow(center - points, 2), dim=-1) + center_distances = torch.where(level_condition, center_distances, + float_max) + topk_distances = torch.topk( + center_distances, + min(self.pts_center_threshold + 1, len(center_distances)), + largest=False, + dim=0).values[-1] + topk_condition = center_distances < topk_distances.unsqueeze(0) + + # condition 3: min center distance to box per point + center_distances = torch.where(topk_condition, center_distances, + float_max) + min_values, min_ids = center_distances.min(dim=1) + min_inds = torch.where(min_values < float_max, min_ids, -1) + + bbox_targets = boxes[0][min_inds] + if not gt_bboxes.with_yaw: + bbox_targets = bbox_targets[:, :-1] + cls_targets = torch.where(min_inds >= 0, gt_labels[min_inds], + num_classes) + return bbox_targets, cls_targets + + def _single_scene_multiclass_nms(self, bboxes: Tensor, scores: Tensor, + input_meta: dict) -> Tuple[Tensor, ...]: + """Multi-class nms for a single scene. + + Args: + bboxes (Tensor): Predicted boxes of shape (N_boxes, 6) or + (N_boxes, 7). + scores (Tensor): Predicted scores of shape (N_boxes, N_classes). + input_meta (dict): Scene meta data. + + Returns: + tuple[Tensor, ...]: Predicted bboxes, scores and labels. 
+ """ + num_classes = scores.shape[1] + with_yaw = bboxes.shape[1] == 7 + nms_bboxes, nms_scores, nms_labels = [], [], [] + for i in range(num_classes): + ids = scores[:, i] > self.test_cfg.score_thr + if not ids.any(): + continue + + class_scores = scores[ids, i] + class_bboxes = bboxes[ids] + if with_yaw: + nms_function = nms3d + else: + class_bboxes = torch.cat( + (class_bboxes, torch.zeros_like(class_bboxes[:, :1])), + dim=1) + nms_function = nms3d_normal + + nms_ids = nms_function(class_bboxes, class_scores, + self.test_cfg.iou_thr) + nms_bboxes.append(class_bboxes[nms_ids]) + nms_scores.append(class_scores[nms_ids]) + nms_labels.append( + bboxes.new_full( + class_scores[nms_ids].shape, i, dtype=torch.long)) + + if len(nms_bboxes): + nms_bboxes = torch.cat(nms_bboxes, dim=0) + nms_scores = torch.cat(nms_scores, dim=0) + nms_labels = torch.cat(nms_labels, dim=0) + else: + nms_bboxes = bboxes.new_zeros((0, bboxes.shape[1])) + nms_scores = bboxes.new_zeros((0, )) + nms_labels = bboxes.new_zeros((0, )) + + if not with_yaw: + nms_bboxes = nms_bboxes[:, :6] + + return nms_bboxes, nms_scores, nms_labels diff --git a/projects/TR3D/tr3d/tr3d_neck.py b/projects/TR3D/tr3d/tr3d_neck.py new file mode 100755 index 0000000..41e54b0 --- /dev/null +++ b/projects/TR3D/tr3d/tr3d_neck.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from https://github.com/SamsungLabs/tr3d/blob/master/mmdet3d/models/necks/tr3d_neck.py # noqa +from typing import List, Tuple + +try: + import MinkowskiEngine as ME + from MinkowskiEngine import SparseTensor +except ImportError: + # Please follow getting_started.md to install MinkowskiEngine. + ME = SparseTensor = None + pass + +from mmengine.model import BaseModule +from torch import nn + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class TR3DNeck(BaseModule): + r"""Neck of `TR3D `_. + + Args: + in_channels (tuple[int]): Number of channels in input tensors. + out_channels (int): Number of channels in output tensors. + """ + + def __init__(self, in_channels: Tuple[int], out_channels: int): + super(TR3DNeck, self).__init__() + self._init_layers(in_channels[1:], out_channels) + + def _init_layers(self, in_channels: Tuple[int], out_channels: int): + """Initialize layers. + + Args: + in_channels (tuple[int]): Number of channels in input tensors. + out_channels (int): Number of channels in output tensors. + """ + for i in range(len(in_channels)): + if i > 0: + self.add_module( + f'up_block_{i}', + self._make_block(in_channels[i], in_channels[i - 1], True, + 2)) + if i < len(in_channels) - 1: + self.add_module( + f'lateral_block_{i}', + self._make_block(in_channels[i], in_channels[i])) + self.add_module(f'out_block_{i}', + self._make_block(in_channels[i], out_channels)) + + def init_weights(self): + """Initialize weights.""" + for m in self.modules(): + if isinstance(m, ME.MinkowskiConvolution): + ME.utils.kaiming_normal_( + m.kernel, mode='fan_out', nonlinearity='relu') + + if isinstance(m, ME.MinkowskiBatchNorm): + nn.init.constant_(m.bn.weight, 1) + nn.init.constant_(m.bn.bias, 0) + + def forward(self, x: List[SparseTensor]) -> List[SparseTensor]: + """Forward pass. + + Args: + x (list[SparseTensor]): Features from the backbone. + + Returns: + List[Tensor]: Output features from the neck. 
+ """ + x = x[1:] + outs = [] + inputs = x + x = inputs[-1] + for i in range(len(inputs) - 1, -1, -1): + if i < len(inputs) - 1: + x = self.__getattr__(f'up_block_{i + 1}')(x) + x = inputs[i] + x + x = self.__getattr__(f'lateral_block_{i}')(x) + out = self.__getattr__(f'out_block_{i}')(x) + outs.append(out) + return outs[::-1] + + @staticmethod + def _make_block(in_channels: int, + out_channels: int, + generative: bool = False, + stride: int = 1) -> nn.Module: + """Construct Conv-Norm-Act block. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + generative (bool): Use generative convolution if True. + Defaults to False. + stride (int): Stride of the convolution. Defaults to 1. + + Returns: + torch.nn.Module: With corresponding layers. + """ + conv = ME.MinkowskiGenerativeConvolutionTranspose if generative \ + else ME.MinkowskiConvolution + return nn.Sequential( + conv( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + dimension=3), ME.MinkowskiBatchNorm(out_channels), + ME.MinkowskiReLU(inplace=True)) diff --git a/projects/TR3D/tr3d/transforms_3d.py b/projects/TR3D/tr3d/transforms_3d.py new file mode 100755 index 0000000..e5f1924 --- /dev/null +++ b/projects/TR3D/tr3d/transforms_3d.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple, Union + +import numpy as np + +from mmdet3d.datasets import PointSample +from mmdet3d.registry import TRANSFORMS +from mmdet3d.structures.points import BasePoints + + +@TRANSFORMS.register_module() +class TR3DPointSample(PointSample): + """The only difference with PointSample is the support of float num_points + parameter. + + In this case we sample random fraction of points from num_points to 100% + points. These classes should be merged in the future. + """ + + def _points_random_sampling( + self, + points: BasePoints, + num_samples: Union[int, float], + sample_range: Optional[float] = None, + replace: bool = False, + return_choices: bool = False + ) -> Union[Tuple[BasePoints, np.ndarray], BasePoints]: + """Points random sampling. + + Sample points to a certain number. + + Args: + points (:obj:`BasePoints`): 3D Points. + num_samples (int): Number of samples to be sampled. + sample_range (float, optional): Indicating the range where the + points will be sampled. Defaults to None. + replace (bool): Sampling with or without replacement. + Defaults to False. + return_choices (bool): Whether return choice. Defaults to False. + + Returns: + tuple[:obj:`BasePoints`, np.ndarray] | :obj:`BasePoints`: + + - points (:obj:`BasePoints`): 3D Points. + - choices (np.ndarray, optional): The generated random samples. + """ + if isinstance(num_samples, float): + assert num_samples < 1 + num_samples = int( + np.random.uniform(self.num_points, 1.) 
* points.shape[0]) + + if not replace: + replace = (points.shape[0] < num_samples) + point_range = range(len(points)) + if sample_range is not None and not replace: + # Only sampling the near points when len(points) >= num_samples + dist = np.linalg.norm(points.coord.numpy(), axis=1) + far_inds = np.where(dist >= sample_range)[0] + near_inds = np.where(dist < sample_range)[0] + # in case there are too many far points + if len(far_inds) > num_samples: + far_inds = np.random.choice( + far_inds, num_samples, replace=False) + point_range = near_inds + num_samples -= len(far_inds) + choices = np.random.choice(point_range, num_samples, replace=replace) + if sample_range is not None and not replace: + choices = np.concatenate((far_inds, choices)) + # Shuffle points after sampling + np.random.shuffle(choices) + if return_choices: + return points[choices], choices + else: + return points[choices] diff --git a/projects/example_project/README.md b/projects/example_project/README.md new file mode 100755 index 0000000..d23c8e1 --- /dev/null +++ b/projects/example_project/README.md @@ -0,0 +1,115 @@ +# Dummy ResNet Wrapper + +This is an example README for community `projects/`. We have provided detailed explanations for each field in the form of html comments, which are visible when you read the source of this README file. If you wish to submit your project to our main repository, then all the fields in this README are mandatory for others to understand what you have achieved in this implementation. + +## Description + + + +This project implements a dummy ResNet wrapper, which literally does nothing new but prints "hello world" during initialization. + +## Usage + + + +### Training commands + +In MMDet3D's root directory, run the following command to train the model: + +```bash +python tools/train.py projects/example_project/configs/fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py +``` + +### Testing commands + +In MMDet3D's root directory, run the following command to test the model: + +```bash +python tools/test.py projects/example_project/configs/fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py ${CHECKPOINT_PATH} +``` + +## Results + + + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | mAP | NDS | Download | +| :--------------------------------------------------------------------------------------------------------------: | :-----: | :------: | :------------: | :--: | :--: | :----------------------: | +| [FCOS3D_dummy](projects/example_project/configs/fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py) | 1x | 8.69 | | 29.8 | 37.7 | [model](<>) \| [log](<>) | + +## Citation + + + +```latex +@inproceedings{wang2021fcos3d, + title={{FCOS3D: Fully} Convolutional One-Stage Monocular 3D Object Detection}, + author={Wang, Tai and Zhu, Xinge and Pang, Jiangmiao and Lin, Dahua}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops}, + year={2021} +} +# For the original 2D version +@inproceedings{tian2019fcos, + title = {{FCOS: Fully} Convolutional One-Stage Object Detection}, + author = {Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + year = {2019} +} +``` + +## Checklist + + + +- [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. 
+ + - [ ] Finish the code + + + + - [ ] Basic docstrings & proper citation + + + + - [ ] Test-time correctness + + + + - [ ] A full README + + + +- [ ] Milestone 2: Indicates a successful model implementation. + + - [ ] Training-time correctness + + + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Type hints and docstrings + + + + - [ ] Unit tests + + + + - [ ] Code polishing + + + + - [ ] Metafile.yml + + + +- [ ] Move your modules into the core package following the codebase's file hierarchy structure. + + + +- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. diff --git a/projects/example_project/configs/fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py b/projects/example_project/configs/fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py new file mode 100755 index 0000000..4f19c0c --- /dev/null +++ b/projects/example_project/configs/fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py @@ -0,0 +1,7 @@ +_base_ = [ + '../../../configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py' # noqa +] + +custom_imports = dict(imports=['projects.example_project.dummy']) + +_base_.model.backbone.type = 'DummyResNet' diff --git a/projects/example_project/dummy/__init__.py b/projects/example_project/dummy/__init__.py new file mode 100755 index 0000000..70df789 --- /dev/null +++ b/projects/example_project/dummy/__init__.py @@ -0,0 +1,3 @@ +from .dummy_resnet import DummyResNet + +__all__ = ['DummyResNet'] diff --git a/projects/example_project/dummy/dummy_resnet.py b/projects/example_project/dummy/dummy_resnet.py new file mode 100755 index 0000000..63b5fa1 --- /dev/null +++ b/projects/example_project/dummy/dummy_resnet.py @@ -0,0 +1,15 @@ +from mmdet.models.backbones import ResNet + +from mmdet3d.registry import MODELS + + +@MODELS.register_module() +class DummyResNet(ResNet): + """Implements a dummy ResNet wrapper for demonstration purpose. + Args: + **kwargs: All the arguments are passed to the parent class. 
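+
+    Example:
+        A minimal instantiation, assuming it is run from the MMDetection3D
+        root so that ``projects`` is importable; any ``ResNet`` keyword
+        argument is forwarded unchanged::
+
+            from projects.example_project.dummy import DummyResNet
+            model = DummyResNet(depth=50)  # prints "Hello world!"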
+ """ + + def __init__(self, **kwargs) -> None: + print('Hello world!') + super().__init__(**kwargs) diff --git a/requirements.txt b/requirements.txt new file mode 100755 index 0000000..6981bd7 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +-r requirements/build.txt +-r requirements/optional.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/requirements/build.txt b/requirements/build.txt new file mode 100755 index 0000000..e69de29 diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100755 index 0000000..5adbb4d --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,8 @@ +docutils==0.16.0 +m2r==0.2.1 +mistune==0.8.4 +myst-parser +-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==4.0.2 +sphinx-copybutton +sphinx_markdown_tables diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt new file mode 100755 index 0000000..7833d61 --- /dev/null +++ b/requirements/mminstall.txt @@ -0,0 +1,3 @@ +mmcv>=2.0.0rc4,<2.1.0 +mmdet>=3.0.0,<3.1.0 +mmengine>=0.7.1,<1.0.0 diff --git a/requirements/optional.txt b/requirements/optional.txt new file mode 100755 index 0000000..099ad8a --- /dev/null +++ b/requirements/optional.txt @@ -0,0 +1,3 @@ +black==20.8b1 # be compatible with typing-extensions 3.7.4 +typing-extensions # required by tensorflow<=2.6 +waymo-open-dataset-tf-2-6-0 # requires python>=3.7 diff --git a/requirements/readthedocs.txt b/requirements/readthedocs.txt new file mode 100755 index 0000000..4a33470 --- /dev/null +++ b/requirements/readthedocs.txt @@ -0,0 +1,5 @@ +mmcv>=2.0.0rc4 +mmdet>=3.0.0 +mmengine>=0.7.1 +torch +torchvision diff --git a/requirements/runtime.txt b/requirements/runtime.txt new file mode 100755 index 0000000..705f7f4 --- /dev/null +++ b/requirements/runtime.txt @@ -0,0 +1,11 @@ +lyft_dataset_sdk +networkx>=2.5 +numba # you should install numba==0.53.0 if your environment is cuda-9.0 +numpy +nuscenes-devkit +open3d +plyfile +scikit-image +# by default we also use tensorboard to log results +tensorboard +trimesh diff --git a/requirements/tests.txt b/requirements/tests.txt new file mode 100755 index 0000000..563fc46 --- /dev/null +++ b/requirements/tests.txt @@ -0,0 +1,14 @@ +asynctest +codecov +flake8 +interrogate +isort +# Note: used for kwarray.group_items, this may be ported to mmcv in the future. 
+kwarray +parameterized +pytest +pytest-cov +pytest-runner +ubelt +xdoctest >= 0.10.0 +yapf diff --git a/resources/mmdet3d_outdoor_demo.gif b/resources/mmdet3d_outdoor_demo.gif new file mode 100755 index 0000000000000000000000000000000000000000..1c7541a98612a38d3bc34cd2c61ad5ea4f7c6bb6 GIT binary patch literal 830279
zI++hH%a6nP)~TaYZ3}bxrb3XYDq;Tj06K+ek!zZKw%|>F5UfK|9y%93yC$~ICMdPg zr6E8qe8w1lL2$ioPi`rN>)8&*NV_;(&tjO%cd^0?o|4KhoVV*>V=fd)T+Vm}QYzW=Jf933nIdCq z88wt~wEseW%!0efNz1(>g@C>>^uboHQ#TpPb7>6qe09e(69nQRY+TH+=2;E>4!_f9YzF`q1myKl}&HfA&!dLSGp3FPs`eZ2$Y~-}d>2 zaGDY@{ZC%LMGj?r`)F&zX8Wsf1%&>ubL0xjxax6}ktg=_qnehbr&;%zqc7aP4VMtB zdI}x}3fPswQ~;mim#XrEKsisbW0r=5doJ?JUe zU4)*3o>_4Gfd1zY+#Gu5g~4PZa<0gg8Jt%s)MMl1rQEsQ;b#wwh6X zR;|0})Ce=66Fq~mAtx0!5y#0spB|uwOVPtm$);myiy?XTU;g2IpW*YMGkX|q6%*(^#Gn_D1~adIT74URdab&*@uMS*^7HG?d?Ut{xvD1f zK>joIpLS2vZszYnXV2+>`Ol$}kpBa>4jhFiBzxZlCWYO9I*}E48{j=O|7Wmj>-Duj zd!ha>z<-G7^IU@Y2Yntg?{@5Y>^hc)HgC;Tm?_|Sj=kbm&V{`&QcTv}+m?#?-*3_^ zzwZuqiYqJEt;W7PxL@U(I^_}5x-v=0@+C#5Kdxqw z=U20OtDNJQLyykww7IKKolqsC{)_3$XVu#*R4pzr?b!XNOZ^wOc7-x; z;9*5kvHWMj`I!n+bVafGuL94c0fji2f1)qcu2zbJ{O9RY(3i=aa`%Gy_YQu2m^1&#w1U^z%%xYgH17)@6szNRWT{oS0SS zT13&G0RAgeyR-L`1nmFWX`oxS{;&TF`Y%Gj@SS91F1?35TpzK{_M2l(w`L$ZlVJVt zxp@Z<<)HkRw&TR1VpRX!)zs;LrzcO>@xw$kModHh_wsRh{u4XYdU8!O^A0zJy{Ua5 zkh1aM^)*ZQ5Bb%CFAv|!+mQb=ZqaXAzQ@ks^~dA^I3m#2F~flg?Y|v%`otWL;ppGZo%40i+}y6QMc0=r{M~S%u|ifCqIgy)+W)1X=9355PGP`=rSSk9vmQ3+5HzpbghXL-n>oxzxjvy-_bk5H}2*iNghl6@9&hYXdamerhje> zzi{uEYH}3Xe@a02LNmRqtHsD4Q_%cF;=Lyfx~j^*Z*mg8$;5yH5^Vy$N%E;Ntp7cu z`qcxkoivpHireRPpU|6U0{GAMTJ(*zQzUhtT}=hIK%}5ZkTKvk@mt`NRx+c)wO5U< zI@ph10Y^LMC*kII0mt*Uax z9lrm+7p4sA0q8#+JiNj7VGs++?Rb$yyQOof%4OtGzG7Tcl+e!2M=QP2`9Ef*>Va!8 zOF!|Q;9vG-m2BXoAH>N3L1;)m;=4B082Z0<9zK6A8O{GGuVl|Z*%Wkl#O9wIBOk>= z`z-C-X*igFF#an>(zvqt<-H5A#F{FT*6$&I0D2nwFKW>x1}+Hb|KjAap=?ek=%M=O zx>?dr5*hk0{Yh5WDx!? zwhwJeO7GKk>vsJx`I;y{o=mIpA5Tx&?oeq0m||-zRL^;Xgr+Kho5h(zh|>(LS;HV> z7n>=y>jEBxxbyQM-D+=o4E-0hO2{Nc=O69@*PLGp===lyUpD}LG9=6G{!D54sQFfvV#`;hDvAqN4AE!C=J_Ga5_0fYi_+tC%R{lYy?!kY1 z2Vk2!4FByN3$UcD^GvR-1+N)~|Ga+S)nD45Y(o~^+LcuJ=3`4W(*!<85f~7`y@y@Q zpGLw~ijBZ33=5hmC_?Xc-6SQqROX!?*~S>@eBD!Zk_`FJiv=wB0#W{p8Upv3WWax% z-$4E$d5#DOOkBP@`Dq-x{|n7O>~;p6`^O*9(zh(0{|o)6gYT3@8_??=0quXkeBr*Q z>WsV)+J724`HLU~{im{UjTx71#g^F7e@hI*`4avuG4{4Uv<0?(HvVH%68wYSTWC$q zx16r_@<`f#QAST#EoF7N>J z+4A@73FJUAK-cW~N9L(P;g$kA@L$iarr%ilCZ8Fs7FiAFnQd*@LM=rvrZGP zL-v1R=k}RUa`xO#n`s052kXB; z^M8h`&IA6_OEbf>^PdOr0>HaZ;{*l#U(Hv=&#>qzomP10|FT`|19~6&FZ7uMO5lG7 zs7xT0ClL6&XPyAv_0|NWGlwl{UHzL~8iKe*)PZX|@+?S??*Fgm3jR&5d{W$)G*yFIEZ@j@5eV#StN1Cts&^iD4zt-FJ zt2lS00AgLdX6bpnZzJO=cCk*hQ5Q2$A_9jRwDQh9o-PZwer6E+zA zFz~Z@+2O`;!ApmWZ)|r4|JPFf^E7ffxtM*?G6o`SyYsV{m9UOJ$f1zr3(6L!QX=^{ zs?ZQJ#J)S|KP^5VpY;BLIAU~B2b0wxRye(f`%J$Zm7-p2aB`i{d7&71|CLx`RK zTo|Y{Sn6CcM*uedrFIze7TW*iP??#w`vl-NP8OYO;UHl5UxcCy3e!KM3yKQ(j{_4n zAqJi77%`vX;g5$4A7Ib=iKYJcnT|%ZZz)BG8-l*_%faG5(tJhYD#c@|{|ldC@$dsI zC?LWlfCz(u_W#lkW<9^OFB-=s7mgU-4Ro`M);kM6&hI^12v+W2_Uwe9HEJh3?a!SE#~4 zK-nKnSH=8a1dDceBMUtApYB`e`)Y-U{AVg$mO=jmYivFx${T&c8aBFK&)jB2+hTiX z=?RD631cKKB}YO2Pt;0z;Pq^@TK;=Nxcn;}_>uz0GCX0PVs}Zvgd_V2iFW}S5??_A z|NA*qP<gE#MItv{py%UMElM+MA3Kx_5b#NO`};( zs*jU0OacEHd<^+Ni!4M}ZCcjIip_uBXrZX1I7x;n9zS(SN-GT zGf6cOJ#;Gt{wvFLcU*yY-+Bh*KcD+edIS1@3A1{lA}opg!&&22)hEdx#uF$f;Eplt zpQ-g*A#O|9M4zYMjKG>=mnUq9YWvZL!8LT!iw~IptByBXTs0oUIhFt%rAlM?O4EXH z^SOk?(tvTWoZk5iv%SvJJG^7~_-g!O;$tuOZv`;2FVGYru%4e9U|Etg5 zh}?(fAAWVHg#8k>6$#+iXTCA8{!`$;x;_JBya$jE39D8!JiGs^#t+u)K8==_tVe^Uj$TBF3?c@#2!xXENqgHtL%&Jk_!sI|<|^3itq z-K_$K8=a1?t$lnRvSYdnAYanE{iuo!(BaI!npGf3mA9Mx)OK{*ovI)EPye%5Yd0Lr z=BgGL{)6d%Apa)~U1;Z-r^Kuv;grpP{qoLtzp!6zK52aOFz{cR*p5>N`kxZi3kd%A z@sVeng+4qbuZ<9Zoo|!~MB@qgjpzE0`!O0vjh$-sYr}94(I@R=!GhF+P;IJt+C@Ja`+!CQS2KJkic`HfP&w4U4?7!c*qsfz$KDSE>x zJIVSA5)HDIhUY#3YwF)}prXM4Mb!$pBei$RjlO<2@L%lyQz0$bFKK4*sBHcV!+&=C z(KnaYCfji+?f{CK-Tyv4mg5c*Q-Ne2wEsPN><=&k`B?w^A@r@13T!%?;H{Q8JctxH zqi8lnEM(ulW#CcOj!=vEPL 
z%|BgShW&D#=^NpO!zYdsdz*mRgM0YX$gi(9DX%GZfNMOxg5abHO4&LU4^t9;gzR@c zDVc}NB)+Q$bkK7}xDqIw+H~6v!In{drd!v8UzCgYR!2dOycyPidWNOFJJ&s%Il8ic zxjCwT{-^&DVE3PbWqRsQlKkyk`$Cy^b*TRf^gp}P`_N;R&3|?0d%sj*koi>eI*bO7 zFlwSzJ*Yl!nX`WE| zYtFY{V~HytAWyz35xAP_{OIcqpf_U&OHAs5CI16tW#Os>|9v=cNnZx@i1&L?ZJXH^ z{~8&FMy9+Y{9g|}#O7_{nLE4eK_f~fX4MAHQ-J?^e|GYDbU1>e5CVRVlG*+5MOt1! zAK>>yX=46&=znid-vj+$S-UPVtGh8N4VwRh;y-XBp1~;?S>f6Jr{n0>wHD*Qo}v$r z@?TWvw|L;cRN{(H-D74DHGE;C+3W-ZI3cbuU@}1M)r`Q6BKYCC*Bt;1`e;k6v~HdW z(0Lshm6z69EA|~@)tz#pa~8dU{`c40IUjo8T%xg2pyKJ}##M65Q}PZ6PLFD&nxfL^ zkeltv`epA=jW|O8>8_rgzEFYXsgf&R$#?6H!{M-Ne-=l>TxfHsYz;ohY{=xk3hiN~RMRQ>Q3E|n`{CAdk{f-5o zY29a`|H2l~v~I|Mo__klozNY+NH)bSM^#zj@Fij@%Y$Q}I7yP1)H_%hihCFcpS@Gl zm6b~FphR$MO(c*iy*H`4e+B+aA_*AXuf88x9c|v@KyQZ1vA_1FD}tBAOxA3J|9-(Q zS>LiYphgskr>^xSMCWNcemI07|8V>H&w$%NO7mGj5!-tTeBWp@+EmBeC>m#($ckdx ziWJace`;Imf8Vp1t)gWXYw3;_8#ge|Ml)$-?bzd zH_-g&5a7RJOkh&<(18BaX)AbmH2--z;{#kSe}5xyi})-?5c&ga9<*Tcahnd0hk%Gg zF;mkTP9K;c03B>ZR^Cn}nY0V?53h>eJmy$w!m@7xo>NlQ{hD1HIxzRn z93Q+3vbP4K)GqjvQ#x**OOEou9~^lCjxfrnf|GZd!!TOuh-#KqHt!4c0aTm($pHR_ zo&kyImj#P>_#s62U;h`o|Gm_4@9AZX7wIi#(mG^>XW19|6}O1!{8JZvy23m*4JIqh z|0TauCfUad!+*L+==Mglpmj}015$9;b-eNC*L8WsDmX+j?G!qIL=ochK&nTmoe>z? z46r=}oMO{{tfX#=@=cZR?J3(ux{IC2(Epz9QAu4uS?p&0$S*fg=6>b6N@Ebhu@EQE zxfx^IJH(ov+^g_gRPrC&!MJ$?W`R?{e|20!L94Tq9oRBfDmnrUB!swjt-#V<48 z$=zhX9vuWK;FkJNf&Vf9*{Doe>^=A->z4o0z> zhB+YVLHkd~3eeLF@SpKNPvH+uSElXeP&T~<3aHkC#ORpqSl0`BuYzrDK%vS4$1jwF zX$%sSg@s*yv@C)BdS7?=eRdxx1^v&Xn|vF(yl?&hSrtJaAcJz(r&ste=AKv;3ZfRC zq`T;W&9b1hXK3h=dz5aYdZyn~({*!sgt!Ib*-z*Qts@#1=+bQIHjw}a+c%el9 z%FhEZL36Ex*~DJuDI8-C&pLjf3bF?$S~{4=I$${7KQoe2eG`~mEJpCL)G+$9ZQN~BuOgQCeV8qR%8+muGoK* z$z_yDMCYG(sQwx4|2lkFJ8M@6^GkP={))ZY%^2+GvxAFttEhUCZ|>{vZe4@b;plo2R|pT6?* zAUty={&S47gx#?tshf|w|APK^&s4&RbnIRPvx)n)p}Rog)bOwNPTgqzIJ6P&SGJmm zZ=g3w6tGU5cENV8jHra>&tSTFRSnr%8! z&YXh&FBiP=Yd1B}KfB_sU`#muNf_?7rA(_nAl-^!Sjsg9fmF%i$NwKwXBiOH+J60e z=mzPMZV*sX9chr1ZcspwMnsG;M_NL!ANA+&Z~E ze*m;vf)cEcn>~BtSZ|ROU_F`a^~df+lC6l_`$wH4(UZBIFna!Y@K&Kn$r$oqDO1?L z4*Jg@uKR*jyIm^P^(W@dx7q-bWkd}ttoQx@_!noH+cz3v@K~SkEUW=V3FE&P*05j3 z1OEd0e^TXa&lX_japS&EB4*9NoH1GC5{f&U&U0_6-EBhkBVEa96Wu1>vM^8>ct z?>y@cmXSR2kK=xn%j;2Pk-MB;9eMX$XB4=GRGfR_ws6H=!Jfd$TMg-S@|%4}MhbNg z>*QbmI-J(I_xJ~xIJ*+JFt-K6tGkIvOZ_95tJ?GKROpDLne)w~ut(_e9fKQ(M{xnO z37f#z^L2?we%v%edhOnIXm}~veXJTjv(bpKL-i+9xVqyM{wc=)qUb+c*Zn8NbO}WZ ztFR0D*Z*AL7(BgibeAnmW?28vXvsE3t8G>I(3f{&3*+r>Kw;*K{z5Hw1vnA^rv3eW zs(pV7S$Hg3{)M(PL0Ez*72mlu3Hj@aepD#_V-G=p3; zULofJ_PG~#$D`jD!gU7y=c-(eh6UFDoLMuL4cSC|kHMlDpizN;it#TJc~yLXfBLo` z^YSw0W3S$`=4(_J{uG zygO(6viyTZj5`NnGqd~~6pk){6(<_5Mq~kh4@7z&SGo5@{js|d%-`&JEsO_73I1mH zj!WVe4cbSp4-B7N-33R<>poq{-kQgQ`M3Mr3S$&%Q=DuBAE`zJm>iU$Tyx$WezYa{ zln9tF{lAC7xM4gZdFG^u!z&vS@Gl4@dz+yP1oH2jr>O}VEc|@u|KopMJ$L)3P83wV zNZy71{`2J7|LZ?wsLij{DJ#$zX`%4nh0HVGVf!z>?F(hTjT>O^1^wqiMFK2XWNk~% zOfAC;U*$lUpw=fxk;2r7d>At*SA!J9{eI@^ftwONRJ@>Jc=nZkvO5;+y#1i_iMpHp zNspU6yAnS}(O!Add~oQ}>dhPXU7mZkV`IksY*p{O4x0~WvzzDYj)GsHI`Vyl{75Fe zgpU-GjV}Gh#FgCb2hNd3?XdZ0^Jy|!W4l_9Kg%{C5if3(tO8 z`z%=sUPJJ}8ZX28e@=aUceX&FT&mu+0{^-P{LeaF>T8!a`KKa#r|WG;)GnX<^YeL! 
z{7B2?jla$t=U| zu*Xy0+84~jQiAcnfPWDgpgU@SEv8c`{^v{E8jn}tvHe%^pzRG%u|WUv(8hkVatE4( zC)*EakWF)&;5X|?xV~8CH}(P+xvD=m%)wnCVs`7?pGQ2y(^ui1^t*W@IPVXLaG_B* zuE7557R{%dq@c-0P{+4Cr@cR|>T~aS=S^u#2pbd52z%v<6al)0Jg^Phf1N+K1XZUL{#UjpRxttluPQg-zc+01PcZ}jy@fe0cr5+n zUu-M2IY+aT)9>AQC5((8K1#va3G+XTD2=XEZ2EuprppJz+Nv7lcv-NRhAecsrZ>Ql zo-DEzzGx4E!(z|;{UT-Ty$BOMUo}4OI9ycbM>1YLQ6Fz+2X16GfxB$W^Qz^XD;CLu zB=5@iIoO>Xs&YIy^NJ_DnT}Qk<*It$+7Y@=%aV7+IByJA!<60Q*!#q+LEUcWg#qx= zOw3*hzb;4$e5cBescFDJO@C7Zney-t?^Y#E11qYJjT|IPjb#0g|Gpi^C`cp6K8A;* z!Ta}E{>vbK;l1zBgt+bK@84UdAJ~@EmX+!UV_hu`u>TrW2rXzeq4fV8Z>WRIdo6T1 zXB)N)nrL+2d?c@smK7-buf-#9d7&l^JH=qnX3CAMm|5w5H|ogoDtS&N&s#BD6hqe| zn|(IAjzjIqup_*LV?I8s#Li4N#R945KIC43zgA}f8yo1J8Oxu{TI{R}2NPo5{yh*s zC?JP_F4>!|6qbkaPfNP$!csy1`3!~x-KQpIF#P?GfBOID-)U(3hu1jX0AEDTsg=_I zL--rHfz>VKUNI>Mqiy+Rk*jEut{(bq zerZOzr`;oTM^Rn6g6FL`jg321@|jIunV!aWAabfYr%1hr+@ezjp0K}o_o9OJ5{PGT zx}?|qfNdt4&pvOf+J9SuwE9P=<8$!ifBiqkPfg7Vv;L2Nit)d0e225G^$+}6npNn$ zK3k$6P4PcN|MTaKFGr><9l!-8Oj>1EwH7fSEr<=ulEwVbTp0ztA8jf5_Xlrl-M}Ee z>(uXe+cBHGd4bYT)0uXAg4<089%Y!U)YrVoe#&5PB=Elkj}VMK$zD~zzAoof8)sk( zdkyRts_qH@<=_g-0?oRc5JPz4*)HJ``+02s5qfOfX8TCyzx>yqzZCv^_c{24ncDECJqP_iFKVp8|AX;Q z1&-DnI{#OlIeYJyclct1Kf@IMY5iWSOilUmkY_hS7YPoH5T6JA=i6B<2+*G*CJj;|wdT-Aj$bt4rw)4^^u*5d=xgOuZ~b8|CLcUWnTud3i?VUjJS|0laLr2x|E*!=T<{IBD}+;9yuWoY|x^Ri=?@&EN7=8dxbT%ByN zm$fAj!T+p2e%YoB>;GAtYz-_X8)5!u5%cN#JtHmI!H=7;gMeC&(o<7xU)qw>SmgmH zf^A36*Xn^8EdBSGMYhafo>=fB_~Po_?-Jp5wZQaP-oQtI zEKqhPkWHiDVb)|y@Vd3jf6P(jIYER3`VaI!-+`DQ2;N8D4%iad}m^4hSxRJBzh*koXDRWlADqT&(5 zEV*j9s<(r{|19Oa_^9|+m3-PM~rp6dgrlJ*hum$|+{>&yTA&m~=w zhQ;XT+|;mj?V+=Oe}iw+rbg#H`Q7XByLZnKi=h!Tdl2Jau$J^XHQHhPFB1puV9dIu zM4EfD-TLZbc=L<9?tpnCgWTXDFQ2>fXp^h@;SbN-Q|@5S>Q66k<+MQ! z-5}(@&dEC_P0MZa-({pz9Mty`q^$q>pOMB|=H~tj^FK$H=n={D?KvK>u_`IMPek{~Jh#HU?8Z2Sf8oqFG$CeN&Zbep)|fA6J6@o1dd4zK5^jNH*#jlN zfq#vaR#WotJSn|SDRv^@|59 z8&?DR50@M)tHU%R~hKkNu?=$T)eM=^Y|;8V?9QkA*4E(Q)2^{(~P|hg<#rZz&F;I+@ zz|ou$8=F1rG}Ev%(sZ=UFldHHl%i1=xq`>9CLDo=#eOyH`qGAmH8dsM|%+vJ{0<{quU z&YkeJ9x2$HP5FMC3l_IDVy>>$^=WW*6_B$uPJiQuF#-7R&%aG-I1t{=+&Im5Sj6s8 zdHTp2h5ydTM<+aDBhr7KY0JjQ8h->u|2cF7r-s1)bK;YdAuKu%9FKVl6`>g_(ji^V z;^G5*QYheY+u2{o=~N85PMSeaGGYySk>=G$*B!6z&#CIw{APfWFDNGgg>U^u`;pJc z(sa8NBmyn!9b8-DQd$Gbp;PpTehdSzlJXBZ;}o|WK{E}WYwRlp8WIvN*M3BX3bn^R z?>MqNZNV!dT6O1ZM(p;tVC;?<&d64*96GY_fHY!!pQjqH#N%yOrC4lv;ARA={DxUZ zl~ujR%bVF2k9Qt3ntb_4N41^(CaBV%aM{>T54EAX--Fk$*n^PB#--SCT&-ErCA%zyspLL8?5w7whP z{U9q4dQ#(6jl;9_S55W;Ob!jn9WBQZf&b3Dol2}@g~e-r$SfL1j35cbSekPB_vae| z&ch{~nl*tvI0N{nBKR0a1p3c`zqI1&!3gFbT^(wAe!Jhb5uqv6=(i^a&c)b^M?H8& zD#KC1mqJSq!k})Us@Q*r)IRvRkSKlHH*kr%!NK!&pVcj7LOa< zuPKvy^3^ixkG{u#S$hLY_BK7phc(goqpJD=$TjR;g8oCw1AH&|pD%Zmjc_@Ac+x!6 zv}YvK>7!d)=vHG%X;?^X^53<_i+7#?{ile20@fHTivkXSK*ac`al!&S5M}=Ha2+UA zkpnyh^r#nV`MqidXxTAZ$%EQ}e|Fat-YKe$x>IiT@@pCJA|xVwGEqA|OV8ry+dfW> z0H+487AYKBn#R>?N7Hpf{;U0#mt+bDrTYgEFhzL6UIp z9IKkFf?sR_LF(bg4*yF|m3W=A^-LgmRNzfgAp!-?F_?cAE@IYd;J>?tMvl5c|MT4_ z*22Mbb-PCpA(IuvlPj(dHZ3qG$xEuP!!GRw9imvt|gEKS27u>PMvkzg4FTY^b;fqw>!jQD@~ zFVC+N7oyMwu3fPI8dVztz5MVRw;&|KHu+yke0m4&g8%v3Ez&3-66fy+f z0%`y)U_2OtCRn^^i?)kF$D!mR4LxE<82?KHd(a3+R4k+wsQFJ_yO48lf)zpjs}_Q( zObGbzdoRfd3Ia?G^dHalq%cG74wn{sy%V2b>UN1qB)QA4RjnwYYb6(w#IN_p_N`It zo&CkP-(grI%tIdf4^OJ}WMDxJ42YEeL&y3Puq+Fw zWu^jRdBRr3A$UavK)r{NTq?%@8YvdyBYNIqkwZ`J1OMx`bXy2+%Ui2LO}`LJJ#s7+ zk<(ZrYvsBZj(8vW45dTY98o<&2kAW=FrkwaL$&)y-3(KN!r1haDi$U<5wi?~ZJ$X> znY1Bd;A8VP_0<6Q!~)U#q$^*NIBc zo@RFf+ov-v74W}s+NE|NClEUmew=JZ-96E|oY`dXNUJy&h=3=D0oHtrWBK<z?fRbNqP*9;&RtPo7=i|LJG$zWQ2VuM!gTc(UT% z%MDnOwS|;R{4Bg7`c&Q5{n#`S=s#4Kic}D)OkbP3kgFLMlDtZV5F?a&KPYLOp9l?z 
z4@=|kt)@eW*+6NvjBZ0vkWL;c(W~lHqjwLhEUxy-JmWwZ|9#A}pL6Bq&3-#vH~4=L zzmg3{c~-junjfd<@G#rPR?Jh}l1>Z#hea!I;(1^eC)MZO$L=5x z@OfeW=Yoh7p~WTJk}QSoV5?QME6LljCb#tUBNXKAIzet|4Taq<^gl}{G8NY0Dg8eV z@^!Tj|K-2xBg@5zoVE69wkOJkJ2DnOJk^EZY~b^6plY87{U`W4@K2B7R!GlrP~IMe z3%`h)RejSZtRxkH%I_6wnIfAUVkHCdL4j5<-9wXoPlTE!9OfA0FpVH=ZyYBB%whiSf2c|V}tt-Kk` zbhce8s~^- zM~YCcg(Kv@)?7w-lyl4Rl4c9GUzV`SNSC%qfjT}IV4(jea4r1?+%J^;*9882_1Q#C z1#3$G^XO4}W-1zIx{E=_hiqzgS#wN0%%l$y(T_!>?ZGqF;3IF;*kM`9l2ZgWRUs%ryus2{*$8)$_eTJ_|7ZQwhh6Bdw4Oiq70z>o)v9wIzSHI^7y zyYB65r9wm=D2cmv_SZMo04;>=zkVF%D>rfk{wczTa2x^UaocX!nYrE>lcD~6OFwvm@r0QLu+#Tk2(EqdM1z!g& zAmt2YDbWApz?}q20mc7=-5t>Xe1}xR`HTb!D z^c_brHDGcH1%u;d`v~<;Uyp)utkL5Z@K19TiOsP8k`C6V7f?I=Do@^CVgp{(&%{5;0*a~Fy`)0TgV5AKO-JoK{yjK_v-N);I@0qWK=30wIy z>og9gHX@k+*|?xpj?(|^XKrwxPirg`T+xtS?ra%gT+#~G2`b@Ov`iJPn1A`Ee z`LRByRuG%M1r^b#NDBS_`_)~1SisOi4Cw*?^vyjs)r7V+vLA%A$A?klol#vDq?XJo(nZgSpUx&{uHQs4W@*K z1MOjTkKz7@{^!eskJ|R9lZ*CV-oHm7q?X(T{Xc(ye@`?u7UBuDgZ$SY;Gc$z>F;so zz;{a4yJ9)Ln8YW6HsH+x%0*ud$7Q_8p1-S;vh2edSpTytC%T`beBgRkDRsPn01Bhp z(B`B=SpUzr)^r?t2>xdgPvrI9j>C(s&g|_%8mF{YSP?2cww%I+&<~rI1!^MbKjp^m z((XKToBa1rQFbqLVo>P`sGU2#dQy=@jUfNdY<+UBVgv^G{o7XI5$wkxzW>LMEmUZ6 ztOSR8zC@drhriu-_f(d@#`1KIZ|r@N0_5LsNPb=aW^N1luk*S~H(>gL{Fmf;-%(gn zja`QP*UQeDzzS1i*w9h{-g~ZEy|PywnAV8oCXg*65iD}XT&KT zl!Ycb?fo+Z5s}HY7wKXBKN5e$w>`_?=0}+RqcF{Vx+$BC!|7rDhg3goM%m%M!g znIV3P{zIKY%tyUL;u=W3XeDDIje{2L2>4Xn+y#xSM}gX zawpOBM%YY!4C-!y`3EJ0XT~l9M;P<}Fo_xQK<3A6^lN=q!;$N7d9xX; zb&Q3wH~r7&tJi37#rp_$SN89L7GOAH|Nfa-(-x{5yNmRcqW|3a^?R7Q6#UOaNuR}_ zFZQUgCVDr4E4>LI#)HYOP=uSI!iEwyM6K_15mrU_0p#>;nh=LSpxd?#vNE6_Yp-ou zqDHBRh=xrH`0skOA`UDUd0ANzmj5bxr`MlmSEYkyG5-(ZJ2ONcXv|_q@{Ucu`58S) zqB>`~7`D8LZwTMnm-2R}vPZ|^w-II|_UfrT@baDa9&xb08a}rVl4aBN(CSNJ@q=EIsq-O9{tHjkcv-Im{CDvGd=zJq@3-Pj z=U_%37GJ;C-Q>SBTre$bsvTQ^`+Jp9%iOce#btA$B?Avi;PiVD-8d*^cCxW zzD*?;jQM|%V93MkVutxD%n0^hT7CT7aA^2(9pid{{v*R8VO7AxGu8nwujE2)e+5*_ zA~u{a6okS*efX&c{EPiYnQbQ2#LfKIRv#-E><&$0{m-$YFS@cNsAjcRF#nHeSpOwO z3&?-9AFO&P@A|s;>?A;?zkPVNZFqg=zFi5-KZ~|8K|%_~gqfDZpLMY8^xyfq!yB;w z`etUltj_9HgQsTXRflfYt?~@lV!%yeT`UJs^k&nVVZhk`=YJN#QTh)@GPe677ox8P z9a0N?WVD?Ql&LL|M6?3=_fiu*q_W0M?3ak6(Ozu7KjwFynUx1X0>BM)3w&el1OqC|IZ@ald!!2{>8UHg(oTkDou$K=(|36iR}++bs& z|HVPnL%Z9I5E1MDVT1<*+FpJyLA1NW)}S{9dSf}$v_XRdKFI`wtM@w^Vm4*|;nHKW zeK-TU0%iWeRm%B|(i0*7CAg;`%2fXFEAwGD&|Rh=|K)b00K(WFb2m?YELi5QHYJP{ z>~ZJiYCl{nNFp$6ZG3ue{Tu|$XqbO)6BM9e%*?g~`hSFUEVZ-LtSe3b3{&r<_B**M4d3d5yEU#QPbqPTyBeV$eUpg@i#0#k!Rpw`ZIV~A8ToOTNXawwhhjtMsVZ5pkVMUmgVR04fRo)B;S3H8!yS z3SHlr%cSJL&VQp3iV#6p%zsKBEPW2|_9wW3{dr|3du8)IHk@ zIxE(H{I5^X#mkM#aFqGy-1qfbr=|ELMlO%8732l_&!w%qT>u|dXV-k^Y^L%2U@9Mc z@RM^r07F(V=pL$o9$M$?N}K(Mrp;X#$sO`vUGlEa|K;Dg&O_aD?dJTm^^~Mtg~(3G zno;JT=C|{XhK{ z@=opciIo1Ia4&}^mS9NAJ92O^b!(=?*8s4CB?U_w9BfLh3Tv8c(I(-!o2NS z=)#dFcnw}+g{Tk4f1gPa0{%P2|5J4G$gF6ZCAx;?zaB^`A=TpbFSJ^0&A1i`Du8`q z{-0Nd85|<0()n%)@Gtnz&B{C0!xNyY=~&-eNm>N^FWlz-i&5vClI?{W@4EP_zQgM# z&2QTG*Q$Ca0&ik4RYb`6y6<}~t-}!Sa2>wxXJK)>4Ypk^p#Ny-I$m*-PaVhn&#cc9 zxa;=8fy8w)|8;q(GfK={+yF8H1kS2`C08RkfD100`m`lrvflFwFL6`v!J&ow0CIaqW(#|2&;k2tO7jlIs{Wl3C2$ECv+|=AS2x_LRrm zXG;H%N{K_O4OU&>cL(|pSKbcZl?048 zlKi0B^EuXki2JZ``!qeDvJhg?Esx#6At1jg`M(m@i`goTENk|?9LQrtt%n!cPz)&g zk=!^N5xqDb{(W8u*Zt=-E8<0^*5{3jq(Yt))iwLjc1y=kyy!+4|DB0XhNW4?tyPK( zf&XU}{LiR9n`(Adl1Qr%cW=oF1I<$WKM_?4c#MCs)81CfB1d)go-e4;82@YQsU^q` zC6ZO4|2c3Hs?-So_+K|N5=|~50#L!ezUMTHfmDTfLYSyVOiQ zLBwVp9s0z0FhG6t0X5VPJ25>tly0%6Bd{k9#|GW*F46**ho$~p;hv1A1`7d4# zw@5H4V9E#d|2Wh^{ww7aiI}B#hCBric}92y((u5zv6+AOV@4SNl#~J33-CXmv`9lM zl>OI+PA$y;jNFKydLyCRyEmRm6gh-li!w|R42mgo5%;Cd6F19v!VzIIoGxn5?8nYQd+5TVaO{%eh%iqikg?3VbI5wt+8|Cu4( 
zThig>DaVm$EdM1anonsEOvm&eLd2B1`B)L4)FJa{BwI95ob2_AKnjCszEwPJ4rM2nN&y^I?2qp@rjC_Ls%6N`G|B3 z;=Eoqt8a}NP|VgJwbv`nv!%`Alms%UdefwW!T%FNu&&!vwd<-p?K(B_VWk=S(}RtL zHxzX2&`H+aZ6RnI`N^G>-C8nXUDhBFQ~H0BxN2hR-IdG<*!;8grfkTx!oU3c^Q~5E zQ-Sc#+*O(&0;d|aRLsFwM59$4ne^G5t%dD!`UD`uU!!82YUO@*r#GrgG>)A{rcZq^1 z!p92o1SL`UU$@umoUd9ppDMo24bFMY|11sfDlLaptpC}$>4h~`X_)E!8SDRvi_jd- zuifNdfd11TSs)g$V{xCzNEQ^SXYSqPpMGOOM1H~Tmwq7NysKLxni~Oi_1n zAq^9ov`Cg%57MLM7Ty<*dgiEsz^s73%7GCj**Uclhc^x$u2~G*Gw4|UKmO^*mzSNp zy>Mv#>&M4~Hni~IeA$5Wcw51oR)Q|L2{}9_P}D;m!VM?DK&Cx!$Rf3aM7~ z(QzY6|MQ2(l5VY~KvOi>!U6gZt3BN?KZoA{5tUY?0TJ9T}0m>4CD-LzX^Pv5gP16mcfO^!H9(vxRgx*8d}J@bNNNQT`d@J|n4;>QtXt=Tv^eO(X{_@{_d9d*Oenn6q$wwX9uYQ*K^driMLE}2Ku z_uaL!=mwx&2+O%{H(}tH#P*yv-WPJ!yt?>tVlw9+%7rWwE@ z5OgSj5pbhZ{HMMpTQMLDmRLDs88skTrHyIPx7@p}a>FFHb>$1P3DIy85b?czyt{}uXq zZqVSyHYaI|ERj8lzI701*+7k+%ZD&jAOn|zM@Jgm9#e9peDMGDC-HCHDakVeMrw-w zbG;JlKaBfrUpsLbuj;L!K$64y4@1evJ{CWr% zM-xrU&aZ^JyK(XC{Mu(`kpXo)w-`csD>y`?RrF{OYwk6Mha(&dZQc&Bi)zUrm-KlS zLG|F7gv0^&f?AF4^Jqn!sc`k}lLIe)yN%_?!T-af{M(3C2t1im z`zl=?zUjvUv7rDW6O{g+2Ym^Soel(tCf}!zNEUh9|DX~3$%S1fmVsOPbkbi$gcq?$ zyKxV0;dVnxv^G)qb0pn+y9kDHG^hcEbTQgPSCE!>UoT;eZtQ7G=zO;~^WHJ{o#FEj zV&4vg6>5gR7_yZdv=ZDIHh=F3mn4UGx5l)BUCCLLyvm+EIZJJ4IGh~OXkc9NAHD1 zQOEj*vOCpNH|((a=he&Ih~07J-c8;sQ#ZmW`L9s-2A7xf<25Q9ZVfIqBBVLce=z;LC}?tGMaEANvzsV;C$h6wu4 zLhH4%3pB_8r(UY3ZxF{Om!pYhx%KF{rOodFwACzcn`xvN<68d!a51{N1SS_kge1df zhs-2vQiS0Ps!V@t!uG1`B>U>e#NdW~~ zTy8CKX>tYu6Ak*&hv1vqu$Z|P_Hq4KU9c_!{?{+fPtO=^f7e`=I|b@bh2be>JT9xiXIRAFh92 zqvXFXfd2E~Et_}mu}ft2$S10B#kqFp1?b`QO#byB&JTcWYKOwfQqeSA{FbIRWmNQR zTzUhx72oxyIxS!{wcrD!J&y3|H+QyUkg}6 zAPL#$qjnnle_9_mbfOgs|NZq%o8|>Kt=p%r9$R}p>ZaB6vLSRCK9^R1(k;)8<(X|x z?P0Y#z-_1Gzf=?jDYayq{4b67W|Ic75i^_m&*Oa>pAUQjAQ=FORbJgXUiUx#X{xT< zBHQ||6O)fg&F5a;%;Q8Ilkc{?9il^ThyU?Um!B4Hc+FFzpNKR0;#(PzSSk5;O#fjK zhG`L=`WGc(A zx&~){DkAK^POsD0C@_hOspFPJ;vZoTPw>kxdv;FmX#-{M_volkyCB81d9ZFA4!B6TM3 zY*`pB+U>7QQf_@Wah;r}6Z-4{3l}E>?vi*s4QdU34)ag@C-DFLq9gX)7uOH~{pZqV z{}0SRFFTlnp43($BBlRl&xg*Lk8bV3RrN~pJj(?)!XEp$8XOyWRu^2`P`2w><8zJR z<|bEaensB;pvN{OeaifE$;SK2H+ii82j(A_?bv+>z3^Oi^QP(fho1iWe%MhYhcMD3 znX6!1?$#Fasx@0~d<>tatnag_|D=fUsLNT4xPB_qIoAHk7R!It73B9+Q6oVQfHqO~ zUk4S{Q5wbn1O8`bXF$tKA;^Di&Od)?iyj*?pjcL+;}=-n<#`a+|FbiKC%CzxQQna> zNXr-OZoV*PFXHnpP zbsmBG+*s&;8wdXTmGSl5KcaXO*nf@N-=F!hdgImfbw|bVbv)@!e>6ROLYRME51!W@ zgHjMxT%>Gx=`C{tn}m_vA(K>ApNN%f$cvKya(Y2Fp~LuJQ7rQ0w^a0KAIv`yRnbCR z=->RKc^L9v@<&axIrtD_WtyUemtR~hK#wv1vwfG`jm1G)A9pIB+Na+A;`a9J=lJ!*94YtcPaU=lid5^M0)kyo?qWSz3xj@Ie+dH zi-EQ#rh9@4xs=q z9~xF=kCW)h$iMzWTIOD6dK7SX2z9qLKIhUvLzMnQ$iFYWyx)G- zrG6fYK&?~AXUgMJ;DLwrKaX|oJZe5>gmKzZLH}tvXx$G_0Ta@jryIY%TZ`;G{W;{R zg5&+1)8B8*_8bJ{+Mn|+txvDCE2N$$5@7!|8hk@zrpdEK6ILEPRdLAcIc!iB1krWDR2buf`JsxbEFV5p@Lq z=U0^epDX^Q6;=XtjI-S7TQL6n_sL}3#DDuQ=s(2tpB*6#s82BD$qb8l%twhEc28=( zmt7jUO+%*7Af(y@d~yY6D${0J8e-lK+_fRQZB7kAyRLXIzvpt-2%5g;s1|Egpbi~D z$>o~$kpHrS{-2=fpc?-&jB`TapZ4TPIx7RPYU2Ybvxas5p0gVtp4Pi7+Dhzh_%@KFm!5@VOoj{EM)LC#koFG9;s+ENBPxKd*uQv(tbYmG8||4qaCV{(GI;8F}hK zlQ_+AFhF?y(j>A6z96Dm=>NgyANIdjg+kUv#@+U2R#`8^;RG>flfPH9tI6^I!k-J+rZ*W2a;ktg-ng zs3`XH0n^jup};FO)~|KXtd4lrJ1SaR=BxQrk%3BH>5z>1eRBHBWr{ zF@FU552x^`*3R!5(gXf!=kc%WJRvT0vLoHA5#=dGn%WX5e3{oMT{AoxT?79!w*Oja z^bVj!J<*JOnrHZ-|4@*@d!w(7MsI$IKgXuesc}Rkq;kKi3qtCM#ZU~Xo)y#}a|OZK$tS=YRRNf1?k4}@mq9Ffs{Op^-Lv{pbi?V?;l;O(O}=PUB#pgHqv$_*^h6`X zaw_~iPK?t31N^TYQiwaW3%P)Gt1x?ih%ZW$sTNX2K~#BqQ8*$t|M=>?!D0I^&c<+= z;WU3UWSOTbH5|nwi!lDz6=+(^k?JhN5ubqnCqo*#fNqGgpbk%o#tp$Tc8hOb?wq|i z;%#J5YMWj@QXp<*o-NTjFqjcEZQkhGLK9aE(h`ItA@*ooU7O$QG&)tHQF$$2#(~x_ zksY*W0_Z=m|2k+NvCu#vKV$ue9iJW6{thQWW&!Tfm0`sb@~L0mPhD~U{&ti9b@cOE 
zjkR#Vrx(LV+LiC^I-yIPC$Q_@IfK67)MwKx=!q2lXLJ9>Dq<8ck(5in{n^k@szECu z?E`w1wh}e0{B#Ka46Wh1A^&bwqJ5OA8~CTLuT)|FS64?FQm%H?(SqtMz<*z#jAusR z|Jk#C!KN+H0`woIY|uu5|L!!c#~`F>0E@JK8zy%iB%-6#;}m4Y!{_MWQF|Sj_cx!^ z+g8L`9j?>><%L*j@4@2Db=C!rneI~UItpSf_lpcyPv7#+ma{u`6RcnzE z3(4?xWRbh#fPWEey83rG+T;?TE`QE8L&+!Be+ctWSkZW?e$?NUEu^unt%kufok!c1 zDg28?9BeLVMLh1AqO|>gu>POB@f#&QR7fra$?Y&<6xKG5M8eTA)bWC$k?&L?9+k)_ z%Zn1Q{5!3@FdC{oa?pg6D@4$lccT#JsFd;<#zHxCr=Vng{8LZDBZ< zVaXGpJj^dy+V;t$aElu_n)=A+6abumo)$cg3kFT)jQb{<_oCAzuY#+Hl- z=AUuQ|NQAi`a8W0kOL%4RI>tGs4oc%$%Z5cQu|mIs(U?pf=bT={{r(rYp2c9YMSv{ znno}P7OIVx3J6BfvCtscfBAN!4L#6*bd^mxw>=YtB{NGM6;YZfRq24z;NOh5dtwUX zdf7;UhVDcqQvG6>$fpy>vO0&;v#;5w+eJjgAV=gx_MN_|!7hz=JfkFTBxj}`_;AKy zSf<{b$977Gl>Yp^y$DbQ7e;@{PN~@zQ>LbH=s#p`nT$_;(Rym*Nfq?}?BD=jNWxId z=KK>cGB0Z_qH|XQHlH|`fu@FRH z=7pQ0nn8Hy!Q{#dlO}_C>ic&vn5`i_tpBIR`LIYznK%y$AkvL;v>L|cNNtZ?p#Iq4C>CPq7qdVM>ea} zRK!B9#(4_=UC#(H(3W}+W$=lk+8^Qyg_E|GrnVWp+fsr5)#7tZYAL_gX50v}UC{sB z;wzB;tNrt3V3NM^8!>DTE8#_Cs-ZscKSTcyWHtw_@}L siuyaa#d4foP!{~-?Y zU()gXT*v^+zqh`hthIU@U{6C_ek|s5M&5p4lu0sTw({Y_>QR2l&=AB==|3F07_4`c zVKk9DzI1boAstj&b_7>?>-*=~9<>dq{ny+Tv23fUcs>;wJo;=x!Q9WUyBr9W`$jXtHQIoP^K;LpDf_R8 zKa!?4{A~QFfmTmy9I&V42UYI5%BYJVKQY@&-Tfc`OLcDR0X^jPg1?(u#c#G%9H$2t zT=5Z6fO^1U3o0|l@?YwRo((a0AAQF{qY=ka*I7m<*y=wKDT|u`fbeX$eGFRh-Z0BJ zG&xb!c>&iA{EI5X@A?WtWgM9PGq$yl8O`CmTN^59=u=(fHi;k@wKM>~sx!X8jHZKPP zbA=8hP4~hNR8&r)qX(C6H35KN0rw*0H`{-+@ma@ndC_(*LZ8Bu?5= zl^SFHKhBqxvHGCrohQD3`h$a=$4DA{Fbk2d%TL>#*^$m~R*zli+W+xSIf&gCsQkE( z*mzSBV_77JR$vgJ^gnlLH=77*njs?W>bf~KCrV%DrO_0Wslki-B-VeJzeBuoMKvZR zQ8ZW;{a{qDoha2R_cNqMR0Z6qU>C-JcYtRz%s&)_6p@!H1M8bV^5{m-v;^oLo|{P62q+di8NF4-wPwa7zMM2vrWH}$-Jwls_C3-oXPVcU}3 zI1(ccd#f0J+c!puC;>uyw$T5KWR!l+;zl9=Vp-5NdvKUTR5<}$un8FzW?AGH_womA zv7h0^Qwu;A*z`XK0nYf_-#-U)_bxfqZ`d^=)3?=hL|(|h(+YjNBDymc_@{+VjczCu zSE>gG&sZhGhgVqt54v-PE3Xvkt<3h|J6uqj%c>70|MkLUuK@S1WU-6L>q5qM;bhn& z>rwb$mt{QlhnZl*KwXO#EQ_S$r;5y|^g#cC{QJzG9%{5X|NM?q4|GKMcoD4s@SzHP zFwa}Z#ZVVD#0#jY5$XPUJuZn0acbh^c>$i${1#inhv%22;DY&smpF1}82S%k|7Dry zpMCq(#QdMk6Y}iP|0AD>h@dLNl_LWG!gmNm$(hwfNZ-%a1O10l%>v86zu9PD^rG}X zFU+PRivL-{3i9uELI>dh`-qN~Ve7$xA-xOGTg2+O~N{u9+QZwCE8%;=H^OjfQt=+q66*S-tYPZRM3XsOlQ=1}tJ ztic|OKMz26gxvZijDPy1;mdIJq0gdM@ORErZ&=ZwBX720`p=n8GSo-tr7E9YkA(S$ znj@X!|LLS=P&tSHxxyldO#jn=PKbKbW@yTFoz%A-NZ`~CMVA7p{b^D3rvIm`m)0Vu z@(0noADe&vmw#7LuFbG|#*V=MOmj`_B0IwJ?~Ew;KSy34QpNqB|7UfMe+Ckr(HEH2xsXxr-za!hLI3mPl48>w=>KUqy>Q4h zL(LGWb|A5PDdNFl*9eCKSPMkY@6uS}HlM28w*h{AF6kwxN5=MFL)W}Tj&Hn4|K+Tb z3j43^oVf1vgGvAGzs%BIQLP6-+@b{YKgTeZg8xVRDAawn)QqY4g(H4*^$RIG4$Nk) z+EpebZ$B7`^2~44WAjf?v4YVs4f5*Sk$u3+PDCA0Lj|9zNSD zKuy0-=$^r13T5WZOsykNP&}{!1kXdBL6dZZcyZ0@NQS2&lrQ3ZYpQ#(j%1m*PLka}xP<6FS z1$Y7&ebJQ z{?~ulbGS=YebgqASvSKnsjWH@j)IVy;E61_l55PqOwWk7r(aEia?HQEw5&eFI6JeUM_ zQNx@D@4kVwGJgp6Ow2;Em6^w`VN1CLEt*agBpvx9cpU3Lymdzk_Fw18JFxyA?Qqn0 zv3*7Az9)IUgsgtzFUJ3xz&W&MQ1qY9McfMT-&Z&HUw_!7(35um)bi0h&Vx4Gfs+eK zH2>|tx-ZcD>wgCRsejXuN>!;Z{fxwO!Yrf{>Njxz#2 zRxSP(b{*X?e`jlGf%AC`ZAcDsZpJAESIbivK6$419?~>Txp;u5aGP zCUKzuN7s9RHMMQu+iNEw3B9VJhoXRrgd(DXgd(7zfV~rnhz$Wz0YNrJnjpc76%gyO z;j#Bn1ngi%MMXgbD~0xm(=0k(aapF54T=vYeoael;7m)xZFkbiV+2#3= z2R-D7{m*)HO5TF41Ev&e{?Vk9(0@VYzbg06{dY45w3I?{Y3A9O^QHbD3I7@UpJ%>} zu~!DU<$A*JwGoY#yO-Wqn&Q@oB>rbrYeEG3uOk<$w=V3V`cI$V_-Bz*7@Q;SY+VGU zo{5ZhqMQ)L!$@;!^~LagKM z`jCu&7*LjNG5xTD(U`$VTzzaco_|)`w847@x17z!at*~+2qwVwG8sfPl?8_+ z{2!sfC56SJ-InqXX+nzsES-NE-I<6l>%V~f*Rc-9ZY}OGB#do-v5PJBKTl;jA9k8g zBg1MBaB576l5N=kT;#Dh&d3}0UxYrrviFIb!8I_w16|=0K38=fel(Fnp$k zH2;2e=c>2Y5n@6ADb{~5|C#bX8`0PLrA)Ckw0KbOHH{AQ5A=Vjz%OqL8p{z6CnE;J z`7fpj;r&mvA52WV&cmDJL!vXqRk_@;^KQFI 
z{6GJg-kuEkb#dE=U9n44_U%~{_37T&!Xsn>?;Bfxa`e&t#hyQ8{TIbHa+3b{v&&w~ z_FvHdHJrh8dvYls6`rL0&k1`{bBu<-{Id_yNUZ;yo+I-=PuP1z%0H~nf9f1t)Bp5t zd8|sZjDOeynK#J4Q~ei#^!zDTFMOr?za;!0n18~1Dt2k%{QCpwe{VK?!kJ9vzhM4( z`{4Dsyn+7H*tQe>w;%snwP^qE)oON`t~VznnENTTyj_K|5=md2|7y$BWI(7ku&(AE z?*E$1@RRmm4Dg>#=V~;j+nS*h68{g+tt1fC=qY<4|K9Xsx*3>0QfX>=Rnq;}vgkL? z@m>A*?@r&Yl58G(F8sq}hUoceOX$DYlm4*KAxhzvx}6uKH)B6JW=5Gkr1J0Jf3`x> z{`WS>e?k8#HUFeIlVb^yB>&ZXtw~#ljb#0&#m+Iu{N3nmwsC*&?dg>NdCm9!=U3NV zcPwq?W^4ieb7T|RW(BiN_v#5zO%)b)UQ=P8&-`$EI^a~t5A=Vv5GVfPKfjgqe?k7s zNu;{0a3&#;?!W9>Tyk~~5^N#;x0ud2>P-26fPX0L?!SHfx9Xz(+oILHZZ~!e^+7gE zE>s8q7-l#&zN77)ZJ6KwLdmbH`sg5)e}7^@O7>qrhd;yoLuvl2YSz#stYPE+Q$>`_ zM7aOmdtG&r+mZ(YhoWXMa34U@{?jbYS3NA{H_lP9!ydbEhvNQIBI4WLp}>D8amOPR z%1;m{bd)@h{Hi$rh4r7yL>twA(QwT(LOYL+I{w>#fng5(KjYT68Z8@1OeYZde{P+g zL#Os%wd4H`4BM;&bC-{R3sGEt?63d)Kc-&a3+&gzlO@s%AjA`r8H049)%^@7nDh5? zOOxnI@K?IkE0zfLTw;4G+M)mS-ZgtOQQiXyE^$gXWbW>Axs2d*i(J3!GI}7pTxIl7T?Q zi0~_NjAbahzLT4zZsJv`rL)abxHnT%@x$c|_27MG{@UR4N9if+4*{C9^~J# z{v+xC!uj|0e|neNZrtq#^AGf&;`wL$g=+d}iU0Y&=J$Xb@#6O%x+k*$+YkA7ebRoV z8c^Ij^GwXdQ;nPxaR2GW2bvb^DE$ZKpVU4To8UALx;)Yuzd=co|C;|_|LKCcE*fDq za6lq~{|x-Y;z~bAo4q(koU4}fpRVtDQ2s5zDOTM4VeAA}67-+$8MAI$RqkF__+~-> z7qG|bKRf0{ndM6RFK+m9T|)0gyr?0_UWxuAnSW&YFW7%c{LlKMLWGQ$MY?Dt_@DiY zE44AW*&IRk2c7j`^Wi0 zGG7z&Us(V7Zc?o@1dWZGgVdz`7ghVE^AFX3(aPN>fc=*f!!CU=A`Js3Ahs(gIJq}n zHH;8K45B%eiI~oN)u#m}33cFI$dvM*n*)FP?%$j6r-MA0!k@f<%djb-L}AoWq2nl+ zf8@_npLYZ1KjZo5ho5yyF`=rX3;VDC;U5D3C+Pk^GcDLwkO5a#eyDZ!R70$L|4fPmit`0&d)&Dj1L`)yP9>h&W(Dz71Mok?{G$FX^8ExP}qNM4F&&Cq@@28_&-$t zyZ;{UmKmsIn*~x*!uj`~?#d<>C}d?``f{9qPaCu+{i$<8SIvVvThrki9@|j4U6Blb zJuEWFivs zUwHm;E`adcyP<3-i=_H5B>Au6$@^sb&+Fko3G(z@;Q!$M)3ae!gSVJK z0`cN%HU4GSi>DUSVt!~`D!X_(;Km%9VrA5!OnCI-{5$ZUZzVA;k+3{FUwfE6s=Igj z>__SRgZE!J|AptD;?W1kf>0nF;2+i}E41Hsb9^|6A@Tn}495Rc)2^x4asKNVOCu!B zlV;!;+9_@bGG-~GmdD2f9!m0GQvC<_zdw9?olcGz27h0Qe>iAVNKAwul(w2qK9REc z0rX#dR0FT~T~DFm>1@-vTMll6JE(O28A;Ey4optx>UrT;9x zmu(_MH8}qT`!D9o3O17TzrXys9P;lE5BKSKN&DaN{_6@<=e#kN@(-810pF0zf~X8@ zlQi%2Q9*GLfIRA1*Y@t#K>z9b=j}u3NYa1t_C~Kxc+g&S2F~9I{Ldo?^dC0K;HaQs z)1Yn08~VS-NcA6`v&ej@h8OUEX8*ea=ihg(y;P(1xEPFmPBCp6sDa`?YgjJpG{^G~ z?Ib^lkj+1zp6G;Mi1R-{M|CA?u;iuk?|Apy0`7s?l>ajYFVNWcxGtI8t}ng3^?|Mbj|y+^g#FKUF=`V1$1x)rcGarG;f(|LMUL*6s2T!=Q2k$~v97l{V>IW`@(U|+ z01!46l}PtrSpPB49Hfb~M#tIFGFgkVQ1up;WdDWdpC=>HrL}TfSlIu3d&lVY7e)%z z2an8dDV5{HHZQ6ZMM(TV(*F1Op(|W8=yF>uRS{Qgk>bvzqpcP@yE+g*6!Q;%-P7K< zWl*qjc4RP}pJ%^xaJDvV(XswR%|GF<&$&Fw01HF}GV?=o+TJlZy4rpRjouBVk&dwc zx=8iEzdA{8>v!^%=|9(F#~Y(_EKzZKCIhv6RNE@ae?k8j-hU0~KXPLW!yYr`HCPTI z4KdT@hw8ro{$YlHVNH^I*$xg`0jet!?X<_sSp@vU+G1_wkTdT5GPTh!FVz-RD8tHK z#5snz;D5d{_C&^i`!C1X2ou3JK!^^@*<%L>B+k~#jJ2qq;6V6?zhBb-HR1Z7pUuRj z#Dbx@1M^S92)Kk;4k;{@%+T%3C-^x3HK6}k*ual~9j4L>5~hHvJ(d$MkVuI2pW%~Y zN)f-=XSc0>jK3>wqsq|WYmgB*tPE08w!y?@PcrssqP-I~>deIZubC&uEi5V9q+u1j zqT0deATvd2smL7er4O%v!2B~}2pjV6XPXE;YW{)#(-m@}_qUWyfwJ+!Jy#ix z1pYHM{{a6-f1GWGu{`j9_U(4t!_!z1-<{l~sG&gde|qdQtOLvu+FX{Ct)YbUSZ%06 z!G!CTZE>LdK4(lgSXYD#dwzXUp9%}(MZGZ{AMO`;sv*zguzFP9 zP!HqiTC9mrdjUZu<{Xd}p zNc!Kw|E#T9p`rz!sj`mN#64ridwuD0HFl#R%zxI|?gnGVsi>NFhkWGKke|8#w2AZox((>m3xtp z^8Yxes944U(eSYE$4OHCr#$>M4SjuiZc1mhHJzW#{@?lMnnRYQ-sA|be`*;+d;E&{ zr9b2*VXB#X7!o*kcQg|ue;R?{J#&Fxvt~xVeLz-xx6R?4JXLc zD8t$mxo?x|KeM8RNQ_9$MJoV};GUrYj^UBWmdW?tsTTw^kupU(lJ&pC{PT%{^&dx1 zXgR)u^`FUKGZ1h4GM3_CA5^12ma!3@f5z`vf%TumGrgac?XU@53t1DweV4s`*0$PY z<9mCohhD2jJgonq2$2iiaT+U!3ai^Ril?Y+OwQ(o)sPkN_~56jC?_+}<0Df(N0`w0 z%PIbk(;oX3Ou>a z6D9kdNCR7+XzNn=x@bC~rvG>T8NR@amz2#5G(h=Y>ZFpc1_~)jI>>-Ln=9~tG|-kY z!F*Hj|0JthO%xWESbMPav{VdG-R0@&%7{;7x5Mhbw%BtMAR!S6{ik^T+5S(8jvyP& 
z5il4mtza+28Bh4=>Z3>hEWEpR<$j8PNaf!n=L7$6OfKdBG4Q#>YZ&Q{%xA4AqHi<^ zGTwt~is}~82ANF4_&4t@C-Yw2nMk3pov89E{ zLrl#a&L!D0<>9FlLS`h<$Ti6RWmPeq?1=oHn3GCgE0AD#(ZZM5|2*?_=*n@5!2d~VDw5>i*L**`5-5R4*(1Z+#Z8=@Kg#J!^sd09H1Jg& z^p*6#WB%crh3oTpfnJCkus=SPfu79n>S&&_iwXS0C2DSEsrGOStL7b6;%gx2|6+?g zboI?&#$=L4G%`aEW0nP_`POJS@+zhy+5W4knr(kA8}7-ZioVUY6Ofu8Hg&JIgF9+~ z{)++$|A(4?2H&SOSUhp0{LfhbnSK(rj7#?3tn5^g(VAE6ojb}YF4~%Cp!!cAN%)6x ztv2R?Nwf?l#MB(+I44a7flm^?VyLN_dt;l0IvqY#E#VjN2-&tg3sIZa zr&OJZ+YUxH$_@CRPt0N(8la^dI<%v&!D5TwjeF-abqI$3)G7ptbsB# ztNdyFs1M_XLiX81IXM~uusCWv&Zm{Xa$S>EGCzptM57(jnnfXSE8_3vuTbnIy@f1()x1Y0$y{tjG5gCIxNE zQL;C+S8nO9R#4*e*z+qbthiQePZs z+ywsE727O==^{ay*99IDm?*Ijx>FZM@qe}sh18g~C=2#qj(j*5rL&NpA3`vpJ^OF} zwetCeEuIqp5Ac5+zK=xNhO8~P|NZ_x=X3*oX0D!7SR1F*#8cG(jjL&?-B_h$FjfQc z3uOLh(0}6PJtHa_$`%tyKGY8}xWGS5lGh-^G!KK^sO+U>kwQbaI9@_`xNIBYMhBA> zAuzqYWvG80DvUZbSu1!lFU*84+G(SO2J{~)|87nK|C#dtK>jQD7eNyYpUI5{{*R>p z{a9Od>Be+~^YM>7A!j`1tu1y<$hk6R001W}qC+HB~yc7Z8# zBQJCXysPgOx7gUje^qj53g8!~7Bkxl-5Ch`Hn|X9y+iw7 z`cGz4<|yg@>*1({ejLbu(Qli8|EHt*VX>#-(ezA%pt#P4+C!W2|1A9Wqa`T0G(%k; zVgEDUe^LCOB9~F~&UwKUxJg+HU8-Y0CMf^&_-}RU9!w226v35GIjF2*3M;y5vI%S* zN(QLr*mRnPHp2ZE+ssK$@SVLGf<|QpgkFl^flo)${THSGL=DPIm(D*|e|-V%xS=u! z4p9rQ@;jQI!u~4&dR;=_)xwif)2Sl5wRU5Bnz~a=Q(eu*C>Hu#|9MkiR1R(wQ(@g1 zI?7TpH^u&+3tuz>{k%|#?j(+`)@1k-=|Hjp-k6cZ;I9S{RrU*lq~@RLr`sSbY-tW( zsKR1p4J$7h|A*3l#Miew{-yu?U~8Gf`XPngEkdFaBO#Ulg8ci$O_uhZ)p;WKoul+) zqFb%$qO?&?;}EB`SWy{)|7Y(c%>M!Z&xy?7sx&4_R?wf^=_QZ6@h4uhY``hr`Qrl=goW zK*bE^AKEN-NrwK5-Xn74!@~JZlKzXM(~BSc>*BK$zM1!0wiJ7A){{pgTWaM1ZnE%7 z8{*^scd7pKqsg{9b?m@dW1xfx8oFpDuOXX%N^M@&36Q8A_=ko&&5esvhnQ+dpVM|D_<5EZC>#9G z()s80ywQ2M{~hyxxGq47Y~5M~`cJ9ctyakY`W8-z?O;42MBS^ov9kH+8ND6uJuoE= zyOMGHOd&m2+sS86OCU|81OoBBBLM^ZuK;I0Q{Ncnm&(bh*oJtZusXjgb)-R<)7}`>e8itYp9dhRqaFbUt)d2sGr2mW3fBcPc|NC&ZALW1U zv#s{U7WmHFjXy`sIi1W@)&QXTeut!v5}1)J>mBj_3-h0^CYt;4YGKOIbOf_>4e2>V zQ^_>HMQ8&4Iq1JQCG-C@`=l^8>g#eMj2^=Z>4_NL(6HCLOKAxEe^%=9(`@XAp_V?m zi42(k)v*4PVKot+qAmo2(Td8y1OJ(tf39E2aC~2t`yFefou7KTZa4NP?4JNNLAM`! z0wV(Q@6WB+g1rvO9a>+0nE93sMjZQlTPXjt#jJC4)Zr?%)j@6f@S+tKh9M+4P?a{y z4xYgXg+l&o=`)G{*}*>__=kL@QL_ElA*55scBAp1aJ12k^Ks`(8EF3H9h~Whl`^ax z5C(or_&=2Y`SR+gS=FnDSiN!C*x%INv(tC)1ekvez;SLs@ed#TSRyRsCw9!Zu-D8_ z8SNQ+vHFK}{uu_Rh>TH6=-hs5_@ETw^$Kf>l7L`h+V+3;U!o!KviYv^P$ROfP`3Z7 zG9dU^{~1%)F2|4isaFQS7rHfAW!#wl*7A7SBkn1AfGOwDsEGGqUA-A=I%Hkgpy z5rgNSstGBK4`VYmhVIda{nx!MX43uFQcZFy%gM)`Q+ta+7U{QLGKNnA?!cfGK^j~D_ z;{BIR(1=*z|495lKNwk>=DDE%&|&!cd;Y0I%gkExg58H`4n|!Ucy2UO;(wOr-)Hx8 zneym~wx5GWN@=zVx|04*gCME>mziOjk_Hp}KZ$SmyUU}_Rx5szX&D`H9qQ1fjWm>u z4B!Dk@qeWHPv)!+xQ*ng7C7oV;rZua^@QXZ1I3^%m}H|}ZT`DgS>XrPc2O+^}} zrotMy$_}8Y6;l5X@DC;WkKdS#u{8eiSqioh?g->nc?BTFga3KuOC@s{gw$q3AOIfb zqD>bo^6kgn2w|s$cd&M6&xzk2(?rXj z#V)vs^It1oo`r#IpYBC@MdZ_zA%uew<$vD4Kr;UV3YpRxY) zUFjSsLU{i9Y*<&4bRaw1YN%;TiII^@Dre;z^n?&l{vTtB{-byj_Fw+*;n6#w|5#P- z!t;-W|AYO{mf(NZ4x}Rm-Owhn5K;bT$^3))hkx`+7qfOA9*_7r!`EQ`&*`89{s9*` zv3&xKWuRQ|nQH7kODAlO@8Ex;am{kQ*0 z__JC)_Ed0{ikDx3Bjx`waCBVX{(k%2Vw``EWq_MFal^p=Yr_a*JpcH)rZXMmRv?Q1 z4E!Id{$pXEfk<&M=Ko0iKbyx-7D6)z=s&XluRUXy%)5OS7NyP`Iv6njg!vz6>mPb@ zXS&5>eiHP54c~sBjb@)|uD!CInt$Rz1BUrW-yZp0-LY~$_@67ckKY+C>A#T7KiHF* z9`UX$<$dq%v-#GB6JoiZX^vrj26uP<5cOQtUS#laKTG+J?e5MN=j3_F@A^1jss1zG z9)bU7dJ70-QvVN?|H?C)eFO6kasR1GgX<2*=VyXh5FZ2mharE*c7JadywroU6D{62 z|8DE3$Tc+gGwAvK^*7xADgTeG|BJ1xL5NBM%NcO!$(QhdY?swnQ~n>!e}?_n7WI&A zNQC!ayoKNhlFUB}3b_Bv`^)>%O`fB2YaNr`v+tCjTl$!9iM;mhJ|giyzurBFe101I z&vL@x3MEZS|AGAbfBFxVe_v;rZalB78u2fcyerR8o`1U>_n+plwVj608? 
zv`#-7`-cJCX6QeKzt4x=nE#CDAKeRbkR*Bf&wXp!U;gL6_Fr#g|kbj@pg!w->|Mlja3A|>U|B~&$Y=D1g@dW#SEOnT2yOQeQOhcy501>z46ocuOHtuHh=y8>Uw?K z=eqXB7oR`BZ2nyP_1*2yb+;%rr-}EY{{84`>4OUwuE3V(@b&ve#V3Siw}h9<^G=m!7hn07Kj`NkcHcf) z_g+TB4!!5O+-jlCl}*n3_U+%kefy?er_wXG9890NGc_nXX=QZj`L*XS3CaosFCAZZ zC2jKQy>_QIO)kp!-m%59IL9hKH*(Xdr72kpGK340vZIsZ z*CcLOx^csloIKa;O_P#0jorLy#O7?9l*1g46XIeM*2k@nh)s`LpBS|+F>L+DuyrZR;}X}!Z&(+Tur@YfbzH)# z)#e$>3UY@hX; z=SQVaiOin2ENyOZs?YMQDIu8?Vsorwa`-EAMz7rD6uQ|pG{biBX8X`|-+4JxW^bG| zKgDla%A$b9^X9Bt;GZzVe}&t^ESK3?p7YZ9(=sReZ*-iq$ke*2`?&bl$2d!=t>0t(`n9(s@#@i&wha z#551rwG+oDyE;eM@uzy&ZSb^8;E&!s!FrCz@E8lL(WYjW-bO3Dj8}VcBfShGy!2Oj z>4i_#4))Sn;WcFGB(24s8X@{Rw{6uXak*S=9bFBzK?LzZLDfl7Nr}Z$kdtH5=?of; zP7oMUN8s-OV$1BF*zQ2ZT}pux>b2J)vMkrm**G6uEZFWTx!ptDS3e_zL?qnq@o{(h z;Vfr|)$dP4RopEo;3vivX&ic3fb9-b{(%Fh_GsvbaHHZb#}#xc&M$xNEQ`DRYmI#B zl8FDo-TwG=Xg(jKBFxemFyaddf|THH2ju>M z8>O3-(EGbP4Ff&Zw8sH-;s3;4!0vD~DV@U`V~h$j2sr#`j~*U_M2!^gZAN=l)16C1IbXXDJtp^a_01SK?gF@fsJjNapBJbV z*Z0T-Cnl0JGe8ZJ+r7;0@P5!2P(vW@VjO~8rqcm;(*5)Mf4G0j?tsGcQA3-Ndxnl5 zI!fVgb!PUqwwx&*WU{bdIC9dXOX5|HAfKl{oC#tH!;GPM#AmB-)rk(fF9xF?ugl*OoVUu65K7TXL}xj z-dAAU3CNdO&Rfj9Zv-*!7Tnuyo0w<(;ch_<;7$zm=?LkICm^z`k&3$nDC>ti4v> z;ojH!Rf0RAKc7Z2mC*q1gp&K6#?jQ30p0q|iLIj#++3?w{qeB|#t7G4W4pu0$EQ^T z02Tb_r+0%jy-*HG><&h*gAzIRAu+eA2U#t;5?vbAo;7IrjBCB^UFsoT!r=%VeMd*6 zGljd|a;VV7cIo?`b<>|4;<{@bckvyBrtU=%VD~)ay~}`Y@Z~^)&?vn7QO)k71wHZuE)8-(IJ4xKoGrS*g|7{NoM$A;0H#Y1; zR@ugYeusFr^?Nrn}$U#2*UxAK&w7 zCj8LNI%rGpSgP(ZTXoZcbnv_`Oq3mW^JgM8KSb3X?U?{}2W}*&8dB`+aA_tyf1I@q4>fCR~%+Jq@l8MqgfCg}TQZQoF<3em3wY zx+vT~3VT%ZKgZpQG<9Pn%7?gbwmBT}rE!oRH2;wdS$K9DH7v;nqsHCHX*Eig1Ln5d-y?K^s=LN^&uHDQHh(0- zxZ7{HB8mxAwo%#8V{ctamb2`*TfKk}5?&e$sUe{)Jbf^&NRg1a{9~U$CMqu{%^9?}E4sg*#dDP!0tAFzOc* zZ@_g&z0$Y~9CzCQr!8k7!JT9cwvyQ$oI|w`misB(ZPZY2O?d0T?hty9B?w328ZJTA z9nqBUOutnC$6YMdx98M=4D?qJtiGmi_81`if#nz*Q( ziu>Z@ZskcDpQ)n?mZ(wjQn`PFUO7y$Q1?j5{b*Si4AX!+F_|iG;6fq?DMz=}6a}X- z874LEaNGslt;Fu>Ebdc=g)2`Y_p8ik$97MA+-3X2)e#0eySDVdkGoso1?86nr;4LVT0Xs>0`U)@gR)D6NSwuyZ9z)>8e`ew2<0>-QnsEOOnMw z71HDG$7S7TDY>6u^&bcE3^Y>ICBgk0;ka){2z=++4(b&Zn>9_QehEWl^#9=Ac200< zL~z_C^fJg&mXK`|t_S)cEtie#GQjn(tcLJ$S7vued_z-`;{NwL#tcfVfx2sq`~LzD zW8}dkM<%un*geG(+)h3zdo(@2n;IDa2^Giy_`>1f(Nj6!Jpc)sH_;kl!@?os0d_uw(1nvnyKhYBUf-2{@59C!7-2cO7t#<>5x z7f#N<0JnVe{HmP|=uBl4e(8Zc(>;d!7vV+zB#*;Q8!ApGH zg}Cp=f8I;#9zVRCd3-P%p%y-T=?vkxdus8kXV~uf4y+Z=C3TNdyC>o65Q#*A6|gg% zOJ@*BGc3jn#8x^-9m#P2MPn7MjTxe)PmU<_8q|za)#}cq-!pKns8&Ol*L;^8cdwuC zHCj}U<1V+i8-AqGP$pn4$u4Q!7vpZ%uTzuM9Z|Tq(^TB$=LCp2A}(DG{1=A2BKW!g z**)nSIqI|qjJqAGBM@;@)r|(?zTCy&OzJq82GLJGQf>MzO6S5}{wP+CDM&2O{OI<&ZEj!VM-Vh4759BpOoXDd4(CTzB-R zRTVx2E=vWR;XG{jOm)p>Q@H<3usbknQ1%{ejqot|9));WCVK6#i`9U*ui0f1wefT0 z5w~B8I~na%+w*&Cy&;ABkJGa7NB_+~$7Y?6d-{pcABUo$790sj^A%8A5uHB|vFse- zyOqMRUKXFN-j1lcBd~kUwo*4qVGS{I4XIW$+T_Ng>K;RkJW^3fyaadqkV6N849`4V zaH%1Mfu0^GdS!9nyU%C@nL2tHyk$QmaToC~_wNB6k%{?ZByryd5cieXJpp&pkK?{? 
z5>8B7jYt(kR7cN;dk^a7R}>P#aNqvh?x0~BJvTZW(an3!edxHuDOS2%^K7qC>X6E3V479tP*8c zx+vT&;7*$N;WP4bcU0XmV0VDHODj4N@K@aB%@vaQL0x8dcpA(<3s(>d3P+-X0LlD= z?GB~M3~i1@SyCx%4|z}?L96~ui( z?%#Pb%`QBe?Z+#YBM0REGQDiB$AW`wbz@+WXX+U<((mjTVJq;fs74TXN%{4qFq5ea z?^Q+-L|_xdSMs<;1o}#|9cNm5WCRn8wdGxxYT>RTuN>4nNhbIjRf4= zt@%f_>Eu|F(_QJ$`Mv6yuD$KN>|M2`uA1?^)wOqje}C)bYi*AlgzzPDTdUHS=UmX= z>lXV(@6TCD+y&;JE;G1FhCLG^;fLJvbP9LtzYRE7oW2)Qxj%}*m&Sb;-2V5Fx01_R zbHtb09iDLEKwiKn^xDoGHdoME7T*Rxs#MJc?4CJy2$I5`K-~B9EcF$2(L7P%$n>*e z#8I$&ex+*wjYLi@IN&0!qyM)5*>q*jX!zf>{LFo`$9|Ig>z?1=-#9GvsWe3mVGs8@ zgWdB%eAs4#@re}ft+N1h+cV6ipigncUyAgY}{`S3Ggv@a2=GkH@XQ?+AT#D`PY|AVD{Kh%K`dn`jwmXcEZENX%47h)19CztT2Zy2MlQ2~JK^5j7z`b$u z9^cFsSN63^aVK#ML5JyNB&UGFaNH%|X33pwk^oE@)_?H)v(NESH4=21lk&mjiO6C% zWeyLX-~!%3c&!jFGb)3Vj>gQa2qcin{jX+`Y-Q9;*&UFEr3z|M-z)3h z4*vQ5_s#IcG-=$Wg5XK!AJBhT5eJy0#O{FOF7?*%u0h-d4GtHoHHPNssB?JmMhm>u z!EVaoxWfDc);T4MGJ;RxPQdPI?5?=D(hEhX1MWn3Whcz8rlhm@TVGF=Dt|H>R!0(T z8qGA*^OYaqt>1qwR_so6{eZg1BEIsAGe20A{u4@|hDsfEwR62^Byr!yNq+f=VTL z&G@w&G#=3HDW!JRC=C@3Gs+GGvSW3Cawk9b#}9$Z48*rqf0PGIkW$t7{2TNx6Y|E9#U4&e~z2Uke zZ1+S{zo;P!cRTjZ(MUvAQ*}p~(-=U-oOdPqSYS3RolyoCUTKQaLmDEc!kLSRCW z2J7FHz(7xqiWe)~_3ki(xC@?t>K)+jsN!x4*LE(({mXDCME-#O!xX&tBq-c}LJvy_ zT4WE#-L}2csUepl?&XfAVfB~%>D(*POgFfIW#7Xv>e`mp9sT+Fg)ptIrIEdZgT3G& z;(T+1#;RY}2y+@q4jxl?uV?o0XU?%mZxrGdZd6tebO(XLh(McFuJqkaU-i~`SfY;< zcQX7epcwh(gxZ3Bz`a4;Bc=cR(G)@5Bd)uK`3ISVAyQ&@ume1j%{hqLAI{BuFrn3f|qc6Z4O zO^-J>=WXd<1OH#Qd~MdRk|co(_yf;r*0k+{l(D);v4%YvS27H5fEtl zVU68&EtLLv(%xU+5I(a02Bcl|%Fm3sicR+;{DR<%1; zuCu%G{4@LWmXqF345_-u>ri)HKNm5ixZ6ClDj~s_*d4I`)1qFJJrVk-%lqtIQfu`nGfuI_FtRX6@ z$DE?wOtidH0uLQhS$7DOlwB4X-y^^eHK*m*_cu3xU+J%=aBqJ$ch+^6W4mV}0~DB& z!YpU1?veM-IYZpkSA^{jIPP*TUrt2rzmzNluBrNV7!HRWI~Ad~ef`xH)#0$mA)yx{Is( zg0(UYZq7>#!FJEJU&pO)tLujS*Y`Jbp^)&@duiO)x&L9eb5x?t?)l;vxFjgM!`0_? 
zd4a4PLo)+ASoQVw{2skFoBgjzd9IF27^5gJi75is02TKoS!O8Qhmh;b(zm?mNVxwFk&n|i+9%jXTH{ve%yG)XlRBcvu*V+;paIRtf&(Qwu_N&`U& zj^xOGFFZ63OwzWuoC7DXXDhN3U(e{hx}?}m+i6KuVc4VhD3fWb*HZg9L2)c)&Uo!Wkq_WZ)ee}(Iu+h3-~YnO@?CFNlw`KZB( zR9_n3#Y^%aka|#z+HRD4FS;{NzHyc&{8gmko^WcDft<;gQyu z4UV8a-v!>yJ2$1>c0m<|E+%O#9etaM!bxYDy*hRio{;J$<*8}v!~6<`MQ#-YlnK;d zvTm+aHn0sf1$(8tqLwWWIk3r6Aq?jZ@hvrF_z$4};JQaWf^A)9gh*=sk>_XRscDoo zA^rg$5UB{^3(doHa^Lv#y`O)5G#rcE31JB;blC*=&S-5;o@et}>UVmvWJt1fJ=zfWM^(;3tBlx%-jEJuxTF{O z>OVbn0zdDIdrz1V6yXI%WBU3{QmGJ3kv)<^v@Gc;<1v&QJJ@Wmn1J-|5 zEG&#*G=c?s}6!S+S?@KIbX>#@(m!|9Qd@C!V16pIN71|J8R# zj)8F93d)x}D02Sx_Uc2Lpud+hF#q%u z{pymqi#%+3N$LIz$9=Qd5Jw^jv9i$4u_nW1D-#MLmOncP8-*MlgP@8owQ{#9Qw+^@ zaNH$Fw_tflhFVb(V#?drA3cNTAL%^UwD;p!uqe(ksu@R7YQ`C3TO|{a3ScNWRdBCZ}M{ z_p84|RIpU7$5o(rMIPDtqk}3=XqzxI`WV!w?DBgGQ;L>TQ#f8RQ;^i4G7y^VE(PFUaZH$sqmF z*cA@ddByGW-qE?9w0Zcih^4bm1O3a{k5P{eRURU7fAS z0R79ZD{olNX1cS_-~OMm9Sjik0r!vnKfCn>!ax^B7R}tQPz@D=|A(5X3jueFMbeEbIPl%6wxI#Y5~IgFEp?Jez~XBTW9}7N!%A!%*}0iomK6PrpVO$S%<{#`FYEj z@C9Q$5@oozW8TYQ-+%ryjfP}(*LeS>D`HqrKuTcul;}T0mhcW{2-Rv2c^l#bAK-4O zy2o^553mOZNM+nW!!oCI&b%#Za4k^&pN|10-wj2Hw21`9-MS)0c-WLLeRFetBH&KI z|KpLJ)=}5p*lxl`#uNYP3dZx#@Y{XMV|||#Y!?gZOC zTOYB5qS4W1Qrz41JA#b7lAx-r52!(E_q@vmMKmg1$tdVzMjqHbbu4T)-r0uVMDrD0 zy!YkumwLCFeK*vk;x0=3Ti4cG50376{uOXeIDgO`b zzczH!IT7r4hJ|WL&{S$_lS)u=-xWtc6r2hd2ceP}(0|aRUpdDcf($135A46-^d8YV z?XDpeciHtlU}|h)%&*?!O)#tVME6-^+#C3Rbe_@(K2>*pKJI5J8{#gKx}!4-tTWyb z{~LE{eYS8pNxSL9y*TV{&uZP@h6}|p-yi>EN&PM(Ay&0-!KEnPV=L6N3O+U8gN^x(i4=W&yd&6;GO|t>q+a7~`qz8+n>aGX;KX7!I z$)+*%5NU7uGE&?xEFVG_ld%749R-J8bjALm%Wx-q8D!9{;r)NbedFiC{b@k| zIq1D-@JiyoG8%~c%IY5LoBDIEuCrp|xXX~))>Z|M3F6(kJ# z|8;kdbM7KJ^5iS`tdY<-qoS?zd%#8K0o*Ns z^5eA|A5w7_ss01D2k`&M^q*t?>dV4q`VaVj@cs+WKWnf5wz_*gb~pmuzx~Nmi3dR3 zX}2;gc3$%paR0dOk&Al!bQ>Sy{TKLuFzz-R*0pN?;ckC4-_@z9M`}|4j|WwE4RPPK z*K-51*EiWi+-3f{Z*pDz#}GR3>SknOHm-Zb{vR>?eVqOn4(LDMUSD5L5PT`_go^vN zSQokw{Qh{V?(u*3U-ThfsF1>)Xxn$CTY21w1kcaCqt?Yc5+U$k#}9`ajnw~hcSqtD zl%Z`~SwP|brMS1mWdt|c9LHU#xbO6X#?Pt|R~mPb#(i&2UF^byCPR#SJO1AAI;ZE# zAjDlaYT;$MD}9e?G`>Tyh5HBpPybd13a(5vaN9h9yY2OqBVqoL_v&}Itdqa6HFfOV z5jQBCjs8#E<$FDC>?ow>m|v2rvgAPt+yf&wM)o%M^*mSRf4HL<0e>a-EWYkWHgx#H zbvID=c!a3faqQ2Mb(eo* zx{9H5G&)AOr|0X83rBO7C&OzUGYs+l_+`m#dEQ9Gb77FU?g(%vIPRi3V${7NiT@`} zsqo(qcc$KRuy{w1OCAoY{IIFi*AU}Qil24lE=Z#3R)2l`lkz{W2LI2iwHmM|THfu3 z$7?DOESw2+UC3|AF8c|al?8zFW;t)Fxi6$ZZ%G!Mm@%&S^pyq494n6yz zmg0}CRNMvEJ(~NWN!yooQ*oCmAKL&bEA#(A-O-7;C|iPi`_KRL;laica0HaZUDjp5 zK?To0F>Ps)-T(GJW|6n{X&_VI9eRy#ZbnAtNB4Msdr0BlAnqF-hK|h?2fl{5%d<4S zcePuOJScKYjF$R;N;e)ye$x5J3UF^y|MMcJD{3ZsGzNz!5>dD}u|XNwhM?|f$>|N{ zmlA*Xx6}OyPdH+a8i{HQ5ufv;zpoKcM>iKG4z^h#sk_#t9SyUtEWmN!doeA(`r~81 zG~s;{%s&+F?SuCq6#D3dH10Cs|LNBXN~Mz&?)Ie}c&E@|sC(T0DJ`<6vhVT7cXDKj z)q+E?|N6r8Xzb`~{QW_-{*ol_dujprpDVi>vHvGgueCII$)f`2wH^1m8zs26>8HTm z3;rLB`|rm4uYtP9LmN5oPy?m^JX8q`VUzV&R&OmOpzi3cZuPrO?WzG;L(z`;0)l|~ zr{=t%)MnJ3?Gq9QaBmms=Bp-D7IXscM7+BCfu2+Br+e0&A17?+Jy6|l_))&WP=cl4!fn;FNWq^VE8p;YpbHLXN;1h`s*C=H=+&v&weQ2 zfKR5)tqiS^i%=h~d&Kk4G`WaDpA9kY|Ldo)%_eZN;MjDsR?b@}u4~r($qHW}(SL@> z{6CccSyK1-zRWc{g7W_~RVg{lxDekR#Wrz+x}*BbWm|_LuboVkTz}mZetZO;e@qu1 zp3(j2P?oyMv{Rij|IZLJ6%^ayOwB*Nxo~y$*TE(2`|Hb<{xwx*pJceV8;%Iq9jy}^ zY>4mv(fXY7KllB@Z@n4``!BgPUNWz!Y@m!*F;V4%uB3<&eR>{MP@E ztv8Q{a{vGTuNh=5vWtxD`_@L$$W97bLM2N@r4lJh&DfWeHKk;1*-{~?Bw5N@AxR}$ zb}blVH#5KIHFVB-e}A9<=5|hvcU}&*$TDe;^V*M^)*H^e$Ejh`YXP=c0l7 zJn%=dt;Y5bMXpz{t6UDCezPjDO{A^rpUW`wG^!jLcNDd!X<9CTyu**Cb+P9+iE=rU z_80g6)B}V3qkl$mZ;bkf@Cu$jofsz{BnRGVTqZRpS##>m^0FE22SHmz4C(9XxZg>t0hx1Kil85(WHt>~XA+(@Po zT@2=5`TNZ-lT8=-?8ZOk(JxGX(V&YuADKViJX=>PmL?)EJD1L;BibA>v)HSsNM 
z2Voe8Dr?tjUQI-T2jS5!c|<6Omjzbu$uMuR+Y6^3&;?dLoCBVek(OKX`$5y)o(tbB zUyNFPy=#AgZ5}TbBhJ9xV#G{n4E@htT*+o=S3><`^LVoy^liFdG3>tcla5HL$$xMs zEA`JUHB@wjs3Ou&LRpBxYn0&SBa1_j&Y$cWbrk}D-mg0WKEeSA-IPUdly;lluZaL`X5L$d`xRx z!JWMRtZ}sd0|~8vK>z&ENdV~q{m*LL(F*S5Dt%Z0eV*C18X)^%5LVE>L#7Ts7~*vp z$T3a2)Wtm5yeWnYBXMa95xdbrF974R^^_02H14I)0G1Qjg?&i+s^5Y$lv+bO5Ec= zxD!VIhlAe@x=+$hrRrvG5%fL0>(1Zy=Fv_#%BGqi{g%h=D60Pv($cW0Hs(OKpZl;h z2YpmwE6=x?TlqPVETn{zuX+EPvv|KN|)icu+S3_1Ox5U%k)1}S*!y!oPw5a)ovJ~Hk&Fex7`Ae4KKgH0$py=@E}x0 z2r+Q~15XjF*EGw^E4W*_6)0$a4KDnv|1m8BGm;}B4{=9O|DgJxzjohL`J<65{;!4N zrUYI!MZ}@;V@NVy-L>vZ{n_uk0u-gQtE@4^77_t5(gmTnUYgTb5ZUTGyZcicU zCC6;yX`Y|ojgod5FV+QU`Em$yB6&9SYB$4ZY5j0vqm*Ed4ierj<`9PRSN|g}2Kpbu zD(-)z@%N_%gIDGEvueXC1L)>!9T-&MD6NeT7#nx zJlMRdym|BRJjbh+p3JCm8{|+j2`v}P_&CzCYWMZIFNkOyqh+A|9~Aet43_eH_}yXa zs98qMxNNhVs>kl|S26U@tNyR2XIA5mo*eHd*Z=y8e?{vam^~~0z&1DixjNHO5}yj8 z!XkKTIsW-bQA?as?-2WxORujng;0So;vNI&89sx}LTb)2hEpH!Hw#GF z(op{wCoFpovVh*`S+Y(r0>PYcYPxTN)qKYvl4GEp-8xEwwQd{EB-2QkT%acZ_=k1%(9lF)P1ByKb-Rkv}8&Co*%gvO{jda`nPUTcKkC*NHZ=$!IG+rjA-0G12?_ zTnv})N9hN`N{;jbkU`tLWrN4&)rBk4ve`aOnMoykPcD-C3s zW!Z$Ge`fT5a0Y(ssEt^?x40(I|I~S*4YGSE?)Lxb|6Eihh6p~i^>Jyexjx?V2cPVa zpFlC0pX`sUqjq)v?!gD7(samnUdf7%-fYx`fw1T#H9`epZe0UBy@YopsDEAssKGr9 z>P5fL4CDLIX;cl+)=60Pkr;804BRbYybm;T%m|wk4f;Rlq)e_W0*_b0=U@L9WBwI0 zCQ;4jRPydNJs1;_tZWxWL+*Vl_@ZnZ3iVHP>D^sdx`(M*&wjRLIaVH5392z~BR#T@ zIkX^kq40~LbGGRe*@K~fe)Jo?b>)X+To$rA(N``FyQYu@ajh|!6Yw`c!P6RiOrMR2WW2IPSExZ>tGxR@uWidqw$C{=8>;L-n zBGJPVaJNlwFs$p8ol1{31~Z*9)376?8_Q5eEusHI_r6mPiz@K@meY_ApZM6VP?2ig zg;pT3v^O_J=bDogoN0p{cW))iTM{7dNNgUxt32)+ABQXl6pbsE<8=Z`H~{xXZ>w01 zyIyx%o;%I#JXa8+F7*z_z};@#$%*1lJTOI!{!h@~xN8)5QZX0AA!N~}DEq*n*!G64 z-QtsbOIh}jv``^DjXU=Yoffb3**reO@Zr(wd`bijr)Cr5u)c$%9S3no^kvLZbWySK zMN7(^?a>V&NSde0PoXzc(;2>EY`vmQ#j=Q5LN=h*ZD@|5e_rvwJDPx2iBN)Afoi&i z1pS}2cUVx|?QZV?7kz}dg1cRfdqnj=osTWqlj2yKZ8hwDw@@8H|C5RL9%#D$px|zW z{ln;om$JhO&< z@k6f|f%*sh zU+wsCngxrM05KWwh}J(hA8>&G9os-2;f7sT9C2Cke+_-Oe{j|R)p0op=JPUr4G98c zfIA_u*)6s+aQ`^D-I=2;$KIvT17AqKZQxhR)Y_~4V+iUW0#D;o=VCAFf3JV!^Ct1W zSwSH@yy&S6zSt(`3i@-9Fs1=dX|RP6cP(UeCRO!tD$NW0Uw}L5{rL%FB@SN^?f>*v z>H_ZHeK~_04E@j!VS+$R{D1sk973!8A66l~_!%sM@R)8`zc?^8fU|qZMpZZg^DlfN z57a-rhRJwMkB`|4EY18uW1lWA=)u!|lgPDO6B+$~`aht5R$a^Vflo}pb~*p_Qkkq>IO_lEx_ICIwK!FQCdW?1 zqyG2G+LrUeY~XYv$lGr@%BE+GfGm;We~11LRy}|hxh^4vltTUU zU)V+osE? 
z7ym4?o*&iAGK;18ZkWOC6h=zJ(}$q`S;76o+XT$H4LpOaYmwS0q5q9L8f)5A6|^54 zo{UH1u4%7cX4<_8f2vPmy&}k?K`UM9{}8m|mrNda?Yc^zA6sN%=%49|HeDdsLv>N;w1=#Dze&n5 z9{N92m~gDibpYczqyG~Ij0yQ$z;@UGhzXBY3H;SH3nHP^*xBDf*mK|9K^CY-XuA72 zq~2s5BFKAmfoawM-eQ$i{n!7V9>WgU(Z6v=f8!px^;qduxc_aCEiY*l9lXZ%jl3s(XTq*vE`C!RaWevJ5LrG$I2FM=k)BxD=6~m3 z#56Y9NS-!OZ^5j>LJx64^*@07|BHKjUmyUkrWOBJVW;`C(3wmqMT+b~)m=FRqFYI5 z-1Yl|LhRvRnyj42BE|%Wrmcbe&>V9bXFYXqa zGZsU02OW%E@nqzTuvRAs^JuJ&OpL{Fs*;%?LhFD|5iQ7wi_fP+a zCb~NUUWXi@+d*n?aIFsn^>vC5L;w7_BIU_(jCO-9qJHPE|9!*OAP&%DC#&qNTh%{z zZIiUfcn*k?z#iN{J=8z6hD3|3FR2ITL7Bnm{|L+TBap#A~=&tXQ~5uz-H z=m`)JQI#4kZ6qD)pH=?P7f!1~h(mMo=@?S^SQh%CLFF<@7(|(#yv8BX zct+ecZkLXKCk+G%ICvzaF8tD(VhV}GxfvoZt8tH@|0&K0)09TMnEQxaBJAvd`)A;8 z6&$M^u`B-f%0S5x76ke~SR@|K(V%}`C&TfnRm!!-URVE<3Q6a1%=yNy+@I+D%a1zn zZ3YK9S%13QVR1e`vqOz(q9NpAVY|kv<{?9amjwrU5#K65R$-&+ngw^2SALah>2HJa`${A*y0t8tU zcT1M^pXuxP$u+Sd6Q4<6ZU9XTaXnp=qZMSNL@o*BZ`>pFf5N?`EKxe0)53PTmxlbT(jj+g%`b0InIP)z$iu-T5x;_(69-&VePT**SOxol` zFD_m6>h$?@n@#`VUYr{o21_GPua+@E{v;-D$nr6PVn4QMGGPgEN5!_qKIEe}<@aY> zX`|?-6oPn1!qx`~u;VzOx3`hue^+TT{e)ZL|Eyf<47`Rc?H>|Kk;z2?Xnvt_N2fm9 zzq<6;x%$DJeFyFZ!FY?-)WDla)*bt9L;pX73xT-D75~>YfjxB3ge+F*|A=kruhp+j z*}M@s|0fX9TfzOmGF*2gGHsOU{?7#8uLTjPx}3*tl~Vg41|j9bzF zEL%P!wA164d^hgAaF{;Z-<%uUi=P^(?**|C6$kFvqYLdnak?($wC*Ke9D%kJE$+IX zh38AbuCz*j?Jpy-`eB7fJM?Fuf)FY_gqV_{{<)6wf95sf8sa7Cw{;dwjJP=wr?Pis z2JZGm%>!0$XI;n1@oFiG+$av=He}vCEfoS3V98In5sB6M2lPMoi0d8jVM`hplA=-2W@kKX>!)`Ec9SwT_8jJ2$<`G#(*>|4U8B48n2-IlYs; z^xW!LQu&b`m)A_mf&U%!KjQaddn?Ysj>S7P7VGsD?Cmd2&i3d8RMcPpnX~7>c1uDu zy)6MWjnahk)GStuPLReG9DIF)YALBt;Gv@YA1TKtC%1yem=!jvlI4I^{!b@&C>~G4 zFE7zC&j-Pc&%!CYkci@LIUjdCxcJan*J)-vY%x*8J#+)UHG8)&wifjECoNXujzIqm zxc?kxU0ojsB+f<-C#Nj|XOUqp(&onv-vNby#$Df21uaVx3mbh;UsX^)wq8M72nl2n z71V=ci%uV`lPEQtoWKIF1PXC%@!~|wO8xWd8;_FX6PNo4G2}BJ;z(PD`Uj1ByvC}Z zCbobB{ZFaFGy-wg1l0e&ac9M)?w{2m2e^B1 z=%2096M2z!s$iy$Nj$La1I)G<`ezwcd04fGSdr&FGU9?+Cxk`gt~G~PHXjts*89j2 z_1N`IWHJ3x2{Y27Ot^}rHb#l*S`u!c`k(y$KJ>JY(-aQ|?soqWi2n{Ca1UR~%^s=5 z!~E+?DU>~^|K0DWDKxDtIjS;bSmE*ei@U7}$Edx)xM;Y8Zc`8RKP7`!ljE*+V&l;N zfp?AY6w_KX?)q94;sW*0O8;jYgrESCFY6^U>i@ds;FiTAt|?g4SADLI9o7FB#P(D$ ziE)eTRqZ&oW(^Vbzq9sX*FdvWug`PqJN_8Bp0%5w{(w8^_Tu_&PAZ} zFK$UEn99gnbkmFmJAYe3+@r?vQZ_7Iul@#H7-WFL3zlG&V;gnii=rU`<^OEX$#E)l z>37iQ6a=&c1NZNV>VG(NHydM-T+2kDb87-~*4vb@T}kZ=f!Std(Fxvh5xu zLy_~Z{s%*mH;{}-R98Wm!>jbM2-`jhi}&wvQy z9We(rb7iFHj^9s?jmVodM0a*zZYOh1v(�S1F662E#avS%nZMXY%M8h)$Pie(sbA z6!*XWw3Cd*7kRJV7yV1yui~Ji!{Gk_Vr!!R6sXC2R1U){rtLL}Uf@bcB2Fwa>K_*N z;yWzOTw+e8_TCEy#hDQIi1L5h-0o_w%Kq)WqNyuSea??WWJNiV2>rn!lCk(1et zE?A-^!eL6rU5AQzUIHFdD4HM@ov zeD|pSSruX1^T8iZ%~<};9`E|pr)%D^K$MEoRV*vf9br`kt3#%MnDCyfxh;_ptm!7F(4zqQELD^S&958`jJ>#Bpqj2x$JLJay&j}#%>s&-?M@O(}ro$J&Lr#akV z+vSXdw^Vb_EY81g-}VQZ>*YMuO^3dCyP7}u>2Cu4v%7V-*4WYkNAWB?%0EnX>D1sYmBuXH63P#<84^$7%oSpVhYa}kT7D9N`e%~G(-f)t8+T3LrMFgD8bJ?Q7&f&2VXKM)*&pbi0Xv(* zuVoJAnVNUJ4!Noj_lWAB5!Kt8EcKswq3Lw{8<~@tnCe8E{IyWer+t-PcjE4|vqzpC zuF*&9AHdyOzob4eZ{d?PB$PqiHKYGiThNLwU~9BSg1ASVQC0X_n;8b~c1qUGh?9fO zA)m&hit3+H|JSy_yuG)X5LuhF!XIedHRyj*MIhM+v6!!d%VO7{iy)CaSqK92FTnll zE)fx={NgkkckL}0s0v2nYVdzGVJak;`hfq_au^PW)EzfA7ac-I8F7yYhqB1F7`tc= zCFGC;0$Y!O6Rq_z-Q-gR0W|DcbKHHe&P4KrIT?*vGW_pH zg-&46#B17_ zLI1p;4!D0b?)m`c{0s+D4ax +70yV_J_zR{ZbC2G~O`&z<4pMdpG3bL%m)=w2Ub zXqK8mSTXPq&nO2$8ONTw1>-HU8RCIJ{{#Ln43dS(cEGZ(K{_5dKD+R6gTA7cw@cLt z7APXR&1jU_?t`6hUMvTTer(z!eDnISObY9z^JDG#mLAeg?OXk4p4%*juyy9)2O48z zF$n0N6&t9&bebh}^aez~ZSfK=GAGPK+!5OU!LsKk+&>}sz>u(E)&I_~v`r^ljtk-* zx63Xtt;9V>GZQuMn=Gz`sKIKc%DB)Q_bWd0SfjxGg7p&@ZP#mNF_mj5zQ8GTBx;One z)*uk~nD6X>t$)iREacKao?%AT*{C}$EDmw9v)QPp_aW&+Er-feho`!>-?*y-kLbBO 
zn)f-9cM)hhO~$Yr1*Fk*!o@)8Vlp}2@p8?C)2mCj#5YRaRO!mY-v^0moYTxnM5COV zc=6PC@Rxe^)?Hs}gLAMtL;o|`7v<2XFB5fB89F&S5#lRx*9S>(=aE0%7)so<$%_72 zj$3bqe|P{tum-@Qzhg1JOnO1$?p284)EX{fWDU2Z#b|oi8g^-&QiVhVCQ;qf^OJ4u zIeTyK)b`OjaV~-j^}lC^#s$B`(2zQ%w>xa8>l z;LR4vDX#gdh{MOGPW6M{fnW9aBWV3o8BEPm>U|yXeUw_;xmHLJq-eqlZZHiwzBoIy zbkR~M3+f-qCKy)dSY()9*-!D90(Zirju5C0w9H?4A8Sq>gX1c)D`ee6$vV`EMT_3b!sWHo;{yiF6@_U+4;_MBP_ued3kbmZP$ zhdq4_!N)k7 zSj4=KHpasSUS#W?rl+y>mryFFY5H@d$nR7P{zx_LMDp1q;N3spzvDQ}R8_5GgtaA) zeaByVKTK`jzOD`T`s!wxuClcUN=|nr99w)MyuRYnE)zomUM>J2yrDIs4Vs)E}P8if+uZdmyb{=s5!PmuP%%zLGM?Q#% zKA!*HZ!io$;u@CfIuFSlCKqJS&wZI9r*h|TKJ@aa zggJ228D+>*q5resWAXax`YuW?%0GN8Rj}n$vb62fEh1?DXEpAT)MLhpu-~B+6~miR z8RlPfU(?u)#7o1=n;csEszY}A^CMM2@)X@6IXa2a3*uT2tWt!CgvE%uoxkl6;`UA$ z>@V2o8piL}s=J(tIhR!z!M^ommCk6J6xUrrBsBcQ{_wrGHO+b<3j`_~!P7MW^Q$%% z*`@9DeUm|W%Yj{wou^(*H{DDWdVJ`fhVO6;XzK?*TsPvhS9SvaPxrxh6Gt4q`{3p9 zuE^CbDvBx@ew00l6ZFsaQ}rdsjGl*{lQj9j_sIQuxtK_%0Z52b7op zUb^^LGz*XFf2a|>6_=`i_qWf~)8}!9gd`vs{;tD!hryYvQ90rp4U zl=8f-cC9e;-YbjSy*y+9fw*hCWd&z3>pTlWGChN$U?U)9ZVS@7 zV@=Nqi#69lKh~>zb(%6IIx@z+2>NIG3;ULy*YlL~QEs4rrr{lt$q5n*=%2Ae)Vtf& zdhv$|5c66FY9H{wn;#NTc6fk?xJSyo#`*_Nc~%|fTgNQTZ!-ALyu(dqfs@N^nUY+0 z#Snkm)I$4+m~O;mU;YaJC(#&s$3!gi+N0ka;#{!^hVN*D1N;;2E%Jt9{u0$#gk=MS zw{apN%o}xqxP;VQj}mdIiu6R%^@3#gBK&Kts`N3)HKCpKqCNz}EP9DD>FSYrO`^dZyrDugcwHT3`BpnxxIfT{^xa7(ndpDZi< zAITqn6Qml#(x3NIoqIo#qF}84JW`{Xbe$j!J<@CK6<5k>IyK?E?V$fT_^6Qi*8z14 z0weBPZj)`v7Ndl>a30TK=%2Ubf6Tl@5ni0mWxvaW6wqEo&lGIby)X_dQ4aj?N8e&s z;*Pw!uyN_f=`m2Vo|%tx$pl3|&wk=_@9J6>h#g)#KqQ18Yfy_WG5%wLMU2)L7 z;N;Ocv=VonzA4)QkyzzFckgIM`G+mTnQt26@D;BuqDoZ6UlFKcB7=*)}Qe?PIBV)zQ64t4VVf17o@hXSQ-(}^3 z65nry|FakQ%YRPW4xzZ;s<23og=av_^CA^jS-dv^knY&~JBN`W zu9ThokP4K4*d$hIxz>j3snBg}mTTa96_`m|-VHzYwnNcq z{X^x>w6BhQxR`i&>B66GkhjZl-jsDLl%?+etAD2Zvb6;6y78w7iIe8Mg=g@eC*#0S zJPF4W4>azY{ty4zSsAKh;tlojAZB)METjHWR@elsKh-1HhPH5K;+<`$RO$Zl^*5C? 
zVd}S5@w~j1oVTjps{fs&@3)Bde+X`&>44BDfmY9S{nOuk5=st>)B&M=Vlc$(sd<(A zlN;i$ep#7<5fem1EX=>UCcNu99vsj{qpL*&`siGBOu%Z`8UHrEm1<6=AU<;2QY9-a`d4`00hOoRndC zdHl=zi6hom=SO9zb7#eM1D9z=)MtQ}@DEwDw;8Mso%q{DVectu+#`Xvp956R8F)tj zXO4RlylIURZYo0kgX*7k(75Xe?x8jt2qWJ~kO2L&h~7W*FD_n0uQqS{C9r$1_`jZp z2=T)FYva-fr2`%fhY*JT=cOwIA)Jc3#*DYaBm>4usaj zq-2{rp;L-Pwz(b^e9=rH?pl%f zz~RZs750@Ab2A$}7aj}?$Q01s3)aQe`r z%178!AK^(m(E8`aG(M|BK`LyNTh6-teA`m}E7*hPN89Lk=hS-Acc-7%SgcLcm2v%2 zbb8U?b>)@CjL2?|+0j^({{#N7qCRQ+za{4i|B%7|`8~Q|fNGn;|1QQ}wHkMn501;* z8knlQ15adi_~m^MG$|1gY+rG{773Iu&cZ|ymdgI+1$GRN8Z@mJ%} z{EI4>e`zq4prf(Y!!ZtD%hp=Gwj+k&m z@976iETR4}Icjl8$7g!Dt@hbO?~H#)+4aSC;6Gb@d1JF^)3dyQPa;wmSz=mojQJNg zskP8C(W>zyrGAxv2>R!mP>%KA+I&YX#dX4IR)wC$l0?-1^=tH8Run5@oWLAc6|n|U zJm9SU%^qL2?1stOZS}p(NJE>UKKC}=8dJ}Y(lIP@Dt88vKD7U1wGkYWb;im< zNZFlp));1@P|?xqOrt%Y`5e3z5zn7xDqO-^E_x1FrVsG;+V-62jToyRl+e?YKLl^( zH_@0k^8@o!BsJ?i{{?s=ZYnsJO)ZAJHNAPXT71!n7`(@U|E z&OH?hqBA3N6Gjzfr}wOBN|ZlbUe|x^me;{DOG1tfJxg9_o7Zz5>KG6UNqgUTLs2~f z`seY^EQ0j44rzEBQ}b8&&vVl-|8m)=>r4BB{w~n}5x{!S^Pv16oFWS3HNK9lqg166 z-^9owrbVvqOjT>Q?W!?FTwK?1h$=(#sWp>$U45gMbWximYpB>pFMtc8{&%yThrEdc z7RvHSo;{R3N<^7G?{qYnSR?XTkcNb?qCs}CBRO%($fTfZD8FhSvsC!nu4E3h|MNiq zEvo;ys10HvurOsB(p}4b+mIAZ%0d4u;yc_Na~?)?kGpUXcO5H!|J~i~&PlY=Gw)Az zl^so?eR^~~@>ALAdM)Y{b%|TXt9CP02;cYQ&op7l<)Bvu97J09)-qaT{+S5ZOutA-?ISO%)==|$pesYdkp^Nq+6B>8T z@V_t3Lg6t!nlg>*e`+wou~h~pYmk+=>q`{z?MEqyo*S1i?*`-_|9iv{x7ylk4#hf% zKg_=ZTqMajp|P_{QQe14T$i5{@>KP0+Z2?)P^H`mXd^*_e-{G-zm`%+c;p)Bp9_Hhlbv|HSYKMf zJ{u2lkE{OoXB%p=zz%`>zo=OuYY?to>PY1O_`g_`0Y%@Skyc2+ZSb%Dk#~`yj{(HpP8(Ge-S`x**6NvpDmHicL!cLu>+igsV*4plW#( zNWXD$sojE9>HVR6Y6*6%nemFsD+Zs(xaZ(~7Bup7lzYLbq2lxae7HQDRd#wOTp@Ka zGSszJaGV?M|Iijf6yUlZ@=#a!hnzAU`p4K%gWF&I55ztC6ozVrRr%Kjo-Rf&p%oY-0jQJy8CS z(N81Lu%~KK7pBdC$w(*!1wF(61zg;@Smz4D{PU>9Id0PPV-#B3G$lgwn0Z6xmCJQ| zpqUZ5D}e7bR~xSVUP3?@{1aR!L2zSNnTk**^ncFQlr6R4dS3_8cb6)Dc?dS|K2ZT3T+NVj*hrj z2L*?~$p-SjnEQSJwg>fp32U(Mf@1N_d@UpHQTu|%ewH}Rqa?xW$#V7WhvaNjr5q56fkW(9_#od zr7WLH$$fax=tYTZ^V88-@PEpIB@LpQ z$4K(m7qc6hQSqN=@1uKdT36_Q!svWL+`afKQ*WX;Z^icNUh1Cz4JX%e4j$^Cq2|u!^@_w@3Zh}ke41wopuTn+YH@FfHELWspe?@R zGJUpjr;y5>C&vb8V>Ya(#qgKXcvSeF74l``3I4m6W>0^G4?7?i_4=($e04y;bh0wN z9XCjjUtL{o_eYw-Rp8$N|A(G9Dp+LyTAjM-NDt&+3~yzuu0lh?V9VdQYte#44{kkm zB*C*SVNha?h!ag6^gqZ7|9K_&9A0`+6$_24+Uo(E4T5LCVM^$)YRabS-5>W6+|$#P#6Ji01IS zTe$THS-5V4{s;BHU)myRPk+K=6fHBxy)yqg4gBXF{ElINY^*10qCFQEQu!Sxj! zd}ge&=nMH5VH`__DL;lRbFCw3!Zyici9%A=wDXkF6W{SyDNXmX;P?zpPTJ9aGV3tT zdT#zLXXkI!tK+}>n&58{c|*8$=!Z=zPP-IV?jb>I)AaEBFPZp*^tf+jzu|*>YrQNu59odZ+CR?SeV{0drH-j=DFZ2dhty6%YZo;F8fd7BYDvJC%|W@?iH95 zhqx8o?}0m+!kUDd6VFfdTmxSCKq2fPa1SjBUzm{f1D1h?-85=DY)UTdUdVyH~43?L~MqC--Qza%|8JI*`&AE{Dzj~9y>v`H0rk$qRmcxfA`5zGptL` z>$a@1l!3SG>(x9zX6yc>jBw|Bw#8LLx2XA9H;FbcKBVHGgMWkG78S=F{JXmmP2rqf zIe6&*O+DZ6paRJlE zE!uG9kvC+zjK^D?E0b;vVJk*a{tsOOyN1iAXfHtw5A!dwyFsA~wT=I08}7qD{x9Fz zUn$cjoE&@{TZ*jPkt(lmjQp>qZOm+nwqQ#&LG?e^|MGwC;g4Je+l%Uv9U_-!e|2eE zwW4-IljV7Q8USMIp(LMmLMkF~bn0^g<<8jR_Op&#X&>}3hXyS+R9h=<3+0tt2VdSX z?#dE;XA+)N;VOpKKbEiyT&8$PH(Gu5ajm6z0RK=)gL;X%5YFt|&i>2$Zl>f7=bNLrP{7*TFQ2(HD*Dh-iRt!{Tao~UF+LRY39P$JDKN_q0=f82+ zb!rC+tTfyVIRuZlXS%|x#qvEq?NawrJ-E&;)5#|qu|&}S7!Oge)faZ*FAb*#IoV$? 
[GIT binary patch data — base85-encoded binary blob omitted; not human-readable]
z;j;59UE{Q{fat0^@@J?*8**J^8j#KT@_iBJ|KYI@P=)zI@&5nizZ4$BvY5poYa!_W z;qJdy8jc|)$iG9+CKFc;H@`eQ-dcG?<<(*!ag0-{?^AHlK(r8keu5w>HP=g?1n2;F z{`2*Xkd?JO`XM~l$xD((F#j<*8Q2C+@YHQQb4+ahbGs|ItE^4}!TjfmblQ@*)>CQa zNy0=|NP~ZQ*$&xvrFZhRzv=4X+nnI`9~egfPX5t zT|J=V{4L;UVEha90G)8S$btX0;C~)E@$s~g#;$FWsAJchDD?l#yts4dz-d)H^D7|< z1>mX$S)xSr@aiIj&o@;-J?J3Ep#+K{myR$wH+c|I0sPO4LwV#?;Ub?#Y;OM#uh5tc zX;Vdn63WH;pOJXbg-N|$q8KrPhH||l-AJwMnhQ3mhak@H?`ueLm;(I+{fCW1{WYF* z(6M5?`jk|YGx&cLCSb_fItDD10>79*Td3J7xbHdy`LFk*u6A!8pNY=n`hURJy`QM) zaMWJ}&F8-`{(G)oJI#xS9f4#s6O&p(lhak?(NPDYQfkQx2PI3b4ZMKoIuoP#+5^X) z-+zgYb@C(Y7G0D(-~Yo?oXfu!x_g_!|NQhU`*0e91p|W5m1V%z(38W ztIhuyhxOM(hqu=vEmti61runDf4cd@qw8IEf{5$?Nz@*!bt{s>dGQJ|8&QZ_gD0J~ zUp$T(2mE*4G$|`qqCW}&D8zsHFOymSYvZq6;O<-K|B=Ah7Pt`d1Seb0;=JMQvU55J z{LetCuGd!v-O#^Z>U>-+<5;5)OaX?m{-39hJ!_nsuo|N>>=Sa(qNO7q>VD6&Q{{{! zR(zgh`EMQU$||ep`kz;&dp2yWIVDR{-D?T{=UcX=Qo;lO=HFNSS(*B<{U!>rn)~|~ z2l@x&e+ewskgUhy(*yt-$DHoWk|mM|K3e#?xh#I9Xm!}YsKOd}mL9b}T>iVRW?xG_ zXOWUsq(17qP=M(lYu;<$GWb!|>O2isr_{1@8~p&i1^F-OfP#)F9HQbgor>&WP$wm? z7uf9PyM==Je_|AZNgze_wrrmnHuufwm$|wRXA#P$AG9Iv2!ZSWsRs>vE1%o{bEMl~ zS@_9PV^2EufYi#;FHs(Y<;kSB(ybd||J8l?WU5EI;wRwp za6ooBqt{%71S6bK-<5g(*Map^z#&s9sCv!Sm_&eDyYLzm<4 z7F`H)jUQ^ZMNk8u6ZzR@aC{>tp5(6wn-nD+{Y-b{$aRI=KK62GwPVoQfYIHu!&_ z|5-gyl8q+=FR&8F#{ADUZhS3Fe#K2gt<|Ux<6o>(XQDW^uxTtP@Q^aP4|JFT*vWze zJ;yt~ezt@i>#5AZ*rnjHcwh={ z9tHfb24A^+yW4@r8=)GsBKQg9-_J?g!_aO>X6)MjMcn<@3|Y(21@$fTAHw`+=NA$+ z8ol5;epVC34p_Un; z0fcTG6xVc1j7ya>b=L-fX~DsQQ?XC|T}l7tzgDF8Yx#|J_7l1rp&%akU-Cxq)d!4p zWQ9_||6J|VO@llM+z7C4!up^6n#;LgJ$L^F{EKhzWF)cv=eP6xuT#b-70-nI*N2$Y z7kp@GjCnH(k)a+iiCFdy0kttt5ovfY1L0r!@%Haz~(<4 zNC3`YoW%Fb8rMFIZPr-Gzi)hGNa_*}GQ04ceY%6f!1C{3o$~Kz!4Rqy{uxaFT+ed~ zm{SdhO!}a$VD`VX=GAK+h3iZ-CJ17g7cy3WL*UEAJc{+}x=4=T#-P0>YhT>sCby7eJ^r6xMsRMZ0f z&wppdx7-xx1N}pW@bjR$^BVZe%v?!4WG?@mw1v_zpcVJrJe@~#BMem4oJ%RUXmCfg zcHDwaWvu@&+^un&ULBBsc0&IT@Goe6m9BUG<6j8-Y75WnA1?o*>54}se9_SVlW&a5 z@%f@wnTbHmo6}EGppD5)wN&siFYl6*`H55F^1tL(xjeKkrI?}T2(94ypWPj(sidn9yOlLVbGMP9|R37 ziSdoF@W=A+MxpmzrQpVv%K8cVCuutWRIpBsD}2HQ{^?SWit_#Vnn>2)`TV=X7&V2D zNP2;%F7PkF|I^8{9Mn%Pw*S&wXKCQk2|8EZ6;DOHcgsk!=+f>0eAZ$F^B=7L=k8It z7MJs69=1m%*8h|FC0#RmxY^U$XabM%-%t7#g@h97HjZ8@$`6YoXvXG4|Bq2d5!|$i zS&cr7d{x^Fj*L^-L1=0ySoUX$y2uFVpYYT0d3vz?R}R_7agAy0)AK;G!2Hi;(!Ncc zsX*Xax&Z$)@ar3wzH(Qdn#dmT|HM4w99z5ny*%1I$G$Hm6bsY{XgH{-+M;GQMvi|XLTn4`l`eDf!fD~1en0TAOQ$t zgPPmOrFaSBbZ=-1&<{O&!_n_tw!+q1!rETaj=^JYl`aj30Q*}2HOblu8m=YTVOyd9 z4NT~c^u54;r@XH8dSDmaB*h?r{=xX~8b0k&70^kD<-f4~*UM5K^r$uB9_+u)HjC;7 za`~sRL28OECyv}L2-xkVasYxie0lpY_@j=9QYTO71>UsYUtIDaWW9!6l7KXqYy`HmN@k)hbO>5I6s>?FINT&29jCjd$VD@(rA{pSTn}0{+(xq373e$V!`n z4BGAF)GR1I$O9*sYZ74q@3##&#N}Uf6gAx!kwa-v9!Vsk$bS6I{0KZc$IX9jw|uej&-1((z(De2(jR5UQ9$|-A9)1Tb(bB@xT7rXHAruLpn-7K@*C|keJ9i z^u_A^B=y^uv|*6M!;1`~Ez8-|{d`2Egs836w+ghJ$IXn!shJ`uG}bHu&+IKjv_1n= zUUv`t&%%5yO3ozr;pUkNWn33G|5XHa81UDFw;Ag{#PrYm(t7AB8u5>RtI;&T?f)r~ z%P2hKRoPtRgz>*H{`=A@&Bqi*KQ$|$=Y0GGRLQay^6%sX;GY^r?wPYW!b9z7pBh9q zFJ!KFoOw$Ig7jD%V#59_er-}ejtTkqWYdShls+JAv8l zQ>gsnc4?dF!Z~sZXTO+?391+93*i75ZrQ`~1^OrLHL?|peq3Ku0hm!t|M-3K%P(toqWheJ{zI()r(2h#-t@#3qzE<$!t_t- zmc|HIALEUc;#mv%uM&rpTJ6$WEdSp7Mx9Wk?OqC-`}zEP?MpoS{n~TC|Mni^V`KcU zGl{Z;51<|y@h4&<>BO`5c?Cas5-Np+{)(XM-}!*T5z^E7a6JVWw+qi z#!RBaUYTp+YN7z9>nkr|@@JJBF^;a-xz!BTZwvXat5@sA*`81Nx4>08#NB^wdfg|cx$!7vu{JWVadOg+l>T3C1gs)Z`4+8y@z+!;^ z*#~j?U!Z@`5@ytxhg~WT0sm{slX6xGaY6qeURjMWwn2p*zqCFrmo_&3alM>LEbj$n zH9=9GsDY!`RMI(Fa@MmyE+}(yD2(1xz7=T9h}(a7n3-B=ym1bG4d_2S^VV_nRJ??G zW|?VhtDP)q0{RcfVE=W*$I0*9i(3~mN-5m^S6xnq&+Wj5=*M6iJJV;Urg-~$_`hSL-WpM}jge{aVu@Qp&J2ER~&}+N!h}-{z`G35t+|1%N 
z_|VC<7oh*p_pOloaJ%osjVPDEGaF?yK7AWrTTN@dbpNFW44UWr4kz`^0hDlcu1pJFj2gy~O zEO}FXnsPcX6Z(HjEi<;j40^bop}GX?KQz(|{ynMp>deNG^E-Y|Wvy)h|1;+QVR$+m zb-FdqIaxN;0D=nYmYar823?E@hhNUvYnWzFN;NqN{m)NY`HYEkJ`~^yM9OIvU5k`t(9*Ak)nY~nU9iicL z=t0&Z+oT~aLtbX=QW{gz8ek~-DzyCe6|ww6;;>dxvJS1zT5ROd$fG@oyCr8Nf(jpc zgxVgOQ4idtB=r{{wU)Nl98 zaP?30kXYTuVVZB7AJAe%LwMV=!Rr4G^75p3K%jdG6o!aqv+xy2_4*5i)|gnmpT>R^r+-@$Wf#2 zRC{!d=ccHV#kzO!u7Uz41M_zOYa8qr%Pc{<50k?L(g_NFVNss2&lO!l(q;DHuBQvh zEscs}R^Zb4mn@|ujfGs}MWa`b{7423BBzEh{soh$Y%_~Ksjgq8MBNKFY)VauqE#<) zx<0--(3;UZ)JH>1F8|%+Qk)9WBu1JX-R9slx3+upvr~H{R(yJFT2Mo945dtwOk+nw zc1~=|TI;&mG=B62$MtZt=TSn|V)Mc%&ngfI`98g*hUj4@@T6h+uQjIwpB4y0Qn0+1 z0sYUq0n6Qw&pv|u*Aw!(<7QBw2>plKXC^O;Froi&ZvtzvWpUhw>*=!12d`P%5|-*@ z%kLoD-8!V;l>WuTOD#iA-DyNAeu6)v%p_(+Eq<^zOYQ*`HAnFXxRse~9&1k?mTUVDNzS22-diaz)2+z^1C%zb z{I5%XZ{VUDadI4|I7yjqHlXjE;JEr}yB$??1%kwa{<&RWcqY2K*|z|WI^6x&F_N)2 z#iOsBQ7RRE597bPF~s=?irb>@xiRn&ENuP*byWKO#o&J)=VQbE>xyza3;KVs{5uZo z|2aL;B`Jwo%Ezu{3d;MnKb#rw$|fyAi3oqgWwBaZPrYB(4c11Zn4U}I&zv54O5$kW zqszO*j#*>q*6^f6m6s=%N-gV`f%#81ZZOTM7W#ia z{FJB_T4IRtPXo8Iba#Jn;POw8$t&MTM;^n?y!!MZ@IOQU&l&Ur_Fq{4Pm+$L^+2bK z2ukK#&SazOsl-L*RCsXOr%R~^p4TE@?|OO1h;mO*1+3}H;rv;Uf6p}T2Z>c}TiMhd_1<7S@*pRL^a4|#pH-Bep7@V^dya!Cf*>bG}~&$w>^I=sd1 zA0yBR{daaL_$6B+PpI<3lzX0ky7xEY_WvyK-;;7f4w$GT;D2%X@7jLtN&Opy83O~G zXwZ<;MxkK*_W?Dc@rl`yn*#)rV)X9V#C|(rN$r>+r{E?HM=#?T+XH*sYTrLO=T&pX z_zyS^LH}U>pRbK-Uc;w+FGids)zRkt&+i}EzSPQ!x8Q$jb>XX%s|a6o>E-Z$g1Shb zrQ^oPVf7TD2?q54WHeCba7O&VKh1yeyluDa2@6NMS;8o0cN(6ZplN^TsI}-1(1}Wm@CW zD|;F^N%946PF&gj<$RXc*V{!-MgMTl?-m+VFV^t+`x@rdcaD%PuTe@2A^%=sv--J3 z=9kwUWS6pR2+^??{LlB7F+uipISI=MmHNBTmO! zpCyA+GGOw_MoiBXyv)>!9-=x97&(Y*d*n7U+Wedfjbn~lBwWFc*8SDaWvjBxjTw$L zjf^IT+c2)o7l-|q#kmhpFM8hpEwFpE_&Hsxru2+!QPayF@P@vOW_=;IjR)iaCbW3D zd%3|y@Zl9{uj8<0pknF>HvfsdCv;->?hn(?*RF|z`49c|EXKdU=08jAqQuzH|E#bo zs2=f)C@n@oH^BcqzyFd2|1+XgJBn;!Tjx@GhjvGAi-2208XAVjU-5(gXY~j|AvU>6 z(#>g3RrbzD$TjtjnRbmn)zNdqrx@2ueap05a|zJ@?Dx*UHN1=Dq?X|gZ#bAth4+%Q z9qI2d{qu3%#jNo^JhK2HC+{@0FOzy-sKfau=t8N$y&@go2HmT_kNgA<<*_=7PyuwR zEdx@l^-4@WLI2^l%c9TTJ^BD$)5qk_DXgIXJOAm#$*(>R-DNP))un#I^YftlC)cdU z;n>?gaQPR||MTf(@2<@uP+P4FpMm}SRRO%!2Y7|XyBD3^)EA2ghCGPLubqNuQR>7j zioO^^Vx7F1vRWLwK#GF|991wwg{z!n%mXI|xXje=6y-U=SsTm0n?!DzgJ^pocH#N9 zMVu_Gwxef5{vGyT)eV2GpRS2@sRid_vd!v`@Chr{UGO?{yQpdV+_$O0@0;IW-90l} z8*QTw^|>34ddOCR5L$-uF9=DJS337Ug|eZf)1j8&qlWw!HviG)x!JtKD6-c;S55X4 z8|AAYwwn%nad+i!7KLGk(20;PcrNsoJ8tCHlQW58%vNW7(7clY zI zlCdnj7mh{uX#s71jgp$Sbc2W?F8@8}^P7sQcEO=`MqA%Yz@{E)T0>uV;f>eLqNeaq zulp~4&(U;;Tz^-pp*plQudj6ZX&B)mgPnr%%!?X_Jb$!AEHMWE5A^?}FR?JuMOgpO z$|v-jqUy^-oHJ8)zB#noi5u0fUpSM^_}Qn^rvuiv;Wa9k~%TB`|~-U?Kxm+F^L}H8OUlU}5OF>NSVEGLm1penuuUj@`NJ=1#{~l^Du($fBEN)DdCcUV& z-c$7?_MU7uLu1_jpWtX&dIABiQnfsB-z<`*N1j?nrGu-S*T(AhWEE@?jD2QRUniex za^Sfw;QxXAdysbT@5sZ?R_=K6=1)~O@E>=~_*~d|KOEz~FWTy4+$7(TG$Xoa$0s|F zS{IOE7mOl$j%d1HVgsp>1H77rI zQXcOuF;m;6utT-xVZMO<*X9qGRtQiw-!8B4#-nZTWJlZ47&>Q4Z+J-g<*D@22n)aGo zmBcU@5Wk@;pW$~#Y!)_2?8j@2Tf!A_cppI z^>fc6DFKx9ou3)PAqt>_(gH0Na7ph)6akcye#l)3v8D&#@uT-o+K!@W1?c}7>n=34 za#&;>bH9->2pA}>=o8Jme}%o25S7+WESKTo_WuC?{h&cbr|rvcPpbgUVzCq0rez1R z2a2=gEq2Zdin~Y{ln}CjEZ}xn%K>lf zFdgDpZI-ueKd}@>#&Oi^QnDnI+t&I%$%Rop3;oZ5?U5^UZ%+-b(U-#z&=pk_hi@`a zuF1mse_}^3UZrJ-M}P|SuHLN-bpVB$8({7RuNW;I5Pu2XjC(R%{bMzmeQbgMzH%pe z|Mcns#r3P03agn)4S6?u|0>&WLqiDhFXBhs{Fn0eRDr-AZ5|xrH%inWkX3s@JZ5Sm zg#@*?fd5CCaTNE0AOc4e@c+n49H1~l(HQuDBv85>W0qo3pg{ysi+l)e@SS(5EIMwp zpCmL?+~}vAXBZ|Zsih+W_BZN;1V!GoyykhT9#Jv69Ls-g*T?u5x3eTcuJ8NUdie>^ zXzR%7#DqKq9D2vXUYeW?b(=0KpXa}CJkdYiEw3!NtayiQ z1ZybXB*T;!}_9@#7qksKBDgM=tFx{Zq9zgmqCP#PbBK8ja=>O3$_ec{? 
z0vnuFaZDZ|lshpap}5Ocei2vKMn$@O}T^RdGT-z^#8!g9oG?LUL3 zV*Q7&hW9)nlq#Aqb#I1Lr^^~8=3Xu7uOKMMAa4J&`BoCeK_0bW{-3Auv|UoX)E%Eu z?7Eao zRA8Ay*#V0HC`)OL#^4%Zn}nQIsT^+`oY)t|o|R$GQE_h0=*ysgqz|BALs)p1#hJGmpCcuz0%4g@{_{ zcaFQ3ZOb)c1fhqR{uvc$p%tXlam)$p6ja=Tm$ zy0q_0Ytip>i=I|M|6w|>$G{62uKu|o?j)fF;FK9d&SWn+5WRaLgu1Z#kElAy$Nd=A z{~QkehsFAVT>bOLx7lYsA%Ls~2Lb9TSis_L3G<+Z`OjI|+p`8n-F!?%2dN##K0WLM9B%`jE%tM23KN5LkQ651xqV zovn5(*zb^Q)ZgMc(QEPeW7I>NUgRM^$Ho&-;qvvXR78lbxf~ZP{{{K?Ze9G67!t*` zw9>Vz``+l;nD2ipEGV2mzrQ0CM1?<6hReUWsz{(3Y`DUL`A@kN5;kgwO)CBfxBoe6 z2IfC92{7%MQN0QMhbcj)Q#%hR>mVguXfjW(nmqb<|0RpeP!%5bo+vTNwvMyN$ZBtK z0~*sFs)zm8o}H5fW;3RLghP^L7y}V9h?)XdfEsE6|4)pt4r7&Z>f|d4ij@*2`Hjdj zxR8fhHXL&36=uqr8p_`^0BaBAzv>*xlNkTPG-33TfR~0V{Dg=2m%+_D_ww)Qy{16d zO)BK{oeH+-=B2g~=$at|1Ev{%7xhpc-bY0)qaZ519Ue62^c& zAe&=~ape0nwbkNcdpta${hPdg8%1ddxl%>T2&CPmc*#c}69q6+?b!<+a; z$qaoo4gTkwIg3n=p3kcdXrt_PW5t6xvx=5$=6QXXmA*uW(Of-~k;>skYOx_~T;(;H z1^$J0wMT!gZx+9>k5<%RIV1D?BH+IlV*D?YZv6bG;PNk~hR^7na|S7EQV1#x*8HhL z(k8nfP9I;$zbBEw^QX0je;QFS{j>esr7_!lRpIFK+MRTP18NqTO8w~H`OiuV+maQD zuV>M;tssK@m$d0#tp6Dql;?fZbuBfLMn32EDqP%33ILH1uj+bOP6kEECX9+zucSYe zyM3cBU=|*{{)}%^-iIa42F7SSV!T8eU4?;PqTSE+KPMIo4rsvh#ae{Vsh}1_7u|2YL;Kr_bwy7HR*;wBHT?TA{yh7EJwB1;uz(D_5A|6u*kqro0$VZX+-426W52BZvZ*Y3XmiY6hERX?TH?K zS5QPTilwCTUwTv*(Yq?#u^?Tfebb{#`&5bs^gj=Nr4H~egQeSG`Tk!^iI3YE6%^~o zMjC&Lch9xn`u%;;{oA>&9wlD* zH1qo}-?x6Jf=}n8FS@;aNWhDQa@L1ET zcXq@quU7QzriZOyzi{~U!ai3ie1KQjr=4-E0@RbNGOP7d0=VquP?86a)O}$dDl02WwcvB5u5*v1>8ir z>-PLoUTr4tiLVwEm$6N<^q}I{d;k9V!^5i-rv&*gp=}UULf1tlHo*jMKK}*$_x{k# z$a{BPXiRlMX1<0y_!o3>UnOjJd(Dg9LNUim;X z?@k$p@Dlia8sTFM6vBHR`^E4@LIag{SpL22&56sKzI>@5sczQ~z7V8_ZZ>V7=U<5G zKKw@tr7z+cGmOCe&zSz%aasy-{XaWCzg%Jn_E2JJXZ9kee6ZG6+vX|Thk&zsVg6G~ ztl;1gGAt)2AO!j+hui<0@KSTYQX(OlUzDH^|6ivt{WD@4C2OQ1spZi>(04O5+(B4p zkx5uZ-egZQF?yiqW=c&!y+=QaNag2VZeliYLk()sN+`)GCa$D*j}ol^FmI0TkT~}u z=3?Zc`!WF5JZ^_$hTkEB|EEya8}h&4{~0Rr7C|ShY*`EYuRIO)QkZjN`Um5m4k@fI zlwUC((VeS8-S!R{{L4Sx#qP@CL1S3|Gk5&uPbSXkiBbxl%8b0N4ZT`wiNL(fa z3i9vZe}1t3v<*+L^c*UBXF#pMGgAd6mM!q#Rh^Cs#;2Z$^oux(YYDN#BiMgI{;OJ4 zztqM$I(Phrtge34QChsMYpyI354n|(XQ#+9Y8`p_mVs|)FyH2ol%9{4M~O^U$^86h z(XENq_uv(CfOAm6*-uwm2o2N{$+ejO`RO-V$tZ6B&-i_O`R4U-BM5T)e<1%QJ*fiv z1^R!SkGrferSK^10+QpVhR*wtbD)28)SwceL^Kk55($Q0v$ zefNYEhSd!jF8|%%Bjkj2GDkP;Vc!N@J*4JjN5=B+-2GR)sx(^Ye|AKY6h!T-qAiv& zOk$UpBR0%`wtjf=2>K6?DcpXDJceF-A~7cvGT;APW4_X01ANh;H~LyqlGAw5hAw@x z_)r`I{(EB))86GU!?cyMahpaKh2e+Zy}Zc2EvvgoA0@?UfL{{RKO+xk=w0<69>EB2 z{RJ}Q--ml(Uf5T@m8A^%_o;z>{CuRr$6q%-{O1fRciq(!GlK2Eu>6-KN?Y(hPgv~K z^`Cieo2S69l-ou^NS2Tm-A(5ih2Rp<%uVW-U8pd36^Yb6@|Gcbxsry(= znX7;Fg3d42MKYh1JM)ww5R?Q#C2sy5_@@TrHx4X9QSZ3?Q%(0(xYx4TM%o(k227p< zMN0m6!GSZDn7eom`L)HyDDk9;s71k>#TL%L)~zA3@hXCc6h(3?{+Iuv><|y{=8ZhQQo;znR~|})F0D_gt3(bZjo!rkKjjiB zee5kfNYqLng-j1ZR8!LIE^nR&NBFR0qzRgKQoRXHxVO0dKLLB@9{wFP#q`gXi)kqC z`>AZtA3we+7-Rg4?RbEla`RudXfCcN+$C(@|16s@*@Nc&KT(Jvg$yp!X<$f#{QG?W z^Zfqn!!WVWa-#ze75qOL($agadt^c4=?aS9zzNDz5ap)0+8tSW5RBzGWXa^QZcUVh z{5$j?K04?A&&Tz%`@V68&h672l=<@;{xT%Sq#|X9=N1R=DO}k~Cz zox=DRXHXI$KtvPfYR?l6+SPPrj>IMF3%`GwH z!Aj=#|8V`!2?Q+v4*UzB*Yp18QlF`TO4ZnhVyQT0G58;lu30>e4f^MD2Q7hzh+4x> zkHnouZ0LVp9F$81lQdH1jBxi~SN42<^ZPsN_!S>ILDA%rCJ{pWXlwSOm}epSk?6r94!u|B&l{hWQWl9}Yh)@8UyUS&Fd#TCqj?-fT#DGLD@H z{8OId(>TN=P-wh3v30YuB(DE?6kcQV^Pj)FzaRMfXV-Rj#YQsdA9%%pH~? 
z!DWeB{gn;}Vy8d%xbq;)|16ABPsnO3p_`3cu>skY6r#?zrVOtBY1*duS^&9NwcAP-V6R_Fr86WAR}ioUq`3-eL6WZXs@gf5G)X1OIfm zetEGr1XbtvUwd|?!)2n6u=!8CI1@6cxDd#H0sqvB*)>@FW}bhsYpc)PhoXzyK`#UU z;=}`d$jaYhT>mq-|A(7@zqE3Wj&5Zhos4{|MHxp3tQbAfS)iw z>=vIuL=pNRV% zMX>+!#ligNU;V?K|7?DK#TEDZk*pw^p1fbSy|W}@FSsb+m9p}lzLF%C;;;>IlrtvdJBo(lVg3e^8OLd#pZ%PHo`nT8j`uHnK(3;yT% z{QII&O}C}$d&W`jDop=8I34-m-~5-rW_G$!qz#t;TK8u^#l*`jj*7=Ig|rE%@9I4N zYjYo!jP*Zb`p3Uz#A?TrPrYr>=T)rXdNe9=pc?jHpnnKcVgJs5xce{9Zy5!eZpT-l zEx>`L@Za-RiG6+-kjsZ?V}jyou-lTB3KGL^BY8#&l5sxU9MCFaD391uC`FFq z!7Aa-f090DS4hG@Y3JOeCp4n;6ll}MbCf5#?i}g%-6B}58|sYZ-`DeBzhLxA!-I2h zsVnAxuJ_IvuB5ZxLR00PRt}e=hyk|8wE9P~`CyZ8#;tQX>Y~ z#8SZsM6ouJpHouS5M*NdXLZi?dl^I#6x5QBN15awpSk-l;GcTE|Gj4V#3iVrO50%= z4*4%J&=UThcfkMin; z)v%MrumAK9_G%+Qs&?puaf&UrFz}&rW z41DSSH^RhHRqcg!)tpB9V7;@HR`TuN{vYkC+jTjkm-o$ZSfZc9Zp#cccyGi35b7}b zG<`WqtsZiWseix7etEU1+-iNPovb&v|L|7P@-?S4bCxPV{{4X=2b=%2O-w;An7qlJ z!Y{YH*JX&KdH%(J`7exrF}Ruf*+q;hjrhgo4?bs=8~OJgoJr+}1vS4YJmjriNCcF^ z>j-bcR6aq6gNt7X+~KF}mHP9u0JyL;dIaYG(YRZ3?A_BlQXdsN$Trt4OAIHqysEdR zps2?_cZ%yhe-m00H2aRZ?0Wa*T2(h709`9t+8e*=K8F6o#xoL{IXE7qGz8Y*gSk-2 z5B&G<%hy|O`gP-0s(|}(W2f;iQw0K$f6v$aH~(dt!1e#!R6{EhCh=50xw-%2>O8=j zI@JIFo(u>CNI+zdFho>jiHgVwvK1AODq4jIsJKBKfQ$nuATokt!5tBITXEE4MGT;T zqk^;AI)b2}wTem=5Xk>MC&Av|-{;=@*ysAxaB{}?eBbZ;`9|OE^=|+3v7xv3SQpL< zRaG!bM|ePd`&E|CgsfGz+OaC@Ovruv>q=B+7}-8Zn1~DO`*w`NGr7Ou%(l@*I-`Q}pc_$c%Koq6 z{!{hyTO`ZK=>@LYY*kH*tOFiQ>*f|ir%7QVATw3q7jS+g%w`vwd&RZ7CcaV?A2f^` zV5wC!-m6gfKL#J#W_|j2L_YQl@2SCc3@~WBa8UoOe{t3O)BIV2I~A&${fWnT63@-= zWQKTw?{}P8S#U;~|M{yi8*37srUbuz^nnN}92xj$wfBpovk!R29G~}VdRnmAsOjAW z9}f7am00=Mfun-|{P@>RaQ7taP8c zyZ?Bfwr^gE^HNaLaj|_P@XlrtXD49(*=ch2la;0Cj$uzjs{f)crvEC!BWD*|IHlOW*2W3w_0n-lO?BUX$5r# zkeKir-hVIbz#f(N7@T?AfJm!r+x6WyHy2zy%_h?ab*kSI)o~%_Q#U%RolQh;VUA|g zqwDv4Zz%kq@=&X5*veMbhFNQ#wS%ZTu3ZH+weV5L^~1~P{?nqxBaY2BALSeAD--(K z0-b+G{S$UXZ-q%82;YP&^Ut*ZgZx7g<^r8?{e3C@xDZ4Sk^@(RB$ES7kP~7`k8ut9(j3^<#K8Jy9!Nu7Uao`!A-uJiD^) z+~;e?Rui!rV9w9cyDGlBvEyf~*Vv}RzES3sMAbVi{ioa`qvMI8)#U&3&(CW&bV$_} zR?aM(wC0Im`8`H+(MZg38(2~KpIu*VLwDrX<(hfufvlAYukR}K&o6KEr|Ll-@}Cv@ z=O>E)T)RxwaEl5gyogu@&G9(Tb6R%0cTBcTGs|e&*j3WA7Jl0_Rh>munXd}UwedBf z)p_Y)xo~%*wAg{tKOfo$XY5)*J+q*Qjy+_n@^A(tKJeCCA{imyB(&ck}eYW$0{uG?Jvs9IBFw*U9t8__li&I)ZhV4q@?zkuQ+iu&L%Vct$ z`4DKy6b|d3zaNeahyecor+==`vYlf?z$KWpFhYQn9MH@XO=-8|WN{;^Vj*6nqUW^C z2;$p1Z}D74LF_~4oKFUU*rP%1e=WaP(LOk?Z02)!EG^rzeA<%W)qh?11zLZa>RU$X zANfz6S_teD=PtKgg83hM|Lf_jgPv1eHHfgo&0H-W+LibqZl2+vUbp9w3TG@qgMPo!`AGA6mx zmMD;dWXTMjY5q{<)QoZ3k^Ux+MC}^e)V!zLx%x23(DB%Ry6DDv3Ce;qSjmsHz&D>WUi1K%Cc zU;e>q)uXQ0?Ky?mtl_-U?3{m)iN}JJo+_a)p5jg^5b-NqroLR6=97qmT|Q>-&8`F^uR=-nvBlfXT1c>n##f`6B9F+;Nzdw>Y|kN?A&?g*`z zf399Txs=vFHMsx&@-O{qyY+y@wEe`CF20&OSwi3^Yj4sD46e##&QyclL^Xe%4F`W5 zU+n*)_=ou8#0*kzC^7XehnyQf|MM5lZPekJ9|-BeOG# zj?Ff4P3Wlhu4gu@YX+`Jm9=JOsh|fUXggHdyvIGJqcZ`?nP~%GzS_P%_lhGl#PW9$ zArmP2FS@FbX=)bf*yXf^LDKtQ*niqA9*Hmm5!HWcyV?NZI#mA`m46-~-NJ^Aj4CH4 zdRD>-oZTM)!YU1}mKt^}%V9zoQC)?V3=Dg0HGfU#f1s+0bTuLt>u2|jTA z`^k7kq%`&P#Vn)x--+zy&Pz02T+P>?ux4Mm=DsNjFQne}hcyB8R-nF&3D6V(+_ybz z6_jU>{a?j*cB;0NKRO%J8LDHYqHP!u3tj&x_rDy5_D(A{Gt0rJGVTB1{43HF6A}OQ zpYjCw|GB61xU8}RnP)?R3Q+myjPz`CuU1i}m=Ljnsm8!7eD+bV1I|z9Bch$!f1i8a zHgq;c@?Wn(#jcm)yvF*eVh)<1VK-aDu9zGDA<(ubbcBSzoW(iA>Pg?QaDBoig3 z!8%rB32vE$n>HM6F~A+sbr#WW&3u-w0f?l6X}G%yiu}*T#Mi$~6oD{O&nMD)K7scf zL|x$g%heHySO$~n{V&S@@k-}c79eTHbrTN4G;!HJ@RHOS-^2ALzxY zX${yQWBt=;vB3KlBYA;~3eLZ7?w)L(xJ)eZh&i6S+?gHx+3~N7Yqc$=36qHLYfHbp z*m}zV{h!mS_fV75`X}RH?LOD|R$XsKYax!v)Q#LZNG#KuIBk6$-FcAQ7*P{fGJ|KS 
z>mC81Bh)Z-ij5aAl>Seu+MFdCW_ttSB2qUhj*t}%P2W<2xbk_JAp5kQ)f8kXjE5u!UxlH&hR{qKWGT%)g2@Mli7GVD=HUFagpBYiDXr3{bv{uK3wlM(n&j+K(S53ui>_6S)kU2vzb^F`D4R?CU z_j_eB2wm&0**4xP?7`RP0$L7WU$n5m)#jc65wIf4|M4FWp6R$6kNF>)*+mu;F#k;X zKe5}EY3)__dtxmQsvt6R*f7`jOx25PCDx89_mjMFd->?BrjBigmlbHi+|vsR1ylWMlr>b0#xHGBuOMR=*$6g8lDbo}3xc)nYnzH+2H0CgIzZ z-s9H_s`mk(Vtdoy^Z>spoUGyei{5|tOf`31om9QNz43QdO`nb7c~uq69oB2|Lu4ZF z>*{I?sr)m4wz<6xScjLl`V?C`Ye4r0Lg=UPe@bxwA2*1%Xk2^|Sd8I>N(KMNVKD>w zCk)t2&A$}{rw9rYvTZi!GQxXoAFlm%6}54dxl2uW}-U^G?I6fs5xlXFnEZ zG5)>2e%YP*7<3@Ww=Q20N)KHW;# zX^m;0)i^|{V&D;nSnF-wSpj(PbPa?~Ig?h8Uj!WwFHikd)li$d&sj16`@iV^i^R_B z=0E01y$Sd~{ts^3W@@{Q!Wa7tDMt<7x1nzK&f~j4_s$3(1mwvn{lg-o3+l2{-VJD9 z4GED5skX1gWi92~_5VdEa)QQzsTGjsYyeM_J0LkR2d4&sFQXYk4n&+=@ic!|7Ecop%833C_MZ}VB9P$x zYweP^ApCXBQ;sHdVgD(W|H1utYX7TNGd-B%{}gI~<%|O+$=PBZV~A4`9S&AQ{vnfd zXGy{9d&mBb?QefQTO!=}j`ikVXR+zj&5d0Y|M~s%%c!+H7vfz&!GA{m^ZU!{Bq>u( zLtO>p{s0Wgfl>Z_IS6ar5lawxU$Otyjr(+B#Sys6&M>9J{2xXBIe30s69ee}Q%M0IaQ^kg5cLM;f7BQC1c)VGc(&_U+qRk? zPkZ+klbnd_r2L;hC!W%S#SIk(-}>J#{jhzD*6Ke6ZPLR`Qq=_BriC_4W8(f7H(i_3 zKifu(%R5A%{xM3e5?SE=h2}p$c|2j6mP1M$=6_sNEgf7yR48fAH8Th3jjycZYQcJX z{`LNup_@B|4EsOJo;|tvkv)DoNM=1{n;L`YJhWoo;+>ZW1X0gwl67P5WB+O7Wh@HB zv*TRn?SsNhPZnVN6hRXj?`|r)#ILtB8^{tY| zut}aDR6RL^$DTD#0LRh)nYB+ln*ZTWHiMLaZc?{JHirw!{;%-k+8S{DB;Ib-ee+U5 zHj-3P;o$33w}eS5`Y%4O*0grlh2zTp3+z9gyjIn1De!G-h?%(9vv7F+6g;6 zD*qB+n}}wmXN4TzFayOV?o|KjntxL8L!Vu8*dT`DKU4h|m#3l_f|hS|s!Yg|aZ&&1 z+;?Q<;Bt$>B;5b9^x;l~@beL8>a#5-x?aHNyMlj6&A+CXa0@t@08$kTt%CAC0x14Z z^5?6&V}h0b&-z;jyvU&^3jXtKAE;U}5yc<9|4#9rqe8)v3xUQ0E}Z|VwSfT_*YU7s z8Q$kxm_X0JxZc6y0tfXEHUHXf$dp$}8Nyi5jlLR@NiqQQKVA_f9nD;z_&;eW>s8Hg z{-x3?X;w8DDZp1+LP+m_eWv%{YceE{@w2jxdFi)~3w)ay$p1nAXE3Em+5eT=>$JK5 ziDLfs+6Us_d*WxM{2%21VE<_-<^Mpgz9ty3M0J`Pph1LmImL3wZJ-O)iv4%YKlk2g z;DU9Kkp!)+>0p)vNoo^b&A=lLqN;W#xnBi(2h@6&;RWfM>R3hHgMx{0+b|Z+$TX57 zaHo)@_TQEJUyF=a0Xb>>pE^ramMHi?81a5|dF8KJCC=*5JIIA=<#k)Q#-0UK|HW3q zHN7CZVA%gTw`9BpBn{QPV3T)XMLZH)NR3gBdXS{~^Tki4UQ-#x=MO&ERWI_0(+x z0lokJpZ@Vps`B9?|Ip2DaU}uyx>F3s5SV{19FZ<{1tIQ#**0SShvFY%{wMeZQ+jK+ zZQ~P)|3me^zhm*k<3a>JU~$H35ed@BS}^Kyy=%n)WI_i#@?Kn*j|t=uTLy~(H+D@u-v^SqB6V=j zP%&N56L%!4Y6h(bdaEWDCqYAnn)#Qb^E%#8?*%6NFji*fv?Adnq2t-4iEnQ=eeT)ozDO8Ne0&=)uc`6-H^)x zGvq&qS~9eJT-Jwx$SR;{Vv=K(#G5I$5L!auiL-Smd?V`%zRkaWoS%i_%`*e}KleMg zI8Oor_MeU^bE4*7W9k0WLvKLx-MD-ws{ex4KZ`>laPPxuI+LgGfVS>BAb$h1DiMer zqG~-NfP?$*3jMS2Q5J?(&BldbNPx?9ini@;!yCCC2lGGUQ+nc94n=RfKs0eBm`2{m z9}xq;Ym%T@;s0bXdG@JE?=_d+5P}5z->Lc6yaO>B*#9-${|>d;0ZvOP{vpMGuC397 zch6V4RwnSu!J-Bf;bhb&iSyl0(SskvHxB|^+^>tFdfn9zL^|fN-Ua2og4ykT6 zK%+PWbGpmu{4?eMfI|d?XbPFCOq0|e2_B$qeQiqr?EQ6(rlu|MJr}xW^Msb+24NjO zxg!<%XXX4W@^T$h!T%ZBtYQ`C1|e4tTu!l%LH>_2|8w&Xg7nJ3`EL`)Bv8SB*7C*o zy)1oUscpjHtG}x=1B&7eoPC*GP3H*PJpAjoTRTLewwEaOzi9pQgGOu>NGD|REFCmL z^6>2))bWWBLHR#N?dC$cI@yqKG;ZUMNp(_E#lR#|gAgk8&yUAx{iF1Mo~TSqhex#j z*{r?F_5+Z(|E_u1pkbJQ82s3YyjdPv@c0%gOe+8Ls(=BxT7hDXgm#;4JFHETtREk; zzFuVQ1YIhmu(8ApAFGEz@;~#h7aFmtAbbj3J#>N}J(FfLK-REbZtjrotvO0)01;Q1 z9KNBCSO`KdMivKgEO?Kj_|GgY>xe463()yzn*U>XK@y`PRpx)Rt;&Y`-*NvdfFXLU zbIqU}^$)fG^?SK3zeh&2U(*`p{nYw(LTq5RS%6Et45VX7j&EL15+PO2zy757hhWen zCZs>ASRw`$_up54uO3iJEo{7Fa01hbn-~U`6XCFiDzf<|2GfU2?g#0xr-)T7iBl|5Bl86|2K%v!|BHOkE-|O0D zJR~s0TdToaa5dSsIFkUX|9x~6=6`x#-@J*q*~?#f=BPj^`Eww6Hw^lClk&$XaSx&OY?es#Y~oIpNJ1LFQs z^nd+&+v$~3|NQe6eUAEER73-=AL;$CHSTyT3wjbVyCU|Q`Y4|WlPMLk92UH|wmHyw z3zNkD_j8K=Q`A3Vyb$RA)Au|OFu5kK_@Wc5TEf=dSmP)X1(ax+x&qlMV)Oq`{)hFr zf(_Vzit{g>S2vuf{VxUoa5V$xU)QkzVw-}0c*rPLV7LQuCDzkLsjl_yc{?n8B72gw 
z>wGYu8{LzafKRwzRzAK!?|)rPe7+#B4*j3YXbp9LrS`uZa!g)ZvnQ6C^j8O`a<_G|B&i`Z{?At%Kfk9f8=^EkGlrTiV{NZ3b@w0?#r?1^-at|FFZl@tud>e^>ZF{3#|DA79kv;$tk; z6>+M8pfWcJgPeq_y1N~cij@5qsDF+H8^n_az*pvfG~I^z&qm*^m_y~Cr^}rEsr)n5 z|21(of@<>b$G6)A>5TGmv0j|e5vXAZZ_m2c6GE0c&Hst$7D=@u_$A)R!u*f_yvyR8 zOxSjfZ-{qSkR8fp!x2Itv&e2he~!{WbpI*le`a2~ijc$M{r4+P)c!lgKiuy%PfOUp zUN?r~KQH=n#3TTQ2@;1YF*kA{{nUNyxMJg0P_xvLKTH~$RLy?Kl4m<;&V zf1xunj?zC(MP`$oyDXT(V#+TJJhAS`4Yq?B%Q-SjZcf2Q=$ zFJs7)b3+S;`G@I#A2ONQ7VgouRG>(kXXl!lgtz2uD_>mpL7>z>-2x^- zD)Z0IFRvTnpOsMgpS@O&&|No;MfpFoexmX}^!~f?=S&?Vklj}3pTuG{#r_xOpJ$oC zr&qR(!eT}L>8bX|B^`M=79ORxQ|`uWRpABYnHb`GTDF-J-kV$7w`Q4r-VDvye?jw~ ztN88s5r^|XIh^QT2}zyObp8kP&jB?5aEeY0+sx>j|MOi@$&{VASxxyrK@(J|{PTKh z{)POX5*v>_qw$feon-ymX$!l=+PBE$aowQ+mKH`z*`#diR;F`wkJvjCwLQ&0oM(@( z2AKc3REGSY4Ec&GnRh0NE<2pUkjOv$=KtXSS2}^}RH=V@I-QP%9-Dia=0AVO6nju( zFKYkm@?Ko+T2#tfM#>s;tpkdxlC*uYGtGPi8rulcjpPO4-00^;nfPy__umJSxOT(* z=l5+5U+VB)q-SQzmvnrs&0PvHB}xDZ2!wY2cD_$%P~bls#G!m%75#6l;R)G zIk^z|&q4MOeZ$x3DCPeozIk$D!fkCmXW$e{vjfmKFR2w10-O6$dj8eZLY=G!tXfkWvG}%hWB=Ec+O%F<=z3lpT|m#jX#VpS<|h+;s{85i>~^H0 zq_+f34+D@rQu3dBno$GL`(MaEyjg}oR>|^A2H>sqEsKv*g5LjHfcfW2YX8ekm%%dt zivL69f5J8tw9)#9>c9AV7rweL4=up{cbtDEylm9O1GUx4#4Hulp3*4LjUTMbq~+Kx-G6E|9R}H#YdUL^RJjjevJPPyXDmUYwWxqZV|Fk!MelwpM8@PY5rlB zsgrv=h~_|NJ(E}Z3$p!+%~U{auPO8|W@7%6zm1ap24-~s z#iTWVX-CB6Sy*9X1l|8NtbY{w=iT#?2$`EQ|C6TC+x?;m``?G$Q$EW~@%@tD}wG6R>=4}_H9I#{CjOXYtI#`ogY=f?u8pdV=eN2!0P z{r5%h$WMfj_J4Z%=5!A0pA)S~A8s6(Hc^G*A2!7k@gx7n`4`1M9QorP_7;HqUzJVS zX3l0+Apn?vj!#Phfz>vi9`t_x_p5$)6wfoF7Y{J)|NJ-qLjJSirw*glSdxMKLyG^r z`Jt1N|Ff~MKk9A9`^Njo|M`NbmZUGYom43PA?|;9F66??n?$?>gao`s|3_7bS2oWx zQlQv>f58hu{xdMS8g{6^hVwtP|3mYChV_qYE3mcr=-_b9_-ZTohj*g$u>TA9-!cE} zlsmM0Aa@O&|0%jo2#(&O`rlFi9GkVzjq-mK`RD#h8NQDn(M{!}Ho^H9FrYeLsek_C z|NQ-c!MApV>a@Q<%e}b<d)V4+Sa4I^=8dPTHax0by_ZeW{AW$2{!!$g>!;}6kpkKC8uYA} zGg$;l>z{XN1LH?mv{{QKJH#z=4`llXWmKU=47Sydg zwi(w{I|B7Qsr@gd|AYHqivF+l7aKG&|Ge4qcxEN`e^LDBl<+14y8puHyRr9t+^#1!rVtD_nP+lZ!U}SS( zpI5GyM-C6~YSjK0>YxAN{~Y{sTgZgZ|2+O4N8-}de#-wj9dnD`|1z>3v3yuCI(7^dQ`ge6!*?+MRHom9%htF%1W11=b zvlvuRkSY2v=>4x@{&RBTOY1c*z%kw9T{6iX-FDPJ1(v#qd>n858RuWf|DpBIuc1`_ zSwp%1Wio}N{GVcbkKb58`#*~|v&?-X1R$XMFaGQQLjLnX54JkIYgZL{;{cZK{}RiD zY$FJHR3~F=l!bMxU z|6-GKp0}?G-G71pk7ECw+W-3QRQSp`sMzUV!znSFxc5|M}dDbwcmIe17xgW9OGQ|MdL(_lHj%J^#Gy{_yy_U!rHUw{Am#r3y$nqRfukhTBz;_<~- zS5L`q9F?7@fBfvq-N!%PzkleD>!+{ZIa+(W{#yMn7f%21K$?H&Qr`U^b03zJG&W{5 zUfTIFJdh&dzTri1@YAzh;ZqIkEi62>Ag*egOIY3|L#?L zS*HKE#6oiD$o~C3`}gnPv-d#5F3)X8&KK>f*tKg{>8@RcJIgNQPusgCDmyznpIno6 z=yV$OD>eT3*|i6Lo^!Ht&Vh=RI}R-nmj#y=2N&eK9W5DmG;i{r0-HVC+;`-A6l@<; znr~mUeN9$&d{+9pjP$i>1nBHX(?%G(OFxUY)uc(&HIkb^~>4rn^EAJ zRbZD@jQ@9PYI-XE@8-14sc9*xX`42uCnaa3B&Ve&X75}VlajJIX=C!*jalnbG7>gq ztlyNHn3$5VHhtaNjqxi}*Dg(7mXx(JiCmtTxpG5l~h)sEfPtx4U2_fNI zX9s461+5A3Or1SBC2ZpQSq|%G*{lukWZc z>!({M1X`>M6eI+iuMIR^8)UjN(0FyA(aJ!B6@mK81NGtpM=TYl6bj=De3ox>U$K2+ zQlVW^;l$MiUa|SY$Zg(pxB1Q6>Ke1%C3@TB1>1ykw)q6-x`*cb`ses~=Xm*)_<1jP z@txy0HEYV$S^kqq{|V`nCWi*tZw?rj;_H+(&VFLRh$SONKA5CG+uGXN+{|pGvFV7B z27KNK4Gk@h8kb94X0h1}27|y~K;T;*{^C%R+hOO@rV>L0WG>&_22n+U`B-of_oO+& zilwDSHP_MPa#Zd?I)s)!L^E)g5GTs6@sJECv$!}9QY80%uGlJ)Ya#om69_<82dGR{ zkjob?+BWj*$+s1LQNztGYi7nj@+QcqNbdYkxxd;jz$`=F2&Z!9?u3c3t)4+O*D*uHhXb|X%#xO)ix7Zh9Hp4tu9ek# zDBl{5f6Z|2mg}I3w2$C1j}FL8IEX{srz#(aE)Y zugdl*$eqiO6OyB|>*GtRxuwiUEGH>x#7*Oar%7oFtGh!<^H64 zmBU*Z$n2k8QjKJ(-HmQy11fj3U<(A+Qpq*t<|5NkR@XqFa!-8tq^((7PY;zlI+xg7 zcl=7#8ijMYcm^SbT+B;UroN_=Yqg6mF6g=O;FRCb3&;1rP$t*PZnJ&4|B)LM%DtwS z>mY!*_q85d1s}hETa6jEp@H5g))|0dawn1-wsLuH-69ZP&Exo2yYBFBCN!;3%_^Gf 
zJkp)0$t^1PlPp5$qisT+uIuRLmW1Cv_I>&;jC&njK}0XL<<2j05rkij?+EyL zXytxx>Heu%dgD_bygwL{97U7MDRR|J)^dVU?sRjVLb*q;*2Xa9nb;q1C4vPcuKyt(VQ{P`H@}Qi{ z(@}AR05=y+vT9s;1lj-Z+dnSF69P)P|NYmMVlR1`sxV)dNUG#;O-6=;u$d89jEmx~ zRjqi*j+_>O4Xg@s=L*&bzYaxn@A8l6WOAUU^rIiG+;5l0bl$w}@cG@-dz;?fRBkTz z9N&g8WcRH`-~DfL|J3*_RONGje8keT$16&RjCcR1tNbQP|5v$SlY^u8lK!%tOc<+24hEIwKWRo^7Od0{w*i(>QkGG z6uII3IPJX3;pV#GP>J1 zOzumBT)S}B<6<8sV5y@}q1?5Pq+VAl_l+!&d84m^7gWVMMRE-%w?~8R1z`*6=3Z>B zqm(Yt$sWK3aeQ*+V;0_^DG`A=ww~NMe&UBN0Y*4{6B?Yd?liP6tnM@}6=D~d3x=?~-tKj68$MYmedUBi5&ZU}r zspb|>JTeikz;C-zhRrSMislxDb1BgUbF2z<#pag(aW0LtS;+)x9mI9Bx0XoS{kW!X z3t4xxW1F-DZlp{~Sm$xUD^q2pa&oJ+!2<6T$7X%kW9!T$`G^Tx`XK)oADjhCk=(iQ zZJ`5_JHNxt#lL<*azkPK%NgI4d(@S*Bd+n@gbj}m2>j2wE4r$b*s7cq?Q0a{-=M2 zzAh$6WpnR`HvOqcuF?>Ci*>yV-bTcG;}Z@ix5~{$f-jsVBLDt_5zP$&$pvg~Q8<@t z$E^FDI;?;6RYB@gf)BP~{quD2uc6a-%HDs|Ki!j|Qc#aXB~Ut-l>WJStQ+s!ipg!O z%zFz&)?jlB8cuj z{#l!)i#47$3u$tv>ib88^g|4oXQc|FW19$$TNucc&SkNUrbM`dapF6x#f7m!JFKV2 zwj9{cQrj_#1#f9`12)&me^|Fn-eTq+Rq8mOk0hByN#l577M|Ej42IU^jjvS#d`8ubH`J1PBhYc--w2~@W> zwebpa6{UY>52^p}DRQSq(v!VqH(0QL>ZS$W6FO%olWXC}aRVXcIKEk2)mMoSD**~}1GTw0&^sm)y-Z5~pmV7Td~Y!_&x}~D z)0(ub{6ip#Lrrcea)UvF6xTL?5cK6$l7S^G4!CY}K0-3^3mcx?nz?>C9X2*qKJ+-T zYzYHK#=J;9ajvQGplj3~oZM36&PhmaSn*=JBDogZf*H)?wdI5BS|Tqech;i*nM^15 z+x|pyIZdv@$t~8*=}p}TuFA3U$~kER1ZuG;9B~x|DVl8(-f(WHA}sEW1;n= z9ixK8P>qvYunxp5ozl77d;C~SZh^+jH-nptcn>ruudBWPcuwwYW6uq>Ns8v)w$m7e zKt+cKu{wk2Rht_Ab?EWze@9(8oG~E3$n5n~B=>y-TUkJn%azIfarS{{m{KLVTnDxa z#)C;~0wKMj=djVh;v*pqM)a+sxo!`$TsIG7B!lqVt|fG%$(?M0w4j>Bou3cwprE)ZA8W5B%n7({HhImv)H;e>UYquBBudW!loG+I-n4UhqIdA? zuMQ9NZyX<5G=v&UN$&jZoEl(qjcTr=$qngu7*ZvJ@W`E++A4=aOkavbF&>_Z|#57~GP$;7 z&;QXsT_IZLdJssF%dxqRQ7y8@Z+^ThG{0e}@~wP5|4Z)AoVtDQi@)V=VHWci4IVq>o;Ozxv|`M|*j zVjrw%pP?j|Q{;x&BM_n_Hz2uc)6bcD=Fp*>+`x(X?qQ6nNLoC)!l}7P+I>KuSMerxtM@O7a19t z+*jxydUG+{maS-RLH&c9i~9OCNp%EClN(HebjTdR7&yLgw@|f6*T0~P(my1DliT}h z%H}%LKX(c<53|L#i_Ti(^DN}>w3+pl*wP+;+;$8|R{p_ujR1qGbN!6CaF-GI@7e=X z)B0~{7|ucehY7ybf0fMW@cgasxK^{`SkthhflsVCE&I{<*xluE=yeV(k_F53!K*ksv>Hsx3SZ(N}Rw z{OH_>kP7QU67gSMsC4-!<4uZgtdW3Z8 zjxBYZMnE2f4Yr3}i@ZE`Z3@UZGT^%O4-MX%_kN!yj3EM>9f?A7_o&$0`{w5sn|mbI z(&UDggEoNP2wG+%qDpCUC++_fj%7;~@-@5Pcy- zfBbp_CugQKO3Es=8xvAZ>R7XNtwMLOq%n%g?c_fy=Rf^}liMu*#K|ouOAFJ?1Z=6~+F`A`RUo69>pF)X zS7o*=Qh^2YyTu@BxSVmEPgaL&A>Y2gYuDj3L<8R~88y<%Kh~JKr1Vs0wlBL~Mo(@B zuCd6g`KMEtEBzlNH|%!CLVG2-p|v8QII=q$CmZsU^Dw#JKXDbRNVwC6vrbT3SdHgN zAQvaMypS+}bp)OQarQzr9Z--vadP`u8%NBz-hq=_rYMtu{+>6yeSd9gxLKQs^5nQ< zb^sG>@X;`tC(?6R()D=il873_ z8AmDf&#fOid!AmJeEP0FmE7;?8*zNrnBnGLirl&SH&Ig&7mj0d3#ET>j@Jf+@KKf8 z(p#-4a@er)u@-)K;X>-eSACs^A0GW5oZMmu4hQESq>x=qy%ER_>az*BzVo|P6uJD> zOe=Vvotz+>OPHnj$`D39bQT=I6NgkL*G4SMwa|mAkey2d z*@k|2;*jCK+A795IGUa~7H7c_ktSub$)CRWZob!f`{|WLNY{Pr=)0AGRYZZ>%Xji; zwHf`lpE_pNJ*{0HD}$_0jV)!E-2bBuAE(ByY41{#DRO5sUcFs1JZ$5l=;pffaV_tL zc3+tHdb^Tb_5ClpK#{AI$^EbCXR?{3(*KzPrWm?P(lV81>0&obG6*hxroNV5PoWwC z9h+>u8G_5`|NM0+L!R!f%F@YcMwZ~UJuW^NlEa*LwzgdgCjp-qc)pt4FsNrr@%`H_ z(#2`^K`dstO>{Y0)~T={cu&Wfr86bQou3E)3TQF)OquioZ=9oV?{h$M)!S|O%yLbC zlZu;*NN%t}|0m_;&0+ns;hERZJtk?&=9b^Hb}=abXZ2I%hbZRK{eM^aRq#E&(Q!j-)a{Rr2Fvc#Q=9+$TYxhoynyFLafw3Is5xb#QTE@<(qR2 ztzh7XI)*e}BgWT}e@Mi_w{+`fgL;sz2ge(aF3tP>>6LJ($xxMM<2h>iu%-l`aGdZj zJ%e96Z~N*^PU}srI~XM&a$na2_dhM6$er9sR7$WL-}Y0t(UV)f(FgV(yJLDtp?}s{ z2Nj1=liOkc$M9a=b|ffLo*@0N|BE7U)&5@w#Zl`e>WpDqj{ zAX3%V0>6YTQ@n=-(&VZ&&wpD|=fgqg*+0z`vCCe{@H58`K|_=Wb#0Vgtc(2TpFm(N^S$^Ncd%Mvd1N3?Z_; zdM=)7dxxUXYqEx67$Ll49^)9;i;ppt5c%h)_rAP0GjvV=n0kjMS4GvWnMjbU9uNCJ z-(z!c?PVPTn_IBC?!l!$-ZAm(q?=pX>hJUc3xv5g zl1hMk%u+sppiXO4AW+S{8OBsUm; 
zeKJ`7#o;#7Y@GEiT%HjoYgqD49tlvm8_Qp98}p_3z(J3M?$kT~>;K4vBS2^DK>dII zkL9&-+SL4uPOeRKleRHPMRM)pj(WWD4sR}kh76_=BrbzZ#6#X7WMFcQI^^U%`jC^d z9K0gVDwAuAMpY#_)@O;GR>nfa&5b{?H1vEU9)-lR&>MbMW0(bG6g%c_EY;kincfMH zzUCfnFfbg~*i2d+u4fsjVVV0atJ_}5Rxbt)cdE9OA-RguKO0tzfoLmyY1sbF|G|da zizEn@%am*L81Kh7tl zM>4fd;Z~*E%Y&B_zYZ<&7Gx5|PNvP``A%C3SiqTNg5c*CuVmG#fy8;83jgQjpWoql z2O>1Cqi3kE&_8r?t!+9n?En1ox0Hb)JT^A>8dMS3@yc`*5_N3UI_CqixpxUQ|Ek>w z9Bp;*E!JBt%+?l|Eyb&M52?d|So*F+vcvGlX+?4;`ait)`+eot6Yw3|B_T`#yGAf8 zXXuBRvhvM5eDR_a`LfNHKdX67NYVd!jpX1(1O1X&f`u%gwQ#o2BG4}Tsd*#cxW zQytAsAc#?HpvhI3T}7+qsq1Oa>H!Co8!mx{0ekJ~BQo!NM<}XKtS=qlT*|j@UB*s^5Eo_@%S|D?pOFf3|Gk(H6&LFV@syN8sg%*lKYDH zkxK#ZqQbf%d5SZgI)WgO`j?4Bh#APUm(^u1#HwM`3%Mw9`GUOV-Eux*gxXlM1Ls&N4c=HaP0RmA3wnv?;^n&e>}im|UahUr*g~+0^Eu zmf1iSj|=5qYlrH$3mmd4@tH|5)EySYKRT1$u-()35YX{EzC$1<0g70iips~BpH#v+s5-)@rWpa(sK&2?9HW!DUj1gYfab39- zdcAWH*`cG=O`cQP%E074QC`06w8rI(IgrZcZoF1L!QCWJ(4M1FmeHA1`gOEJi#|2~ z`sJqYb^TgVscWB3(srHEi33pa$?tUNPR6QqR>^!%%D`=yNi|D^TnC^r}VJ^sX~ z6*kvV^RK^ILRXMta_!wy{f+p=FHGv@c^9ZY{y=y$n3_5Qh|k1ZsGe&CZU7)jS3fK+ zuEf%ht84FCjj}e2&E%k2^rD`r%EQGMkWMS)Qo{`vN-*k&7X;_EGN>P3IsekbA0A&` znu<(#?jXRRJt}B{VTC0V%+|^@2*HE~L+4n3kT1}MT>%284GG5lbH~{U1jUh>AthN^EnJ+kB zEv;kc>gp~`4Xi_ykd_Iy^GcDJ!_yPoLlM|?#@s8CeL|(C3#nE2a+2)XN?N3W&Kmpab5rLf>QVE z_X2nV;pW=Z5;P+U9CdK6a+V@@MqP28zG2pZOaJMg6*EDs++0NcGw*aJKIyHiF`ALZ z<+xT>cUxZD${TNA`KaUtJ6AA1tBb?f61dFDKW~do^p^A5mTvem=j5X*wpvjX2abCu z0d=XjUjBZQDs`#zJ@?V%PNqPz1qT)Q6!Xs70ys`w(v8h6q=HHE7Gp$VWlE0X*4{LAY!7)(EZoN8_{+Dc|KNag(N zT`G?L!X`lD202LH}k9&{t zklYDcrJPKH%*1}J?s%WZr&l^#TheyVy8PRQH;YPL>OQ@^sW)nNZJzuXeye5f8AGjo z?Z=9AasD-3d{- zAD3*`85?CiCv+Na3%F$ri5NZ0@ar+2j(5oyH~%NOUv(NJi~jmdk;|VCi8`1->7UV& z!{i42Na~8Zb0i?Z+Yf&;OfCm@sRm6h?|H4+EUECte|1c~yEtCucDV-hFq`rHOsJ+| z65rQpkFpf>?5}oPHJ~7|3ELN!^~}Rq^Y-A>FE6R)7N0etcRRQ@#!o+lzL11!UbR6RW<>tnSiCF!0x~5ezO$R)ts^bp^RB|7idyNhWcQ9Bg>;D}KKax!MSbQ`HRWuB|nhR{j z_zxG?`Rr>W0P{adEr|1b6E&8ee+BnGxso`!vRl5lvug%LuA2R<;_7w%-xuJ|^RaK6 z&iJUnod-Utig!CYnmFQHFMdDkhwz&1oUz6|NI7Gebd5ymE{wYztU407u zb7J1K+9@a0sYe-z|~uYD~J5E8T_leya<_N?_bd{aVl zIX(Ykh;1(>IR)()HA9?a2RhaSBmB3wQzlsZW?D0$di}PxCKbT^Pvvs-=F{X|1iiU< z(7NXzbnBmaZKCI2BTrJ_Q^bP`-FKV$AKC%NME&{d#NlJ|+hrHm zUy+^oGz#ZmQQh~ie;QRKa^Fxpyt#Ns(ihDnW3Ss4Rkel?1VfqsaRGf_7jRXO%fA~P z2!d~$i<4I~*ePF!o*LtIG)yj6VeJ`V3#M*6UI+eQ9aLd`>GRv5-$omPpeD(18S0w+__>R0rcjgvEHifdT>jT+(-ZC!V(#Q{oLwCzI?Ghk{i+nhu9{r-2}0f zts&f3jy5@4EC{Ob4Z=&yF6uVlACas3J(yL8W=EdPcnAA%{xy-*c=!1!HryPnaZr%U z@qv=@D)n*2lAHQ3W|yw&+p#ctoqcRH@(i+W>c5HV>gvOD%FJ!^n^vN@^QMW{lcD{c zO`5#@ZX2GodM`O`b~*VtW%b&$=nqA`f?Epzhi6&C5Lq(TXY2S=;k`*=3=efT_7TnHk9Ga`NRYycM^{tZF+alE1XL1j~z*BrhW>JjmHUJ z8J*mpLEXBRwj4;Sh`m{MVa;E&GCv4?QTcKaANtO@tqZ?rI{)*Ti=X4TcY`m& zXQm|9x-UNaat7vqE{f+9q$6H_e9ZqOeVCHk{~0H1Ti3k|mpjVaRq}8yc<{=T>z`g0 znRv$a%KFaIn~U>tbCLFc5;|2NbL&_c@Y_pUien`nL9^#Roi+YMW_t!|wmcXRAqeUDN9{JiLr;gf$7=M|&+`ou{y zr1cNpu`<`&&bFDA=IYI8-*Wv2hw=$Ax9elFdDZOU$b*BR=5f4;x}{Bw8DIp=j==Xzb&^Z9sU^?i_OBpb+8YyBV8 zxtM1GO*9fX7g2Hp=$`{7`A#~lldA@@mgFaUbVU$I?ydUHq)VEyOfAougr~ZeT!-Gg zRsA1Hth{2zQPo=TR2z54i^Eg3lV*T34f&ocI2SEaRgN!q&4zQsAk6P$+qscOn*O;A zQ@yVL`O-N`hlMH-uowSJZg76`o@WO6JO85ff4(;?MVj=y6hbJ!pu}l}zA!@i{EYhp zmX=(%JOq1TytYA}`M)JFfdB?CtzH@wjN zul||tWpW>F?UTYFqiqF6^VNMdA%GLwqU?l#$PQXv>*R(H4^P1K;&T<^~gxUPx|vtGW7t9+9}b zGwAlr;6ncbs{hGSctp?ImeNVZ&-9M<=fmoHe4_EaRK(jjAf+8y=>JRZ^d`*1=N&bQ67sj;@$h5+?IX~66$(95i>Tm{R+ZLbdSJmJeLdl)${huN#v>Je1wVvF< zj&&@S#OTw=oeb4yvN(yqR8jr&;M=p*^3PE`uX}bmWM~n6Y5E_Vq|Z4_KP{{cTeZ79 z+g%&6IY!~u3%tXTbO`SQg72chdi|q)J}zAsUw+|t!*4d^Z*t3*$HEdGt*!W(Ep5LO8=6}-=0XIj|5@%{M;P(U*{s4+#61tesj46cU~{bI7MH9I1Fe`0 
z=^WL3btf}Q?o{-eICnx?5GFs}#ER%yb}5frT*L)dwNV>on=?ycd zs-9Eo{AE=Ce0XSO>CR;A=SMDkn|CTbY)>gW{q;HV>jo-!CeH6id-J)G*keHMMD;)5 zTqLO@zB!?cq&NR1*U`x3PloIG>f<+KB*3}I=^F2eNR*9tj?7mK%su2If&RI}`ZqT$ zY|Yi5=P0t}Q@BWNyS$IlT$#A0bWP^G@R%E04;3G%T9pY*!F9hl_jbm}64F5|2|MC+ zp)WnVTJdR4mb^R~m0x!&=z1D+rS7*7eUf z?cMg!F|t7a=N#yN)EK)3vXKZOUWBm9FuvWG12MU}VV{q8)>X*e2K}?(Nd^LyY6&mm zc-9Nt(U+IP1)9^!JZ|w24ZyjGHA__`ju~G)RBC;b49P8Xu}Pe+Ncd*w{zj*oa88lT zIh*O1xT~8qFU;A@z<6^Ds(+pxZo2rV9%wo(@LT_pJNF%K4WS1S?f)jXfd1KklFs}J zn%uH5BV(Sl1;c}qtJ-7 z*r}gZ{{*jwvF%fIzyrDS=dYe!M2aFYdYxSU$#;N>69xh3{EL>{8!29$UYEXvVIv5h2l%TU<7w#Le&bOe@0ZJcrm2Fi@ziW`;2~bL$6=SoJOv?EHd(IW%d(e z6Bd%|MyO|tua|jHk^AAfp(C+0{f`a{oj$7y0=L8PKXUo;x}AjHiG>+PU}&z98wwgz zwh9hb=YZOoMsE0@9JqJf4cM6pV0QnG(=KZk)Z)wSO6;Nj+O-}jF4#j4wa3lYc zTe2?n88U;y0Zs1xtN#IV=TEvlOb~WbIq!o($(`2dL<{O4?ly$<`-jYMm(uTtVkBDq zlk&KLm0#X6Njc{+BrBujhDd>iLu#kmB#2!tRiOXDGjJ`RrLTS!x}S_D*Yz1iaVZt# zM%`({t|iyC<%wzrQR+8d#z6mP^S%dA|HxX1YTE)MyJNlogC^Gjx#8Cq^gs^e2B}S? z+A+}oB*SFw!<~JMF2+djmk+f%foZF4^tPz}*}hmIpxq5>E%P*;v6(lh{+Zi3#@!2E zet%m3r^FdS96RBxUyEJS{{+lb1TW7+DgA~7W1hf2!GKHX=Z77=6zykbUf%_Ia5u_+_o%0_TLBJ>W8W2zkLvloe=5tpq}ADXG5X zazu*i(NvkPnIU$4`06OzwHsStmGPi|{>)=nV)1nJ4x%Uh*CCP-UCf3Le=GRA7 zA63qcvHvBP%VqAk^mu*#g_5h-^yzsx|EqtB?C-<;%dv==4Spf$pG)xE3bw7;24O68 zA0HpJK%eT_k-DfpBMuJa(j1-E+~lMl5n(aQywmxai_6};u*PMyBE^=ul_;x&nzMWPSCU_@kt}J9TF$b5BjP{ zbfEuH{jn0N+Hy>Ad+@!5D>(?sHb?G(GI}1t!jkk@xU3)`Eo-xSa3y7&rhgWrbMs15 zKb0Xt+!3p>^$-v8<>?CDn*Ldmck%K+{quo`K;ZdH?!02hgPFmQ{*gP;`B&ho{`QL1kD4lWJQll;b< zG8F=5WS2I`SyxLW%H4m*A3$xOTKtwp^*>dE?_|8vmOWBgh!x=fg4<$f3HqOE>}5ty zG2>L*K9pP~oJ6PJZ%j+>O|=`K-=q&^`2W;DD7n*wO+VL|=KpH9Ox#`sZ8fNWO8RYB zq5h%Ms*b}e5@+}Q<<{to@iX|BMV~;{Y+7Pdg$YJ$MbEf<>aA)YHnm(4TghRIgBL-{9pWdxMeQQ zU(|M6W%w;zW=Z0g`?dOwI`XTLMs8S^vX#iRY`+J;kTWR<^v~K12m($pgiIrs)5r}R zn-itilY3G94~s3&O$@-&*OTl1ksJI*ZTR<-B}~%B@h{D(@6{2B^NRaKj;1OvFpTra zE1H)Q43a!y_&2hevm!sq{uL!RtQbahlzO%o!uof^eaFF*kH+-YrTj-Oza}ovgmL;u zZrD!Rs7*px*8N}qk*nNS+%$gTMpm0Ge%l;_ki1}e=2_$D8=mnzUe5gt967e_X$oD5 zVBCY-rsutvzK>jAzf&RG?t$(r%B2BZM7$fM9)oLLnX#eM)6(QlOJbyked3@#E z$gixO)*af>h%%u(8fTNF)1|?=H$~3+IU$6hf%W><+pBKWlyQr7|JR-`rM4Z0l8cXF zR;r?ekmgm_l6yx!RecW%Q~IZW#+?g-G&xlN!+nOZP-DVIibaGx{*kNx$NzpdeB<^` zfXYZS(SxK7lIw1FRAV;dFSD%}j&@h&Ev{CCQe-=y3L~ItM&~nf+4_=n=)=TC=6k^{ zVr~9)OBj7a zpN7XE5{EdzjEefdE=~q4|LQt7_XbUFxxKpeI~=giLI1~s*>|9kmfTzQ>%>(Erw4K; zP8S?uV;uuGrRAUmrupB+5grL+64LvB^v_{va_{I=P7M!yspFjr)5`QDp;6aA-N1=R z75FT;%nV{dMEmaC+cf1ddK*ycn$33S@xvQAFW|Cvi> zfwy=T?+1SlJeNh(9U_BzoD1i3`0{21wTdl@|3eP z2S=-asJcYsoI)%9>zM4DeDLq6`13f|gl0{woE!aR0sWu7D^H4{hj$f{dneE@9L@i= zquoZB3E`R&lEUZ%xe7y?Kg&OoqC@DpqJlw_dpCsnf&K^TAJ9La|JVOHlvl?lu5Nx{ z`*G9SWZdz0*Iu4AYQUDAs^UqT45P>@m{IZA1 zYSarHdxW2zN+UY}xeEMWsnf2Ba;65QfK?-?pybYuV#><7TjQwyN2%cPt%f%2w-*6( z!(Q-(5kc#pAkiJuyUu`SPuUgL3ren%7_#^?;SUpzP`Q>A5tbwVK&~RuOYQ>k(!b=o zb#m1YZj7zv#*^+&)$`8HX&blU(fL=Kv-CykRPyV&Gd(F+MXQcHnwA~hAZPWQup@eg zJ>Lri2n%%E+xh zPHF?96@==a*OTi|a+Uf{1%vy->ME@b@TnW1>7SqI7TMq{E>Xw#EneOU3kQEGI3RbB zH5FGI8@$K$4(!SJw4G;y6!>p@e=7+$q{yz3J3oo-wgeJf0bxgrXBzs|0RNYGP@XA% z9QD5gxf}{!WZnP8Ry!cq%mn0eT|fVU#+02NTgE+KyK|@S!t4BUw##Qm5|J%~fUsI4 zm;WPI)dui~ZWP~FY%uFA*N8>QRqB_1eb>7r+}b@aza&UATW{6r6$+}nf8sC7bk4m4nEqEy2ZnZ_&%Eu+Xff5}yP zWY~#WwypW!OHXK@Ie_TjIt1cOJh>^P!<0){Be1`|Vs;2-L2@wyub^Q9!ea-~F+_ij z-1#@T1^@OK(NGX4WVggdZKaG?n~yLkxc|>~r9MG0{VJYXtf~+0|yb zsS1EDhWQr$fOjpHrcnlL3`0_%b_`8&iI60d#BJ1QT z)c-{zSK%uq;YT~>U5>U+B}<^mEypi{HD>R#t({N3KJnay#mPQ`JDjSfG6b!m_CfRuzj+dEOW)nUw>ka9Bz%zYRl@CI_YZ5HP9x{9`QJ5!6{4W!kc7Kf z)f{_~Sn-==S2VeI?q^iK zb2=qlXF&=2<#tu%#_Mu>l5`AA%sX<7@=M{PGi-tSzxD!z7{;5SELB2u;hcvx?Gwpn 
zsQ=3(dLJQbijNL{y@b(aH3~~osaLzQL5%?V>r>a~U+G#^M=;fU8&b0LL)kV7m$ry( zfce)xMHtm+dGu5|zb@ny*Eszr+fy3k%Dc{!o1BsCP}n-PDf&n?38n`94bcC2@IU;Y zv(lZ}rp;;Guq0{Z&f{;Rc!)PGyTYQO{yFh0H$0k8&PLH$=}k;DOu_B%t|rEOuljVG zT%WmBBu@VHx916llmbX@sSXXs$QFiSIbb6{_`Y%>#ptG?Rr&@tJ}Ju_L*B)jHT`oe zRTGn~6NQ81UVKZ?%XAfHq&|xqogCq3CK8@R#)*&!f2s`tL7uL0_D?6w)vdT0z-9%i zKL#!);g}Mnb>06ibFh6axrJi(T3Ah7lH>EdJ>YGOa-eSM>L{|&!H02ey2J=~6Ud!b zU&77#Wi2zaFQJ;Udj}sCE`ItA^GWc(TRE`73d)w^U2LZ%*X>BY?JUOt^HOyFh2>T7 zM2lH_AwGCVP;&X_>47>q#FA<14Nd=i;^S-tD7|wq1j$_^%_5o}U#6m^nVU1Ty?Q5Z zheotyYJ1d~Ir$*fXGC}F9N@W3N3Hf91ajw^|Gllirs&wF1@TkIAUI?C&CaaHG|hObpl2VtzJ`L{*|6JIPC`=p;jQ5v#Dcx6-YQ* za!cu$sHTxHQp|=xjdbWocGqkKXGV!fT~Aksdkc2w*)N@4p-k+ga%2mI=W=K`59|M# ze*w9(zgE^o;eezR6mPWT-iovNr@pEY$0io)l5x4auLA}T-UzCBCTXFcMlN^bS4{q? zvDqe)JoW46(i~PiCqMaMh(SXNN-p1jtR?yG{`TE44lrv4fYsBY{<+D(JGg#E6_?H- z0@n00x#SG>4FhQ&A-AIQub^c@eH79wF2N#lku0!9XoOQl)B7{nJ%$^xI{svbWtS5z znLGDOX{ClpMMj8jQ$v*3`9JIa_wAl^Q_k+8+~WJm6!3pR{S)=T_XT`(+VeFm*XLh{ zGfyv^6;x8Zc$MtuMH#a|{qMr<#)=}hPv=V!eNq3rQjVe5i(#mUX#5|MmVHjIF7K*P zJw3kH;C_jGTi z^EnZ&teqZz2-7PCoFW=$$wv$l@*){GD1zDk|MGvnR_xR(=_3hk$JeJ&FORfhf&cSN zk0RSq=7k=0&QK$ga5Qq~R4kCoSvAZH;_!Ef%kwMgbF!%aU0Qf1X{$*2=>#s~0*fk` zgWMZx70`!)CilATKp)1#BVDCC8j`JVJzQN_h9-}#>BAUd;+xUf_5!>m;?GuHmqO$J zTsqI4p^KlHH=vJfNqKaMi#Lhh`n_{P2YH z>pEaed;BSZ>#<1R6$pr{*X}I>Ok2)6=p*{H?#h( z<;Qjj#2i*I!dC*hTrd$#Y^t>R7wZ4gFm#*D(gh{^x0gE~wqa5I&&rQzfz4;~VSh?I z@Tea=GS$WQ^zblI{qyMnxQw_X6~ez8l2K|_!r9+3NlN=;$(2FPqk_`LsXQu|-i$VS z_rUW(xX@kp4J7ww>Qsp729ODm(9CrU_u)Q#!y{~fHu|Nq53UFB98HtXPex)`MGbWj zlK9``UR+fq4?;_BStoa*$-M^f)d2nTG<%U(HU|;q|Liu57bCK+>wkiFfR|hM?D*75 z?8c6sBAmvVe(-;7=iTh{r8GH);s*L>Ipkh8^b(VBXmZP zT~D@258e~(-B?5PT- z)K`3ci>jb{zU^C-qLDkRc=${j4nIAez(S11WwA%Gx)Bhx*81E3=~dd#%qvDfpUVCR zN3OzhYs1EF*!a@L^(vKL%JiWSk$1;TEGr_90WDcLP5)Epx8vQ*?qif3I+z32__-#iUuxrV6n|6xrA}mfU zs0J;I>3Q{(w#~ZRWN3P|mUzw&rJ*||eKoLv4=#tdDVf49;6jOMOKD84N=2$~ZjZvP@qg4jb-lYg7itz8Gj{lN z&j-9NY!=fc3{QBzj(|VB#!H-^?B=}2WpV&)_kZ<2y5EfAK*Hj|>_mDK4@r7!`k#f_ zKG}NU2fbY4)RjY0ZK`xhY_7HUSTH0ebyBd0DEPme82IsMaxX2pMdW>+R@|{&fpb%1 zU!TQ&*NgvE-}ZIR2HvR6N}!g95VuQ%mG5SZM;lT8;keA;X80g|Q6ow&zNrlnh5=*z z%o}w6b*o`%fAV*zf1a=%QZxUUYw}T_D7l1Q^POWV?K3fL*m7UEX4_}mqHyc{pYOWf z%bg4M^RXhDfxo)@W+R&6gwmaDFdJbdvg7Pjeb?R-amk{FVu%DSxfk?5Ov=($DF1MK zeF$AOs(*&$x^xVake;d#fSIVkta9~0oO-%PeP^{ocNMjNs48s5J2#;j4v6n{f? 
z1-g2j#rQ(n{OjZ0bIY{dK1JtDZ)M+;6WpR2mwGFqD&5bf_r{FBK7rVl{GSBtdd- zEU8c0h=qaA*eUx_jR6xqjsHUj9|@>`*7!e5s|k0ltvBfg4w8xd7hx|YQpa-^U3!>U zwq;HHJO@59vO|J7@$SR>4!Jn+e@#}2OXQg4S0=Q}#k8V#p%uLgsQ=wDNox{c7l_xo zK&j_VEVYf&vbxSR#-6wTio;8n{2{?S+;-0(X#1S77Qy?Y{&%Q<&M%f5TnA~WCSktP zbKotIcs#yzdUixuIcj*2yA7U1wq$B?{Wt%jE4B?|L@LfA^lBm zNh@VxLmcip?vu=C<`&$f2!_9G3_XJOe_}U~7&da5#^?A(P%R}iU25ii20}TwLH{$e zHAE78c?bd8+x_6-R);aAk5ed`N$|aVMi%Y=!2FBj()X&ie2aAJ4^aOA_!5anFaA&; zdhxQFIc9m}g&RX-v1oD&x1K{ox`tPmN0h`qXf$lb-vR#N%S(AS#kRynI$aPsr4~9b zURFp_hxHFTIc@!qf2F#h$t@cH^v|4PGD6H&qG*}WH49WoEsxgfn*d`H(IB*zU|7$# zq=3ZK4CLkO{hzMKM%>_wG_$4EKO8VCv(eyvyfA?43 zb!m3^9oU0rDjc#ziJqOKjl<3R5yFibxE+qPwP5ml-0U&=rI_GYBg+42(RE-uve`b{ zJYErmf=O%j4w>X7o~6agn$*=z%) zy&)kfdSE#~B!}3$9=o=D07cPO+WhP4QbA%K4($Z>`%uFT++K~rJ?WbEvS!IU5CG%>{}7#jmF2*E##ftROz`KI(v8G-_m|+` zAqS_U^$(3){xGiyg|v~I3{Yd3E}qXQ>KVv4`Fc0W6l%SS}#U*B+%W^S<Bl<{dm@?ZUPmC$7wtF&B`Omn;O_V9#koX>zc2K9gO;Q#Uu1D2~pq(@>QxsG4S zkw8xxc#HUfM-0{f!2Ih$HCzYxmMzHCl#>B7%tMNvPC2o#Z#e$3-nNgH+=6lXjP#ON zHo|+oDEZoo1F6TD@nF#U=lv-MrGYmbYAPp(C7f-Cbz6f{_ zn0eQu$t|e=YXC?lXmTCWJ@3bjS9JVC$yscN`X@5eH9Sv2sU6k-oV;SJ$W(q>)-q~l z_z?`h4_SD!$XYSH?~&y%sU_-2m5z-~@)K0?wf2{3;m`(ggI z65d)7NS#PfiUj@-+2J4mur0>|WBW~;WHUGQ$i>YDY|JqK5`Spsphfv^x)J(6D@%Xk z9=}0S?i+J@!0QR2yS?dq5kpga2M4$N1m$W19cWHxlWscVS|@e*=yX-m=~0!qF_CuBi%z3 zDXDg@ET6?(BkB<+G`R(FyodCLa$MmP(tu_hg5gl%L;dg19&(@j2;VFo={hEyEMYzy z1J0%T#3~2vXK~^$y?-r2e_@Or^v}@$@mho<2Gt$}E6s}?y9mVxzIDJL4dovOC&(y? z`W#Fk^WeOfEu`$#^#cxLj6`5`$~DX^AIuWob4%;@eX`+FT6+M z&ahLU+Z*`bC&d}LC3Hfj>aI#r7=i!Hx=`LmFHhtDIOhAG0O_sBLkNoFb*+yVJFwRO zL0H^HH}}K*3sF2<>;wuJ#tq=K1OJykBa@V3v5;L3Jv}{=3iGed+o7DQj;C3sUo*#v ztJk+8DF3iTsL56dgXl*q?#q3pJm5wT^N7)pD{62a3Hmkq^XtX;ZP-^d{^5K{j_&|E z|5_G$EP!0!ym!@)TU_bvJSAb)c+>*rABxOSwjL=zlSwYUu#(1pTUE01gK~n|UN|fp zN7EHqbzo`9EueqS_sk(nq%n4M+W*1$@wDd|f@>IYP{@^QP8Czt zLHUPn<_w!;rg|k&rh?ijbALpl=slB?a#(gBDA$#h-pBj$*X@Rn7o^Lu0sn_Xsmb<< zV}xl0pG&QO^t8M{F3z=4!!R70G-qKj@#5k&YAfphQd`&m%*hEe;->O&(Er(XnvT@} z=87G}gG0ef9ITBNG1=3W0`E$wLz!(prc4huUauOcixbxjK<8iI zcZutId@c=m0{!xprQ?H$0;U?8I3x)qo(EwE*18Vx4|AC$O%9;j0R~~+9(KhpZy4gS zWvw!GHuptx@aniu`6K#;mK;W|a9lh`e^Xqo20n0gR@wTl9$H!wIdo|Kqp{WKeuU5| z>c>0xqNXM2t_s_70srSu;|{RXo!?tD`F+>c!=V3J3Xk6j-9^wpqw}xmx>xiZ2R>qJ z!dN#iCTdN?j964JB4hl!Yr|me(G8lX;200dEr0HLIB2zh*C86*5-yXn(J+=>4tF;&kp|4M`4X+HyKTO{lv^2cqEZ8S+ zY;z(b)@74fFU;{=(&+rFvhJFQ{GwEVgVw<$veW8WdIbDmnEd{thnc&A7HxTFU@4V< z%!6*4R3KEzH^||QTg}^2de-_s^HXMDs;|x%vmLo@jL(GR7NYj~(Wt_3Lk7=7#YT8? zFv{A3DO~|!&%+?%qTN&vICi)*i*bJ@2U={HuZd~}B0&qF9W)4KJVVlYJ z%oj8RUtZZIKih~(kHm>>9)-RQP;$kzVe8aq}pELX>Qe9ZK))H~jZ@|8eRQj+~WP^Pfy!?L0ml|V;!n4W*>*-jJUdwM=A}`cG zW9N(rZ8w^0!TM99q6fKvtL zO-Jf&ouv*=NtuVGjK;o+724kK8(PPC3y0T7g!UxnhfALPTzY%9prCQrE*%*JkU`86 zqG}$H1xk;wZ_+`8v`YdPACH+dF!K90r>t# z8#ToY=sfCdUaX<%pINT5VXeD0&BJK=XUW%QUn&D;lC0A6aU0dWf&ZLmY6&x`epk*8 zY!dzfc-z|xSUfsM2{{Kl3ayL=dVhv#j??^KtAnu5rR(fRwzz$9FiueO47(Bq`yBQj z-brAyY7HzYEz%%DClk&FpZ${598C3LM3CfPoc#*Go|~=>9%#*ridQkb)*GK=S{I>W z;_ff>8a@qbMwd98CG&POZ!~JZ7x0A1I!)cRu3GX*gk`#*Yh8U*UmfQaK`A6k(rEZ? zn6h&%a>qS_LtRygP_1m#OpUy$r`~kni7DqHPF^KB$Wnq9;>2gxkS~tj->WWVfo7z` z7(|grp6b}O*Y@ZI-L|b5cxD3&5A}aN3>MY*r(>=W`8^aX@7uS9NE41l@yYhg&&Ynz zyVT#gavk-*$0e9XEA99pkb^_(AD6LoxWA^v+n%#9|B5yb;TPNts}vtWOYXJLy;&Hp z4Kv~#oINzZoPM7S<@1{&jE#f;9hczxDs*lLO|EOJ^UZt9Da;T4_hjV(46@9;iyS;d2LBf&R?6l@(@lQO#twA;r6??GO{?PkA-Qg1zhQ1=Ld@Xi!x~ojFVENb z&p3Z&EB5er!hrRJw-?F2!i|Js2s_sK&+j{sRe@q!h9{_cEz$;Fuhi|exT@b<9`?*WbU>bs7aL1?

    884ZkRL#v=Y9HXUL#Vkf&Ywb6i}N=z*JMlHi8W~ zo|ar!mJXXpr@C#oegNHXla-XMdooB1a9{Z4?{$t1Xil<912_;#TS*KO%B5O&#V7l6Q<}PRPyR-Xlg7_4Wt>wi{DpNGC@$i2BqcdGA^#%wAjWA2 z>VMzP{EAUP-Z-t?P1SXj{USdji2S93*!dNE%?j?kwfSBd{D-pg_2M?v8VSzr++e-n z)!WF-k|vM%uE52`mA%DhdMX@jA0L4tvJU27KUVgh0RE@m`4>&$8E$*pvEcu*PIJ39 zv;{tOwMiSdCv5T6bhDG9gr7jsH!%%`TkSXdWU>(LX#Ou7m4l58p2Eer{Z*}xOq(Oc zc@b{{?@pBee4;I;fd0gLtQL(CMA{akcG|uEfHbJoK>xh-Z3@wM_K)PWSs# zUgp_=xC$hwW>_Xo%)ts7rfxY`C4IX#@IaTK<<&d;X?=wXGc~MFA%}>uqx}AWQ$t zKRh^A9(|n|hP+UCAldp%>1xuKO2)G3(EP8T8+iK>M#P^)zZ;`pUV`?2q}@a4(YGds zW9(Jl+jvO@R}M^d#gma1#L4e~K?KuARcEAA;z=&U75F06dzTp0#PK0#fAloJnENGy zCwn|+ifK(z+D|5c81Y@o$k9|SOv+F8R5|naW3XR-kOww-G*w(Lt~F&A_=kEIA3#f= z+W)qw2+|Y+sWU%DF2a2w-L@SI9BP0?b(OrORP7X2JL<_6-bMR17ZZ~wOHxf$SI-v8dU)htYcGzyQe=5hwq^QZRTm4RJ z$`#@5_Gz--H}3g7mLYCNlY0{rN#Tp8QmJ3p0ozf&ZOuwAv6uLh~;^C>b%5 zfd8!5t8yIWKVx6B7!U6jQVr>Dm5`Qe+Ncs#dm5?%1E{IF5C!P}MDtL=JLvuMxjOVo z-0=%8OO4>nc}`epf3t^wZ_4d#F1pZ+aIcHy4>+~5DR*4)OJ!?>oTH}!#$LnM@zt64l_k8+X z!`XMkN9QW&pO^Xeq(3z)sP?G7C?Jp%a`jM-A~2l&6vFKyWNRnVv&DG9_rWKi{X ztVRSBY=y*~U27HWq#_uQSWFU?0XBdBPkh3XG zsOcy`(FGow=HalNIN*hI&h8&u%m0$MF6O&)Zf*($Q_Dc3pBw+MqzU32{J7)h#hFdZ zbH;aD3ydNk2o~UebOpm#=g9KW8!-REXYmvq>~np`#_R*z3In^a&b+L>hF3JS$tg7b zkCJ1p>sJ0^54`qTa&Lmnt~%fAC*t5P9D14rTHtsU&loL)v|Zks`E(sJzp$N|lw%w# zyo<&^+#L+9(Hactw!`h!`5eSZNI&ExGS8-Q@4zD=!+Y6wOkf04TFd{s4+0hP=Q8$) zJs>Asw708a$tVVSyB20v*0k6& zec>Z6aOpz+*FZr?gDdQpH2yQ>Uo8K=v`lClfWgMf()1{1G=d{eWtSK*VR`zZ)ONs3 ztJ;Nu4MFQ4@P9%61;hy&4qst`{!dyG38PQH(G^KZK=e`n7e*^evaU(|WNT&TRKFV? zn%v8mTr<>YUx*$PXG|1R<)~JrZqgL+Mk<2-XT|CU0A^#$sGk^{2?NAO4 z4;ueR%%<1aq`_3N4GZ(H?{&oV4mGoOLm~$9zkK>Z|HIr9*@D#th;WG5H%?Iv&-yEU z7|6f)$*iVT4Y++S;1Qwu7cl?&`Rz#yEKwE&l6$N0HJmqDk!5q&rXBtls(*fX490ljF%hHeee3?OX{;`R+}btkraFYX)LkBu*ToElvu-HUeQ2&o zimWB~{`mL+|N6o=XRB;+#Cw1#+bNt5VKxto;$>kX_1DGG^GKS~k%n26!E2BD-w|(6 zBD?LJ`*k=?4Llg^sq&V#A%PUsp-J<9Q9Dv_zbMe#{M7@kedJ&M7ngwAnEn0p+ySq0 zDdAFYjEnpWEF!(Bj=4B-Jw;)+_ubph-4`I*RoEBE9PBfF$EG=M0je@zIjI7xM1?bH zWSalIEhoY-99^#e=3gIhmnhwR9U~U|A0C};GN3=fouVa;)=$Fq?Da1up8vp0M&EwKrI}7&2ki)Q4 z6MLn@KU$wXhNkusF44WNLUR}3i8+IUo(wB)as@z7(Q{g(jl}?Oz&m3U_kUSO->j@pG$N{ zqApThdb?wde<)Z@FQzK9Ofq8P;y|2UU*1=O0WgiEq1pjnIuh#tsyXUB+`@+V&zg5B z4K<`xf&bl`_R&cw4LLk9w)Q=`X<0EdXfq2P>p9Q%%HZED>WOn_pa((YKWm@QJ@oS9 zyBpsc9q%8yFGK8VKHPr$*pH9DFPn_TPUB*>6&%idkerWe&Ds0K%{!&yXcF1u`-7ti zu#io|sc_AekbhwkFe|{CQG}!Af8lj=JyZ4fEUoo_7`WE>he4Dk-@K7V;2$njhQI_i zT~$IK$~XzA*0*1h8LhsIssw2McbI=6eDypb&du@4Fmmr*_rKF++eI+3@XMVhIDhE` z{SW@Bf~TQ^J;-EAL+DS80x!yjd`0%~pV&Ny#no>w=32ffoSR>*I2FeBz3LpQe}?+! 
zm|@YOS5lVcAD^F}5cs6+1{+pAUbJky?EP6KbG%w`j)T&-6UUNRh>|NNZHnifT)6oJ zWJsqz<5}MEX_`C2E&%zb%}YGDPbZiMT&K|dU+>drcpKO4nijbGc$u?V|)f82I1Pk^X$ zi4rS$uuq4$BsXRO{KKM?GjirhnpPz~m!+s$4Hr3s6dRe}GrWiJ$nir3XG@h87jMsF{A z5B{&7q|gY?tX=q-`Ytd<3GYDCG^`*F6Rm$Pm%FUxe|?#!)42DU=KqS-xc{ZhGfN?7 z#)K+RZj3h%nU#s&cU|r<8K(Ix$uEN!!Kfz>4EcaU>-&!>Ll%m}MSJ3ow`ZUzpy_|o zo=6|AGN;l-2&X>WdqaS@AT{c6A3yQ=xAqVy*-T0<-CrWY7y}n}s9+%ZT1o+~>1NiK zvQsxI_C=6^{}XPnbuhE$ohn5Z>YskufZ0LTjC>rLf5BZvRPc;6+_Q8U^?%JjBw*J4 zU&8qVBaPsH$373~aDSh&^U{}zg+!GB7(gKuCyc%fNklV0V{pdg|N6haOw@z{upm*5 zxInkf-t7!NFlu zA_kl=^1iUU!m$eSzwTLh#wOkGT&sVMrgxNDrWQ&r@FpWNX3ANCDQd<-8Ycg@cEW(Kf~J423@~A{*|s0` ze+d-J9w_s{uy5cvaM$7*%l*fU9G`KAQ=lm5Jh|!9dH7@DYz+3{p)qUT#oRxQo6p^M zzq!){)jx;9rNYR(0M$RQiU4E79?IK2F#j5;i)OVNfHp}H?gs8SnIrm8*-01gBYpNG z<{P%1Xd>(Z{qw00=K);_^RJ({bKR8z^dp?2JMiP(FxR?19kp3f6=cVJ>rNc@m;X2A!t^i{z*0et{2Mm1Lgmq z0x5~hQ9eT!CWd#N+``JnshtI;VRA<=Gg_vWI6ZR|Oe-D7Dmzu3Jh*5$>hSRV#K%`s z6qEP@ze@Ll!v&)jZMdSQX6O;ML;X{wkSKVqUGr!(8T8NUr=+|JbV5M4OzZ#L7*hKg zUkYY{+T@*@+r%u}?j+br)NrT?sWl~ihTa*>zj)38q&>e^iM(gbtA8XbQeG$B5|85B^Adr9IMgO^31VmKTj0c|^FY|42stMy1*)ZXFZ*R=D zZ+xaJA8#9`oQ5lHyitI=cJe;;N7;MyQWbu$Uur+Uu-k(V4-O`hN550wT-Sm~nD9Kx z8_2(4;`f}*-<)2Iqw$|r`XxPccn-d%yn*~viHhYA$IF&wCrN~5i6@w93q;Nmoqx$$ z>G`eYe_8AL^h_D-h3=>NsV}Heqfx%NSdo}>=)?cvAKuk=&NXJ`S3E502}ExZMYI`_ zWAV)X&Lt}4*Ikej3nGwz!Npt6K1KWN5Jg0`AmYDrv>E=E&nu~Tb9#K!4p5&QiQMKS2xbMlh7sc3Zt*8C!7?I z^h&U72L6xhXOyvs);|g=Ensz+9|Uj|>Kgb(SU5YGuNwgTL)Yk&(4SWQ)~0h}`-h5i zS;E9ITK)yf|3UM=vYnoK_@Z|~-oY|t_7CdVK2(-@_ZjL$0l31_S=qJzkGcwaNS$3R zSag}&cI*ZCzrLO7XLmAr6eXynaYg9C;YW@~wZrcU4LX%rm+`qTV7HhQ0Y(4MDM3jS z+FiH{cY#{;@xz@rjqNF)@p3m$sW)U4;6VQqEnt14*e40q|74WQcoj$m@c$ly{L}U# zy~t8dez{ZeX#VN9opK)WGfAjLA@roHYZx!4cO_5+Kiu-k2qZar_&>9glj6PD5a|EF z*tP{5j-Tp7rx&1qmM>hq$a~!MDt6%~yrP*eFU?abc3#}J59VKXWqw^V{C7Z53Q?l8 z{C67vdE|z?b*h40zbiYx+)*;&cG5VEL;eijdI57Z9exDm9||&%MCPWFE%t$x3J3ik z5Rn7_(AU7Mg03+3hTI*OZ#5=O2b_-AI^KV7a_>O@;Sz6F>bd0~ow7$>UB(<9isHF< zzU|kCD@UdWFeGsPu}kbakBlwMhy|hf7md5aVYYPH`52o2Q8@s7(HK=skpA-fB9h z)>5E-k?X^{|6R?ab=%=gWxLCr@34j=G0VGv;H_YyDqe{ybiRgqPlWmTr5B8|&Ye^*@aNIeI;B%hTFl0L(@# z%qPeNOz(zM!(dC??Mq*tLo*HI_l5iCUj#CjhU!S(U+Rep%d59U%mM#lEy2r$RK3m) zcEynlq-8U8juGMb{sE2XpTn7C9ziLVp>W#EUqxSz1TOwr9gu>GR+RI*bOjz@NGfyZ z`+THK5JMX9U)Ot}a2&O5v+xbjYDk*ItG4(}3adEP;L@7HX}!v=CEjq$3VZXdUq*1f zbmqdrCVJV3$q_ADc)nscak(`AoZR^e0b=73n2{_-jRF5ziI%^TP9?!fgJebgO{fJ;GKdvcFCEkxG^sYYRKGRV|ms4PcTqC+72u5Z3n->(<#}zEFweAg&e8E^1)R(N8ZyO$hxLqF+#;cv-h!KVDv} zm~6%`ao2(Oq$GY`*{0^QDAvfMLfNtTGKEjwL%7(yAas>E5*;gUO~p|P$UmYp@z3#O ztp(?05_S~#u3wG8voIdp)KV&7+==+#1JENz{4!>dRCfQ@^?5Khr$jJ#ko=#4dJbqM z*|7`70sW^>8hD-Zp|B*}{v_z1q5o?J!u1~y)@Y5zu(CkRF&dtI?$VwYfs4~ab+=7` z6~fpf{&&fGNRea|oW74-;g1%d(?9pV@IF1U8h0)x4@!Xsb;$su90@$PJXLp_%_T8c z`G1=bjs)a&JY}BEi-jbIudiP;=|;sH{Qgq|E8pWfId=b-@Qr(z+uL)758mG)8}#ca z@!{cQl@Yo1-20&;AW}%$%GlN+`@6oP(;M$0?NeK<+@0RUh{X}_La1=_zdC|Fuih!tHym z8QCP)?5SaM=|~M}+M)j=EJBB`ElhS5fRUFP@*66l+5HzMmuCi$H7sapdg%ABqCGDP zRu;PoPXC1Edak{t?9I=|eIYAX;gI!XJbAK2VQfrvN4l}R82ii$W zn^AZ6-B7wIYmx6g)?V}b_IpuT;-W0EudQZoRMF(oe$ytM=zPm+FJ4-BG%vqoNqkEp z(Xdb1>218s$=AF3lpSBki0q5t=_$5v#8J3MCrApX6FrvQDxP(bFgf(bdM!$_(Zr_zx>I2f(k-I{#n(VW=MPoEgW7_G|txG4G++=6k|2%7#Hh z|MkDK{qGNnf7-;(%5>3u#s=S|226uka@KeJQP0N2JmdI`S#dSq>P1pKF-_-ly) zA6+`UT=SlvdVYR6S8U%r`w>ni)a$<{DU_bX^DjcKjI^iKnS7!KCQv6wXjQaCDT8>W zE83jh`PQ#unxmTEM)JMIdrMeX8uqEQwW_+G>aLkn?s#*znM2($Ny)M}Y?Y%sze9ph zX-CmoAfGAlrO+u%`fr+yZ;s15GdBJM>vjt>@duLs19&cwnU!n(@2j*d5OC_PUs#)e ziY^ewpw$5?6v6-g=YRe0|LLE3_8cLu^?$v(6SmWW>BKAe+I!?}hQWw33^~(-pFW;8 z`BERxT4KDs03=G_>|jm7FB4mE6HTtxjm4Zl5(LZj#IK)7|2u2yW6pM1x_(V|!S?P3 
zUbEdq(Eni3vM>VJXNL18>Aor4P}X!OieJ(oUCE5TzfZ*_kTf$@XDDLl6w0|*&SMK2 zy23&$LIvA|ig97aCAXYua9WMLI)hJ`rjt5i{oeuPONadDe8G3IBQh}dlwSDwC-ZHp zw8_`~M8*`7|9m?;Tzo(1xe@<6;{TF7?zghkGPlNmf&Q7De^@`kh#TWU@(;@{-9H*% zafUGZvB>0$scgXPXxGa(+rJ*Wxqj2)Ku!@Tke8;r%AkHvGjjINr|7ZvwCBX7?=4y9 zK#hBKytBD12XnqPkY!0P7@NknCD~RUw$BWUfvgQ$6WrK}rMrST+iX^gd+sK~^bSbSz}C5N zF=x^LB6D2}(_8q;@Jn*JE~`~?oZWwMPF+B@6AcMRzKbht|2u0wZ+-52bXS2D=zsqD zzn*@Q`pC}zSzNUakah?0w{r0AYqOXz*$mm!@6(^dan zQkY!kvRg~#ceP<(sdMXtL8x+1#T6d9ol*cl@V!9KpTXD6*Ab=5lMz_u- zO$C0SlOvk$%?iNd1^rL(%!X|bE6oVCfuR497l#Yf`!+8D_z&bi`(Zb+c8F@;7!9h4 z{$Kv#p<=rRKaSPq&_gAlf8Mok)X5X{x$$z=RGS(B^~)*Xe_!e2&G#99o2Lcd^6_hh zQ{afa^!adL`BYzB&QDm?76N-xr7rC`61ebP#!Qt#CyfvuNnhcc=$664rD}xBTkuPn zI4HQkhAVU`NmsJxqa`yJ-t`}J+!&I)0+-r%EaHoBi9_{`b?d8Tdld+kyRBxOu^Cwy zQa1&r^$v7YPFq^P8vGC=dOAFM-l!WLD`6BI`Q6A|#xs(N^nX1y1aYxw%4-IV-T$?L z$qyn9KPC(g|M}l{T8u;gsrlRn4#0nKi2kRmwWVKGkAU=FEEY<8^r;eW?m9P3()r>< z1VzQq&!-C8BI9&(AMR^B*%Eu;zlq?(Y zf}tbsU9}bjYPmsx?5BLph{RM%S1W)kQ=iPa4;pkI)F(`Sd9zn0Rob*~|2ic98TwC8 z>T9Xq`$$^rzfcwzJ?=Zdk}>DJ?{EGg^nb-l8gX#zjIsHzrn}4GaTTr)s7U{*&jn-C z-cnblxUcjFOgdF8VcuEBT)VU>JZ`~x91VjyzW?vArn$I~IyU)MajgmT#e5)1kt!CE2w+aIym*U_9d4LH`+5PV)`wd*_ zLdg8nUF5i3VyZ*ch(Nuy9RvRN8RDPs%O626zc~Gc-G711Ki}Sb;X3me!_Gf^UPU%FEa=#1#0#R-R&GrpFn+N(Bqbtjj0S= zDAjH0!Pr&XQKC2tI?%EP>93ju<%ATSX-UfNqirHtQbzGg*D_^$v>^XEn7G(x)#vgq zNx{5ei`9VRD>#Zq@SH2QXtEnBC0`V*vv^@gG(8)AuD~~ux<4^&y0g$Wj0*XOvb)JZ zD>`*QU&hLKrj+V3RlxuCG3SK-NmY>5_8|WEBi4(LkNToMESIJRTw?2g*80Ed+lJ1L zf&U%pzlgcmuYQJ5e4&SWq%Oy!PY3$n(^t@@U#KmhVj^D04xft5nh&RDTGjc#W0pQf z7lHoWov2aqth*Ll5ZuQtgcVk_FWzi5-lU^1@6?*8;P|@Gs?-t>2c+Te`h@{V{&VDP zGWJ4yikf3(;Zqf7kmc;dwON)}HeTimhnFJze1+XY=KM%!t@(2|VlvaYTM>DfeXVK_ z51Euwi6Yp_Ns?(Fa-b$7`G-TpToPM9lpFr_e-Wr~g^ans{;%_G)g|owXLZ9(JJK-h z{KJ|#G1qQwqKHu~g!-kFI>Ya&nCKy#0!ARZ1NtvcNAet;i2jL*Z=2Gt8mFPL3^z)h zQ!cLPe5E|{y5Wkg8>5CobtgOdB~0$fu&gWZTB)2uj)22D7S96M{1>b5z=fq$g@GcN zsBY9e5Pj)fXsKk=XvbweLOUmBNj0WeFO2YD&galgne-2QwdQ58wV2VJ5L8=+NeBIp z(N}R-xrJsV|Jg@8b|W_(t}twy1RnffnOB)B8SC$~qjUf2f7bfnx&7LAga0c=!n%91 z50g=8^m49w%DVc>C%5rL7*t_v36@!yN-f0Pd z`3Za@bA+e&0^n@C`}_-&06y|ozzRj1vU8*(7uuo!t13<7de6YGpP$tI3pmj6s9> zr~WFxg~6`1`N#5KKs%EF`5T7*(>vFo5t5z%?DOZx@|@Ir(gX3+vGR!i2l0P-ER);! 
ze_@|vFxna!P36=HKLUDa2P6&q7pEDi^x*JM!av4E8BRkAc&ClFo=4}6wI(81r^o_J z&1;>@$bM1Zvb z{LevtL+A_>{+i6+8RssVTOjcc=gG%-4h( z=SbrGcJ3Z-US(s%_@)85l5!}*cJpS!(Y?XccC$%n|Fc>wn*Ul`$fHM&k{yo^nrI27 zMwEXbCnjLBZ%q#V`BP~AYp_Pw|B5k04BDZGL5xdCFOgTm4gv}}*gG@_=&Ljg?X>djleazp z)@7EOC6VJ{|H5+$8_M^FQBaQ+^6%=VzN8OUaEm%LS^x7#Jw*lwHhGze^=8 zxZF5_WdR8$Uu9;I#Q%W*NBcr=jdKLC4*w@~n=%mk56P*?f41$RL#!uYntQkZFh*7) zxy_Q}w(manK>If|X0m}nla&wIOn;r0w+o+#=Ncg#I67LukzqbiAGt+(^x}KxPPwGDe{^z}7(v5Cjjd)fG@`i}+TqWGTBWl3^jA0XD z8g*v{{5WoR|Lc^0qJU@@@IQ_EWbx+Jvvjs0NytHqRW$$pwO#jq>osARQbhZI=D#5^ zN7U)ZBN7iCa)H@X-q!E~Y+|xVJZb;y#qGU~56(vJMbQ3dYej^~b~c{jXyKk%8sx{< zSJ#gK^d$M8=jjAoKcD8`$VK;0_ZWpC9tAMN<{bh0f65SI z@cvUOZy4AP`EUukvi(ed&t+nW1lN z$&kqoz9O3~(7!IfyIbQvjs}uZ|MO|9X$Ap<5#q~VA7hPkvH#n@@W=47XkU~?KRgHfryCy1y#&}7#6R~DhW*pMkI6G=klp@6{}uTneOTWh^&gIJliL9PIf$%Q zkOh9*yKk>-MN?BV3^#KHGQ$$0ooY1e_0Pu^VgGtRJ)#uGOw)9Z>cBoE-#C3SFq{Ih zLHmCSb*NZ%`l#Wklg>?xP*Tl@qWV{(!qBP{2lTm6@8;j{YDCVi#?IB6Wy0I|#7_Pl z^sk!Z`DSCSuwHo;XcExBNc&%nx0mD-1wdkJ3Eyu-R*@z#j_Hy?;&AbY1okiXh?7yoP?Pq*PBjCs0siN*KPoZ2CvP#6kp+VPnQ4qI!INBRX-X>O^IAfl zS{&0!8B3(%f&YzvzSgVjpLIOIPLFg^tW{>={eR;QyDlZYhU8N(BkF%9@js1}eyu`% zwRgTg3*}t_euBjRXaN3+3ReSA@t#vqGak4d&42UnzC8(2P8pDY7rI9q2>6&#M3GI> zzi3$q6oGLfDE=Ykzi0Leazy6;~ zw=eQVCe;mpv5Pfa+QC16^d@GyQ&Ij0_r75rquo!i+E9KW?=6f#{tKWt4f3redi%)v zECn!`5{q{7@AIR#l@Qp!_(LtvZA(VlKm8B?&~Vr^!O@5Iux!!1+qoDz1oi(67#yYG z@Xt(;AVdFXH2($k#v>HoZnVbU7gPrS&%_`&FO}l?VrN(1P1jy~qTw#Q{BQryM%6(< z!zT@q;~XWudCH{yi;u?#)dvBOk@|mt|Jg59mQkew`xpJ%JWr@sMvwTKU{vG%q z2?vT)(QXp|;{q?DLg&8I#p`xUg}TCs&*EpNUq`z9kucal?JJ^WmURnonq3C{%kYt5 zD8UxRKiJH*gSjm<`{;d+$$>R2hDX(9AK3{?>UbWdK-hN0D1j%+21)^V_w2>6i0dFW zYvO4Cq2L7riXHqD7r-4ZdHF1F{+fpq;2&Jym7e7`BWd;RtbEsdg+CJ?xqgz;ecq0C zrtb9r{QjkKS2kgGRhP?S;)|VxpmbDk4i@@CCx|2a=}Z=#eI6mpF;_%i(_ z^*^`y-eVDA719ew@ek)sx6x*%1oFLNV6!Lrf4Ka-q#qItpeY;j??c|m{MN|x`rG5f zWHuFKO2Ge+X(%mhY5#|Rwy+v;5ImiyX5?3oJ0^GYAPpnGXA-v@`NHe?NO7~K0o$W{ z=s3m2Lz={22H&AQAfEO&aI`g?2|*N!7{=T09+A4m#=tA^h>ZvF3l{ev3+&1{863gD zs~$sx=-dbVQ(YV?(e;|rz&&i}sqP10(l6*{ROt>^r7M=W-Zyv6V00R5cilG6CHa3~ z|J3?f5UZjeUu5qwgC};rOCP}qeZA7|O_VC(UjCcJ!=kk4{?~5*GjQSGnnD4hq5X%D z|Js~nly>@X5MLjAs+JyU@TZ9H`t{Jw6A6wNBIAbrclq*w9sdvH-y07r2Jd4%f#^?n z=TgKRI^nZi(S0exk6$Z_TQgB5GEvSm>1{pazY^uN68Xqp-wCH<6}0*6HX7VUbC3ai zac)SH%AS2nDrq)NO%w+ag>2eoJul=vmwa3F_*p%5xkE=aLn79B)FKi%?Mf|9+D){_ zQd6_(+=MaD!2M!k6Z&MN{r8c_kHLGA#`QjTr189N<1ME?DYCw);*6T$J=LzmK0kZ& zpwij>&%`uI|JvC<1^naK%1q0pX?V9U;9vc#uz@cY?I1w?KkK2ifmx@?RHze=2Gj0A z5N;&fO9%mSQYlv0o0NaIE2Fgm{s&|4g>d^EM=&y6hz803gRoiF9UO0^KYTLqv-sk< zeFr39X*_``T#rjW>>qBm;}gLQa`<1%k;m)r#?vBVm)@89E=_V_nW+cIk8?Qn^ko@n zKX_&?t=m&&(ktL!Q{y^(8vgnj02f&RC3pj57slG}9z9>7|Kr&~*|g*T+37#bVF+HHxknM$i}FACjwTiQ%l6H(d92WU zsGD(6dQoNCs9L*+;51xVR#xLWRO2-4gTjQn3IK|K8s%aGDd?d85cb~z|FGJNWhwan zY;qXSpW4;G-qtUrs)3J=JsjJeit1lHWZA&~JodKyFaHJmU%y=u692QKe;rkKYo-hY z%{hFgC45id+`!YTU&}X}qPYdc6u14=M9rm~GY(#?oS06`Q;U^yy8qa9xXLM`YNipa zLAL|rPP-qSo?*!O&;Jbl&oVm85%-FGx1~PPg~C(hKmI2^LJH3w`eCvN`VZgtku|~o zyNqUb-C`Ts{}T%N_r7U^93)B1VW@AX|1g>3Y|KIAVzu)N?!p5{m=JXT>!Zs>O4fb( z8c$}}rJcKr6z{41G|W36cUt#e;k(B*uCb?Gdsk<`5fRyV&~GA=v-WC^0uSwf`|qCz zm%b}yDmze^`h7Xmw@Qn*RV=nr75N5Zf_Urf8ODL8hrQcMWFwg7@T?9LjlRgeTDt6--_`t4Nt3d;=Yt(o?};b zm2OI*?_$30&?Dy|0lkLRNnW*>$kSHiQwf|h`e}z1l~tzcApZ{eFTg)<&)pTD(K5Ym zU8A3M_|2d1o?lb5aawJ6UOD$a#A>CUa7FP?VNm;Mi8ITQLaceM$A}hHAV4Ex{4W24 z>R+tL|MpKC{8s$pd)4(Qk>I^dVqgvq<>E)U^cQ#|Xjx*V@VRPb9MVo1jQGlBP0nd( z4t9mIXesBu?U2K^;0KX*ae=fLV;hYTue9@S|1OqIby z{=Ew^t3~%OehlnOszAPRt1%#?{RcF)P{XY#F5(Ydfnet&9)F(LW5(X7){gS3T^ZmBXXaA5(PGQtaoGZ<%g^x6tlnkYx zDws}s0h4y=e}d8eL!JA)x*=FXU(sEn8SJ0_+y5iuask=ZzXqul!kS=rbQ_7`)3QP8 
znwk+*|03;Q*dTIi%M6TUfuMh}s8=YB!;F)y4$1%H_?EB8Y@&l305BJH2n(@-r^3NwS6(!_o;?j2L%WSXok#a=NBK*xm36yb{gH9vEn zg_Dk(3loj4e;%FxQVjkd9eI}PUkN6qb!FzNG(7~9^{w^2bwFj`Uer63xvV;>4EmRR zE5?BLv<~jLHSB*u{(Yza@F)WOPnJr04<7|aCh!;;c~cl8KNr%f?}0SwWhvsp|6@G; zo&*1P0*OccKU#7M$U(?|A=@mxNK*z2!lQ!Yav~I#Wyn;f^ig_Dyt!8hdt3a?Ua`YR zcl57M*Roaa9=2_s&@`5DgZ=mSDf=yJT)tH*3P4&>pab|8-h6>B*gsXgo!<8=DH~fL zSe^IipRbLMB^PWjRY&#bnUt9IKZpNv?MuV?-Tvp+SEJ{${&u4MhiAff_fOC5{g?mw znn3w7PR>&dX__Z%BK7~=IG~~vvlr!m9&QtZxC1s2uP38OAZh<1ZC)v!8#$~@sTACq z8Wkd?*;}{(x6H#r*ZVR*6OZbCsBvw{*X)J-JNSRrD-R0j<*N8@H^b@WL)PirF0_U$;Jev*bc7FH3Ykfl%@`h~m?)BcE>;+w1J64wVVS?`)d7sz&xn@2q8aIb3 zIr{SHrXiZyYBu%HUUK)`=$(!mKD~W>F68y3M;?3R79!Yf*zBfzzFV63t z@L_`Jx=6*TnD)P$OwV14WJg@Jq7GbyJiuKFyjGm3y=G<@YH_N;a@Anvg zZ`wkq_^s6uk1zj|f6yYNR&_+ivA^bu+J5)Hx9@$0rn4db>)3(`5w}E|D=%Kh3Un}C zc$L!sfD#@kN}0ehF)l8QOlp)@FyR27*8dz{(>7N}j(l@ZcJ*x#o;|e+jNg7bQcsY$ zX(Yr~K?$w@;nnoM@WBm{tN(5PvU%f|Jvgyqt4F%;cI0SkgYyqquA3Q|pPHgL(hOfHAOQs>s9gO5 z^*{F%%$GOiGbI%N)iT2G=HV>}$vnS)@VmfE38?b4hZELhQasx7Pu?wvXF1v5Dx3O{ zZyjHZ@5O5;PFc-P&MQJyBjtam@((Lt&FKH~&x)^Wo=!>__;qL8`peCO`sexU1wJk1 zv$l424P5Ez$n74hm!TY_B*f(a5-(NdPm*gyim;!bG{bARC z>bnBn|0M*5oT^b@QX`jRtF0|2QC-I8t%P{So3 zo9=i1^NnE+2R{T)T>TC=tT`6=`au8tyEk9g1iR(fc>jq*L_5i zp?rnvKaD~Cb4>UI(Wk%qhxE^zP9vsA_n)Rqo{3eByz@+p4m?MKkmkRB^mWVYf#nAY zqh3ki?a&(O|C~v_rTt%%>bBU$b@T0^jq-oBOfJVemdAw77W9ApN{YBS6S_X=2_R`y zwfgN(=sfnp{6pWK35zwi^u+UipUJRQ}Zdgu z4;#yWiXv_Bcg~2N_1K;eSUvL}s{i8e`6sK}x4!hD`Y(2j z&tj&rQ~~ur4|lld8+dnK^-m6Cs~XwDQ2zzd+KRU)RUY{B2kfi}p>+Ncbu=m(@<7`E z1@=EaosI(G@;k_XJrOd6*#9*{bbQq^H3OI6xUY8!kR-7oBKzj-oOBr&dC{xo(5iMwO2t-tghkep_u1HKbe3-iybwOIr~k7%_3M|I6gg_w`tQnJ{p5)HjC1r{VIn zWJHSp{5d=32p*7V|ChzT?;kxVUNxwH&eG!TJ-lVN#ns&tH!1y8P*OMb!v0g_zuxL+ zR&@bWUR7YVP22u>y)23SUuQQdPyY2?e&Wh1KUF3v&3|SK+ayWCXF-rB^}jzU@5sPG z(;f#K>HHHVB2FK<$txIHQ)VYAb=$11;p8OW=tmGoUGH`q*;qg*M9`guS{lsDJ)< zwXPHT-(|G>ED#8i@?Y)4%tp6{^weG~Hk`N!LX%4Bl>p}-I{)w{&wvkDY%leHop)fc znSl8}=>J;9#kdlv2xY=Ey8lAEkw@o0hw3*Ks=}1i9t zu*Me)taT^E;4?CV%76X|03vKqCoO{63taHnjg`sT3E#qAN%5bCLWYDnVl*G=gcnr) z5BlGa1T4W%LHD1IR8j>B|2Yy9eqC)!Iwz?X?}o1ax^6pF29pCc|Fw4XEWRrTnkfG3 z=)#(0#W5PByi@d)LHvgky%GfLS7(k1M*b__y^a`e%!gJ9K_>C={&*F)X4*{8Qu#mUXXq0lsQ>A_v9Gbh)jQwF)sAx?u~J1% z*Fb3ct^dLNL(WJmss6{6%KzEMZWHD>MH@pih5t0>QXj*zpo}FSjQZyd?hM#zp2FJT zZ{Vp26Qa^@9$K}_W@>IV3+Eq-|H{~V<}3E)#$&tgZo2;+{)%Y%!a(w4xvENZs@F9h z=06{rH0A}eD%0DXqMns`FE9xX00?nzQox5sq~a*We+-@KJ+EQ@v;L;j$a9{$Je~A3o9FFHY;8XoyCnH+^Sa4;>U-Ag6LE*%m7Zs%Z z7jF%6=L`iUUl}qKMt=#s)MLOP73~e6usG%F-sKOhSNv(a>cQdZg_icw+mNb5{0H-Y zzFj$jNk>1w&pCFR>VJPK%hi{YLvg!^_P=*r_|xV{!Lk;-Df{PQ!NKOO7NhWi2wB7= zKLpoN{8#n4{*N#2KYw-LCHmjz_(371fBxS0MysNh$>x%_ou>D#o{Mu7a8ZS{()m9l zUCtp0E%m?C{a<_))~gm1vAiq{&Of-Mua+J0Xe1PL7tUm` z|Ae~{WeTxBlPceG&?lEWvTmE#lbUs%zwUg0);rR-5SHHhu?X{jiXCEBqwIffWyB3z z_lK32u5C455C9RcD_?E5K>iE!e@4&#Bm7%89!%cww2dHd?0nl?KCOR#qajJt_fCh< zsL&?-Cr}ZB9Hg?Aql-icZ8ZNy`3Ijgv_wo9|y+%OIcY)~#d*7|Lk^)ZI39HWVxQ?6C2! 
zx5;V7{O6kT)6ZYE*erYfCqkO7~YW_j}^Yk6q@Pt@XP>Rzr8cT7+LIIK)P( zv!Zgd!<*CR0P*$ZDdJ(g}N^TwwziqDa ztrzj1d&}AS(YLz-4)~niu=rN$rCTF?3dJkxQa86CsgGFzpJgchGlBEZ3p)RK{nMid zH*X8SVP^j=TKDE5)2WqZ(5>r)E$v-{`k&A3-L=eaOe_noLV+Ni|Ey(O?3aq%2?UuY zsXS{^(wG<*g1eH7SXLcq{m1dEjr+0_ zBW1|@l`}*X|AqbU@g3<0KV};Aq|9J|b;h5gdM`<$f zC(W}1(uwN-qURr~|Kg#Z7}`Ac2}U-EiHMBpI#wz~}phX$+pPI_=yc_5|gO>pYnlS$ZhTVoTXtBVC%jD^*^nqQ%`w8p6%bLWO!7ariJ~dXI9_quH71b zjaNzsaNWpELA=f9;Gz(|U{Nnp@caPURnVE0D=DYenAWsQ*b5+hUM&51KIo z%;(Pgi22Xs19q+SCPI+B2%d)X59R+dz0!U2Kquxu+j+r2(~%goZ#u1YVAm#lvgIJx zVCt|E<`yN`i~65I{)^WC9CrSedZa?&si-!-p}WV`*q6&?M~MAHUDEan7NwShguIxA zJEkSOfJw^fFOFu(8>D*VS*oDdOeLL3UZ%tax4qO#(f=jwzwrL}sc76+izhp&{tH>b z{8PD<{^!>-e6c;LR$1jj`M<_aFAI8reBMV3vF)s70l3OugI8q#{-OTU+;m)Joi}#B zbYTGH|Ek||W3Q)*z&kwzN>KkSZvv<6+CfO=AEN&&FLZGJA=CFc*`q9`+<(BGYqjym z?IWf8r|DQZe-bCjqRdj!r7jNNk+oUWG^($b>Y->IR+l%b)W8#G$(bvgR8mGOx+>V$y@sDIvJc3&Nu_mAvJd$-f|fN&DV58XUDHMqO;pHEkB@@j%B zQ7`GCw%;P7M(|)J?uF_gVD?$l`e&=~Q^gthdG2XI{InYdz9V8%jcOpBsRb@;GNEZ1>nzzJTig!u%h@DUP3yExIrx*n0V+s-9uw z7QG_~btvug7Ik0?!s1uAcJ8zsKPl$Z!A`tC`zxdFQu^mvyJmV5C8ng*|6X~d<=~pj zm`V80AG6G70)_wldGDwo9Y&pCk~sgYe+y(gD@{oawEtKCeC#Qe|HEy`P*wzS@VQ78 zV=u0geM4W5T59?*wb4@qAUPegS$jC2C4Zv*f`avE<8yXUvi8j|V`$nj^~DvQ+w$rz zYtBJdiZ|d`S^if38m=&daMqgULQ=Be;O?5tLFT|g24S2sQ>we z7q5<%#)u>X^uJil-=&_1SD$Gy*nffkFYn;eTW7}01J!?_gX0|E9Nu<5ekjRM!udz8 zn;2jK)qjEWPye9)SqIqm3C|0m`SHXe!FXHTT`>mLa?$JVo)#yRR`hhpsr2H8H_YBP zvIJ2CA#UO2?0N!RQiC-H`@a+){#Q7dFYA5RGZjVR*JK9yy z|7CwtLEUs!wdf|peORGIaHUN?KE|7W^Ixd{v35^NMvF0=ik6X-Hs$I&t^`wQ{}=LK zZ@!GB_^+em^$%~+R)#O{|88eMh=gN|5>^zt2p09Rj`T1fY=nz3zCCa#R&zFkE?-0O zFcx?)n|m<-S?Yh^Gw;ddn4Vun-xqziegA0o#Z=A3F1Q5nTD3Ah^i^40-Z^f~(Q`iP zrbX4FSC|V!U8@~V(Ri)@)Blc}P?~}~^uA2-vTnGDQo}D4|AqLEH2)blN}B)jD;+j5 zp(#?zfAQ*5LnTSB{K?LvDgJB8Wknlx{Bk9!NZQ>N9Us&`KdAhAOy~Tj!`15JH>$rX zw2E&kq>i+vq8Ue?eUGN_pE+mUl5TaPG0ks(Y=5tN)>a1i1@-q1^`FM`j^PUG>wTAJ zB+nfNY@RLV|16^CA8G$rF2Zt#9_WeLZ69OX(~*{mFSbH%2g=Ou{5iZF-%RUP-d8h7 zRbgJkg*V85bwB?0YxuzW*L42RVX@yLTm&<#jq29xJhk&Hr20>FL|@JQ_g41zqFmo} z0p91{G9hX4pZ>Z3<8*>c3X`k~IIlv0#QZ}o2hkw@Gw+=E_@X?ggj6>G>oTuAxn%Ku z5G|n&t;d_i`Q@jeu@j>6USno9T zKSTMyu588cZb<)3`M>g5kg2}FXjCsD8RWlI!xRig0QSGr`kw`;e@6coXf%9!H3RJ# zdnXZ*>=T3p{a-10nw$)J{2I&po?RU5zd**QY*YVV zcdo3*!j!QAf2_Q}pXz@fBl|LIlU}Re8BzcK7v9^cR}Sj`S%JZZ-%g+g4yab+&g=z?AJ4<_o4!;YB=4|B~1Nd|DK!#YyG=nBLq^ zSeawRvTF%$pg65*@fCDQ(P+W+-# zWp#0g^Jb*QT;c_0zL;t-?y2Kb~B+b==3)>asaCl>Yhkbfhq+Kdsq- z@su~8xm_O2|A}*FkSREy&TZ<>8X4YfFl8G%JZA@*6C-Y$9^AacLU7RWCgV_+hAA2{ zhwz`v#rLhLG~%x2QCl-|r%zm0g(9`vNjXNYmJmqmpHcs_V6>~pZ~W)pF}nYK-J29% z!Z)$XvuARO5ZQ4#$EThNvY|82vDJ*_K*6T&wtRqrr;`5K2C;mH+%ClDZi|Y1PoH>k zrEtu?GvPNLox_v0`0g#Y%ZUH@2j@e{c#xk{-L&H!dqm~b>$Y=mvHo7a#%|s&w|K3be8+sOx zmFl0Pmzd9tg_2uESET)4KQCnKI4phJI%=`W^rCUGms_1<+~werPP)-?LPGa{DVvzr z4>V?Ttiy!dDEMFh7veupot%C5IW0u()VvYko2R0d zDl4>hS-7&vKjrdHO8<=gr|m}ZU#DI8e($})#j#WWa7*b_nP(Gj@D;VWsj`aNN!%m- z&u-l`p0LYi)tSnrYQ-}+Z%RTtbQV* z`@fE^?rL>j{L*ZM&Fbr|rCTxcR~k#jjd0np=(q}r$RUF|5_*tz)khR z&lO)^^zSg9RpNha+G9LFtUcxW=6!;i!PLknD@Q-=QrPGc-zK_Gtx5F1)BM*D>&Qoc zwa3V{Df6^btL)kh7$a=fe-dH-&my;sh_2_QG5OPU$F08HdVI#k>+)cz+3FR55fxJZ zd+!pa=2*CWwb`X1TLZBFi^hLkH*4VhGwK#EN{BBDoK2|yuRVVx%Q8UQ7P`OPdBe1} z;ka2k%Z{)K--QR^!TIOnp0Me9lj8As)^qlt_i)eL5#!H1_uYD6d{Dr)fyf_>j6+ke z7q42*QbeY4ck%iD-u^3C<8;ax@ zmqk|SfA6)x+j7a(WQ)$k*tg9^xZd(~c;IH|ZzRtFs{d<~Zo)U#ST~4NCX?}GGlc)p z{TEzPlEhL#|5sjHs@{Z#z*Yecvs}Rb3w|h*ME;8*7Br*~U_{JpjTE}?5iCm1i;`8` zb#l_K+oHk#(;DADzJ6F(y{V^~|2KNpwvY8KTE1b|67n`-I6LAMI^ys8MB3P=oV;{x z=ketw3!+aVFl9mK|4{ySi{;w9l&Edjxx{`uV*O0_}x-=+A^_g{*Rc9BYpB5-;mJ@!pB z0XyWs()e(jm?;!4#yU#iH(TmA)F<(@qf@QgoiAiLSez3+WQ}#IL|(9!>c9B)b8FWt 
zX58gRHF3e5Da)w*XOroLjzOv_!?u}X9Ps_hm)Eq$2Rm+zWxl(%YxLAVMAH7#u?zM; zub11hcVcXJ1E|Vv(RM(Rwn+FwahLVR-gEs0Ghf|0xUS1_R-K`Ip2C28r_m_u@Z16; z?{tY6gmnMuUY#*6vTzDa`M}-jqM!nl|9uXA!wEl;|3dudJg1=vVFjFjbQ6IT34N8E zM9-i1)tY4FIXI8Ty*v1pJdXelJ^v)kzv-B8a@xlkPvo{&$NNy>0CWGL&@f$FF9W z#?*_8^>}=+>8Abf^3_o>=}N8^+UZpx`C-!vj8Hq?PB1{~|B|;(<$y79wFyFAaP-D$ zgZQ`q3;8b!|5;{GuJ0*BQuB|Dz$3fZDKV(I+wcxLjz&&_a{Z;$c$@t8ehl^^*yh+e zr*z`;V}bqeIb**5t-s;hv_0RjlOlK&-UN9}ZXFKk%0^zqA)lQVxiuC3A1_=xG$Gjh zY|_b2A1YRrl=hz^Dulce~M#U`hOer+c{jsMX8cY6L| zjcri*h-;Cl!GS!xs6}aN6Bfqx`8N)@rST>&oQY>~;iX|CgPJQUl4Q}eR^5=jVVSbc z?p;qrH>v!?eGV_)OnH^OA^YO&fxD+wN=9CEWAG-f^ zZ|zBq>DtOBi++gfiH01#sApA{z3r&~3BO+9{So_5$8UIxJ9%U7Ze`a3qfv_%d*&OU zXWEHaOu!9#{+VYZqXH0SDX7ax*L0fKS6GVv_lLjvud}Nl6e#~!V?O5pXbV~TQFsz> z(-+Ibz2l3IMR{IL)3V2>&r(;CF5qFYQ?RRVj(}4x*!VFmF~K(Sx-g}4<&B~V*#9ny zI@y^;>7NtxzaCp~Zc}fy*S1?2ES$x+3Lqr;hPf63*G%pH7umWKg3D~~_Z~#oGNpem zlG`%XQRo{ecI}XpH8k;Uh>A+ikkJkDOrgJAS}#lQ#sP@L6`^MHaG{M__idDUSMh;p592J#Q~$nH+n}yY0VgCH9~GvqBYhpGcOSdWTEZcplkMBw={vTQ7Ml3SIn?v&%8~`Hg|( z^y@#r3kyuWPjrAp!95U6BPvQ1`9R}8?e!Xy+?u+apVxC#kTHs|sz*PifooqxD8PUE z=R!*rbZOE2*ZK#nP`7M+=cTLae3LMlfcocI8%dmsIpBBDq0~c$JH8IOUL%#NrsB0N z=_B2LTJu*Fc19IXjW6l%VXjgx_F2;^diCb(v8RaNi$<+%tx$r@;hUDdjceJ9`G-DE zGddNPj>2kyS9Jd2{z(UFQ!7ZHdsDqq2lFK%+thYOp?|=TO`)Xv8~18R%f?#LUSB6uMyOF zX0ZV53fq|_vZ(UVu!-Z+_>cJycuoNNzh+m)`5Q2mkiY8gyV)Uch`ueG4JV9b^=+p8 zr~)u1dCmakZ3O23fUS6xbMh5lDK-C$j=iR37yrX%)$5fABIEq?lR=$O=AVwfLx5SU z#o|kU%>799zb~T><}#~;sQ&ky-Ev!WZPs`3?Q8LfgctUsPsKGmgU#Vu4up{H7m6%U z|7@Ppd4)HAU6*OUikz1U*8~HJED5RpXWiqCwaJ`GZg2AliL+RFY*;09YeAakI8(6P ziz|5;6k`4}`oAdu`@Tf|iv$ei|9s#jo(lxb{{a;aL~dzwV<}G<4He30Pr$X&x*4eX zXLtkY)kF7RApUc5*$v%@#L*59pLgOt(4OeD3`iMe0CO% zt8M*ZZ~;F&_P_s-w=tbQe#3htHat1_f&{y@%Ly+E_y%z;vp!cn_zV>!eXYnMxV5$D zfYHTVBln>0Ku8&AR01mh8U0@i*+CfHy+irSHNdFMu1qS_IZ4$0^JYH>zY= zD4rPqYY_jz{O4Vj@(xjs5ckueE=SL1IaJ~Nvl=v}X`@T6kKn2Vi0rZd6#1|GV;f?Y zZQb`O{!?*(@}#p{jUSzjAGrQ_bDzocORn~toH93l4tU^-4`dedUvD1POs?@Q8iz+f zf-k4c1sTP}Ztej_ww762{lU#w~2zEV(;RO>9m@h<2l2_fbGT2LM2 zT`HHQ04pCjHg}|OG(q$$HZKWB8uu)Rh<2ugqi3KBdEuakM8}^ zotKYbsu}J$uWwH_iV9^?_z(Ku^{OYPp=BF2K0^E$lguNULHJA5KX$1NQBuePOaMh~)(r zcGCWLoPS>O#*3>38&7sBQ20+`DI?_7EsJTj&MNo2TUQOnp=|2GrqFE{lu7cMRqqCf1fNl9L5FhlX-oFib5o5Mur_>VF_C6@77@ zY|z{oArz!|WpU-LuQo%-_!XeI^ePC>x+~~8nWlE%bV-*5#f=e=SBgTfLI2lB+W)Tm zI91zF1LvPaCW}PL@cLu=asBwqeIZ%K?vcpKN)jMg4ON`oDfz z)BZ0?|MNXtg%{lwi8eN@2Y2sOC;SNIy*TiltM%6oI~_*SS$n5dx3aWNW&M`JmT zs{{De=nBR9W^w3r+n1Q|9X5)fLXS++7=XkY{e@6U=C@%Sk*ZQH2K>dpfs;9szyV#B>`6n@q2r+05my+=ka)@}o11}OG=qb$7B|wL$k=TW zd|Ony8QGj-d&3ZDPD;zs=Yek*;z#~v-rLQWoVlIEF>o`0k}3bBi8+(RT%!5Tia9Fk zkd6K?I{)YBf-BG@2mj?C5)BeH+?6C?Txcn2XF-E_!4IwhOG4>?d~mj*@_)v!>3np5 z+)~v4Y|_X4XFC6IcEX|De7h*}hn+vZdLLC8Hbhz_xvMBq;xhEVXK9-+&a{JO$`WS$;6n979m^nD(O^+Ki?E&m1Dq|K(daXR!YZ^M86P<=ae}rTIUf zOmCHW-N|cHF_E9i!?%c71+52KF$J*|$8qPqI8Js{{^7#2 zCLzA91QXgM$#Qasx(P_ro#X%!&PGhK2~7J028INAwaC4Xgtl=J;1mX?tA6J{uNsk} z$bzLUM^{n&*O~PZr26Nd6Tggsi(3aU z|4{0Gzwv7rUSnWGTT~rh?1X|KkTBYmba9jra_c-Y)FuYG^+^t}IRMjIDgNtXmNpXg z-Pr#Hyp8xfl>P?LY#k`MY#Vap(LT}?ms?X>y&xWhpm$`ks-Ay@)ZAdylo8d zCk*`R<qtD4au|Yh8g`1<2BOfBau~2Hu?-nD`lCNW1fTtG*kjmzO{`a$A zSsE&!J|Xd39E&BLf0FSPC2r!D)&hw}a`e9^vGX+m`!CiwC9xINap839()#D2{GZRv zQf4zVT}uP_j&aSYTxA%+CNsn+2^-uu4 zrJo9MHL>-aY$*Nnfr}bA+zjRap#O{Fzx3@PiV$M{;W&H#=_LJWIoRAB)EC91_^+(Q z!TjevPHBIr%K_E@6-4JhqyIfxhHLvaNm(0;-FIu$%JbCV+=YscDlJ~!%wT}D|6(ZrC*ivX z2hO7(L$R(Y%T6%bG6a{Yyy0+g%+S^O#(RC|@8HJ0RQ}K1C;+`*SK@l5_zy0qcMlhf zXEa=tgU|2ot(O?e!mpnMyp?pYly&(eLf=F4PgOTl7Bnp8t#mGvLmd&7|6^T`Z?Pf$ z5Bk4iH@BrQfy)1(@So|$mIWjTDE>>3Y6&GXUnx4M0Q%p@zoz>yX#LO8ImeLy`trN~ 
z{oP!1Bap~gCV3Xyd9Q;I;yD8}grk9t{qL!K+XB2Gss9W6FN%8P4BR$C6Yc*h(x>1W z8}xro$Na;M<5%3_rHeoiZ%g~X4(TMh%b@>@sjTGUs|Xi!8mov9`;VR1YJ`~oL+hUx z?@YYFAm2Z(8O%TIys_mlBV-aNnXGImirLQ~74-RnuE=D9r0}0MJjK*<%~Ahco+qOK zl>XT+AN^k{^SMR&1jT>pIj6rZ4E2x&6aT^dXN%D4zi{T9ipl-b{)^ojiE&c@yT@9Q zwEv6he?K!br%(?n7X2LRe;@v4xbXgo38ayKFVmIHcMNd@|1bueT~D~Rg))Wc|5A+Y zBWu$YAw$Jv_PN5S#f=$SOp`p#bn0$La;y+W* ze2PG!WI6VqeqKI0c+r);Q>FcMFK|9-&!3(P;H{O`3#icJ@}dKdGfI*qV``+6D| z@0h8ooGhBeRLB7_05hEpx0n)%;iY`OIyCK*>YqQ}*lCr*VEy0xLt6jL$ubHB$sqp~ zk>Bv2|9zXm4CKFhss67en19$}?5k>?X;h@<>YT0P-if;K!(Igj=9%i7K|=Z8_tbd> zq{{rpfA0OIzLc9!(EhJGM+f^)G5=X>J!6l7GQ559@ZN`DNzzx;2vPm-RQ}JSzQ|#i z|GcRb`%mfl=cq43l4~E8-VJpJnu_h+<0N9@0RB+6H2?6evb&2JG}HM%*#Gr@{(x-0 zh9dgk|I`1_{`bFwy;MyYFy#M=8%_6LJfZV{PEIEzzkXWufBx`tDF4tbkXtXW>)v^F zly!r9zP&eT)b3X%R@BAwM@20GnE#^~QW4rwPr#7>y>QH^u;yk4p#FJ%@7q_u^Pelb z(u^FzlJ>us2kZ{$U-WtOUCcj>e)szGYc$!l<>z}zHO_mcPTKkGN`HmfGmB~|2$P@mEh@+J%sE?8J^#@9ALhW)(uQ;bDE~W^ ze|TwTwbcKG`OiDQTus9QfIn-f{AbkvoLqA3cuaaO=F?=h`t5&Smuu%L>}C^S-^=DN z3L>qD|MCz2C;z$Q_{YKnsi^G0{tLSQ9W_GOe?jLT_J93XIS(aMEQLgSu~h$TZQ~k# zz1S#*6jfM-H|80|WHxx!5oA-ofmJsSFb669^O@NDE=a4}taxzlxBgk)ro6e-ZNmfc z3?q5AH2-J(9~eCOKmPag8R;y+)`8On?CXj_%2IMJ5PqxrAzwy5ylNbz5E z|JT4YO8+eNe@!|Q+qL?7b5c^MCuii8IHRI*CYN%R%mZ5^fyK(efJlg=^AA70sX5$? zANnWNe=41S6tVxE_J5)NhksL_J5nc;ggpO_i#dYK2Ap=N{6o(vRlV*~{D;#2Tppte zj~e>1|CIK>e>-`#*gz*!Lgt6%s*W_bv|(E{T+G!Um=cIQk?_r3Wk(Rn(EcwR1cV^O z{x1suq4m#n|HYhR3$(n=8}L1X`ezVID*PK{(YmTVIbFLyDwE289?UP1*Q~GkfOv9qiJ$NR{a;LF^TGTd3nko2wafwq0Bx=^ zaQB!NkRfu)vY3CUY@sX4ME&!D>BkZ&{_ExpLa?bicd-9r@}{3l!5EXNWwZ3nwwY?G z)(hn{Y)OuHGvOiD;bZ;}f6)MY3!XLX@Z>1O{xAPSX8lQQ8KC;VUJ2ZkHPwO2e@6aG zs{f(5GI4&GXMv7;L2DH$U~osq6f_Cj3@&A`G5<%l4N`C}1wqJ7Lbwd`pAVbw z36*2Pu~3|UQoNMeRQ}?TnA6Euj%RR6aB$cn^M>=Xl;S|(Aqk#W6{*(CvV_fc^ zH2Up-f4nds+LEyrcUwc-@p^Z$j?fbL+U+bfu&7e=5BlG;+}{>L3F?2gFX0wh5#-Ao zC9YYziX-ZfMWg&*RQ_|tVru^RvQo>I1FhHMzh)Srj8{*ItW_H6xX8D>?HQ0SwH=^v z+N29R>HMGTRR7ogwa9->-;4a0wEyepMjYvzr%>S8|T2kHj=8G131% z{2Qb)!F#*$1-~#a6?lymfh7gEZ`FPi}D^mUo@t^w@4)E6!pBGo=SJC*-dh_A1 zfe32N56j5X*K=rSlhk`;tI47V#IY9lFjQPYTL=Sa{O9UiCeltWSHbUr4Z`-~N+C$- z{;wP9f7fPg(fBX_km^6B{9n)1GP`N~=cOPs1YuBIfBKoNru#AB)Mz4!-vU$8|8Z0` z=Ks+152lpN8rZgb^za5?Z^C16)P_`>pKzLN{4H&jx2c#D5S46GO-+(*Q+uv{3WUOvQ1cs}a!ld;XF3 zUnpJ)@#IkcuLV~Kwj6lJA%aMx=ct*iw=w^a@_+U0oH1bha^h#~ zf4{n`CJ);B$8EPj{f{}~Kg(MNVD>TF?pkDQ^y4_~h#$%EaRqk0W3bZ{%=i_UQV!Y3 zqFAnZ@`FTl@z9K8N^K;^z;<3`7vi~Ocl&&<^DTT=VgAnqfdK=VCG5Xg(_Zh12D|^S z{<*cnMMW1(XaChl$STNz9Yew;uy`P*^Q{-7!Gj}kwUl^SmocHpuA0jKq4J+=iobnr zS$M>P;=jJ3hU?%I@5uh%LHuXWFj#T-FLjbr8v?>srVa-zo%iT0P9yj;s>c0ZN;EI9W|8tMKbW!awSR>HOPN$TJ}|W;(71jD(t`bsep2M z!mmGCm+rs#H&_`GMSV(foLT@X|GCGa3Yrn5Vksy!G%xlmX#1^yzA{0EtWA|Wi&@n)dtsOO zqpvPaM~9k!G{?zkjtxN~Yy_k!TZ>bZXNk~SepMV zOlGYGp*WAV(}L>1;QK#w6OV$R26kIro(>yk!ZU`%tH5Yg@gr36A^$aZVV83%TLH$q zeiFEZc`CE@H2_8%Fk!!2IQJDEu(1CN@t@Z-@qP~&z;FBq`%h{A`>|+K{6Et3&#L)G z7v*(%@JI#>y$kgD)jw$c58^+18XVOBpzzW$U3+eC*yZhpGl;yoF-tW)1Mz_j#iQxL zsk`^0#0h!NpqlibyOI@E-co*MdYrU7b(?!P?k?Ce;2Rfot8wy;+AX1@23@uR;O-@BBmIKXc0U zauKDW^v`O6X4L!xEKFMRF9>U*mRUoHN(a?{LFNBo|7i^<8BXh2Lg)Xu=DsZ)Mqb{b z1+O54>OUpSTJ@P!{?Bx+DkyPRV8X)s^s$RPwxC?O?T{d)32i2c()p)NeO?%ph!Fpg z=08jEpG~@qOY-ofCPjIGkmqRWK*@CFHbx#EhoHZk!2XND{&%YXB2k6X|A3szp}rG? 
z{;yashR{8W9q0=cbp9c5QoDep`cHjMph`iAswJHud@c_qX}ZmWX6gKsklAg;6jJk# zG4fx_W*nyQABS0^WUzc{>76sffu&`Z+bsml|2bijZtDLh9_;@OZo9aB;!ZmM@bU$M zT=P^t7_k3>(m$uH1wR`9*`n{cgL9uu*Koq&qM|4!9Q8k&?pnGMK+QjN{^31({@MLy zxrV2;574>BVFc|;zOF+lzLHSqP-Kv(_1<=BCf zcE9lNfBpJN{rU69ub({sF zr0RR~?$hV4PrWaCd!B!O)AsS*lkRtq-@m)x^YUI#>&urfpS*c@_x0=MH_zMJo<4c< zq@|$PkdxvS)0QE|zR9b0R19dfpAE6?!T zma-%zIXQh(bi&r61nQS)`JrR84<3%#Q8H`C?p4WY(aEBD>EgL$YFt91C?R2ELPC5( zY-9o%n~)finCx=6*mZBA<-sju_HFUlvD1G0cGDfXCV4ronOj_wH+vJpPdoAD^&sLqc49;)ac)xHwVVT2b`+wCMOv(d#y?-I%y` zE&dr76}2fgDj_y%ebmZ?sAU@$Z%AGon;NhoJs>tCctu*+iX^|)8GaENtJjmO*KAn1 zc2mUK#MLX}S1pZSwrt&!@R*FCxQK=EVWI27W^Y&+oUjmo%}iL}yCEPX&2Mg+ubq7e2C;HgLPqkd< zW46x6G-irX)D)fNlQ(Rhymqr=WTw-KO!w94*6Xv!tj`{|I@^3rw(FA3t_wB`XKwbG znQp%{%ONytLRg&Rtj)H8sqQ}MZf@z$o*9#TH+xJ5 zGQq;y&eJ^JbIeAsQE_Hwwq7Rdy^Pj+8N_%AqIC4Hc8$b6TBT=b?4`NFi?@s= z)9$6d)RVh-oSd7kj*hwpkIUr_<0#3=v6(Cx27^fu1P8T8K;T0#2#`jGuyh4rihaBY zw(~IBg9=EO76fCV1rR;W> z%5wkx({_Zk*WLD3Gxz()Y*`&u5DX`lkFPz51u-G@i8|x|P~IsGh(Li{1eG`vgr}`vC^*Ksp+WMb^nft+j7^ukxe^3Y2Lv7W2vMtl z7*@`Jm%7ynCP6Zl@qj=9r0p7+B9Lec#byyzUMAzxvydSEd*a;Ww5;_e)2hS+jVBu9 zsRIHt@WvC?&ad4aSN7>`+VS)I*OQU=X@fuz9gtp9%LUtJzYsz~9}vm{DT9D|C~9gY z(ts>&%~E$3jGA@moFv7Q0aAdJJ|NtERM9~I281%E_9Q1U@O2_;1IdvV*64*IM1^0^ zq6BiYK=S7t`I*g;bV1CCsXHRtZ+*Eu+&t>ISybHxEXadT@5k3(pa4=C=H6MY747~( z>IjgwVn9T06tv|rB|`-9$MEL8&eVlT3F3u{$#QHoNH)r%B#Z&S3xaFYzS>G)K!O6f zEitAia6|5bptpFOIP+Q28Nai^fY6KyR)?FfYh_VY+0|-l&#dw#A%E= zAY9Le4hD$<5p``im=6`?AwfK4^JqUH>vM4)pZ558J&=FN+UB($Hei>aGiEcs&P}UVB|n9}qAg z;+&=&Z4jUi2(&?x8G$~}x2-3_TaGT!VqqJ>x{purJLAv+S>~d88JZw||HJ-M7nWYc z;rQ_7ZR=##h6?fmlRYt$WfjoCzyksXq<`3ZUfyQ$^++lpJ<;5^F!B`|BzZ}5b~v_2 zqd*Q6H1D?cQU!Tx`nkKtXI=Sg?g13y-N_gV$U0#(Xb@0cit?yscPqU=GQ?OO@a6fX z6uchlfZ$N+t^htowu1!m;X@&{0wj;;pLPnd3a!sop1820mQ@D9Af3M=q z0%*!-kjXrpl>;kkB}p7Ps7jC)yiL$I3<#~dQeYf3Ov^TCiMP6Ww$=n& zLU1B%bkJxdi0w}mjP@fd3>#5WrHf2t8->N%v5td48W6FjMktpteC5SRDj+@4 zeBKT_qRG8Yn6%;#v(0tx?R9C!;F#)_MhWD`E-bV2@ZDi{z+?M&UauYoWKjX3*d7fU z3x&X?3i8IUdNLRg5oH|_MHwVr)q)}j1|ATA6Xb{TCM1Y0@rkDjf~kOXnTKoZnXu%F zF67OA_n>l^9YBc$1#&b%`t^O5;o2(tfDmlvQyl4B%(p5U?ej-ulD56|aM(UJrnZsOZLXNpA`Qq|{R9bOS>2$xHL_!|Xn3#o1_j+~gky%1 zfRU|XIS(X>`iwz=90icJO;*<*JRn%K))EpCXmUZaFL)sF*SGNy7OL-|V;e*X!h#NBRNW3`;2LrMwgTTWtuDKXZ{_I-T zdIqX7e*KyxMBt##EPIF`)_h6EJomy&qyo~5u52GUz3H~T;Vxet`^Z1cj?^u|$Icie z={YG1G(mj*ZweqizXA#+IO-1tq)QJ7M}Gi^k3YTXsE&|@nhInL5*2LulH_W@yCVhIAjYC#x~e$GB-xbals zgiz~UmsDGDarvL$_>VjSq%rx@fOJYA7yqw3$mwEJ>|j7B2Bh2f$Xl929vu*R&rlLU zkn!u^%g1#?Z3m`{y(FmLnG`Rpr4D(MDS_PAo`*ckHq3{R62yLj3~dfLRzOK4LnI%l~#r!VR1@NS-od^2ee<;Lp3j(X~XcAr|Bjd=%Q< zIZJr3P*X_1u=I;7?wFgFAtxH61pJx&xMMlE1q6jPg`?IVY4$=n66;H4KJR{H2bJ89F zAr+7%1xU}we-y-#xGzxwSsy#LzyTSc2x69mlvC!TVh97WAX^a&^0FbBz<>y`ORgNupOo0&oz@XR7X)KKX#Bm@bc2a}5Cr`u zh*9w)V~KO2A59QjdE$#e65hPeu)7VvIJkX;a3ezr$(ws)Lr}suSde#pQiyjFiz&+$ z_(=w;da%>ebndN3{i-9q%Xql6I53rDVdcvwkH3G>cHko0NwO5NJBbEJzm358=Wy^- zCr#UkDrrEdc~H3*gfzFQo6`o#ysUE3fbqtTMb!K=VnX!WxTs+uV?I+T4G0|~h_`8k zNCpi8F$?jcasClH<3;b6AOkjSNh;BS~WU9aU35 zfw$U7pH7WPXrl{)pIj@c6)Gu$Q#0EoSw<1#cgC%LR3%Fn1Rpg!q*OF+X+tC$1U`n3 zR)sv{NofkwfQZiIK zkWMOPY-5X%s1(wMoD!YWXwldrBC6A>vbG%+DobL1ult_S`Fwx>oyYh4d7N{bx#zxL z@9TA4&uhjY_pE>j!3RBn56!DV0La1?#De+fTs(HIxIymOVSTOE9%Y(8+N%XWz zl!e{Ef`B9x#QYF9g~PT05Mdg{K(PiCWJv}Y6Dx@EjyBs&PNJzvv#fb>93se18O~ry zS&o?ag4ob~esJ@V(s+(L2&8*g5{ABAA{#T4@qna?@8q=A(ngBFAote+t51q(eOiAQ z3!FwGv%(a65W)fJI3TO*hQzw-T!eOG^Uu3GFma;Vc*>3LS`ryT;%Wd0$DR;4ARSu} zw@`9r(GXq`V=v?9jToOqEPW9RTM$3j&Y*1qej0jxi*N$=!q!7HJ3nOBPQeO-PtMkM z5@}PXLqOoF9fdJEC||-F#tVX(>SxX0O3AB0K`_1`{`JELJ=qeg@0y{qC{FPO!7LfN 
zx3GYu5RjEX&?B-_FhIKXFcCi1MF$3-$B7<+*vKibay2x}Mc>&5}Wj{tF1u55ZGS zL@@s>U`v{TcAGxAfU30wjZaT$kdKO^R1Kj)F3ZFp3WDJsT&BXMBTOL0?vB7FQ3Qa9 zh4wliC=R}=DjbTvM?&8}=9i2CaVVmg{Q*KEu(R;x&Hy5mR>D!LHcJj&WYzu$NS6;5 zLk8}49lW#fSdws66enT^xtaOrj5uQm5d;OnL@v@JqKhjC`s`YGx`9FNa$MM1#1qUv z8f(he8Z0MM8SZQb;cp5=*Jy|bfKW-eRvLu@2D#~}%RGuWNZHyhR31vvRCjTGvG!{I ztwa3r#Co}21ZcA*0zjyF5cjmpnV;VQctWEN%j!xB0+Ij-h52Vc2>=mXma>bVD#~OE z7Q_HZKk!}I%oOGy20<|Yz*`-S4BVg1t|h^~;q$9_>{6qD{`d@l^njQtvU?69(^xb% znJ6uxnu(wwSo)Z$8Vb~1nyR3RnoHyu60?*Mn`$HvSh+=En%BlW71{FEw?9wbcsR>= zo+7s!H^@CK2)V}+H;xSJkOe>%f@{sg3-VehO8RsGAOZpc>j`WrlQpJ^w58BAj2Fb< z0m+4Q5`>DncfuJC7f2EOLr-0T+E8~V5y1xE;W|7XaX=`{Kh+~_12-fHNOrj8iUA<~ z$0wadh?gP7CPMacSKmfNK)aEGY$kX3Yk;khp=NjU+N;c(m|uf?w>N~~0_l4Dprj*e zbP!0J2!X$%#C^F?5Odh}Abnzbs_%V<@uCzW%s-pzWh_mF z^A83{2S8{aMQg4un+;C^h++dEG)10Fg#~dA_!sd`^E}l zu=yt+*e@~+m`XQ0rislBL|!$gt0QF1kaM1vEm1~mE((5DU|Wj_Z)fR4fC&KU?>jH8 zm0|O;4JiP;)JYIcg?FPq+?Gy>;(TNQX4)PCID&Ka2|NKHE7Xfn1O<8O99PMtuW#>0 zzsbI{Rqu1a9Tz565L0=<-D(@R`Lvq?yDi@k=AZjjmpsq#F3f?e&o1tr2?*?! zC<6V*kI*p(c4$IC1P%x#fc^Shqp0MPKy<<#kiyEalzV-7AzVET0+R5y$kwO8g7`F? z8HE8!b2ic4eEY__^KS6}a!fL0AP#%hya;##2fE24!GiejozEA&(NvSar%)5aA9?YO z^R;lHC6oI^uKq&xHz){pFU%mZ_x@|h4SNd<;(heY=@dR+B_nOW@2~07WsMiGf?&{p z_*!gi{vj3d;^GPa*B~d>?I@|WB#;L?aL|90*I!2+ok2~YOtInPB*IMVEA0iFRRx3P zxev})>)!{GF)WB7Ao=h#QAD=H`vR|%=$2!=s981wD_R&C8R8w}o_8Vk>Ttqd%E@`qqIGu7~vM_A|E* z6mxe#Kmhcgvx`YIEFkbmGcF8uyc8^m@q*xIGV88`+``-4T^m!+CSV15eR41-!wo=^ zl|vrdMdx|2f5}!fe%Je`p}vEQpLmtxLKl;dkgde{(y_2=pJAi(hojuOBdYAPXvd5u!OO z)BndHCqaP)@uTxDYLS7cTTqaP0YaaadrL79p&J&&0q}_jj+Xn&;-7do5mqVhq`kap z*yU#{!UVu$>r=>R4zB;eg4ksTRuG)2KviYH6P<__>^VIA&Q%0q3*yg=Wg-BGQ}%_2 zwqr_6CBH{-3$2aIEjR>dD9NWK_{X>?R426I@MWsXyO*uF`zQ7&PWMpN)_>Qrs-7ZT5I?-vu1^0}-Hlj$J0&^}h`{w9 z!TjSGZ-}h_2apB&&k=Y)jEjxHLmG~~sNyVzeH@9*F+#L=@I9mFnnmXsM1%(Kc2yg7Nb$8aSj+c;vuoJ*>u>~>V$@4MK!o|LU>9XcB{nKM3KTg-`-`dY3R5g#7Un;pIHzirMXYLvjqh)yLR{{ zw_*jsp#MD52NNk)5d5G1vt=c`5q^<>DuA?4!|p=Re-QQ-likafC-PA_{C@tMe|D@@ zw%j-e0Ub#i?R?!572i)0=syQlh~h{J)H6+M=k#4ar-!SarKf8w8lO&inVpnz%3}Ky zmO%eGV@pD_PPK+T!VGdd7cp(0e&OEc%)$c#oAOa6KL6nQ&p4~MG0CJGE(idj>&1HH z;O0`+Kul41q{EmkI!6aFej)fYPeVcQ6jl&(Nyb~hi3GkjKRCz704diG^&|Muv4R-W zZ4?1t1^l_y0W?Lz}`C0cAy~mKR}i;j{+YjFJ1CRqO2Bx zDw;2=;NLvb!X|Jyy(|6~5O@&f2<_9L|LiDo0U80Pc<%>vCFp9c5RmnC<5%s8gUoiJ zLGHr}lswPAM#fqabw9kS%}#{W`TqyVLMl-p$z=69t2##U*aS8|Xc;2IwVFlnND zjh%U5K@0{t8wJC78D&}Ff*5&I?<4k{PB|wCNLDbCj_R?xscOCP6m)rw%y=MyeNR?i zEc*IK0^5|WJ~=u(nRk_0uqce%AY2d+JcV1Y?uazv3ISR1gnuS)JlDbi>C0gL83|GV zgWNq2aB{^r&^`E9)aCAfPtF4!<3#u1IO<5%D%!A&O;N)g@ zh?h-76bb7t&R+QY&1ADDmS4G_}nDo4BvSU_OUr`!I` z`L_;?*M-mMKk`@N+~64_hT7KI*Bts}CxKU4F) zzW*Csjv|WqS7eCYKvhRB9c-*1rq|+Sl%H2$;o_}trmN5S;QOyC(0^tfltSILCVa1W zPTIsbX8Y-$x*Fm90|5bXgabmg5t;3f;fK6By77V-j}Mzkkl8a>iSNJQUV^~^_FqSr zBn)A?vRe+sV{{PDAl8749_V9V3vQ5ug5Y+FekwVfP)n58H?oUlU;%+ki!z&$*N@ml zy=8B1ZvruzExPFCb+uwW5`fm97^05chWrKkVGp8;2)qrq_gOqpEhaSGW$n5>wP)Ef zs)`^W@cde@I+SN@%mRymRmk9ucMmQ;II}pUp=8s!dzLCsN>^VEeT~gOhJt{=P9zch zzzh8bWJU7~O5!tr|fUJ4lw^gEeW`PLxHb|^vv*8TZ-;5;STj>Ts z78!mcV37NO0kTGS#5$)+^CgfMV}yWqAZ0sLBGnz60S3_r))5J0QGL^l2eB6CLH{w$ zKw)2QSJ%Wm;`bobjncL34>0_$LHtpP>K9Ewyq3GH$c}F>nX{96Ptl zN2KVC`8F|mQ|$e^AY|})b^X1STDE^YIo%T>n1BBG&g~>4ydXxL5qnfm9bJ!{!1rH4 z@5T|r408KNzuzZ`U}7oT6|7zehCg0dZJ#6fzI55oL487huE)P8?3%ot@y7uaXS6poP48)GdCq3k@KIfKKAZ&fHi#v5c<{^DbADbp{ii>&+aSt04N0m z*-SyqmW2cq9W~r{KYn&B%ql|tr)E;q2F(Nn_Vc<4nMn*SIiyNPaY>F^j!`hTpafa? 
z2gFU5W5gPeP}DE6z$OmST?|>LnBxI~f(Ju!^$?z?0Jp-TA3*RF|)Yz?ml zKm-N|{bEI+nXx5opR;U^SVKTydrOW(y@7E4IZ|CIE&xQFvWU~OL?^KQ7Ze1q{`x!| zVF5|le;oqZM@l2`I+sU-SfltlkTijY1@iWsFdb(x8Ceb09nhETk~Z5tumeH=1FSb& zRNyDZ76Q@(Y@y)=0U(QCBQeX;5*!K)8k=Gn9640$z>(3`12bLsStS~6I=25>zc5iw5(FSDfoT_`D*BfSzKSqO33= znfn^?F#TtJ(JNIzUF4#-+j=4%8$Hi%Yr^b)CBYxeWl!`*R- zl4dm$a*uRtNs@95Ggz53vf<_g-o*`GHmFs-ZI}1W1N~?CT}Lfg5v815!9^0Az!5=V zns5q62~frhWaPaG}ve*GXj7n|yShDLN8n(IiS1Gdawi7^ZdE?>pEv z_3u5bAO-_O>{G0XVv-H{T3{sRGle@Myr{%geM z`Y_m#G5rSs5tt%{1JY%6l4#ny7WAahhs5Ru+ zSlpKu(~J6RRV1R zv9Z&sorHE1ihf@Ds%5{=(!7~n#BP+J27KUdB_%Yg&}^5Ki;Y3!QZ)Z&I1~g>X?( zU)v5d7y_UPJgjm3XIb9ljh}^Qt+sQEUXC4IU`(IUf4Zy?84pPI$i|HaBCH^$-G@pr z!wT}Am1;bfOCW$jt~N;p400308gyqwV1q$!@R%v6Z-)>4RLBOIkR1~v+<#RR)y6AD z1GKr@Nfxa2kzQwERW9uWCu82`W;r#dmzu-1h#@-5;^hY{AfRF6q>fI!_#9xfkAK@? z+>s2YZ>^Wnn})m7^5@MyAe-gX^v3@1XwGU?jdO>`xU4RA=B*`&OT({N$Mu*25ds3Qo_=1> z-M(0HKY>R_(yIwcodmOPwi*N%P49W7Rh!=6TOc z8wCZyyWg&)@bH3Q8JV-`R0RH?fUmt$XdIh=?CqN&@bDcENa6)~0zd@HHFfju)@exc zIsvy?w_yKuTdcNG66q3XQ4aA=vTAm7a6kl%dYs#8oq^6GGDa?D9kp@wvQhxZqEu{7 zPSxJ^z5DT&OI>$O(M8Ac5fThprv1^^!i$rDO>=un@>#v~-I(GBtgS*odd&q8XgA*0 z;SNDT-n?!AM4YRx=ScSA0f7h5eb@mN1L#5UgDY;!Qjq zsGD<6c>q#SqwP$HrtZK)OVP;x$A&`+orGYaWCrKvYFPgXZuE2?7G-qB=PIBX!k| zArg^&Qhg1(XKdbk``xN&9CpnOVhHwMVIB3a><>+Gc)Y8U!R?K|o+3pRQsNGFZ|RhtEF~a7wkVfc=-Ky{b8!v>K0S%(IIi zvvK|B@4-3aR1NklD>2a1O>r^$@*e~g5Z>qSkMdqul{psrKDvv3dGP*#YnkC z6|#ojQcfAbW@3DLQp%S(JdQeeA|hzR9Xr6_ysA+jn?8y8f7bntDu0=%oXfB~T*B?T zbNODo){>}}5{LSwctMP|@(~gMB7C@bK%n0xhRYNNB%vTrR-H{y-&OOyXqZG203td< z#V|nnW0ztQHveGykG`m|Aa7>>HO-7Yy3hh?vJry-&uepdN)veFa&~^yVetR_{GkYn znQXxHuW_B0fy>v91`rX9;^-G+NV7uwRLwIk-&i^vq%Mzs`y|3?o4ghtd7 z0{fd~{kcVWK@96hSGDAS{-08X+3W&j$vx|o)SPFdk7$_wL!7nK4D_G9#>je+tgs+& zgFgh@1OFG0zU%GH6%ItS;ie`h5_DYG1jqy_p9=KuLVw_j{CK{)^X zUqHIJN+g&o@q%FQXj(m=guwqpWQ1RRlN0j;o%aPm7W)9dYrjseOyL|l03w{vU8HIi z9)HS`!wd43h9j>sOVI7HfPk^mqdPHcUx#%9748XJF0fB|unVm2=!~W}8^Omcx?y_(|#H$AT zSNx%=Jxz(Q|GM+A;#$1u!!uS}xccD#*}rE%2RR?5$4X#;tSt{SjNjUSea%f!k@@(> z{@ZIV21uVeV72cFD+~NTQxj22hTRbg}4|P|cXwr{ut`=Vs5ATN5t>W?oT=JnYA>nj+~ z1b_$vBg_fjz5ngMM0~&$JOjuA|8u>}(lAxne+m50`Vf%3pS@=K?59x~Q3Qa9mh!BJ zvra5=?wXQpY6Sm}Lrht<1@&Qt6*Gq(^74rh(7oV-yan?Q1_=G6zU1KO^HueY0LTg> z2=!hYp)=Oeu z*F>GH+Z`~6juph%F77jfe|;kcNRK7aARtLr`L7_R1#YUUrbQYS%;SE3j@~lR#03G#fxBx1`|6Bn%MY zw466~_1;m9lIef?pdb(Y&utZI9O?_R1m*f<~p=ATz2(^SP5Y=qwY)_-9BQJ};A>+J!4<&#@U znEsQkw`JmeyK{={!1&qTUR?jNyS!ffvNw+y{V&gEz2m zqr0?$1Ax%`1T%sEr%i>r{~ST!|M~jm%;I?6rEzBLB7_8htgq8pK^_iBKYZXYfABN> z=W+j!L{9hTLdAVQ>OKPr&K>jrtjTc=dt{alKgi*6IsImJ!5cMksQIu84<@ZNf>FE99)V_RO|jrqd$#Q_lywi`=*E9YST z=cPUbmik}f^#Ar^a*^LMS{pV{9J6r$TF~tf1L=cFR6EZ_GEH;~rB-EFP1ybZ}B37a(vEfZA>WbT~ z5qthQmm_P&_wT7-OWchqnejh|K9|+|W6L8oIshX29VzVgGltx80ECu!?fcPn+^Nx^ zRN#MZ@ok$0FKkwI>TwA+KL1EOz06s#%yOn6_*I#dBazP*`hW2G#|@s<`1~WT;+L&| zZL`4mr}bG6CfW+c62uv+q&lzbi*kmO4jw5nMhZ(-RSEsziv1oQ7u zZOk$K2Lq)4`sRd65D%Mwgn$Ui*|$%05QOuOALy|J(vrjEk6AodBE{pBN49y2yaAYE z*lG&R5=h|xnT<60U))e&@teE5m`Ajho&rGnxh)IiOk9pUH>C4d|GANZ7*47*xK=?y zF#o}{Nz>BL>*yuZcLtJDg$2Rx&%knWXv&)~Q;@f-yKfl@rOyID1RjvY01*=-@iH#n zGX;5)aK>gpLEfJmZfRBMi^KdATRyY@T0Ar=^A8afj|?a@Vt`NpMC{gT_MX4F(b%9j zd&A;95Ge!E2pH(;7$EDY)+75-eqDI1cA;ch%#Vq)z1f)mS*ZUw#0mkSPMlf3C|MYg zT<44+APMsi0J0*W_FFmP{0DeaKtM8}?g%bw2mo0=?=er#jQ@F6*Cf1hb9V7RDfQ|K z01@k?EK60_ZZ6=mrCKTIMWrFPXvO6}KCfFfHZqK@_g~*+dQG+7f;XHnAjt|?5Q1nc z%YOs1)WS2QvHceg$by2rH@`6Fy8w{&*GHne2uhxd0kZxQH8nzk3wI;cH4H?<0TF3Q z3?*QJzT@8xpD765;}%@1I<`E4%YN?&d4dt4|7Y?$8-=ILgAeGykHHrTjUo2`7f3zR^! 
z!V7}Mt;)M6$2=uf1Odt8Wse!hl9fzCx5}wpH3UF}d!oBPRuH`U@~EetEBp8Ut8dNA zKFF`&^UsnKL$R>L6wW`%Rvm$i`esp-Hy`}Zf`DX_@~oR|kkz~{tw~?UL(i@TNM_E> z$97w+_lG-3Vj}2nEC$H3X3K6U@)dt}ULu#yoB?DpmHl$nUfzG!vyjs}tAt49q{E|5VGC z`Fi+YTLBlO>Ygj{U;gU`tEa$7uc?(NC`>kAnpHTlv7XOznr{`%IM^+Yv;z#i^v&dTqW z4}s>UwMAzBz5S`m^ygJUGC5(7FFG@Y1;OJhno?ItfV*XK%!>OPkOhEH?etmf2unos z!}nhbp^`xXziy@B!Us`s7K=%C~{tM=x6J}+s*Irh=kFfj~%s(7cLr*Fa079#_knFpn01$!a zzer|g#Y6-)X_ID#BLG7GC;uhGpIgLdz-^X;7X!$m?2gDhM?!`sM$t|*ZD~`#p)@>NT=bx2b zkvqT;p&N{%cmsm`m!{em?UsXQxfMGA5DN3phIJFYjR^8z7$EEMdkGs0B#Z68g!9jL z%>Q#-VooM}&p06K<#B97eJ|s$PA3vubz|Lg)cthlofw}_uP+n2zigOyKl^ja9+0q& z{7zYSR=TZFpR=OE<55K8ecQvorU#rFh54^Hn#~|r>TdQO%HFDwhZp3nnboqPdROr@ zt90kBKdNJHes<{X#Q<4#NeZF`1`kxkP%iF&j{Km|1>2kM{{W%mI3WGu25?G|$Q3Hx zG|+!W-Zuex7ES+xts5x0F#jOU=5a#5yC!WmvfPpUH6Z=q=q^`3F9$Nx2wb@Y4Rt!J z%yBPf&EJ1PJjbm`{=$CmuK^DZm2icCh(#;VkT{Q^8q#tkr1-kgDjFT~UmH?W-`ba! zfd6ORKX2^+{4vcM#tVXj?sI;$fNloeaDp+yKFtCP5<{r)v_Nl z({{eh56>acN z!}YDy#{i*0#?{qucbq|o(N>l&3H;AVr^b^!TWVOdcOo=hg?Y8%sq`p+(=0x)d>!YH9aPcZaf1j)O=u%V@`M0?}{y`MArFDMAc09oUo$t2AGe3~6Wq?}T5 zCEBKBe{_e>mpzLeFN$vNYa-C1qm1~YuP5J2F^<~We$poqX?AQ`|0`zuy^Z9cee`r7 z!S%z5N@Mp=V5pE?tta&V6g$H;gV_tNIYaZI>8IfPC(N^3j;Jlpy)Ul=UdE|gvr}7t z`=8s@ZF@oY^8EmS&~#$p@KO@^p8*i@{{hlhP^^(GyfCQyeMDtviGTd1GJHXOH;efQa74bnO7M1`@*NAMii_jQkG}`W*f} z+6C^_F|WO*Bq>D%5%l>r=|tUymh9P0@;jIJC5Y#`HRce?E6J(1Vnq07dy9pDP)kCf zE|Oc%4SY~Czpr@3nK>bGA}dZ-2IXcL^cG_U!Pxv`bgVUJ(E>E%f3_olTPSd*ATQF9 zgjN>*--6(^B~~#1NIrdS@B3pqBnt)3ef1X@N%z<3r(dwxP-a%Tx%9dDDvwj=!^d_H z+3!wkXSoUHAB}u$GZ(9x%MFD2XU{uEj(JFBj`&MMx)2ch@6#*Qe&S#-5az!$5U9yQ zKtyl-Az(=S|A2^qjCjehFI^fhCX*!I+WY!~{sR+qp0P*4d7TDIn!@aLrbZ^pA@`Mb zPoQbAdgsG;pd7wRP!Ozf3V!Cv$smUXWB%u$rXwk)2EFFO`3LkL_uNGrHBqL`u}};U zx~&?3B*`Eu(YJI)O!R$zYrlJPTB*0L zk&JHraf#JghkauO?Y{or7t$_;WS+9Fh38z~p{u59J{L`WW;^2fFPMKG7{UcsZeFTP z7XTuxPC4h!nY*YyamN4r>hC-=HR1lNuSu@i3lQIX-akCMEN}W3DVrt<{%73(GqeBt zGXF4GJ{+YpwWU!*p1lDX=AX|#J(Y$0!@K!tY)ZpAGtI%+#}RhR22e}sUaXm)d?7_) zF4J^wJiO{ucJm#-I5j3_UJ7c-QLDP1oF*uU*-Au2vwfyIy+S~!p*i$c&0o&UhWr-} zNY7h$e-Du@+<#fst7xFQRDu5|SdMH&B(PuIfP%b4$xTfVsm1gkVgCKiE0S1(sL27} z4ztY(R5dKyrAcImw8HK7=C<1EnnR=GQrz36eT$f5PRk!&9Bf0jNZ!TxtXJHeqrP>`3#x3BuBY8lp)v9nj; z|9O$$?RXc+w%61-I9aINj(TWX986We9#qiI(!uo~W+m*rk(gyr8rMUh{~ROj zT=a*>DN6>^`KaNBmzF9DR@`#T|MTxZ_McGE1}y*OQ|UnfKAXmp{|9 z87qjf!B+R(WLeEETx|X+S+%OfD`i{5fRS%W&dAi|bziA@*!=T)Tbv~5KTX+ECZP`{ zFhJJM)~Dn=E(Cr{wvX+i=mwah55oNy-zrllQ3e2^0LVHqKVC)#c?kib3xl3k0Lr;T zH1vTP>K5kTqXP9-%AonZ9R+m*3%FG?P3V?|3E=r zN1^COSW*cAp^$$!_LbThi7K`!)yZm$p~xRUmCDyj1+0X4H*m+)!rA?`K^%7s5ZbcS z_v>5Ref;s`eIqJEIYvyeJ=KpIQj{GFxR8JM_y}(l=A1oS^_RoHlEVc+X!V{V_m1MF zLGQM9s^R*NEd{YD>e45=BQ*BydR+|<-mzC&Dxm*-o!r-@Wf_#u#r2;(3Wh@e&%gWX zfc%c>KX08_^J5C!r2r6G^;HKP=Xm}-M*HrRhy!?p@q(D&`7d#mBY;XQc2=@81OE@E z|ERNn1v(@olf)UlQKjcOX&4~1GP3EbR@p)RC#&8#i}&#N9EgB!o#iTY=d=j_;E&?R zOqi!4jG+FfCc1>{cPCY8dolj!Ty&GmgM#4PGq3}cb=UP^?Z@-)+~T3BTmPn~f&Yg; zF|~G{f~9B5qHxUrvl;##f&aN_^JqC@V}Q^TPOJ&)PYY>u-u+l;4@ZpsPx*Q*|Gof8 zE;2{lotu&*kzoG80ihbAyNZ?pUi1{?-|yIvsbXEvI}~OgIlbh-->eB~JMcfJcTKfE zYkOmVki+HfD>O(Dr+=?z`u93Z=1u(?^C3osh%3%zQX`DUJaz1SnVJB|%1YB-{-Ain zlP5;9DjBnEBTf<#Ps&Vv8w)SUJ6Gu| z2>uf{a%X#7VG*>JaB~ekDp(E~w_n-6<9mh2!u<37L+pZ()Z8;_2KLO`F?>4;ZyeT& zEC)kv zx*%%HB{ip7o9I8`!CoCN2>!wO`qm!qE?#23fwQo4X>{`D0yT zuv_-&)7G_hi6QR32$G0Xbc27u09nF<;7hBHx^%$bN>(5FkadpZj_1GH(_1uD-quukl-4Fj z4V$dYHH(eAAgS{H;W^WkM~Uux|9#U{UB5QNeIMrk5%qLcs0!;mwP{^L;*=s-g7#qh zFD(E5KF?Ep9p8hG6wdTGR&>MnUp)uke{qoR^3&FxWEFY{Y^Lj%t}s}X!<$MC2W znnN%hpZ{KzjThv(9ebAsz4RZRxb?YasVFaEAja?fJ2wBgZILC&FFRUbx`itQgl@cq zx*h*E^*3F8%5*D{BAIuCdBtoKpm<*2VoraYT6;9@?OiPYep}23yXXzZ4F+U()6r_z 
z0^$D4^9_su*y9!o@+j=BbQ}1@Q0FXW_AKp+qMh7dsZXOhEF$xDu5}aGGx>Kc|8;p;$0h=Ma(wt|$GP`u zD(t{YmX1LGky}X!jB5q{Pjy&IZ?O0=W51hWpoK^PAl>21*a6y$>Qf1n`G&08$4fdc;LEsx5#3JUVt z2Vo}ZC~}Z#PN^Lb=s!!V#MR90nHN|7Sv!CMLa%O)eFUGl!D?>i-1W+ZcG2TqR=qg? zC*u7*rO2&Hs&eY4JF{3RvUxN6uio0jOQ3~%d{_}>m$FK6KziPv2`R9@`p{8S$}&oy z?O$ojKR7s88-#hrceH>HBPm^!YGU~-Q*R5I-L0M+h606v^nhuas5~5;eXh;`(!YK= z)NsUlJvUR3|7yt8$!{32RWUZ;c3j=fubHCz6_{-*reO1rXsV;Zaz%Y?{)xm2g6-9n z!zv=3oT}UteqsBsU{^Yu3k7*tL5#@?NV`8GtKj?}|8F9d8W{h_Ez#X;!Tsbf)$x}e z&AM=Kuw;`@Xq&nwl72qk^A6#|yvMCMS&sjzffA7mXMVx{t7Y{S*6^=tIb&BQS20d8)qR_Q|Fg&n zPI-SfaOX=c{1TeCXy-cpWOr6iE&uzuWmh*Yq=WzYeea(d#=gb){4@E(2yyefNAUeu z(?N&75##4~L+#QaAYkrLr8aN#3-};ZDcIX^= zGLfB*4Ci=d_7d2Ff*67Sxwn~UA(?m5a_gF#V4P_`v*h3!M~sw6*mj(WxQ%Y<WF?=cUz` zPub)vN35%IPZbLvc7e`-T$8PBaW-kna}V=(-0aIf=BZS$n+~6r5OfipVGx zr{nsMRoQVf{w6aZUWBx%(Kl4n+?Cf1o-yAc|N2E$Qrj%ckS8o7owEdv5@Y4*@sav_ zL@fU<0E8-p4bIuzLqPJ2*PVBT$b?Y;0sc>$j3Vs6)FYFP@cC!mk8MvHzR3F2FC7)= zKeJk1*Tnqjnj&ePZiW0;y7sbyXjs(hTAtx{rnE+WVu~4kiY$F$`Q`DcKO+`ySDt&| z&vl)rthPTf9sSI|!+)RA8f~|F@a@$Ic<0d*ED1-HcS^G3z-!@#7vxbdH8cJOx0=yr z4yZj@?v)u@V_mC!I4jh^;{c%p7x+sJU7~e*T{1Kl!I9~=|C#vDgpODeSw=V7FS8gt zy#MJxm*qFb640|NFo%f?@?WhYm6j=zul~mJ?~*rUA@e-6Ut!>j_S>2mF2Vt!b)4ALHoZDm`oV&0yO-%1^~E0(T` zE@OVI3mLl*c8YhJ1?F2t@c-y}USU=jn%S4+beoH-Y0#fmQ{6dKPxB>FqaMb4-b<@k zZu`>x;@l1;GEwu{ma_Vi`8W|5>6@{dnwZ!5lEve{3D|40!CqEE1`U zELG5$f9^Y9MeS{1sX*eY$!Wx$HHo!YzgY5AI}&`w86 ziu)$J)nV6CT~a)cn=om6#B+>J&39jjVE9YK0ij82fOgAYoK&b^nETe!=m0(lb{0{zGC%+Ea{$h99#pkx?!1pep!-}(SMS1 zO8FzjWb7}j4E?cZtE8;^Y}5_&5AJ`K-|Unusbb%dS=!^6*HIB1HQepCASDeNlv^rn z&WSvnMc;Az`#5}=`KoGAkSCmfd|VhL_7+6{1E~u9&z{>$5a$0;cSR>w-1_6Qnynu< z{M)%$5d@s7tF;uVyguCjbG!OmKZlm$m{_)c;qIQg<9ps;DX57V^{VDij_nS(;lg}$ zIpASprmjcc#ruU%smHV(m?N#nsN*|vS%~IOc6^uIm`Bwl)pH!-qMq3GfVFVudpJD%X{IZh>y1k#E>Bop& zssSWg8AB>^5Y@ItM>4DGNFYh-dtSU<+@50;Y#82y<=-EM0Y?!p$YVP9Og;ns=h;ME zqA_N-U*%s3l3B~xS1)FQ%zN)-e&>bI$2nDX&dIP>f%)fBd!s|T#`B5|y^mf)LEc-P z&SI-pEdRbGeAC+k_*2_1TJ`xP%BcLBs4lOF)Dz*6thurR|Ff8_j^5uKNoRT*$KZ05 zC_HJ@XTLV`acL_s-}1y`a}dQ+alwn)tvlp zcM`DtJ2D;*yoMMYZN22^YXk=;88YO*j?*^R#?9Tvg@U}%ov#?Vrs1XA$AJICbbLLT zC-c_!;E(S;CMxauw_^C8_TGY@>OxnhqeoBO(zmd290xPj0OY?C%r>n1vEyac7Jm5} zwUPP(D~PHPPzS4zqe#d6KW(?V?i~2h623{%XwBs{*TdSjeK{@p{X=wFhxyX&iiO6} zT-Miiim2+m++fr#EXcb&+nM@8pFUE3x-@6-aOGm~_VmPw6rHlz(os?B`);KgU6O$O zK+=&wctJ3p|C;XzDL95JmjBXBLA3zUH(%0`8OzDgv)GC%_WLv*1;a`9I6Q#kqyxHS zC$f4H^5^M7LEcMW5SpPN_^;)A<*tP^a54R7haWwC@UzqE5;=X>pF=6~i4v;;9P;6% zQzK;_Tb9Y~pWN~gZu=f={u!?*TzT2gmS4|ldttEb2s!*(^os~7l)o^kdj#YEoc(vo z;?JY*+Z^%=R_tk8-g$D>b?%eD+b&5Iu39}lR9cq=^A8gx(@0WBM6me>{6CAnFK4^g$!0OTIv%SQH z{3~76tcR)*2_MxK?08u4XGDKeT41deQeB`Km)48aHG zF0eiL1!g`DB~ETv*K>qG{~3n)$KF$m2x|oI!6;xyvL}Ezz{UMPh{`}EPEA=f`7q5@ zXX6|>I=iET;h5RmK-F@I{GjvRJy9ayUGv0zClR@Yvo%o`fpMWloyn|;^cP#LL$})b z^m~HoNsv;ar^)KRB@gX?Ob@@h;Rzqd#L(+oTRe^JeEH1sbykotP@zMTy*J`32m>K) z1xxdDTZ>kg7i_8@r5@9@XL3)V%(e4WYGD5PtnulMeO(V%)8)ZZIg>5z7ys04SUM66 z@3ZtW^HfhIJK+CZ1c8l6%iVmUI~(R7%>Q#G=ZGJTzQuUw@&xJ&c-k8uw}}t+RI}P= zcwl-TtTQ73F8SxN> zWM_xI*A^#9YilB!hjl}G{sOxXC8(PXWyAq%WIIX01R~(n+0^-olJ~Rqb*eEaPHQgn zInDi^p{%eVc)yQgS%KZbR0)c>_c@nhE^Wa3Tml(_WUTvRGV&_XpiJEdK>D*Q>!!@= zzEd^+-d1j=W=;s4yPl5CKa+p2TDWg)6xMJ1u5NU^bkQdNUaD+fUE+jqk40I_9J|%y ztQ6B1WOj@0W^1$B5DbuRA20H!h+5?e<}aECCsg&{;O6Ju)o=zYczJMW@I`!6yVl~6 z?J#yT7(=U6__1s$7Yom7NrW%_T-i_$ z_C~lGupP-RUI1zj=Kr~s`}w6sZtkqb$He?kF2A3xtJ z3m5iZ0{v&tSzQ8yFAMVz>&4jB(*Uq?4EGoKpQ}%AN+|v3uWjCDr=k3BwTTN;P!Q~v zoqF0PsIg2TFE1MZn;#yB7Xk2pDC~!Y@LUn*-?QkBfA4st=9$8I?dD>oR=Mfe)6+e1 z@Z?>-ZS=#7J!+ZH?F+C6)}4-%3*Vph?u@2w1u8<$)dprvk}TG zWT7B0hB5MPMUtA+94L*hCyA@+;r?efM+X{HRXgEVqZ{$PAq5^CJAtP3;mx17R&isd 
zKe>H-2R~=aW0?k*VP_}c|E#?8a}StdlDg|c-a<@ddWyfj;evpF=o#P?r|3L?8}rth z&riyH3N`0TG+coFmyrKlyyanq&8_pzGKI!+25Wli8+#ko_e^zkZa)$tcJM zyH;!hH{quGGFJy8&;R;X0;~=Y@6`>-$PZ!>5H1(f^b7154s8&V!RH@FoW>$(r3>$K zAf?T-cdUAP@Fmq7u%>k|iDK$1`;_a>e{BgAW?6^x(h=-{RnCPET(Z zEI*L)_sl9H0{v%gCFcL}$kmR6h5I+dmHCR%Tx3?8luD6t$rLMwQ4bkKeh4GIZfa^CfAkEb0HL4)4?XmG0az0`^e!3_(-G5#}_ z|5BJ<48mbU>UP$|jiZuzhxlCK{_8|l0*^NE(ir)0t?MFFk6&KHf41i8uEdlqZ92w( z-kIO3<_X_({@>eV^XSo>Z=H1m))5M)Hbed$n|~f%0{v&!&x>ve-15@MEq(^U^&FUL zXTLbJ9E3+uH6FtJGe9bN=-93z^M4CsD!R$Qe_lXFJrIe3`R8;Pqn?B$UCEuby=_^v zogm*76%o>Hb_NG$!h6i)q zd$0t_sV^{vjm<{}&HVPEPVE*UyQ_m}xGr6bdS8aV% zu{`AoNUNbzDpr5A6dA=Qxkn736Heng@5xBu|Cx3052W@7c>+M^QA65(a}WzFi1{#g zoV`cCIa6uTj1u;4vr zI7p~d$#Vg$=+d8-Cm6Uldb~= z!Q$Dl@SnVS_UDbGtxyoNp(bI%++BRlpMGxD4au!gkhkBw&iC`nj_v<25A|mILjK*I z@@L&BRl~#q=AVssmJhg8DJ_H_c06?18#5ok{DbAc5|VYQ8kXYu_or?0#SStH_0aEv z7*hMS_AG@6G~+OIL7b$z1d5&p;(i#?j_Xhz=E3x)H-~z5KE%(E-ph_K z3+FK+btjbN4#kt%6x$EYSx)`#TFUl261wF#EK>31Prp9)ZZ15WODfF#AK}JUE`p<5 z0M~`t1@li~tFC*KveY&uPyXS(%wMGbx)4~>g7X3uf&WLnXWHRNa|jf~9Pzuh=z`p_ za~T<`;=LIEIm^3z=c~}qayb9zdwk03%iEt`*}xCFkSzJIz%FL=dzFNS$(GCHaJMqw z(wxwf7gq#{h`>c#a{~nh!5gc69XUs2^D09h7SG+z0sUvk3QL;&B@+84R*(nz_kdd( zafM2rUQV|t_Lpses-ZvctB@^DyYJMd{gg^@3%9n0c;MR z7LjG0RjjiH2)j>CT;rcIIeJNRqQ^YD-__oFjsKno*nj2C+aMj$&~oap?fi4TGDF1^F-fu`l}GJ|C$nH<%u# zH2EeHmjA*F^4{LkJ~HUq6X%p6gJekzL|_D|JJ@+3vx)elDt5@SXnuckl8%KrYG$2p z%UZZIgO}AAx^lbqvWyO7rooGqP*a9OYB531HlXW+&IqcL01U6bXNCx*&-f$z`|6gq zDj51zD6;Cn&3ypQzx87>Fwq15@O{KR1BzHR4grWc9 zV%y21O#fH@QlO3iFKAZFQdyHNxH{6A`UMzh9)tUsap1QyA{)^NBZx6L= z7R*Ow8(RO6Dp(u^nxtc%dpgPf_LBb!g1dmVC4%Jdx`O{Xs7b*TomisgK%z*(2XrB_U;>NEsPfCnK^~nFq-(4>Gbxj#b(zTS7Pp zA@dlOjAJC>C`1w&Elx&eQj|)*x5v@*^?Cn(|G8Xx=Gl3i`{RDU-mkao$nnew-_m_s z^!U&9jYeAr>&I8tT0yAQtZ_jrEm!V(hY36J_bjzE?%CZZf>#l;Xa7<7YmgZ%eR?=R zh!_Y`e2E{h8`1KgpHp`2H%OALAGHazSG28^T^sMG`QKfce_8d6n%XAN=3nAi-SRAr zpwO7^;{~WJop6tLxBG642~=8a_FqhPQIh3d;>hu$_MzpO^5CNfbi4yg4)DP&J)oxp z{s;1(p&4l@qf{@iSUh#C%s@6&RR z$FSWq*u}%=3*+#oENlnQ0V|R1`4T7@b0f<(%Eq}A(XFV8s?aFq!k=Y0Y$9#}bJ7)o z@;~_|eeDAF?G$yQ_1A&>wo%b6LhhO57k78H{Jr#>!#H64%c6j`74mK!)Ww3WX#U|u z>*o|H+c&kx-=)S5lv?I)Bg}mM3i@Yc`@^bQzdMRHw>I_9_dMXxQ4@`3G0PHzDk97_ z6EQ(_d<_0Ci+l011TQGa8z?aomyjxoWFbmhVge>(_qV6WcS?>QZ<_?_7`m-KW5%HW zB5=CD!Ux;34YMcI4m0-S3mW>#2L7)A=dC+bKf_jXZ5uDI)gg>i#?e@zBc9(qF=O96 z7#J~${$tJ&U=bzJX$x|AQ1}iZ?)RTsMzUZJxp5H2^CMz7Bu{`4w5kA<86|KCd%T25 zhmH9>s!MivzH48+&OKNZ=a!-eC{o8>0$DoCEkmV#&;b4Or#oXVckvqF8Xou*#Z1%x zKtXWgn4rO3#O>`WTfUTSsU{r+Ke+1OK2- zGlmtz6X^S(WgJ401>}8Y6LrCNa-9jQ^iBtW=Dx@m8+`#sqi(@nV~csN>+>unc_>0h z^Pe$F=hf5YTy-LLoN8q&eafIjgmXXO;UzVEs;D{n`y1*6Q}dNy4SGyqwFDeCzy5Zp zf{l2z1708f7<}l$$RMR{8ve*X@h|9qKB$BL3;cD{u%fmwS5@FT&HJ{dXDU7s`_*-LQn#;TQgqyYZR7G9g{eIeJelrpQ+nu%E}&Cyak>|^js2+2MRs;q2I2@ZFjn6tTIVmxkdBPgH+3aT_P<;I zWc$Ua4lWz@7pxcWHlNRlKXJG=CJwTU**mE}4*q#^VX-xj8m_6a6EhFeLwW(tf(yTv zzW%NQHxU#ByV8BW0oDy<2kQS4($%`YTklt}x2;kv*V%gd$*699(ErG*x|yCP1zVy*_)C~^6Ka(!4k&y?_yF;9#?PmT@2my2$Oggc?g1{{%aN#_;X22i; z`XAn7+WAz&JM0QjeBTB?2(e%;+Q{bjKtvJfe{POO-VwkYC{GjwN(e5e?;K(BiT!{S z4QM|_^)g7cI)UmnQslq(_x+=~FCQRf#60!)kC%?st{v(GIsr#@tl{e4Y=bL~ryl#4 z9Oaf2655LSrAU7F9NFblyfJie;Eieh3kEI=$cC>2cOLG#r5CQxD?o=m&XDB+|}^9 zgXaHeUaG&cxEA(p3it8Juis09K|>XNDSr{bKL^;Xn=d4qPd0+0=SbAfGFfLZg(QpH z{?k7j+~Q|cp|6p$@AQ3$TmzB^@Q*Za{V2@WrL>TMsez2!u&8=-oamRBoA z`$iES$<-YkD9>73kkK^+7#j5C%`bEwHfraY?u$&vWkmlRHkRubWn?jy86 zOH@+8wD2SymBoZt8x;lqUk|NF-u zc~p7Ge_p=1IQhFy6B4mj|Bf^gZ$XyLpZSO*5yn>gaVYsB^azRSY_0Zw#R+oCzqf?&cYCLM(4f2RZ2+k@wHbTI#tR)zDP z`aEfepiG6(!Z^K~QL6s?)vp_6;-q&^zI_JI59psS{75tpdsO`8?xo+W?^4nH=qpz@{?4@7 
zzI0HwD)|m`+gOmXvWw~uET2ZVB{JgNFK1lgE2J0sj=Q^{(=y>tfHRu^L%gL)>9SFH zTWNV2#XmuP5T>KJku-ljmhg+lXSB0d&%e%k37G&$4>Gb z{2+>>^i^5fYkfJRG++bi4QBBzKDz7bynd8jp17#{*iy!#Ewj}DDZigC#2uAF9;RYS} zO~}&sk6-T&k~s$Zw#8%Ex4Xp%uUZSx{mC4LN(Vn`o-J8gM?)*}Ows;}V*;7EzqjiHoSy;(LQ>i^hDwdsCVo|KW@(3;+zUeUb z9q=7ZbMLJ!hiR4cNt4(4il2&b``*F!)K`Jdzw{`v>}qWhU||ct*3KqJ5BY~Nwx1u; zVXndk$JU;xK#zffVCesv2mgC7YwT7!C%zW?t)Tx@HMAm| zueG}i{x5H9wi3TLwbmEf89@Kz#0ZOPHbVe+nae?y{dzy^QRMTRNsf|{YR@}t6>vLr zl)|C-=i>0KDwp=YfQ@l^2JO@AhfiZpV9@+SJ{PBe2d(-$80Dfd;D7(e|ExDCGKoNL zVeO?oV0%3BVF%sPo|gE-kk2Q3nSYJIDs>*HNc~$`oEXUeF>}ym!o96ObE`^(8T-{P zOIs*3J`Oam4+02Z_L*y3%qI`jy=351&_bB=er-YhUkI0?PpH?Zq!z-bdSjvzwW|GG zZzR8jP=gK6o_y=xJ;WrD`%n-z%}#{-@6j1uNge?)(+2_dF$%?IxLlXDca*t1eP>2p zuEidTjsyQYLm}+}S;d3?`MhFsO~oKT%KyyU8VsWNQdDLqhQnlF4CY_{@62B1dr`yd zQdN)A`oEMP2)I$A*#SZrGcn4w3eFK++gULwQkF$du3HlY0-K-H1yb9?56b^9|Iofs ze}RsP2?PGGZMjz&7IKessvFsr!!^`GWlYnZ z#_5~8lBFmdgr_v^?EKrq92AuQiQyrveZt*6jQjJOg?_l4x5%q(J3$BVkDy@*Y)`~5 zAKPOdd9I8nAK#CaHcLm=wwBwWIayUb7NPYcFMfc{7I=4n%JMFW)op>$DpiNWwsj@s(h#YN+Py7kyVfvHNkM#DeB zaeMdIMt3{nl$%7#Ea|RB?uPygrT9Dph8??HF3x=Y#KO*~0VNK!{;zC}D^tFhmd~%4 z0~58E>08kG*OSP-ds@PwRp(>JO*K?6d~su2Z($e50VLjs(0{{D4&2Mog$3AdcCdJh zt{aM}6Ui5+g*~kmOnNh!sL7yzUY*?oJ-sjY$>3HNhK67R=K_%$rit3l#DgpF)^LW` z5c)3!bZ>lSv?{m%h`anrZe?wob%Gl#VOEs+1b9{V(MJEJ%sCW9Rg zmaW44m}sf9xc4aklluFrJ~QUBOy9ZN;aj(2B6scI^uNa(G08}dV43Vnlkf>^onR*R z+GSdufaxfYm`QbjQN?-1xq-xYGr;ExJPAPjgX#}00J#!N#_YmfR3_lA2oj7c)x*FV zljS+y3f?X;TMNBW?ubsXCpBN6roxtin|cZS&j<4YtB)V9d~0b_sK4#P-BjBf0}(tiD{-GuMuyI;9dq7N#W zMPCKc>Q!cL46Xm-#xD$XG;@w+r+#po=2; zN+Uxo%)hQ`y1h>GBkl+Pdyw2jSEiKDU0VK64vU!YaE{ET{|iwvea&SG{12M{eD}$) z+|_m-g3xolo&w|c=a?iR`j#UYMwfH!a;-(Enkb&jlvAkLX%7 zG3<`}FqnY-yRWFji*GFHC<78RC*%TIV`237u3Hfrj(Q{yV;mF{N z_ga@f$SV5iN3)DV{!f0$!0f1m&Cp2|s>_ec_XmL08*hy`PO_`1_G;sX-O(ax=h`^+ zryS(}tnc1rJlx{{VxxoLvg^B0TunyzZ9nx%=fS-LrSTz6Vzy1rC0h&L1zk!$q z@B*+Y0!&z2b~|u(+K-m`BkDP_2b~F`YXTxj<_nCvSSOu|31mF{dJ8^G2-Al z8HKm0KpL!0P`Abj=|s7BDU>a||nFE=Q`i>NtqE{4pIpKD#_i)L@?&J_T`|I!c zJ$)IzqVpR|_1_|4wrlD^CPV+17}0XbE}ImJQqSramfNW}m;{cLn(4uJS*oOIf)X&$ ze|jX>Vz9wl3iQu4R!JP~qP})INro#NQME%A$B@y$uoUKvEX1*ZJY2KK=uWM}h&Ex3 z#m=1MS6W`r*Dm^y+J4Q>S}g+h9<={rs^7%2+7zMne+5q~JQEY8hN~*hD&{1z->Hu5 z=E78aJ!7M5*_OEbd=ussM(}#1fQHTU7&==21)chC-7QyxxdZs5b1{OTR74se|K~sb zj}`!-1ITFIsiQAA@{G&@8ii|V5zWVce%;~;H+-v~eiDDTL!W}jm=QwPZ zrED2R4qy*vLGGVv+DACv#lUsn;g|p+Np%oCJ`h&a|7D~9(hlxk(Rj%JdFhhL6OFg8 zJ2T!9a;F4%*vsUJ!8<>8%N_H8{tFXOePYC2zUMma-^Io*M?Lpdw}6rUU;dB6RrWI% zAqgcDPwW4(c?|lWq5GC7c8tw*M(DZUwnzLZK(|SD96#)ilj+CH>Yypj>IxZE>jNV+ zRG-@gvQLPRTqUq;E`0eaL{J#)!`k1Y!* z|8v)J)WYp84AGC@yKHd+U$ekTEH=+`X^3RHvmXcf&x51oypVs`7Q1(k(s7j4fd138 zzAEFhk_4fY=s1T)OI^$`i<4f?!Bf=dbh;wF#NGe;-(4{xWBjl3!2gc>fB8R0;-=yw zVmPIdKQI^fvdGnrAwi{eVXYqw?`FQGeGzC5hyeW>+JB0%EIOT`TlnG0^vfTF4710o z*rS8B?ImIqooHz^|7V)&X$xz+DkB^Krhwze<~*0yiLS!|KQ9cPZ!f%ZuSuKgD#haq z)qKe%_26E<$$?HmlEf2k?p8Yee1=X0bltT4pWP7Edjy7}nA|Pc|C@hknNW2DVw&v8 z0VCDE$%U@wa1iv5Dq^)){`OP?e*^X;UCh|~J=)tbV((DeX5+oq1qAYc{G(HgILp~o zAgh8IJ3q32VU$}6KtZvUwR39$bLGinX;w1~y_ZM^$V8Az0kiFW!o>G4&-9Erw2kOR zlSq1t{JJ-u1eD6RPqH02H6iwwv=ZGdVDT7Y_iX2OVYLfdzS5tKV==}>g?^1qh1HQR zFXtEJCc_Q<7F;1xd`TUR32<>aSa4)kPYxKCE(-=736>((p=R?=y$;Wl5 zE^G4t_@6L^v;)V|rJd;~V|k1>Zi~fu!_9;^{7@(l6oRW-$I)DYvEPRSJ}SOjFFRcE z?nxj5`9GIQqc>$Lz-+G>!4itQEbR6+RnlHsANix)KzY{5aR83)hYUMyC3O(;&#T&OSBsQ6|$46a;iXhO(>!p(GM)zK?u9ygB&D~cf!z( znI9KWiXjkqV}>dmMs^S$dIcp)Wxv@?_`3GG_?}NWDVrL|C1GLmI?ME)h@0_1i4X|8 z1G#-{Dr`02suF{Tou;1cPJ0Bbpg3~1qASb}jWt}Je+}1xcn%K8Kius9Qr!AV8_GYf zp!q+4pWXThdf4UhM_FGSHO?+}MSOe1ntH(SgSn1m`+xeM_4746(#JC)oT?zUSkD^0 
z#xN-72CS~Gzn<>>pmp|&E+*hF|F3U+D?jrxrxt;_cv~OzEZDT`ruShsjI2O7{ygq( z)k0vZ%o`!-|56`~0ESD{^z=AALmwOQT7>rsp8?MKphXfqj_sXRG6oL1WvvFYJV->UK0H8IirzYZzXUh zcjUPTNP~|&ZUFz|z0zM@ktU1zc^>pF7;y2Z#3@yr#N9?0mnf=#zAeU}Ygr1pg%|6s z8rmXQ_zw9lg6CO(E1LhT2K`^l(T64Nn!>J=K0FEh(3;xz<4ca(*%ffjpWP(@+bqzY zjB2&MBfq~-!omX?(W3d&8AFF6=`e8r_R0W4XPL6CFw!Z zL}1aj^-@K%cVx#TvO0CJ46AGo{Lil#iq8;EA89(j;@Xp1<`+rM(z~!~RbvgD*|j~` zN?{NC14bt>V^FwbFAMbqXlWndA0^+;bU8eVe~_5r0^{fKX0{eu2PU;UbK{_p{gZNkA9^a z9%s!?CJV(%BKkNl&KA}UiQ_H{c)cx@;UTt09&=Mex3`XjJ!+e$VA?_Zodt%>>ANwq zcHTTkfPpVEJvN>X{Lf%UJntj$zb}k>aL7pN`wnLtA!2@f9qt@_;Q#W-pnuwp6_kPx zhP#CJpWf@vrGq*^<+atXZ{g*A0{-`V8^x&eV`DMwNQC6OCs+ClAcps)!{IX@FdtBE zzVZC(-^n)OF#wUfgmfqB4#Eg%#XOILVP<0&t^fT`iD=htsl}h`uRTBu43vKTk%%qY z;D4txJa9Z5oXPo1laHd#K-dtHC^oswv`5Tjr~s3wYE4S@BO1EJDMsUMahJ6(eF~X- z3s(y=CiVvYopK%3d#~zNNummbyI^#DXC~!ITjX}L6`8bJLnPpndv$lJlurVAD|TPg zD9O7F&3|rseMrvA&lCCh~()jw!M4#^ngt zeVBiVuPxZG0fB$<$M^Ma+n3pD7dGZ2zD0JueRAbPYds8b?`mG&NS1t0eChZ&@Vzbp z&3_&&Jqai6G#GN=olk9Jr@spQ7cU`eqEH5FwYC^C7kLI&>A7#r-h*A{iZ&2|u-ioW zXc^wJ3hrQq1XqjEbMJqpq2MCI`x>XX|2t*Nq#dQpZoTi|Nm&6l>E@9Qe(<_8N9IDd zpHqF{NL6a(VbxPLR(X3aSH8J-a{5Kaw-rP4ic>Wmr9f(@knNiM*T!2X+3!0vx-e}} zb#pF~eDBs~)ATD4LJ`dI_J7}#EVib zUQsD=K9bhPHe%+&3TIbEr0(gn-jVkpmro>9+PClVZ)SV=3Pa<67TcMewrb_NeGS|{P-mQs19=k>3Eui6=Z?ep~Zg67&QMUa62wT@m{~j7ha`{Yvu&-f3eqE zmXSaUAi5PZjA8Tjvc5hDr$%rPdy4VfO1qC1bktUc4XcBM$L%p?A)nb`Nd6*we^fVD z$;s(HKLbW-qKti!_Dx1TlFb9BhW-lsXa3nZr>Ffr$86`Ys()JGWcty~-*PG$Gbude z!!Lo03i#T{rLE*OU5-lyNHRx^#@3ANa#S>rV1JH$`r@R(_sPv zsQNol|5rilLfY4b1R`mKCQz8rApSrsX>a^IDs=0Ofvh%M5OffiFI=s%SP z{^zAlx`Ox-MiIk*^RK4RJv!9oVs??^;d*(JzGQplvn%ruDfLAF_#e@^M%Y}nE{)I6 z{GML>hDJ|Lww?Xrs0mv}6PU!lgw*Ma>Go}N+S8K4h6(+)O*VpMX{_Yu;Dt~bkQqS! z&yR2AnfJWQ0dFUR(so|C_Wf{j4?7W@4_=KSa0qK5m{R(oGS~}q|E728B|z>a$#C#j z#7;rgYfr6`NN(Lc4?^a5EBV%$*~(fs3|pTj;_My-&%L4B61j6i#lEOFL*WPBLgdtw zj+iYJh7|J2$LE-+t&^|ntVUf3UuP`Ml`N8Kol*P)w_Q>V2`*YzrN`Ca-pL@f6NBbI zOG5bjLxg9qn*00?ESi64?IXs{hYK1kIpL~rnqh?eT%F9{b}U;$c^^g={O>N57`Gi8 zUb&xGIdHcnY|;E@_ntfAkDl%~&a4ZV?5Yp4g~J?LQ`dufcERu*2q~b2_BCJmxipDB zSI|;MW5s*nhl@WKM;@Uw;xi?GA@lm+UCo7Gt05bx_wLdUL4w?wzg$Wt#g$A{(EorI z-lkeNct4<&-}OIL7|iXjmaC;*H8>Hq z(5Hcu{e>)cvQBfO^})Q-yBjkUu2(c>^uo)p9yBOFJx8oJEV#TftGLLHwWeBKD+8$? 
z(SFp$E3RGC$HumC_d*~=w0!tMe5G^iR5bKoc*C=+CT*N}gg*@RA!z@Xgk?#u?#)hMjK-IErslN3C9ttw7U_vLL# zW3hV>vR7@2zSeBW(BY{`rOz{*om$$$aEhmG&BcRA@HHQC1Rq|TkKA+3ihPAEuV z0x&IiY`-`%SsmD`pdZuyFaPJh{39s>Iz!+dN|4=^e@PGa9(0_WtCdk7yLmok% zM6&K-BL%E1FLV>nz&;247k?q_R_o$oSD4g~U3U+knSA%PZMxSH&hVv>whuiynr8+| z>OaAS9nA8719V<8a4LaxU5?73b(wXmMR^4|iIpAu7uiWSQ4=>%XBST15i7cAbKfy{ zTdDAmZel%BbTL~kYLQ>oVWz)=rQQtN7%FDE?KQVA%wlj3wQ&+b@PL$F7jD4oH>`51 zrgu#~Y7Pzwaz_SL*Ep-xv5u^LlOly4xs`!o)lz9?4v}d7A>cyGK{WrFry}3IEYq1P zOk_^w2*tzda60($@iTdyx9x7S(0>8*FLiP}x^-zMNN2>}V58+ft0*w+MKrJPlsSJ0 z&HuTs-$vQNhpRl*max!}c)%*(e7xhm3{ro74MZ34ib4O^`qxh&v4a~*&c<8fne)w; z{(g5Jf)hd0s^lnvNuYU9!C=saV01qRA7c}3Ts=ySV!5RAu=*O6GRH>Ld58(e58{ym zUGnp_Uv9lzI44**-y(2!4Iy*GZoYNU|E}izG?;;NH31aE3!{&H>Z_~3cqXx2MNdy#-1H!)dq$pV+xq)9feTXO&IX#%U@>Dm_U_YF?*y!iHrGpA_z&o3w@Xj34}zm(-tr7Z(6iO~Nw*}02+ z`hWS)Z<>mB@G1F_3pV+m%2Q#GWCcj1j_~%;*;|Jp5n^?mGXad9u=R*OJ9`TsG>z~( zwB4BD+m9Q17&PLtkR;_3PrfltZ){Jp`v|K&nHiBr9*gYlT4BM~pQ^c-fvyf!@V^gL zxEdrnG+tl$Ab~tL7g9g>1@eD}D_zt=caIbvi2^$5lweTCeuh|n9D%$|k3K4Hr~)pz z^VFI+q99JWKCvs*+g=3p&-5X|tgnGVNAnN$k{(&V=K67HsO@$I)61pimh8zef!d19QBOWfBK>G?#If@YN0Emd)Jm%SU4)-)wo6OgZ185xLRUAMjYA~BYcu~D z@_%NQ;iLL4!GH5jybt?# zcy2Ga^Ly9Jj)u$)95zSD@8}IkSlkg(M=7jrqROc!y?Ijc{(D_Q7kHld{Moq=`rjvy zSNdkiH@(RGNbsLlv4hhzyaGQxhn02Ocycl0c0nvcA{3Fo&!!slV{upg7h($+p#L4Q za8paY3H=w(d97Wo{kYNjSI`X1=M`|HbwOWxvpFDF^}qa|+4=_~uFp2}5AUBjrI{*y zq6BgjH}elc|IFsB8-WZ@uwRD=gw6Tat9(&cBw_EjHipySe?Q`qSyVD2iS~a@gfra{ zw5b68CyaBT)}S&}gSvd1<$zy-|AK0}D7Ug@P5U)r#mY3v!20KF%OQ*W4Nfy}LfnRS z`Zk_*{rFVD$Hws_;GeGRKX?|^>K8t5bA8rp^B`mgkhb=y86CS>pv{W)5;U|3klfeQ|4^F{An1A7jSrsQKwE35thrD|{2loAD|EV$* zgmeYS(fYqO{qN_$Pk1gpa0W+M=)&9jvp+t(PnN@FBmlOd`M(quEhlzxqVumH=am<5 z)q(uyY5KmwN`pp(pz@sZveMyhb0KxP1dFq#n&{0&;JHup8__ZMM@e0pz3af+*lEuC+@@B=s87&o<1TY2k|7^KdFhlcL>J@H z`4^41mS^e82)0DIjlWY4UScdXWell5KUYR43fqjQeRrrLVPlGG)KUj9F1~*@vl-En( zQn9`sFy1gI1-FJKfAFU2iD3;!koVu)^rp@92kmNHD0-0pjDW#EGNv2tKb3!f8nQvP zX!@T_>$x`-?(_4I_BL-rKW()f1sfFr|C`#d&6lEh@kn0xOejw z#aAaP|f^zbJ(pLH0q1 znKv^oLpZy}&@b_AM{vjLm&4Hf=ba88g2R@|pIjJP%&v4=2(GZlN@G=nTLLn+=;3TD z2;o*S3Zgr?usBk4(yytg?#ud(E_A^)aqPtKZLBqcgR$I1(EkL6)q0;4$MGC=d2ucY z-p~~CRkZ&C6m=tz^c4yIcUt~)kXTQq(wUBpL$1?iyz1`cfBhG_oI2l*cG?N!(EOiY z!F)w#um1#q|9!61JHgE1lW=IcfNkYAH#Lfz4AF4Zf7Q|Vh8qTD(7v`f=`+g{VQBwXd00+U@ABAFSgz?0LK{YFENgHMGESfSGUSQ) zR27Q}tG#>%%m8iZVM@*fkA@LJ~OtZV9OT z%6L=y`VbE0UlXmf8F@Appz8E=c;@5~Sh&wFwY1(AKjR4VubwVT#an*%b>Ai^sf@?$92~xc&v+N(hZ;hP z`m(p!%<-3;BD1l~4&qalaftAxxI!6jY&YUUp-+|Jz`nc-+bw*VSm^)42G(|E2!sCT zS=zcP`yuy^?Sxm4??C_8lS_9?tBZjkdzK_l>;IzVKQAJzIYz!q0k1ampX=|cg!;0> z%o_NgCz|WkX6Z!)$UkfruqppzrACRkvSGM^sY{|g;Ooe_1?WxV#+r{=yq{e$L=Kuy zT+Xmeg-h7Umi`$M1^qG?K0Lnu%pwOL_RXa$s~C?xfKw)9awy#g{qs^dbA-6`u$ukc zn~{@s=(X&#Nk&^I-cLU?%DSsE8= zqiOkvzk9Z_o)p9V(?7c^UVx28x4w$jf4bR!k^SY}@ka)%MCiXLbL>$KDRTA94WsHd z)-JrQDGqBrbTjVD#}<|vm&s{9G3ftt9aVasTBd{KYxQq4POa5J{IA6g%<+^G3IdWg zey^|$)CbjD6>f2l+Y8Rv51mm40y#~;zfF}UdoXhyxSCAfA!5kJaljlLH<`sgIkFDP z+MTxx|I0{D<-(hCHf#yxA5Q8<^D218kRksN#XnA8<|Tq6yTShs?nGMuDPgkJ z?Ycovwy_6!9`%2nYPl%EYkwM!A+-P9Oud4$$P0n|pBE+Iks0Dg^M6tmkHCQ<)FaAs z&;aEQwwQ4rAmsY5(ChUQRm_`vv$sUxvSl3RtZM9w=Qgr&&b7fK^wvIof?EIjPX%xz zkA5E)J+@nY>1puevcbx1iO{Fsc}J$Dh$@y-+DVq&rC1zNM-HeTyo+ryK(-ZiO&x&o z12a~W)Bz+9ihmHOpdl$BN`dpAle)z`GtvnXUdtljf49t$c*OXU5BGoa50~dVgLB3GoyqQX0M_K-k z7+=0=blPYbojm!h*Ge{c+A4dCTV>k8Q`Z9q8zQ>dh;~gbm*Uzdl}mh+s_LoKKlLam z$fmlitFBtV%&MSL*VTS5eUQ)r!xPJ9|JO@R_8drcLh}zb|Ni;)_16C%{@FS*2Cfc{ zfB1*sBItSNfR5TG{?Sotyy3-%h5j$qhR7K!@bh@Sc^4{a0pR+aCY2kX3O7;zudyOH z?ovW-K$3zj3oyk+!G#JG|LoGxnt%B}4_zPAS1F#!QLkAVo;~kD%Emd&UoC~n;z0e$ 
z=F6qR&n5v2Jl;nTS}`50GemZ4ZS?ba*-8WdqgK^H0uthmO3-RsSH>eeVn|fFlzB+O zy^n)HSm#6aKW;=6;r)ky$YQ1|gkRt6;P~sFkr{y0?SR_`4Nl(__U_3U|==Ai>h)dm0F4f?g%XF4*JGtr&>dp_s z!^IbzDQ&()r60Uid+cE-p*|Kw3N}L!+r!M`o9EG+6+#VHsu-<7c=X~rq|yAt;JMMq zaYO$38W)BG)>vr$Uo4z|+y28p(EsJC7WxLzf#@dw`QRIGA?%q#ruDziplHXnT@>bD z36(0U)^Jw6Z)EiQ{axaY0Iprf>q4~@577|aQ@OpeBrPVMuh9cym9U@jouprlj@!)t zx%Ib+6gEFuPh>onF6+=Rn&d*;L0e!+Ztvok2rTlB!J|8<&PT&bzuWbMv3>dLOb5w%b^(MjpUT+@DW6Fumr1gJ&6U$V)AelYRvDtsIPtjO^$7cTX zX8(oxmoaB_O+x-N^uNEEd+6$!?gso1;2(tEv~d7EX+su9DZxE20RoW!tj~Gc7g&yv z`KLJW_9A|z#pK`q*Ocn`^R8C;<$P|f8*dBvS6%5aUjsIYRA3p<(Q_D1X=hSnb^M z1h2*gB~y0nD&+r6=S!SCWQ@#ze0KNL#@|2QtC)B9BEkQ)ZI`e~iMddQ+&}*pC6?!) z8FD=e!h1dc!#_e2I{Q;%c!h8%{wei5S#0`Rov_haYAIgigV>FRKk z!1_Ie%qX~QkPRODql=2))@Z7C(jsE#?vjuqTK|^}ntzy_DSkEw5cn9=1xC_dHS77n zN#iK9!TM7ZforWHp8N+0ByzwS&m8QIY4xY^lahOgy*YbNsKG#v*k5ES71-g;Cq(0a z9!K*&^1MMF>#eu|`Ol5r^PqoV+3bG@W-eXsfBmO#?pq$bF3t`FLG$%pkpCRE6cc}Z zd8;2MaTW5PPaUebUF;Q09(#FL;I$&I9Ao3!9wDe>`(_^;y%9pNZ~$4gGYr)kj#&-1 zGQ@1~!+oeXyUdMcuY~;^hH$(-8T?<6|D#^u^FclA@c7e5E4?<2p}E6!k5YGL%bYSG zh;oaVS>Ja=$VN7qf22|M}&{9!5cH!oFjZksuHp)0y`oA|D37!jZKT;#culF z4}alBuT1Rf=>atVm)Ma=yz8_7`cFIILQ#L6krnU{n*Tf+lbZSbU|-YOFTOSC#z_j^z`UAlO)V3|ow{{?#X>XzIOE6uU#YrD-IWfd zRfLh}uaask;auf4N-o&%QIIa_OMu9|p}fg}L|VX!ww~L;qI^+JBm_ z(^d%i&*t}x(f%*1Lr^Z-3e~7XH2u$S#oKbaa2(S3A6zdkEK@8R@}JMjIebD19Nbm9 z#=uGOJqK{nMqY8jaab&F%<3A4c&Tm0$%*IVq>#ld5vLa63QJ01^~@&z8Lbv^UYh8P zWc>18&0-;#!Ie>Ruc|%y^4fChoUI5#HH`Vx3JclpBiV4e%4ew57pd|&dwL-Ss4CF^ zU|~;|^q}P*=5!Z%zBok4*z7-@6X&aiyZXQUpD~>qX#U}FpYv1sb8p6+A^%4Y^}jcL zW*RLr>)TEcg8ZLObT71Qr%ZMe(<=!ltZ%YovyeaJK+ywm^V6~P3TmI@!D@{eW^iDQ z5cKm5wxXXWxL&Yg3kCE)51c9IS2p`kb#$oxZx-w=m&X8W9UDSF*rW@5um*(uV*QtO zT$YISC+!%ub_y5z!Az#Ad~8X+$e}X{)vUK`bfN!Z-+QLFBoD;uzWe*kxMI&&I6@w+ z|DxXKPU(&LUOpV~KmYZ=r$YZ%)6Yx|->xy0Hk$sq=1kq?j^RSnK6Qc++J9l)!w*+l z$bbHkP`Rc8n>_ZKR{ z_Ran;zA90ZbQ})+Uw`($7-$Ns6(N**JrX+ry0*^i#dFz}p|!pb6}Wd~-#JK(~H`rjw>V@?3bUtLD~-<_J> zlC4j+P0DWezt7Y3&*F!hEsc7hZdZxs|MKk3h?j8zPH`6P|2h>|^n64V@_$~22H$*z zZt_c0)Mu$uIJo>s?DN6iJ5vCg{EwM5@-f)te?`nCN?JqC>n8r8K4rP%ad{tB^CJ1d z87~7|aF_hBA4U8#3{bIDu@ zHE}RmLkzEN{0?S$S4_AYFb>N3TLOT8&aVF9t$q-8e5&UYCs2fgW%VzDG>*Mdw?{mO zrs$0w{b9F%%vsx;CIhg)kfmrA!3E_$zebN9V;1lmEmv;hLvbT6lu-)9#h;dM%&b45 z{EtofgAnTSzRQ1pUOS{0Hx+|N_0P2aFKwmgkb)5|oA-bF-vvk`_ofo|9zs+%`@a%y zq`T##n>_Si)XpEO;FPktOdf9Xx_w;A=5D15m7c3a$~Lkpi5h;0=f1&53wXYZmNJC9 z9Y(DYcsAl@|7qx)p!2sWy$mcg<<$5N>4u9KZ43%o1OHROVw~eh`(QjT;Da^z7P7d{ z^8eotW^xU`ax=~yH(~|{iD>m_%Qr|3=$~t5VN(pD(xdvH*nBzHsi?R`TK`vt8oT^y zxBv7%FYZA9g+@d0wCB=0XK1zkumALdm}^Y#aqz!G+GcE)!^uI&ms$Hh^N2L{Idebm z&srlrIqrC^6|$qFYt`_MB&sj85M6u?Xg`3Lmj46(ckcnkW2aLNTqS264YNoVv8)p5 zq;i1&%Y50l>pJ5XhX+pZX1#Wk+N)|k?G04Q&VgbAP%_kH<}A_~RR8m8RRYduhsO

    )Tg~1^_~z-c(DiF=z}W(B&B0i@=oWDE*Jl6nL{t z#%D!i;_{9l|MCyvi+UBy0Sj<>vh(E;tz#e5bs+x-<{2L@{-5tDkj^Bk7qI=5NYWMt zZ-5)Q{*%tJ_pI(39{RYK_Aj;MVVzn%@ zK^8w~)bt#O9r$l}KhI2>67G7-HF{BaBdlwluL&2D(u8o~WVHWF!K0@YR^Z`V$G9{R zvwKtWXB6kOp#L4-tbKlAOE(^KcA#%oZEzmES^q7-#oJ`K{H%;p;#gL!Mub6??xuFu zr??!ZZCz1Qt-(Ekqn&)qM@JF1+0!2kaL+yBlwI3{Nq1KG5+ z{AZYdQB3;|$aS|v|HW-FTSa@6q6Pfp*!F!U6ZFsZ%A-6?RzO#hW&2k<2M*(qmcm~m zy)d7zd7e}jF^5zdws^C40Sjh(ANs$n3w=^#ospkQ<5g)3xnavwJ)GNO@eYk4I-Q(i zyY{I&dGb*Ppz3rtVEbglo4-XC*|12{xdm#KicRe?*mv1AM)|~Gno?j> z@ry&rw~XALreR+zVEQVbomxfXf35{FS%JRX2ub`u{EsH6;1w?nJm4*g-0Z)=e#~Z5 zFwIDpp;qNwy0A*jk~m)*iwAf2I`Vr09Xg}_ukVjoEem{aL3H1}n_MMlqR3Ypxs9?& zj&&%IDtl8Olzeq9ygGr4c>91Q2?6|5aHK$FZCv+}yQu}7&(|66zfrSX@ExI@&!_5P zWatr_B?32s7^(IM;-EcJ)iDZUQ4tW2?1a zI^Qk0r)X*i@t^yy6Q+LclW_^whINXkNTtzA-c5;btnzQ7U6R>O}b@Z&6|d#`mxnDsXxDE@)| zcQpTyvP-xSpL5~dQuyO7gJwirXl{RAcPh*9!K+WalWtq#vm^)rFdJ8ZAd7w&N*M)t zFC`AHC{1y9=i zYsM1ge^Sp+?<6{>{>T3y8ai*eZgN2X=^!a26evl3WTH3Q{w6n259ImxbCPfGIaHE& zB*1v)z8#!33O69Q!!hZ3(r@mWcl9T0`>xT~CUkrB<>}G-zdS9zO?A4m?)uPpmNYuT z+<{(}p>}9F6-J0a=iGd z%S{6AuV4v*|Lelfuak@ox63&o|M~yrA96@5vzhYCpPr2`RfuHyum4?GKQ?tK9mXLn z==>{Lnm3TM+Vq7y0p?$m-PS!^Dvj-vVrE&zBo$IXo zpXo*1@Q$(k{j;X!;5xvZ3!10xfrJzpYm#pQIUuG$##-9(mheh!p)r36p~x^vq1YJi zi^=_Yw^Mp~Q2wVjc#Q*2uWz1RkC=AlRGuvwt=Ug8v&98}4qTUyWtqt%=B7y_K0(){nc zA3Dj0%*Owhe~9XT!sPd;`@R{IXk&o<=X*xPZ3~Pii}5eW)uHRZR*JmRZ8X3LcGmx0 zLS?oKitWddt@VN25^z^Rb}Te7%qA`y*R?SZbWxVh9%4w!(Urg0}})N?_8ebi;!6%g~fS+Qa020 z!faIBK&b^?M7Y)GUH|ZpQn}rsGam+o+C+i>`LF-dI%BsF7OqW(VU2lr6|F zG887~5S+zKAnb*)aZ9NGT`S=anH2V7QU$t*?EY~(I4rQ^+Sv*q$WXB+f#wNFt8&Mt za~i1NtJ1E~g;0Oxqj}AGG5IIsbGecz zwnofti@~$v7XM}+&9Ercz{xl?91Q`Lai-$?_rbOY5g~2J|M8vZ*&~^+cRWTnR+0q|MQ_{F3S3m?rT{B z9lx?{nRjG)HZ@uKYo2eZnHy(&M7OwKuEnK2?c1{?EU1QHrRvI3L27!l_cKWEEe&b; z%YVN8zy1qT`yurYK)-IOoI}Y;?}zpt@Y2GmQ7X?E%U@vsIAA5V9YR1H>$bt@UBbTH zx0IZv_2oI(b<@dBQM)!W6aY|}(N{wcFI?&FYaq~RYoTNTUi@!Z5(|MxBpQ=d~5wE+K^rY=~5 zBI57z=*kSBN%DEUS!))eviZb|b>R>oPV8qcX1El0rQwYJr~kevQ>haf@Rb@8$MVf^)WvN-#qC!xuq-Fy+TGNCEW|$0 z2r7zZ;l_aeSqpQ6R{{FJhJK1`onpnTJ_g0@ms9or8;@z3fws+N9o9?k#hoZN9o)wLsc zTs~zp|Jk~MOJa{YBFU=I7U50HKP()DK3De)H2){mx7aINRs~LeKl|FtmDz!(;j-Kq zy5CK$Mhfgeh#^Qc=F$Yr$4mxi-Mq6(PfookwTM{cFLj#nX0Z1t%a$=}5HSj$U}Qt* zU&S_$Rcj>9-5-B^-DA$-0kRNCXJ0J2yCJBiHwjP%Qej99SRqkF`wu(9ohNCh@t#uO z>*Uif_j&CR^NA$izzY0L0RmEl&+cQyEX5cxOP}ule^kA9Jk|gI|NlJp z-W(%!kj=4WRrbi98HcQcL^+aB>6lrSJqjU|SyVbkWgH_UDnbh(l?Ltg^1VNgdVhYu z|GISPQXS9dc|Pxt+x>RCUDwW@ye+zcV0X4kfAyRi!(($k@zLzTJl&Ip>cvd5LTVJL2EBu2e&>*!>FuZ?Ysz0AmCC z&tLwjjqE#JXC_#6K>q95@D0T&7&Q08;WQKVKmQ~}TjhYtzA#)cQZI_W-d83MgcLQv zcni0^l~<#C*H-b~6!v@_X9_g|tXo^;eODq&_ArkM>3g@$zK)XxHNmUHa3(R3g{SE8 z)qs)mKceFoOp+*D9xz_nVc^&Cd@}U$*rSdkH^yO*dAk>X_{I>pCeEOT)Ln3vQ|2rB zPg8v!syl&r<5P%UTmniik4p)6opJe?siQtxA zn56Oifcr&di^8EV(6!FeZQ33=-!=Jb?}J6FydSGgP;TM_#rxm?yu5i|W_LVPTKsyY zOtoj_+ry#UlQJT_BVg!}QD6V&{DbDdT(pjj+RR=|9YOVCsxpZp4@CG z<&7Hv3uIU9nqJ4(bXO3q7}%q4jKHfQ9$fY}*dM_$-YJ1T#TwqGH$qD^&@myZo?O#V zlZjdhU}5=KwA^6&YIu)S4O@QoZpgn+onR4=b82(B$FF^VgMxSJ4P#9TW-o6BSImuT z1qaKT2`l42ein@A{}lz_a~FX{^~9&X{y(&bbLGB`d@Y+P%Id2-7kIjZ>GqQ3Lq1j* z85e1)c~NW3IRSz7NY&9KBc1Q`e_(ioC(X%UU)O#Gt7oP%v70I(bFJpPP!vVO{!h!h z4t;$;^Y-<_=eH_;d`|fk^f`o5z2txidGkmfiw_xsdn%P3_+K|8-@W;w=zaw>Qi(;l(Q z?k|`)=?3ARa;3!ONCuB>sXM%}Lay6WGzRA6w%J<^oFB9eVmn@~y;rs`!h2PULIlbz zw>fsWNaKR6{ZSn(lOO^O^`!e&lyhCePB$lB-M6519$ynWjNA~W2@h} zXgd(TehMaX;u=EH{MXQmh+=RtMKkLAcx6@4xhhgi4DZYS2(a;e0wVmah-bzHzfL7( z33B?GA`u?+kYv#bNqqv3uA32GAuL0#N@cAyb`&skq?czxtbv6xXPB7GBaA@*PXXlL zH}xOr|M8lf^D|*75ygZ5`N%J!_p@fB>hc+|iGos{Z}_;jJ-lsp1QslQ6tvd8lfNT2 
zNTdCZZO&QI{e09Eb`_rsx9dQ1E3NaoWpmoub)>X?U+q}s2<-V)y;rAaLLa|>rXSNe zfA`Z)?wE-A?xhkZE$-OJrB91m@Om$X{cR%SflKVudcJMrmDdVJO63p3>dFbMXcT(_ z#|i5%oC4S%)z@o=g;4!x(%nO!5$*q>!@}_a7Dp)@V(J8HmdABNf`Vt4`^2+5z<-Z4 zEnvAb7)g!XWSn)ZKcPCqw0(qQ|aJ;4tvrZYCg093K_W2iC|rng+7e$U(PE$ zsdNDs)I8mr^L1WbTSa>NGvS7*AE<%I#TN9dG5O#IzPKrRH4$7oqUE8njUM-paq73N z&U{*&DP%?u8$HRa4=0fip;JBVN z1pwhe3g{QsDRAf*!6x!96wP22la@(?&eo!aQ69T<#U%@JHHphK1f{ z!!1qOGJ7NwedJiCYt+Eo%jcF$j{Z@*NtLJ-czZG1>;)6n^AqvR5Uzeu{i+Q1Us|ED zP2PP;f8K-fxM%9dwHpVYUD>xXzBK%0BwyEg(aDVORKs3neB!~i*(2Z=z{8!`yDM$v z>$@{;W$58DX8k34xNPD<_g|fl1(atcq5lW=UmLe8jUi~%sF}qH{vU2Sn)UqP{L|Q% zk|gMV7E;;M*i&PEnos)Q{!7Nb%0_;D^r}ALy$jm^gZh8QHb$voIWo3{csfo6&1|5k z!D72A>h%N5$9CvH&+dwk_a`3C4=>sVukI64f^VNl;p?e6Zze|s+CA$>dzu*GDd62R z#RwN%yz^-MpI&yQL(o+9iA%~(>Cl>g%o%ai&jcdeyF^B>qGlmI-xU||g^cV4Ux?Fb zYiT@Rk>v{r#r6}UtHbJ|`xmN?eUm>h307}Ofb5u!x%=@`iMlY#X_;k$dUXWvP4T_o zd_w=*DmA-cUUHLvdeQz0>i>EF;{2T?W?N|k!h4$k=j88l#Q2G+{HzEb`k!55?ut~p z-Un=jj^jcF_zOJ(`ceL=$qeDdaAAh^U9T60YaP-njw*z@fs%SNy8@NN%kfLbH~FXa zF09Tnp4bs3XK-;H`0sjWUmL{QSMEyLapXq|wy^Wi*L*ngQ!!Bv&P7qy%E^->|d z?Aq+htL$lwMfvYCsc_bQjQW3eo(KMW>qNFuUkd8~NnpnADvkN59>Z5RW0D5`XO#c` zoWP@LUsff*&{}rv=m!u~dy`@M`t<64K!*@(9voig`eX}EyELL=PdZhx5cE&PiX;Kg zCk0Y%#2DONzU?*BOw^BFovNW{ z;t*H$Z6C|&HER}Xx2&Vq@i9Zd+NTp_tloUu`V2Ipf;YImQy|p!f*@~nzZ5QJP28L^t-0!2>>%k899uK$nzlR5ZEq_5`x{69gUa9y6! zNC*Bq>i;<*IxB4k>zxwY&bDyld+vgf(HW@!r{RV_e0jIwhrlU&<1IRyS6%vP&i44G z1p&6{5dOUEv*?nup?`D?J%J`LdJWz#`vpZ-cakjb+&&Te{nM*cgLgLc$sZWnq^8e2 z(@TDxSVNZ;C~*h-2`is1iI-%bVZh-)Inp)sVT3O+Ln>GcpJ8^7haI#H5J4icMg-?wa8B6;cnp=mcI zZjx?C+K80T&6xg$h0>9_u$gEP@c(pZ?VYkCy?*~kUnUIP!SK%;0JvJiEORQI{Wspe zIBj?lq@mwGzkQHR=9t^A?A}bMgMJ#J|M5@%_WwwkXvaKig!d5i9|HeNQ$8}M#fAXx zi*?X{N(g$ka8+Rqp&z?Wje5r~bKnvMq(%Aj4bIR+GQS`@dDEq?y99(F!WGYjg9+-?NoL=DY&lG@ z2=zbL!oyh_pWmY6w@?iP-cnutSUTxpK_zeS|AgyJh%4IxNEr5Ct25AlxXJ(OIyUG7 z{={>BNB`}=hUAT&M9IUe`%#0}oiWCTnl|8(fEsJyzZW-`{R{+;^q04s#3zgjUZ|MI z(eXtY!c5haY&w=^aQ@!f4(6J%rz)wh#=vN3x1g7?I@NNZ2>O4Hiodww_1Lc3GJ19P zM!-!wqTarq@mns8t1!9d+T=7yb3Mj6EQBUF_ouMe9jd>bu#o%6vCVzPiIgB(kl7Hr zG3g2Ig02m2=F@JvZ>co@v%FbmVfu)tl3jV)o}{Hc1qzPkI)1IDANZ9CFnk_hqV<1n z`OAM7r}h5?4yWA%{>$>1I3D^BnZ!Lm2k#!2lLzz12-<&m#b+sNTps$CpAMXO!VSf1 z?%`l#qI|TR@vnJM2**2^e@=AvqdCgzxO8W&=DpRPcZMLMGlIQ(cx9BHzVP6)pGUgG zngXykManaWFlLlqA>CpVp?jRUSH+0H z^lU*om$xXluC;#{CYq7Ltoj~ObU-)DQz2hsYazAIN7L?BZu$Isf z0+GW#SiD;kUaya+C?sQl{U87IewA%QH0u8uT=45#8iOtEdm8`ZMOR6&`~#{I@V_Ac zwOj=Xy2+RuDP759y~1sBOFM)%*@N-%kQXo5;{|P>=yeWi$ zlVDA>99Jft9jwcBf%N9#SMhZ}89BUzqq=Qo=n6;N@ZOi~U$uf83GdoH)dD{ScgXHM zz&NLz*`W$rE8|`w3q0Fj+K@1e&3mtSB~re(_bUorF(O>m9~OdO8vaGk;hwflt9DCWuc0&vOOs(y8jt6xIlR zI@BU1s*W4{+y9L2zuMRBr13{nD`Edtd+)r?;n`6EW#@{Jl1=~5xW=VYzqqfk#)kf% z_|^gR!zWTgmL%m|+uT8Kp`)f`;v8PYpmi`4IH#A?(_)V^^S2Y0*ULXGjpVCDkumge z+$>UdO*=5-*{JxeWAJSl{5YD`^<3|a?;m4id#rnS*@m;;3=P5)*uHpjo&i#9?d@j| z%ri2>@zt#f!%XP>Lm;?z!XUKvatO#J+NXJnUX5S0rrGE039WlQdT;6H!ScmmBbJVF zPria{GnD7u-uX+>x+>zY{|EXHPo~;zAs-p4n0Ffg=l{v}69MIKUhSX%hsvTXdd;v1 z){;)3|Kv-v?hHs_eL~xRQ3KySstdma^G_`8%0sP~)8~n%L$|uWWH78aDw7rla{8oM z1)BFQkA>f3taocBbdT3afxV&QNKe$vFmaDyN7U=_AHy|l&%Ax>C71NTys2PRJ9JuX z?}6+J&$*ZcaQ==+_~aZFUvMgciL<#C=o>Q{STIHEE;!A8er*P($d?ahzZ{q*{8r5r z3g^W>^=x*jT>iIK$tY+X)mxu`% zCO^NPyb}BZ-G9BCnVWZF0$uq3_FwIR4;aE9*21PmHEiW0R3X9%5;Df@16d-BE$|Y3 z1hc348G(HGDL|N8d;yFLN)hl=a9Xn9x$N_DAm^7Wd+v7EkxBKB1VE4tJ zZLL8!{|i)tu=fG5Disao%JRo8kAi)U*8eOEk%@RpHa@%qAlV!4ZUOC1(&#i=tW+0% z4Bl{#l#lPB{}B40Px0>nH@dKT$k5gKh2Oe=`Clh~R__d!c?TR0@INmM=3!M>L~9I3 zivc$O-_GCP-~YK9xx6x?VGjDws}IZ-4dIsv9(`0szh38==>7*nZ+WokWGDDf^FqDN zNf&6o;R;l?O0}FHCcY;O=PyUrNlLTI$hqDgpKN3ubyK#?4J|V{HC9w-Qa>vAhxmrO 
zj$};|U*NNX=CUpZ(){|$6Xoz0O4Pe^3QwB7&rgZ&qp|5`3%6|Jo)cwbi%fREJeE1cckm)H^iQdC)mI-lDm))8V z3*4`Zt5zs#Ki$6lkE0fTJM3n+e$ksj<4vgt*W!MpW!m6Vqi3QZ*f~~~B0}R|Eb*a+(s=0q$ii5jc=7LLZYQq8Tt%syq76|J*g51-tk6PmTWhpO+h1JZm*Zi(yBG=D+IH{5q3a zpFl5U=##d0g9Q+ft~XntGoV}yn>k2)DG0@xO9aa`KIXE$+hMk;?$tU|8XZY7G*gFX zp@db1O`Q`lL!jE2Fe{d8I{di($E)L$aRRJG?SF1NkuWiqlRAkvxJZ7F*te)e)KUz# ze+e&wFO-t!?|fx9kYo(v7ZYE5Hh|h$YFd(;DHRNF+yoKlWgC1v7_DSz{I3%z{}l3H zW+`-r8J|tgut=C&>i9|ar}J-Uhmu?9g4o5>k5JQcoGmy+)Ztb4cmMUQYUg3*v*)ai zO-1ye`LEk2zLnpFWWuG868JNv?rRIwqO0DXP!Ne7E>K3Vyo(n6KaMl~K|9qtV<3TR zriC@H?@s4u=7O*cfx*g2>&(-Gk#{2JofzM6V;B0{f9JTcgE;@H^n3&38y>9lozW7h zVCZeOpbVVYON7%0VJ_~3RKM9yx$v9U`{6IV{o8#e8qPds8}axiPfR0J>7)8jKcPggXm>^E{2Q}_8o`IlcOTm& zJ6ozkxUn6+y=nO>k<0P(dQ>9MWl~#z&Ck9iUKMyc)+|W=9>@C(Y$I8f|8~~i`(zg> z9bd){Enx6$uR5WB^j&c+STczQKh~GqzfUl5f`S76=d&s&|2w9UH&roTwaI_~=l@}* zbD(7VnVjNQ{9f!w?$2gLQ2wdoMk1hIjS)Gu+(6De=rgS_hN?)@qr+EAl7l(_&Oe8P zFNG8_?Dua=vv#~_kt0g{JSrjsA0GM-3+g5IP7&In9TWUN@rJ~aHfymfE~%j5`?I*H zL@dlLIKlG!88iqcI2DWl%WxT?mw}2EIjdYQlH-=TLY2s~Br`f{KM$Vta^TBPSf#=R zznvh}pQ2ghJ{!Fs-f&t6AmRKDjziYrPY8CdsDS|s`p>VRC9qS{@?T^9_gg~7wMjX< z%Hoc^eSAr_7c6$zfBg3!YY3YEDnIdy%K`^rza(>daS>$k@tq4NPeT43!y=F-MJ-hJ z?R_-kygasDD}5}Y#IgnEAA$R&^Su-HG9hI`Y60e({P*&}&!gy-$1^UV{-52@lp1-1 zQ=sUAVY|WRm7g`_1pOy-o72s>^z#bM*gDHBN(j3DdTfzv^Fz|&D;DnzyW$lzJBWX>Duv1%)yo1rCo;D5F*CLlTW zDGksps(UOaV>^^cI#32U-@&sGi?4`FpnsexfW1tR-2bvhc?yU3xF$NhJnRa|H0qfp z|I3p%pt=C^@6k#g!5M@+TpnUU>RiG94D-(vyXMo63pHHjeqq|u#(n)aKNu!M|FfOJ2Ds4)`G-BI zSt1g9Cg7?%!9i@EE$@ zj7i`Rj@Z&mT8R6y&<__rs@TYs`nHr+*%xk#RRJVC}{CO1_^W|*ndr5_3x>l zNeceA|N5#-XhW8P|Md;;gK%4FlC%9WWC@GsD~Rv+P5*OH7r8f0aAz-0F*^8xB>tSd z;LC*{pMv)c@R8)b1^@HZ_1J4@dS^Id?FWB|L#0W&5GNb**Z&-63S1mc*nh>L{fA!do@TgSYw^H- zg>Yky{WFrV|7xybQ0B$9AKq!!871Qk$Gq=fzx;Z9sKRyHuB?6_gBRPMDH5>J+IDXe z{;-|eLZpaBkx^ar{)b=RiK#eM?Al59um5ltLw%vn4DA~B-IbT$0oN zkpa+TmPR<=%`OdtU-Ds{^KRF7pc+B`{l)uJ1=j#hl#l8^KMV(p!9L0ue;n<9zO;Kk z_g4I0|8qC0{}{yZZ1Ufav#2qimQqLDyV6kp*B1TIs0cbsan(Mh*w(1j%;5sUSYTpd zTIOg8_^6+lrg>*q@stf$?*d#6Ha{4aoF0d8*Zljsu*iCT(oCKnFw|HUMv zbj7QFD+%pCoV^iZG}$YS-)PoxJxiJX5>TI!Cv9J^M5;>3^fhU@zJCOcQw#m^PQ=4E zIBEUQH$zcr2=ecTe4w9R(}=JK`hRAlWC8~CNU^m2*Y)6wJ!mYI=6~iipnQCeWPtz2 zpnZKwZ*~Q0pMigY>OW`1^?6ix_C9iykzp6aFOF=}XiLld`Gr8I7Eqf8cRS?h@0{Iz z@;~dbQa`%yt0hvqQU2G;tNa_`ovlQDt+Wx-41sNR9X0Qsn4jsgIgt^VHe#78ZN&vs zlZz5b8A{HHdw8(t2wN)?Td7tBJ};^OLzBXO(S0_n7Wk*~ScfMBkHgYw=>E$^#5({! 
z7@GfzbX{!p(4g_(k7?(rL2|q+Ws`p~vs#y&3jNQ(e`nm{uDTCF`=3kQNejH11{@+7 z!}UOzfA$TOdT21=e96;>+8i8A3Iv<;V`x}@s>wsee?V!XHs~A+sR#O>-#^Q6->L7~ zGz+a+(EmJ^G9iE9Nh|TNVEPE~FF4KU7!_O_4?XMr)_cVIxRPs~Q=QWh>p#kbTixkN zFBB&)S;SMGzJdd_psaN{VmkTu$qrXwHeqg9i}e9+$S~{s&^c&v7l!%AQR<@GWc^Nv zc0vAKO5gSX81vBldq;I3$2n>IrvDiV@wB2pUY(bWlAECUfAj>9hqY+_olUEsSIELr z6RAM??>=`DITeM-3>=7NjEI|TYJoiAxu>3a8at@1JJ3t#q-mQm7Qw0%qdMP{;trUV zH?s=ngsmoLe#c}tgwNnV>=As;1NpDr$AnDZOey=kuuHOTiaVl0xao6V6#5Ut@u`Mz z#j33Yy>42n{j7e+mL~&c_9Nhq{IpEqXm6Ey##{7es_7RbYXreN3;8eZ|8mh41Mt7% zZ~ZhK&mw|UwdsEzydQMGObg|I{qsLB(vi{mN5VW=Ct-kRo5gY8Y$MEuA>`kUQU!E# z7cCf=k!qLrIR;^(uj-?@Ug?J|hN%Ci4h|Bw<;7;sR2u*NEFX}PMH&v=-<*G3jEdWc z&kCVIA7C@6|7W$rB4<~PYv)(zY80FW>rp(;`9{5t$9kZPic=EQ2VKvE`f10Ann-F2 zDF7c&2tN$7<2HN|9M7UoPiol^3{MFu#|bvo%cFM3X4cR$Cp3c~zju~EU?$}fQ=7q_N> z|M}Zn6@Q_oyb9o-YAG)mU#$TD51_sw0;}bAhGJQ2=~yMTw>g^lghGM=<`g9j-GAN9 zlVLVf|ME^uLB_gb?k7ATY|meau8TyeCKn7(az);_Hwh;b=>O5X>yFP&X`Ycmgp+3ytqTAXWz=vO=cDs zl>45ow#Y)J`JZjtBbOKrbNi9*c8yotcD#8yq|QhK>RtA^=lMl4XhPFFV<0Pc8y=m1 zZav|t4V^4^ZD<3l>&v0MZCI+@gSxQsiytxy-aNzs|Kh-lv0M5VU2fh^-`Fr`0}f3W z3|se!4)tNP2fh6k7}$Y-0Yrf1(DRDqB*?$Jk551ysvXI1dc3mF8i)W=R9gN^Lqtw(1Mm!%r~&lkCh zRr`e;bAV_Dh19VB8tK0;9du4l?Epj+uoTkLSWQEkt5(-!v!6P(AM)?yKgT1Xn|VHL zRvf9(1s(cuHWZ znRN?44^G)+ZrQPv4OjSJ(Czd5Y3wJnd$qO~si6PhkqA(x?>fm?=gT>&U9;kp4gqqm zA5H&RTRNVQ*M{h($vR_N{{2YyMaxUk z#JBbD8rs4-{d1&`y_vWO=jiWipH^HN-EYR6Xc$p+t%ir<$EC;`t30R|fA=6r2k2N8 zxSartVP7QQl z+w=oKDUycc5b)pE7s3Bye=r!bAT<6v+=CD*JL@!zkhzoL@sjpwpb-+h-+Ywc>!aBuy)kYl9OgozQFzm*X4Pzr^R%mKgTdR zHyi-FNJI3nBM$BViJDmlj7Li}aio?r*se^UwB8|@3EloC>3r|_hk}IA9{Mhn{zGX}V+svIP9k z7@HD4|2+%xZmpL${QAr}#WegMH997lpBB*wonCD}oN7(;KWm@(sfp0|?~s2#z@nL| zlug=BkNoZbp&VlI?ao3Xw!}s%4rVADJFf~0Zh@Bny8>xw|Ihuf2LiUvmxAtTDlgFb zf8Mt9^nQjVP3YtLmfpovwEWj6EJJqAMN0ucV@5~*>;HN4 zfPUw~V%^In^I~BVIVRm%>i%i5*tnr~zp7+U1;T8{3HBXm|MMZt^ewdi@YeA!kvftA zy*#4KH2x{t|HHqRg?|}C2L6SL0_;W3`S-Jcmy&K2P7N^sg#YBb1`af>ht(?_(0>T~ zuX}*f0<`vz+iskB7Yzyj=AY3VG06g3F$G!M?*`C)E$(RSVX z`8n0oui@(`FXX@8J|2V-O9B5I>rHoAK92=q=J)jI1?y=5CVR<4^DOUV0Q<6XC zb7B5@n&s|Fe+KUVh5plGcL1U(gQw}!f6SSX$)*<+(klk!{j*yqnYa1;j#*zaDdm=^ zgzux))5mkP{$~R66wSZK|M%w?{pZSn1nzBI2#&$mm+ADjT`n#NZqxtc-ze9!M~#@r zQYr7I1pBX&3h9NaNa>M6f`M9QZMw&`bLB#4{{1lcpY_yQ>Z8LiYr%o!L?m^HyNEXb zC>&A1n&j9KM>-VY6y#m5luj2?bP))2F0XsRldoV?Q4RVJ{#kpXyX&!37?Xj2LARV( zC*o(DdQrmsIDIz;rw9nu&bRv?Ad!he% z208e0Gyi_eYK!B1zz{t$fZ0+kk(A6KX%YwkIa>eom4FL&DGqAH;WM!cwcyQPFGN2W z>i>De>KH22R8s5u@Z5e+DslJ5fB#`%{)xa{v~O;f?gE7QjnHQI6Tmq2H$(Sd*H`2= zCY!^yT`Tjarfdh6+&O~Z_B52$*Br5y#5x5CP|D(?cIQ{pVfU~4EztbWCMsIa^_u)Z zhbtE8#LOYtf{4&ld8S%A1kHa5>Q6PWsE`(p_aJ06|2{uZz`n!unf%cYkrSPJRS;VL zp+T{#AB#rNci{zk!vKUi=r>N?BsFWBslNkqwd8OxUJmAnedA9rXmFdVIFxtr-lV+41xejV>PmK{Q9Ssc zcgO$6S>zbD54Y{|QrepX^VseSfN-|E9x-vY|1Q507XFhrcV2Kp)Z zc4+#K7GRIk!BMxP7~_Fd1X}SLrWtke)00PvR0D=fe>nAFRd$#o(|4~y{}1#ZW;;~L z6^YXN5A)f{|MKsy2sh4Q9H}_4m!1oBmxUz8OqJd4h&#+bPi_Ki2Iaq3SoNm~IxnYZ z?%Pws0{Rc->U>|ap4R!cH+;4G@8RM9L+k%h`2{d1UU~v`tzhvnrk%6^ze4$6_qT1(k+r+1I!O2C{>wBCU{>-_6IBJUhpY?REBxpiNh1LA=8AL<^N$~R1b{vU#nty9}7 zG;0x99QFSCpY^^ZrO#XsDmrI-U@DylOY=W_X|Gn;Uvh8ve+xXO1EibxYP^oZNjCON z?&kcH02DNsII&dEa;0|kgIPKhy39Mxv=VKmuj)ej9{69CM*BhsZ1}Nw8Ke<7zN6xt zNZ|ciH2?lY_TUh!6kYeBImo{=Z>8HX1pjDg1k67_4zEnK2M!@~-OP;0^+T}#`gIiQ zjVU6S?%%QWB7w(S=8c#bkhP`zm-fAgKdwOd{NaWD5T7A&I3s+)|052ae`Cqz#Zbm203inc-p5BxZ zoDxcJTKFuFHHI(UTN?ku^M;Rqt(7wlK2Nr?^;dF{sxWdj{OwoZzaO;ELLOWkK2-yPc_P3)Cr+~j`^7m4k=UO#i)b*xs`Ycf518?FD4HvcRP9@~-3 znun9Ku7C|{4Scm6J0j}#(cmm!KVXwtaoSK(AwcUt+%u}F1_CA%lFl!ip@Tty{|@ud z(-Um)6|0{^vIZDF5Pa#!d|@k6D{!%60HRA0)IN#Ck_+-}BH2et52>m->JCFTno5 
zYiQ$I&^=xu^nusX$jFwbK%hjW_XYxQ`eM7UKLkHCEC}v5qyLD0g_#^Sd`|R+2cNYo zk}<7_VZ?l2I(Emdr#*DY?ghjOLE@rTbWk9N_%WD&p#SiUxFOB|!;B5W2~NY?SKTomGdb80Hc^AL@d{QvhnU{+l?Z2ewr9j{^4~%K`E0V{bZ!5= z(vstC^+5Yl^6CKoSM6wtW&l@Y)bMOu_@q0Kg;Q7ohm+w?f?KIj*7}_31mJlt1y9%O zd6}hUPC@=Xdgn!XrE9RHLUb~}HnRBp3L!C@^H0=kNkas8=s6$X9*23l&Hf*;T?AqB zX8$2aYzq@7LhC=&2x3vc{X2V&F^vVOhW=+;PdY(-qgQAAyK~vn#Ouni5!1hk=HK7$ zy2j#sZU0yX_9R%&q5p6*|Ni;)C}UY9QMvPma~4#IKahr_7fc7QonWe!wJs&VYjs>9 z>~l+#6TF801OEFjW=dL#Owc(>L>&g`%_(XX7z~f&f@y}JvQM=RkqxT=`!5C&r$x|z ztUVpbpzloQ!xYYQkY2Xcj|LJWOJ_anzSbZPEp&a_|C`8mn4%H?8%|Bg78MkX9h8K37NP%Z6^}8_x zJ@OOvKli}?>%W9CWKlBPx(a|w+l}*9iq(k&+N5;3&H0DPDS1B#kXtYL9sU|7SZheI zgZ&pe|0r>u=!L=g#(^pK6C3`Er@;TS^NQ;JL@3;p1?B#AFHs&PLLOP~BPvY7{>yEW zy6U6}X6s@E7v1Qt(jSIcZu);FQ(4qc-+5_cN=HGkZKr_ zLvOg3&(a*>Q7jzb<+3neDW2~E9Ofa6()b)M2@sGUmnS>12~1)K<34|X5P(5||Gup) zXZ4JTWt{ zwcUN_drxBWSrslMYr6*@ISsy|$d6XK?vWzdcwdA^%BFbdv24!Hq%e2&ioEwqUbSH} z>co*k`6rsMN2_hpmE@g+P|?Z*_@}h~Ljw9J8{(~XZY=c+fd0cL2S`f;O<dFGE z2d)1wzSDQ2xBnv$i3L@H!rUFN+ko$^aDEjinzs~$YGW!vG@(fyy8&YaiYIJi8T z^Rf>^o^A~u^ih@M$l2-_hOBTR^r`&27h>*SlNBis97>fIE9CG{56n!H_6qvbafFzl zPOQqmi#~7FVfgzG{LgP5U;C^7yn1+QTbf60NIB)>t3z}s|FqAr&yl{89@*g!{L_!~ z0{DOYQ~c5O%k%5JX#cZSp;DdXsZ4If9rEw3(uth>I>^;ne$amq2&N=do?_mnahyZM zcyTC4J8ESVKupm8<7jq#&A*R{ofV<%`q=s^H7hVS`>k#p7z8Wkd~t(sp!s*PmmAjHIKMz8 zL=v2cTS=w}s|E+RdSK|di)dk(aBJX58n0q>SjpnuJY`aQmwcXDNU4hQ0-fIVmH*>k z0Hqqxs(<~@;`$CyB34BN|BsF!E0Te|qqfRKUK%YrKS#Io@BWMHmJKvT{^Or6V2S}y zdh6$x)hE{u&0)yDe|&Nst(3Y%PloyD8?%nTSt_$O*)WX+04)x+E!34t0PO_!(UD3t zSM8+zhtdR1YQsEQ`~fhoUkfegdkBPbt3cLSn}8B+QV-}qc98!9{YT*P<;eGMCST@4 zAwf-sJDikI$&IG}4A;0`3-Q+AUEp(5!tx`d=T7r7u^?^XGfwk-dqMv}=O3{mTkyR8 zM)zN!|0H4l=YQVuexjBh87`3DR$+9??mP!#aFXd=J0^RZplQhEs|d<}Z;x=)Xnb&W zPeE$9zsY4MY5U;}Ud39QnHIDBLS;qsw2}3t^Jk2bxZV|CvOOM8gB1^h2m|B3Is zd2(r^lE!~GDs!{#`w9y++WZ6hPkqc91rrFlh@M17Tg0+>+(=jz*}99Phe*qR`Hx%&1_4%GicjWrzDqGu|Kut5J0!fe>Z!}_V;`Yfd8 zj8icxxyqE z#3JzjFe8=p*{VJZR0&Bsgv=xg&?ePh44F~1s}bSxXTzR?77RtVtm0N86`sx2VH+jt z)(MnkfxGaO-ZNNXqz`q4nnmO4-s{zJ6O#D<`JZ<=#`ei(SUci3vj0;YOnEJG^y>tWf{+f%W|P-c9|7?Mu2j2O0A3qN+%R zJi@3Of)qpl;ba5k-<#^Y2Y4jqg%DwDxO3Q8xR4)wgCkhBoeNLD^93cR3A4ndA>BnP zTnd?dyw1*HBCSMt@~~4=z}Onh0?%!whb6Y@a=Z(@{r79F^VQWJ3c&pH;GA~2lR#P~ z%s-HSxAkh1JH8S5_JE6Jc9mU&fM1nX1BLYR1VwJ+;pI*L^94KcxC+~nNBvK3dwX}q zyb&gCh}hr${oPh;d;VN6jeqfS#Q0c`hT{!%cCC*_+NE(4`s}f8F-xn^Biz^;D4>o zJUojtE7SA#G&}v`&awC9EukH3LiNW!2i@~GJ0R3#c0Fj~=S}VxfDIb>&x9z%w>kfe z)PM{d+1}r9V8?T5d>o*ap#Ko&A7*wTAgB~kGq4&5q5lvIoiEuK-yiBhBm6S3W_-y= z9rb}&DI3XeLerl~g-o7$UdI;yvrvmE8%DZ3R4za-U(;J^iL{vIDTymCTftc+KR@UG;vnC3Gr|7k^O{d4rAXMv^UGCKijlKB?AGihXPii3FcVjV0+gXsFB<5|!2trz*DlLz1kW65ubr`^2~PFXy!mxLk1j6g>ID z?6jwD{Kof&qK@ zwM?da{6#+q3p00KWc<~C(EcCbUu-okc96hoMXi?3^KI%sRdt_gxkNPmpJ&H@O65Ru zr7?!n6-f7Rfl)@-kfSLh1G3rwT$>?rXl}(<4b$yVg=liTMhss&)$Uvwf8By~qw`P0 zz{=X3;aM@H%Z)-ZPDvV)W{Cwf^od1k-9WBW~?K zO`aCZX#j^M*Ia7TrV7vqX}XN6+sF zK%oBz*MEi;S$*=tFVKPsYfeC1y8Y?MwEkxn-((J<@aNsS3sUst3?VG=PDxT0%%<>B z6A%RW7q)mHCetw>c=^vPpBU*7PRV#=gY}^l2aie!3#UmT$4qP0Fzg{}!$gEtMGlf# zFE+ZL^;bU_g45W~U$p7}iH80|g6aIPOJ6VSJoE^lWdP^8bL^Sn&t<;Z+KZ1tMLl_Kb!oo`@wG(J^F}RZ>JjXLaH0(pI*D$ z9Sc{rWQYIwqu5VO$l3xAhaplyh^51tWkn3F$yhQ=>GB{$X#V~Ell|SX4}%!UDF1Z# zojLY0U3H}U?7m71Muf({=mD3(e;B+w&Oez$%y8UBQd`NRFK`4_?AlfyWm`qN2wmPxx zc>7B80=5D#gND5?>)ukK|9Nx&RT$=R&36KJLaEkI+5!e5az==@Wd=tEveo)f{#OvS zOQO$wOiVpss4JG9toD*q7p}_3IYwqiqevUewO&geF7`x$N`xG>ALgI)KB^33JdB7h zI0NUxlaH4n{FS1l4~i3@Wmsn#k2sU4%bA)wVuaNu*`ZV77!;ZQ}z@6~o@- zb~PoAV&bcU1c_Sksr+yL`B}O8=h`b}0MKn9pWqYT7q53XNs%KAWwT4$Cj6U!_`Lcv zFX4R*!qyhN(fJ3W`OZ}aCTW?Q`!B0&GB#>a)QFk8OEzVCgv7`?jZ0i5dWQ!hJch5! 
z7pX-|cf~rtx_4eJU>ce3C`e$3GoWcj4xKTND1tDkIR(;#%Lf@M1OJbAr+QR&2(sz_ zG5Kz6!Nn=Yj>!3pBV@Gy@WM1dlO;D&tU<5Z({C>w*q6rpvNy${#(yLm^O{7@QY>s} z?u+o5&$RmaX2^faPaCN+OQ5i9v+JTj{wt52+Z8aOq0iwNRCJ~MraAOKgZ~HBe{SsC z$iN4l5}Jiu_J|oVsMC3vmVTHTyUCMCvMI^jJdZ?e!qNVy zr~h=CSL1y&j;#4#&Sb7-!lL^xSs#Oq?=pdX+Nl2r-G9~S@92XU{*Hr-L%I*7KvJ3~ ze#fbqK;ls3cz5i}i}c9%wfQ`j#O*`y1t4T!&8}qW#&IJ`i0?<7anhD(9D?{J<=`~4 zx?sB`N!x#QS@trMH~SACB#G(mQ}rD}e9buMk)Q{S`}vdTfq(j5v!}+X#H!$Zm33}bYIkbY|J(n(G9LV>SmKk}ne!?eCUKWn zJ|f8Kbc;szM-21bZ0(4-YzJg+M+CAfn?WLH!Il>p#stdoRbvnaq}KV$F7Y0EWR`ne zHTR`i4hJ%jr*a7NpEc1SB!uSwd9U@m!X(g~8~OR|(}z3TDw4OVAp#6!qf~kXv58Q- z8021K8ccC%6t+y}6fx|LYD{Y662`Hsg7nZ>+-h}RjzmKH4^jW~;)Bai@0|Ggb^h00 zZa!?KN&6!tA}T`?)*+gI`=2jFW@7EjI%2$Dsx7PwLjO;|-T1O(4iT}x{XckuM)dmh zN=qU&je~{8e>Y)9G@ozTud$tlPbcDx;Rf!IA*TGCTq4UT?6J1XJaJ? zBh#_7Y)A&$|C7unv`a1~^$0Eq^tec|I^1DpF9CCO;&uqqJ4dGDnw&^Opz9nRB9H#Y!}XC*^Gfl zMXi^^3s(34^6xvaCQIN=Gr#wI_pwUIzaKo#h9GKg$YmorPtBl54a^++@<-!_UN+g}x!Y3?*4AJsmi24wM<1^&M%h*0Ip%QCY5&XW}QCE92;kMn9wsH9y ztX%vHg<`=u{MY}y=lb(IwLd?ub)L##hy9lc0=WYggr9Ibk@M{-TK=obcBF=h+@|f1 zAV5lf29&RbdtOC6TLZ%=2u=U#(K1FXv%1aCswm=+G^JQ|BEo!pixtMtTuf)z@hz-P z#z|*H6!bJ5co?iwnZ<>WG){8=84#rCeZlU?hTJ|$&;8%|WHw~rtdxE8ap?aUJD(Q; zx@8gXU;XFnHc5m$*h4hHY6Vdu?()%*lX*pv5FQeO{P~G+4Ixg!A{)ehM97P_|3dqJ zEXN&o#((?%=hrD{kOKctWDjX>gGrbZE*-CKF7!XkI_7EuTAp6j2fom~DM7%0CoNZ} z*E-!dB#@Z|A^%<|Z-LqFs*q&IjiB?7!VwG|gUMXCF5N4ZE!&Np+;t-1G07%?RJ(R+ zHzl$nbLuR7$rxmlf3d0myzIvXBy;LUJ&i%G3L7$uq54mlAqE*K+Uq+W0qPgOFAIrR zIS#%x>b;+l2>Ky9Na|6~oWR*|;9z&6*k&^$^Uwdh_5#54$6QeyETO`ph)WzHiw(FT zsy!)x{m&ns!F?mp4cjMQw!ibwi$B`~`+y?``LB)tHjuh}7EDD&Poh%X5E-eU?Z4=J zF|SUam57+_-hX(Jg;mJX5`pC`8Rj3%@tB+dIz&h*nxO=#pyl7+Jf-=6cIbZ(LFP_v z(K{F-42SIh$JTp?HF>uG!{?pN4ny_`Lj**&>=6jdjY<_QYEVSn0jZ#(hK+&IDJ zT1RUYskT^G7y>F_u(rif1sS468xa*Jzw^Eme4gj~zCHR!$8oe7cdqL^KeIs#u>ZoW zs~5yS&RzORegvOdjfFZ%Mz@x>>H!$%r-|xWEckA5z6405!Rt9saQyUHw<)nSivQ}W z&z$+<=cXGN;2Dej7gh|5Zx5C{W=Z#h9pAV{{ z|4YLkFXJ3jE9AcxwPE8EE3i)*JlGh))()pC#~u;Ra)j(2$FbVIh+Mn}G=UabsWmgXp*R^}f**Zbqz#-_KQ zS2mFS7Yk1T@7q5aWd1YR|Mk;+UFd1syE5|@;y-U*_I`^^jP9`vE!)^Kk%|iFIR^&E zH*&ldp+O;*oPYL2T+Vo%z}bOLw_*wKn(+q_P3Kh}r9sCzVt`-WjVHNJVGS>iLFs?g zPMoVBsnaxuJ=lMtn12MNmU(U4wfVlLAp85{J9RZEWLMR(#OeWdAk^w?)c0IP)A*5| zz=Th6gW_{tVGODHLj8`D^q%Z&-?)9Q@h49kB7tkS`aTYAm5TnaSKUp?BR8*nDBY=s z{a@eE5}(~+l$I&_zmB&!T7rB)g%_%!;J@znW38rb((>m+_R&uBj>E|nZ=M?j@UJ!- z5uK#}t0}WpvV^5E#vH2lb}b3bwe$hm_XXKZ@#T$#2e0I4MuAz_c9}~t0c8J$a{@m2 zh?d&UKC|k*8`=N0Y@y3yX6~3pTl_(Ie~2&{F@pTz>LU>>s3wMD;siAD!!cLOU4nB8 zjE%m#>>oo|=I${3ZVeAQ(+O^T+~s_E^}q+WrT6zv_~+`>{nd+`ik4P;*tZ)wQT$iM z6l_Ny_`I&E7yG~76Y_tN|1!v+tC0PtT~d6j9w{9B9V+sFUQMr#OQ=a18rF{A!G*|u z9xD8LRjq)H@gP^sKR2UGmN0;$215gr>S$0Y$TQE6if0$_oWLLCRR6`PIi&yV&(r2t zo3{lc08phihkGurUQl7NC>%s;&|J!dep<8KnhvImTN?UjF3Tx354xNmLv%K1sUYAt zf+@$WOp{yt#vBYy+3I z`V;>y@^V7`&$xwjesb@`8jn;A`AvED`=Mnj1Z;Y199xr$`9J={?t9;2aTXKvfBq4G zXaxH&3>Voz73zPm|Kf#nF;PVmQ~ocGuvX1j1#F^T@#-0%H-7$C{AVb#OU=y35z0Ka zb*U9Q$E|AMmUM*{;&1IVX9-E_*zMqDKe zpusctQnHW0uHAM9iQM7VfXwBdiE6JB;+tjGR~_G1_0RR|vX-+$0bx6A~wV*Z(MDFd&ujUT^g z_^fIfI{73Y`hx^8TEZ$&5huNhG| zbA{|2nsC67GZ_FL%;J+lAn&(oy-#|inbfl2n-2GsnsanE)JOgos{sq>A3|4N=ox`oO7 z=iZPo8esqFzMow3n8<(qeO2NA@+lmE;<;;84Pcc2Qux2J)bSci1n#^n6DuwN*Yaqc zLBPVR9nDMVQk;LL2!^|QBMv#c!sVh@bpB2YwkAk5gxkk1uAn82>VLd`Kpw(j^IKk0 zhWUbxL&k7=(bHWEKE545TOC2^pSw>*LUG_{Jtyq{%ClH>)p%km`teEq^QUMLFnA(f zaK`w)a*$B-5BlFbG48a}iHUq>NnnAL4L{&ovbhZ&{;u1|`#;RF_cwr@^_QZ+6<;NREv<9)0Q#14^AQifS9C;a>H8RR805 z0=N!)rjY)3+iz|zZ9NnkD+%B0yVt=0=AKJn3cCwfKXO&@a4wK@)_9rg|+>I&)CnD%@nCR#y#3|2olj8HC z>cDp^fej5Txg@X9oR2ces&DIdsKS;89F5csZ&IFUzmRw@n)y*8&Pc4GOS}| 
zz`u`Ouj>W{nq&UqMaKi7`BOHL_|H=}CV#Wa*qX2Y_txm@`rHEaf9X2(;PWre<60IC zBY3RH|Kaejo7hC8M-77n@gI((DF{*CWgYCvh$~r=nW^rRsAow?DE+f4_J1Wt=?hXI z=H~fRsQ;N*Slr%4!E%XAQ6`54!rDO4VROW!|BETH%;kU)YQFiuIsr!mqU5{2pEY*k zvKkJl|LLmFpZWRH)0dBfBFXv3{~kkpt2Bu_%i!$K$S>C9n;$wEb1sbRzi9n!VkzD% zIP)p}?|qB=rjK=x%|!qEHBQ;I>T=)X7;=CWAR z%*z;LKdJFE@oJ#-&u1SS#)X32sv0@LiPfk0ub$=||9oEe^yMONGXJNqd@Q3ZuDLF{ ztd2SRGWLJ{k+dB7uT_7|Pe?uW_t+={r{6Ai;%CP$CG&rr*Id41?!T?ZW$jBU|L3~w zTz&6G4v_utr2hGZdSn%6u}x%&Rk$Z4YIV|8z{1mC=$gUiQ25Wk7Nq`%ntv>VS%Chp zVeaD4JmW3?!eGqwQ3vn!IRDs)nXV4tvO%Xe!^9ZQr`(&BW#EbZr{^jC&%{@s5rf8Y z1O4w44vfvt(jMoA>)G`g{-<8uO}f-T@n1iOZx|diIp*AAH4eu$hwT5l_si;A3Aa&b z(*NfLivLX?2G>QBMsXU zp3AUq)VkCjl>J{N=3`>Y_!+2ga3n5E2nf+cx>jVjD~nJ!cdll1CHJbV zvim3@J{A4r{0sBDBG^FXKkG{~jbmwtx~TqB)c^eP?)C_-#3yUe|7Gy69;AQF?NsQW zRRr^r$7<+1F1aJD-PJdF_qb@q{L|OEf}H25`3LpStMjJscaNRFZa8PrxM-|Cw^4GgtGF5gSndWBz%&x2FxHA^w9u ztc#-HIg_xWiPS$U`7hN^^*a|?Z~7RO)RjpFmd!mQtSu|K2}LyjS?zbw=Ml1-%>S9y zJK1jPdG5jNgx`;&m#;8sFjUPzq5rwthWZ^+|4izCmfe)u*Q5St)c^I*oYPLlHIe7& zQnLTO=z^5a(o*9I?SMO~|Dp1Kk|LTb-EmidapAx(k8#cQUgUxU-aLVpIyf$3iG;Xq zxOza$OgtWqV;#nlXb+ixcrEvZ@ttew#V>mWn8T=VJ0*Q2K?hkOo9hJzX@iWL$rwcy znz1nK!^~*v9~H|UyIr+)Y@qtzT{G1TdBXGmXa2F-B&)Q3+K^%H0i7eUVnFl= zQT=A9<>#p42WqIDaGeg*58sD7JUZ71E}u5kww}WCUn-ya#ERs<5^nEWefNdM)iv+s z+c4EbO?TYPBm5gbzPquTHtEZk)pxR0y2fJu;q;bx*V4uvl`gfr@--}-_Dxu_9KQ|H z|DL6(alg>mbw!P2`cE5?|N7|<*^R7RwHNnoLD_%$+qm>JNn8`Ytc5+CIsy>?IV_kf z5oW2e{-giF{&(kvAJu&kO2HqmIM;%|WoYVnLqW?WOcPM|wfv9jaeT-vw#!G#q! zAKnn)aD6EiXV@wQ{`2ndnc3HitjrK%;@4)oAsBJ77BGw|CR35U!rbxJV)DV{G8>w1{0=~ zi}rhKtM2wdDcZeeZ7S_k;eRh#cDJ*3Nv@LrdViCOkrLA3G zu-q$0MGx_xr<&UJ_>@Rbh>Y5LTdYtZI9AYTIV>iVZ;-_F%cw` z|I5f(S_8abHA8Ju|4il|+JAE<9{HLqT-11Smu|I^F@e71`cRvxX%=4&@-R&<;(Dj0 zAbpmhJ^l%}7IzgGSb6rOohxZ7D0lw$?#ZrMiu~u}W4?1fd@^~YE~&C$smh*-D|A-h zM^DuO_gT_U5Y@rJ{&z3SgM0d??|0knpz&>XUz>~LlnPN-i;=3XXKI_sa)B}R%_;QH zf3A8v^#~1`_BdkyMc>~KQrEh#7?fE~A2*_9=DVT)?kRVsl;po=;QWL7pLQeYG)FK5 z=bxS9u>W)enSZz_sG9@B5F^|FGmVB~%=~hYJr3e)hvwt66fzy(GB>~Y=ek>3JpL-a zw>&Gd&;^mHd)?Z%jY$2EP<2nkE35iktV*sM;^{QD9dnOW`rrSp z#3OwSIa)nG>v(F{KtT2I-h4~J`XxCUF5_)hU7W2V{66l+%ZK~V_qEu@uf+VHH1u#g z>5dDDJCZXcd`GP94(jZn@}F-koo3`7ZNo3_>}87$lLt@F%gUWX>VNuTXXj3js$49q zx*-^M`+z-^DfB;_z{HxUl6pMuSdol-hz*eZ*S;%YO`!gV>c2q$*Scqaem8}5?rkCp z%LFG79K`;wh5YOyQ?KOwo{b+XfU4Ye<$ak%9lt4 z{JY4<>F2YK+uWAlIW#SG*I2=vQ#$BgsLOYmX+JNy_eQbC?AhBiF#qsk>^xe|l(z#> z=*%B9^EYs(zA*~_xwmfKx+YZ?&ojQKgpkx51d!rynqvMra859;vd%xT<)QuV6PgFO zfc+OF|HX1l8Mf02#dl~?h=?nkO|$udlPTO{{vF zx3yb0s_>G;4f#nSMSr@?bJ)8-JGVfQe>g?|PQaF7`RDOXV@KBP#n_*YE13obkBId5 zs{G2aRmL;+7Hb@gUfy~L2Qt6#*-Oz5x5r)(mx1qe)jMLm7F!7#9sek^?v-(%9y;zD>W4fa{6z89*2RtSr{AM9^zcszsX{$uF2 zSwfo|v>6q6`JGa)o*cWdF7{}nx`ETS9~?(c)h!)Y>O?-Sbz!%5lV1JWx}I(NPGtVi z&F^;|a@#c2^FHJlj?iXL(%)NIHD=Mh|Lkes&DOyFug6!H3YIw?@>&0&Ig!quZeono z2FZWL)p6JB-Kz!YZY(nM67v_+K`nUuKqMh_U)H8S3pf)tdMN$x%5}ZS~x_H zJWQHov2@ZH%s;%w=Au&xvknt)+|E%mp12M3f9$xehvIkF*c4VDL;Z8?Z+!AWHkdG@ z0u8Io5j9<$cCs=5$EI}RUU^6R?^*e#JQS$Nqbi$Jtc?6W@n?t6z%2^-FXI$ET_c|5 zFMoZ!)zn*1bb0wvI-lzQy1r4!-et-Ks{bq3%o)E`X(dO61)Yt1{Yuwh^~tNg3oFH0 zOu)*XQT)dQwLQ4S;QW(&8Jg9#7P<&f3;ebNs?(^06#13Uw3q$7L26d&ZLbAI$%` zFMj?)kn{YopVPHU$5VAVX|J9@@wl_KIX>$=R@Q`;wk<6&c8{%!n3tS0CDmzx#03q- zXy?b|ZgM)<Eo5^LUlDfJltXqWNj(>XAb7UZ>>0_*ts0H-u~r@IgXaf&Uy0Y?ZN! 
zR3YZ)ga9#!v7gSou%F+<5HqdjL?P}Nre);{L;2P;DP1>q@VTP@H1^Bgk5A7$!s<1h zAbcAI-Ay}_pv?awtMpbWMi19rbT{x=(Izr7TY6)R)5(h|8Fl0@bbbZii{xez^_%Ek z>!n}+bzhHq_abUo5qZ0(jS9a&ya@?T{B z;kBb22KJ)e%G=plt7+{s3pyzM4_IC9kkh22{qJ4%xw_tVyvkAib6H7kzB+^!G33&` zgL)8sCR!zfQ^JDyHU@$+tp~MY8p?tja`O?YJ1v6-HG^Z5_Bt)Sf7kxb#*t%Rer&q_ zeD~P7*nfI+jC5{QbWLGzCToV9d+gma->ky`&YGxw!me4RQ%c88trB&is}D2iYlKub zIr_iW&6|xsmwo#)jQ zw8Zcd&IV#_Nni+-!nF* zCaU*HFKKN4dJ>oMpHcHq?A?d1M~|zUOq}iCjN@nsE}-Bz+`gny1+3G{yN=k-FmhVj zFX?(R>{{v`-`{e||EWf~XM`JH=8C=uqd;S@TaEM2-H{X8@e<_B2QWLJxH+x?b$CGf z-w%3mEd8>TLn>ylqZ0K;-*8NKfifc~#PK6(?K*neu6 zP!7-;s%2s(F86-L6dHOizAJJ~X@bzX@0>kw0DnWxP}{D;Mjizu=3Glmfn`oDUb z?X#mS-K|DHnEp!7fF{Bttmve6NOkN&S^f*1fV(8q6C(n<1i^#&fm>%L`Rl_np! zF&j0Yh~7q{|BLd!pRDbU$9tsv#%{LAJiy@1Kf0A&gJ0AS;xlRUB>6D)R)?O_qNuVA zQ9F`2Xriui$*3EP_>cBqJ@O^8CE0}~1VG<>3|{;x#K zP&*ja|IEVv)4&{6ah8_q2%KJc;eK#iiKhpoGfm#E#5)SX1AcZS6ITCi#plJNx z230rI3wK)M@o1}66=_*d7w>cWb|a6Fgs{MKb;3x0B>}f9*#3!~Rw)_>u>q>%#!(Oa zde`4P>AIw^a6hVRgxLQ+{;$&us>D~4zgNBIF!P9B>^9jlUx!%9q5L3sO1WhAn!#tchB_-R-ptgy!$ zgqZ*HPaLPql7adkO8>0Lf2MI2`G+diWv>$$K;|DZq>CG~EJNed(8xa0je>AK%^RUJ z6R(vulDMO}Q>J*V?C89Ku6Of;X#;!CWwCFYdE%9;?7!GXi~2z?0_Uf5p?iHNKHoe3 zn;$|qp!eF~EWY_T1R&>)n}OhDGKofD{?F!>$EQ|!hxTxd2=UrQV4mxZL`LXN3%9F1 zGwXA18M!SbPx~B=L;fq;ihYugwU(%})KB7>%3n_Ry&q5UUw@q$Hy}D!ABOK)^pwRt z*35=$K~urS6d!w%Vy;5|tB;p0$EAVHe?GVDud8JL*W=nnlu)_i=^OOA0={wCwAOq_i!gn5WkJYgaJ$k&w4C{sl?#;H?9p+$fTL?OIq;E{Us^r@ra>iAYgClhgqL4mmf|C#-xe(U1tCYrFA^nZEgnmaD8 zLW3MB6a-F?B(4GP2&^PJ8m>Y1UmTrTfBZLN&s1#Ws=wlw>c8o2=ZMkm+yZj`Il-1* z4Jak`&xdC$!ZUfsU{F%swIE$_6q$cmd~5&2*&EMWx;5pp_1B>6O&oWf<`rcko~8M{;$>_60HRU)PTMaqW(IgYV3d)JTERglMRtAi6BM) zJBXC|&qcTQ*sp7QJ++c0`(~8Q&oa{yF#7I?t5Bm|8;m+jkZbj%jk2P;ANNM2aAa!hZ<6<~W-s z%KtvMWCWYBpXn*+v=(bec!1r!U10~! z-6H`#+1Xk^>VH(kIRB9Tul>3^4oE`&Aob67caBXWTvp%gwZD6k>i@dnVC)y#BP;p2 zj-G4b9)3CoItS*lxzHkS=ht&xV{7)D`?*iu*wbTbBy|3DHrI1?3@Ru?k-`MSej(9` zkx{$G(bF<|m~=q?ixp+V-k|~EbWvPGvl@*2F!$#*r2h;3@4sXIkLPNAK|H=hRLyDl z;*hk)RGek*2rf-h8esp$(b>3=sFCxJlK(>e^V6^X_uJ;v%)F9&hVUf@enIc4{A?AC z#{{-MxDEt1=b+?@o^4ZKb+k=rxkypUFWmFT4ghi**T3@y&N* zlH&28IXcO@7;_Q|tp%No3@#4El>a^8L7ke7j|0d*Kk(Yd3tq4HD}WCegcn{0Ii>%h z{O{EKv-WTjKL4+}i8SvcnVH+QIdDEzOM*7n*Jd)_om@PoKN3?%@gMYmxm4C=M1yyV zVP-+YHiiDjI^4lMzi^m9uGvx#%2vq;@nk>|N!t7BTEZ&8p-^Ut+sh$;lVL+zS1{Aq zk=yTmgb?5P9#TvZQ~3e=-?a^0br>}9Pg=GxyuvcF#K2?yWg#AcSq92HO$twI%Ipd9?%oVXxhJzcVp*T~`!Z^J}<{C;9zj%0YWmNFuX0C-B z6qdDv_`@4m*qY-1iEmeT)@{TGL!&{(534Ef_U1 zS#sF01N*-Q%CbugFk?$v?&F?i7M|@7-1!RqGxndpq4+PBn4zfwWd6_J!f-aI!AW5y zPHmvB3H%EG_j*pKe^%l@hyQ}&fRB1kU-J*22!Ow{BGd#Az!Z5@tzf68dvppE2VG-p zImQ4(CXG0#ZW9fW$bYGfiAaGe7xL71PlVM2x@e4ZR%M}SXh_7 zO`Ka|s*U_t3zdHuxE1?Psr=`pYHv3>mH)iL`v@Jr0mqay!**da2Nqu2a;R=8#eW^a z{;$>djN#E${3oONKL*a?tDCiphY6ue95~_&nkrz48+R;|@qw46spghjB?lque;oWz zdMDe2vFTboh-|HkV8WS2^(gOUBim&sD+A%XHEQ|@uNT+j`Sc(Cv(o?Gv$nVjBqaW$ znotNlb13kNxSuF@S&Rtp%g1!gBU0o)-#)u_>t}q+G%l$)%p&#AWdB9ZLnnsYVJ}(aUnQ=t4VR9%F^*?dHnO0_E5Q-V}w5VjzG2sjIe^jLasQ;m* z1=DM@dEqg5EbdkGzbpLjF|!Rf5MuOyEo(pi&9`qO4Vy*{vYHtzB~b6A|#sP zh9QbWQM?W5e_w+Arx6QR(fGYLXV=6l{qMo+DrjPlXY_y`=`)$6RUNSZwCJ<6NqdZY zQ6b*F3^h84vHuj6b()C(VE*&_WPKwDEj2&XplKYwy%?$#^AF}fSEO%v{bXEmGl(}+ z`k&I<=EN>*3#$Jj2yp&40z*!xxKJH*0%Z~q*XO};W&Z`){}t1Yvl^F_gMUx?zkZ0l zp|g|&E^#XiX(y(!^jA~)hfmw+pe{No7jksWxL~(HiT^l#|B4X*@#7rJ0fZH@P=Z~* z{lEFIpB|j!6!G>G5-R_hmMA8~nEy=7=nA3qKf2==wsgok>dhVAVlabGhZ}_{)}r1oh7leZC7Mo}j-r!2_g-|4{uGj~>+Jsi6MZtFtb$L<5dY zDQ)9LsDUE?`D^~q)%0O{WUfC*DgQemBacwjKmUumg|GfEO8&uYSK^n4Dz?7ft#s_eh$!1*V( zV^7y&nsfmdqst{-!MPY&biW0nE_oK9$Y5X% zc|a)qUnKqmx`@@55Yly^pKNGfO-M=q``V`!pZRJSE?fAO|04UpJ`S>rT5yc}g8H8~ 
z|2|#2Rh92V!~Ew&D*sUB0G=>pjJ547QvZYgFZ+GP{JI?`76Iwf{qz&9>RJ&A=l5GK zK~=9cDkFrt(o8+fqGCu;*e6lu|IES$D2<52f=$An{?ED%t-<@m~oyyt(K1uj_GF zGh5qte#5%>NG4;11yyJGM>WvGuK!!lh!sMs;4Lg&~1F9rUy z?%Ek~crkm_|1|_5@m*B^*WlCN&#oQWj53?IbWRELtNuBzE@OO(sZU5q)^^W`M=crd z*|{h?@J{aO6kI0$Hau$%kKs><3dOJ7Zpe=%zvi*Uu}o{ zH+}eY${ni;NdFh*e-9JHchM%|{Db-*%>Qw~B%a@BF126#zdRi^A)-&Z;bZtnQ;DVM z(J)?7nEwp%yo}emVR%!knglBJ&l*9^=!+xspTDg*%_qdzf3fLHxg>{csEa7vW)M^O z&lW}h#mWZ>W{fs%xV(wfKi7yJ;V;vN>c1fK4~xoCczlpzPifRk2Q2CDe&rj+(ez6q()3`%j$myyD zoPV@Iq5s*Z%s*_|UsWqIbUul}z)M`nZn<`tQv{qG09`6pV#@sV`@YgXGq z&(-O85Aa{i|7EpBOPV`Ps>uKGn;Xmo68}->(KPe{@t^WxIU9#1|6J2~$vqtZYHq4kpz&)&_?VbRDE;qejh=LKOw9y+ z=Y5XQq-6fjx~Jj>+z2TAXW{1en!)-YwpjEf9kftKP3oUN>#75(f3D4>TS&Qe^)zuV z+5hzw|H*$9R>Ffos{iZ8E{m~IT~z)vng3(Iu2&E7p93SEN67qVa{l?Pu@&EZpLBmx zPctI<6@h=qYC-(NH}m>q;yn5kT8kUF`g*zj zvS1WBphe9Ns&+3^HO4F<(*HG6!t$tc8EoY2K)~HU3dE!H&xPj%HzZq{2BY&2>VK&G z=ZUf|rV1ec^#|tvJicUG!_XhqKbxD<_}z{8I!gRe|9q>|_1lHJm$h+qJc{x5>dV)n zga2@1R(xcho^Np>xEy?|Zh-jDpH~j1u6?Mr-f+Xu-C;#MhvWesAy(+0O9QX?a4bXS zoBp5qhs1w^uCl)R-#34hCxPtjV+N)F;h0kSha8pdY76qg2XCvY0&_4a~w=*G@QeVxi z|JCt5y`$}KMyWFY@T&iY@ea8u@>i@jZKhgLaT!qluh%mqAQ{y^i-dVRr}dbBNJ1+d zn0id1{}~r3whWC_!;rnA|I+{589RyLu^nd{MgH?k)Nx?`vvU4vm%tvU@9ReWU%ua{ z@3J(DHqD^tF~rspnW#zrWq<(me=YcE#a7HeCchGSDn>BS``}mSJX0Y3U*!CgiD>6J zLQ3_2VgCi{pUM26sq+NdIuN?kcESCj<#&%v6UCAJUzGm&g|T$?L))4G)c>IVxi(|Q z{-o>?rdTg91$WzwTun1QnEK8on;`iwdb4zKCUcbkdN0?HwG);s!C$5#|M}`>rc1Wm z8>B&G{?F+AW8g;OKeg=O_2{mm<{z)*^N~4f&6NKg`LD?(FsIt1ws&j`<^P)G9?CY- z0*DHxLlo#Pj0EAH#sWI*b6c(2i4=^Y|J`BE4@QYKc2B-*B0%APUv-Z;AzPXcotXce z{FLhdqUN8U{NnQP&9fBqPeaxz(Q?fHQTX3K8A4x6bdBgGmH&K1K(|{xKSz_of3{S1 zN233W!hhDU=IDd`r3TjU0_vY<%xs7((Sm*MYrpUbaX;CA(FUxY`l=vO^j~2ADdm43 z>eju?*+l^MUvwi)g!m8kzdt@v_h0i*NDiIwNQumu5~}cjom;E{CPcBkOTY%y|ImhA zx(QIsKh?bjrW$bf{QJ7=hy$g)?Rj69k1r!%f8qymt{RZ}KhK9i-O@!Js+9d->)(8! 
z{O?z`xK!hR2lYQR^!eS;gmpxfq!Rd;|MQ;KIjF>czL+?fIz%6-@gVaL-AWPYn%kF5 z=Kmo7)rZ?yqFP`vis~`{5S)|woF^pzrRHr1A``OzG-#bz#LW{w~Bz4*939 z=e6_lRUx!s1?LWl|6u;%g6DOS+6I_^m{VmP$)Y3w)f#sU347$fKJ!AG*%bfvQGXw9|MfDHiTQ_wsW1*bEx>l8^grugusrw(=+ZUN zSzQ)nVTr%2fd}pgSmTq3`kxu{;QV5Zuk#PxS<1ablZNCf`Y#mu&kboM*(E5tNN2ov z6>yPF89?$+MTO&-nQk{2Zo5khRy}IC5vI&P{NeRu+1B0~Q`A4(8#rM9MenHpYvF|z zH0hi(3xd+I|H2U%kq9;&oP+bv${+?piI{)5P79<54fu#y&j-N#LzbnV6RCfGwfH(O zjk6rbuhISs<^1#cy`aR(2+G4Z->)k|O-xk3L_*JG+ar6-&uU~FkAY7tO@D>~mH%83 z>tn!2ck6?vJIMa8pJuR&7x)<{`!D>{9CV;O;;?x@fdLEie@4F4vVqKhK5QY?#FP+V zxWtiy>{*vd|Ca!q3psHh#Qa0Bsu6>b>_7F(1h6;{kpzfo|7JoG@n{` zqRli6cUt}t{OrSxiX&?Y#DAsG|33chcyV>YMTxTi{obq+ z5F-AQGP?>_CFjRiEHeGmKuG2vYW(bi#{sZx%mJ!07~-4zCOD_HFOFaU>YtCF!(D;& ze_fRdIH~IJD#?Uhxor3Um-ryXO=Vt z=BSbSAJji@@!#<8M3Bt5y%hN`vj22si>jF7zqF*ZZHR%JrZUR1m{RXfezf!H8<$=jy2lvx?EOeLVVN@>mYn-xG>`b}RhvqxsJ_ZDx}AH0l3( zzPO1__scYG=Eg@9ndwLKhb1)rUuP`I{x35B2k{@XZ|Ug&iXGMeqzFJp`w!U0wLU-z;73VU#}Lj5!7t-`91Sh$n#Upui5sYeWT*2%LiV5j z7ypI+uM7QVA;<#qFa5Imfkxu7WubWTV}EnDmt2|Nx8??GkE{N{5@=z#t& z2O&tGkokwSG-(za^MC$#{`r2AfnPM~|LSYy1TP$L(dhw@ppu{Bzly-y;5V^>bBLqZ&TRF&!nA0&VLsP`8aWlxC|t zo0I;pa|H%CkCF3FFDFC@oe%Gk{8tz*kCyKl97~*1@uv+?OcWD&Bjg`nMxI_9`N?ji zf*7fQFZlOgzI-PC{QUXzmrrQf`TY6A=TGlHfB5j}(=Z0mei|PB`0?Gl;Wr;Yy!r6{ z^~blb-@lg+zkBs|SU&vv#k)7p-@Se|^!n*rdGDLv{@1Vj{~dfHmv_qlc_@E&PyVdq z#q&S^d3NVTPwT+#r%(I;=zG@E*WK86@A2cuj~+ehdi=1hv%BrDNB3&7uk?2O)^+dK z#~n5I?*G{F`wxG&obIaJ|JU`Bj{Rrbem~fDtGM-ge%qxJ(wp0*byd|h6%`dbe!RHl z)~}nIYEoN|rPlwt{&G#?^&`tJo%!~PWXk2Dr0VmcfaXT|tW^Gx& zdB@JpHJ4w0U#t<}C@EGp3Z52J9=E zQdk&RxWl!q$Yy_`V{zg5ZG}@Za;I$0<7aN;Z`tORx!r5ac4u*cT}I)iP2$bzo72-b zZ^Az}ZBE`KPTREQyG>g*Y}%Zfwlytvb5g1}C3VXNQFcnwX3?6h1*t1GZrGIi-G+6d zR8iu_`QL4iU7r=VK6A!~ycz3rrzhpF-XLC`v?Vb$V@axb#R~DtxVkH3WBS}J@pCpV_*S%FO4^*5+;66d=T2Yq4S(YtpX6yVIn$%oPmRc$ z5w>oc|CY%f>C@ad%y3=ziS|LNb8ebL(lo0Tlg52F$zpY+eOjb#YUJ4Uk=7}Zmg^#U zNs$(!NVBz(rfVWiR!17Ij5J&ksh=3Bmk_jJTkv<=0+I?slX54f7EBZsIBqC(N-7Lq zzAbq1_Mmy&1#@$K5(-?G=X=k~6HLhqK}T+gC}{FFe)xADQ`U$1WQ0!?hr4bT_@_I2 zhDA7~M~vU-?R3~>d~k%$Qr$6s_~_5Dwzf1kHy>kSqN_JXOG`^tLyOJfs1q0H3cG*pnp#{(x?PoN_XUQQzIeJEjiB2kdnZ*}=jf`ztY4bD7?W^- zquBn0R+Ttezas5}zv(Srn4!CMKt`e6vybQ6PV>z^u1I_R`{o-5@mV2pUi73v;dUk4 z4i(#;npdXSXh8yW+)7=`Ftb44i^A=SXt(1q?9>9QDGQ}b*gMDiJ4!&h-#t^?*lKj! 
zn>mA%bsW)dGVN9464E7B2dY4PoI<-tSZ{wEb{0059G%y8nL6zRZ2lEr(DazR3Z(x@ zyT3}hJ#MMVDv#o)An&fFSC>?*jR~_fsUq$E@-WkwYahi*rT+*W-5$3m z_78)UN_$nb{k*U#mP^RCA7J^T)z@51XiY2kwvVf=qXCI_KSs9)Ky-PWHCsd3cIeWj zhHA$nrzo`BAmcOb7nWg_r*sP;SVE~C=T^t6bSk$!%I%75|1o=Eu7NWRxDzIy$6;=A zsoQEy`yknV-+ZtPM5_VRN8BqYt^FN@=qVWW8=q>=($d@?MaAYCxzPaE8Bl6h*Q=zmWghUbz5bX z1SSwtZr4vWIeO%_hi=z(#*woSYYK?a#NSR#M|%ie%$i=#yVoAy#NkbFRHmH`KDfqT zGD2;8um2jM@z4Z=;aQn1O6};tc&k>1!dhG6BU61E)%G(qd~S&rVA{RM2D0s$Lc4YP zZh$W%T~=ushO@>7TNxMlB-*V{vi+*&O1H=F`aTf7qbe(lfNu$#jtmB9jPskkM_z2r zR=PcYd7o2T`iwOK4>+L^^xte(N9L;|?#2dK1N zS8E@hJ@G$o567Su`5+-~JER$iQ~#*jb%hj^ZHIdscA1fB_r96obcjU}Hc)Bzw~92n zb2SfKAk*$q?dbFXjmD6e!~jE&baLD4I9oX>d%?vY-Z{GlcodrWZBh5B?rCyHwIil| z^tG1zco}wq_`lNbPZP(mUMpg|(Zaw)nXFR)YT7VNqTO=R?GaQ|p2Y-2yKi@8GsTLw z=K#N4>)?*d!Suk(!>@X79n=a6$0Ni00E!T7(d|0)&x4(7RUcpbJRlop=Z7oZ9(Hmj zD7U>M4J>~0sM^slyt#`Y+5Tx2q5JRjC*qO{YL_=L!met z+5Sl;=c|A}ns&dPm}D}3wG7pca|Y1uT3dw556II2sU(AHd;YHLK;63;&J#QoYDc+H z>I|c4_a-KG_`g?ddxbtBnr7ON&N8Feeu0Th9+zwIr-hCUM@UFf3I89od!%hgJR8Vu zk1bt1X*WXkO18gkQ_TLXaAn&4H8XWoJJw2q4=?iAfhaeB+XMW-+C9B-37*A8A0kT}if|M7!IclP85D?d->2&tz!% zUQT^#Tes2g}!h8llB1~^2l~7?fy2R-4&OaX7*|QRUrJz z_9NPj&ph6T*!IKL+SnXSM<1+~{ew&h(kxQZ7vl|<>?>#j&5!;>vD7R}^ z7Exs^Ws+<^<@O-Ay`n>zd2|?Udj{>4etBJYRuKmIiH80U0SBPW3=t(iNFQi5?1l7B2<7U z*sf@MMzp(7DnRB>G5;KI)$>hBYkw7-#{_hHVA_Ym?K*M$Vig^p9Yl_z-IK%WFZ_sT zcl-IctmCBHwKbEF>Nl&5St3)Uy?UJf@~pQ>b;lx)wH>lxfs*YHRA-5o&^cnmxW!Bk z3)5anw*Tj4V4a8-D+%d#o&Dp&Y4wv-fl9l_ZO_aCUr$L$1RbPVWZDOn_B!6{C2^yO zWcx?a?xTvfpTzrBBf$c4{^|V&Vs=W!+g%f%kZ#vNGwx5T?*!x#9sfSn6gRc8&nqRj)y0?@@2c_C^B4|aTv^VO(FNB(6 z{`vJ^hVSKmo~D#^dtlnVGVP4uyHqojZ2y)3hToiVnouJ@ine@nP!In7(dSLWh<5J` z!hytOX6R-9?>PT>kZ8A^OmF<0^BtATN7at$_Oo-}N;?CAi7+Hv-oPu?LaY#5z?4zBY? z9l_9Ytr35mTPU?-m2SWMNETun6x;vNOELePxfG@X*!GO`&&6bq5lq8M-Fe8wdYqGs zt4^4fxrY+%-Z%(UdGjN5T6|6WhzrTEE=F1b(>_SFJ3Wz5!N!^`yGayeg<$j6S}RwCwU_?-cTv##jj|ZB?FZWq&(VfIbP{5N#G3~K3Opwm zuOIpH=J!06k!c70pMH3DE5cd#z?JlOSC`ZCG|hvy3>*HrJL0yPKL$R=37+mK_eqw; zTwP@(#`y=?esx9L&sxvo`blK!NK9v~AdiI=Sr4vibkcX?uOmkBY1(^$_gA)G6We}z z%?qoz1P|H%2g?l!2)wLqDd>3fvFY~0!1bF>`*$_`YZ2g?`24}Ook5uP`i>Z8_t&%W zMXOV3_r@DXOuq=SS<_9{k2vD03Xdu?|8mV^t^KU*)y6~!BE zEtjFTtrgk+h--0l_6}2zpT7P5qrpK7Cz3E0@*~}(+v9)dA4@?F zhZpOziw@VfW7@~*;Uzaq{L(sJef-P$@RhA^hpK{qXW?&F!S>_)3!12VFp9|(|Y2yP-&OhJ$XzID@ku0e`SHskMb!Sy&R>BK4 zAk)r}?N`h{et(4&jg3r`3yWz?ah);7CFA_F1T<3Qoh4R)XgAgNtVlaMGqpCD4#x>Y zJDe_%?O$|lPVmlrb=yhvZMN-d$T_tUVK=`Md95b?Y5kH(inP~5c?54kz7E4#CcIsA zNYgZE!{GT>_2aZmy;k)|;+Q)Z2BalEIvN*&f3ZICWjCrFasGKvT;;YC6xyv&J0jcf z@JU*~P1oE$GBFa}9>`2tpgh%QS5Gs?H%%@A@zN=Z`RDc6ZAiI|wjHJ}s7mC+k>_z~jro^|o53K8a-8b5*4vXZ7Il{?~~2a;L6< zw2b3NO}+5wdPQ%VQQ^O({Fmam7k?)?ypX^{V38tCa#nah<1}~|F2GeYe(oK{)1@u74yj*!c1CDzJ_4vkMV8| z(O0*Q44u=h@5#>Bb?)d)>Sk+IcqN8VZja^d$sN>76>CmKr+fyhm3k2isX&>5{c?Htv7FS)}Y~v+wc0RtgbvTkZlFXc8 z?wfXB7Cf6uyWi`iL6_R&DcNy;`EGozzWDhrM+C*#bG58c>Gt^J%tVYV;hJLFtFrBf zoPY34zl0B|j3C_})cn(NE>6pai)pV$;vCF9SQS`hlg9?`dPK@lETHA7f4+)Prk$}w zwd9&9>&I3O3%{xIp$wy}e@7k%Z5f(#VYz!Ap7x2ylxZK%xQ&WXrS6ZvvAFJWGb#{8 zz_uUd{BuH!B}LA8NQwXaFWc|J)H3%~hm#7mBep&Fm(aPI0Oy;7X@q!>!))`8I7Uv1 zDe(9pB-75kv`nG%^;4Fq2|jYQ@f^QRpTE5N(W`;E+xzX8H{*KEYKI;pf*1^&Z? z@X{C>1K7L~6R1kKQX9G3)X&be;~X}Iq1y53msj;5?yQ|TH4YbHroNWuP@>>0Hr5Pv zOp1HA^Uxvx^tU~6S$%%Pa-4rGW;C@Ljsb{FY_H6b_-`0KUzycM;XnU)eWNA3vTy6&*1&c6L5fh3FoVG~x^L)54NK~%y<1q=uX zxL9IDR6s>s5cZHg6mbwh*(xYnsEds_gA=R+6)ld|g@Q8j{Z0aH-|s)KUbTgjlXLFp zo@l3d1h2bjiTYlCc@Mn#FWB$B9$1hHxI>usQ27L6?!;xdIUN+mq4Ulu&^YpT9 zBDfusrPb%}?C8D?kir;C6=VtV&&2h-hu88ZeaozbZb!2kWfmgN2_IP%o%_Yqa&y7H;4*jk1?Ick9XV#t{)UCPZK1q<(!tfm6A0hu^lXGH! 
zvVlu9{R{V`M6RX`>c5ur;<^l79RdD{7e5F^qZ*5w<9iEDe4--|tUcp3EFyL?0>#9* z1S3^UO_n%F(3BY&hr!`%ar?)H_9QyN+B3HHWAzQb^0Q9@7w05uyEnM;UTf)rG1szq z4F6zjhj!kPcIjj!T6_Kn|7`7t*C7yFxM{Z|)^>J@s9Siw5APxcAwPCM9Mpok53XSb z<`LkirB~wHmWgkDd)vBl2)^pj_F7@UD2sCJq#DvuW`Hif^`kg)XHeTdX&1}_*7;=z z(e0=OCsSMGb_|4`RP>67lsNUmwh!Uj;iya{PJxPULE2Tw{|MV&rzXDBmSP|8Oxyho z|C1Kfo2{%DJ;cBV;*vDQdo6FxL5+FAz4Gd4X(CkATrV`RVz|zW z=cN~!m?7YQ)GbcOT@*Ykz(LxBY^-108j&C=2Z>y6z~`LOG;6qIn_s7{giAKFb{K%T zHj%+NDihXzR-TO&s{NNp=M&(w{BPP742T&2GoAKGTomK*04L&qglo^iP6Yj2x=DPH z7%DJwEAK~K10-t&KA>?s8UCR?KF!edbCi z845-;O-5E~t6Ob*GV~=eWjgI4MP&VP(eNbScENPp2e=*2u4ky!8rQ@l)-?Z9{3S>M zo*lWU5m zaWLyqXsm+LX40+|p>25X^x7fjc09A{^B|5lo%R6!C-Q|lJm$Cv6bmRRD|SD=h-3&k zVGhLNNoY_9?*IAXB|zq-3aXp&w}HxD1Tn+lJG>{#km(^v|h*EdaN* zZKC+ML~DmFsh~S_ILlgZDVE}eleZ)5b9t5!9U7Fg9sfoutA8&dJPXWDJ~xk_UVafFr#C5+@Opi z6D!mp7V*+W-qeEu)X?h_OZ`WuC^%G}m`<|{KgRYfbILNcjP%w= zwb;dv_)CO;`lR7JkqV8xcX*pW`~bJ()(?|)f5=kSGrv!LIi31e#Q)5Zw!9^VGFGx* zeoJx`N20Vx)ElBK0=MIl^EmZYZ1!g~h_!wEqknqOd7f zua^vKKVD;zNgd*P*2S)k2>^{gA{ygJP;xJv5<-T@H%*mFA@c|T7rGhw`f$m9>a_PL zX`t?j4|in;ZT4?NZ+a6&`j^Gdvq8(j8tT*~`KWj8KiiuA8g z*F*3|XprC$WR`gEH~S~;;r5llg0$=5x0d%3pgtPofBxfkH1ihIv-sG6L$jYyVd)@p z1&;N!*`TD~7ooXG#;r1j!A&h-?#M|LZ#eag!+uuKU?7 z^6);8mP5s9QT7sbae}eQ9b0o`=@uLgtUY6H$0l!-*7z!0&U_nR)b`3Oe*gW$=vgh= zJI?UKx=pfLw7)R7~4|OH!qW;V29m@+KsKemF z=z5+uq&>_%l5Z59N}6hv`YNleB7(M$=qmxv`5Ma>qh=xgfwV{WX&ktqDKUJ+UEK0@8viWf%DefaW*O9$W8eat zJm41qchrreseIkkJVUb*_=ejo<0is_A?>O=|1wKq)inM&va0G1d~J|3+(YeFk^$bi#VH>?1bVA2p-Y?&ts$YKInP?+MqCk#((0sW>bk$) zS7+#Yf~0w|VxRLTr_z0soU)m=dj(Ri1}l}I(9VOl`g&2czTz}jJs{S5*^gnzE= zC9ck%PJ37uKM44W{ulo!hvx-DS*p9f#xlq+$SoXOzPjwCXgUCn@JsRUQsyLK&GS6{L&uIU8# zb?RNkl4<>Gt3TQn>_Yv5r%dZ#82&lX;fm<6_DtJb`xNmo{VQ8jn|BY7K>wOc_}m&o zD$r9x(`(N{{R>NbR4!4ShX+heb9O@4vjWAq&#!uh z9IwSqj!jmObTj0Uo^MmLZWoEGyfk8>)uSTgYTU>EfpKS9YRPolM_eqd9d+YKOlxA^{6ad!$T|UxDe^$fMG0k3301*^TqB+wtT8&yj#A(J_bt zX%CV9Rrt<%4YgvKy;#4*`AxXNw6Cx6TuIk_E@X4TA?y8d z@&g>+mmpmWBp1$VCrTqZ!sp0x;-$FH@4t_a<|xr1?SZX5e+xyW!7lT@;N3RE+98I2 zgl*1!iKV5ItTI7*hb=z9KaqtT7LMDNNZPR$v`qU#W7ll>k$Hp+-O0hm@lBH4 zo*cS8o{T&DLeBcf)Jyx0bEkxE$3h9-+jtrBdftF=?MI}44ND;x9J{#@ag*Q>rwXt5 z8x#UM7xwJNg)NVi_k{`hAE@|@E#`pLXgclj+uwf>Fmgpsha^iuGiyI-3Rv3*Q3~a* zX+kV4?NJop&w;y?NL^l>hFG-L+_;aNNp(Dyc5T2(zg)f2NgMlS3ER#f?MfoC_9se+ z&*dA#+E2?4$HRP=Y5nW`mN*>G8d1G}4GXBAj*^80wEt;Mf<2xqZ2OSq*(*|o{Lcm( z39g5)#!u+Sm}xuHX(ulRrmZy|6img97WK-`Xdi6F%!BS=f5F3WjDcPZN=P1Lg|DUug0oHyHQ1L@|t2Vq?dOGgs&qg~;|B9LT`m$oT#Q*qzaExJ{P7*hhwjd*| z-C0sf3^_sD>$#BRSsvkt^X{vG>tU^O9tt{%;|kk8-hDITA2-O&gkr0nY$BHSnD+m8 z7G(sH0hPWykJ zU%L*c-J0;0=htq1mn(ZGN_#l(#!m4<{8Mp-tu(o?$X#3d5YmA`=J7tYc?W*4`;pUR}W2&e=l{oGHIT8heKdc=# zEnC$tYrS_HV+Q{yQzR35v$b7;K>0Pj_LKDq<%>;D7#lEW7^N-Z|LX;piv1*pYfDe^%9^iTEFAI}1StCZnt!LjTXpH}^3l(^t_q zJihyPE!wpI=f`_^i#`DSGx`_7y!I*ZQq%sQIU4a<#)ifERtRBhKQnC~6lfor(%c{O z<#q4jg>hO}_9zLyQeO6K1K-%UclS4q=6EX^S5AGoDJGXSqkrjelcoDCMQ!);?;c*d zE;G&lkfZcek^Tx;dzQy#o9^Ae7`hyw?F`bcg`0=S2aTOA{%0&3*o>%?a^F(Um7_@D4TSUXe^WKFkyte&GQ^8Y~E<7zO}jtJAP$la?39#k0q zS#ve>$T_m%GGB&O`waepw8u=^HTuImL4W*DXDROA&o1vd`m5&-sZ?iltkEuU2LC(< zgek)YP>?8UyN9$Z@IP=_q3wR@mAyzh_GyhS9!tA^0R5}pK@o0MO8QRWf z(jKYxQ%B~1zpIF{HJ3zt+2*{+HS%VBzjRpQ_VVept04^`{my2DZvV&s)kuNq#Ss- zg8rqob}Zu6HR@oRot}gYOmY5i+bi%tmxATKCZ^}hfO%@u=*r1!4D|F`s^G*!8~=|} zFF%alE!4K%g>cgP!z+(1ik+H>2L`=Eh<}C~NYId)WQMq*SlYv8I_(Mv3K!wp&#Zam zmwFq;xFr_bUtw(@Rkw+j<%4PY4r1JLryO0kY5$L1R)!bC;R$EfA20JpkI$@ciRCi_ z6B5PHopQH*kGMIe@V9`(p2H;sbNYgM?uc`N|M~HdceXTBrG8}ck`d|7G0V7Dt?93g z*gTs!y!clyI6hl(fd84jcVw6Wwcppz2a-!b|LT}YdnDp`SlZ+2flci8svBei%AQ_3 z?9rL{>=7?Pk(_S3-~E>&Cj!x97yrc8o`L_F+=CN;-jSnSiVpnpJ12epY#exdv9=Fm 
zHeP0lhkrN=Dyr-uvguH0>7iQ^Hv$&;$g=|N*-Y&hz|!PrgP#4?tQMZcJXYK*_$NaD z&lWgwu-Y@k?fD}APxSZUzKY}&82!T54i}l5s$y$D(Dv~`Tzh|QF=EW5JubZ@0ZK0( zL<|xC^W^&=7`~8$EuX47(skJz|eoom8|8rNn0?)$yKhX9HX;)h$pY*vV2uqm?{m)cx z8XOXkDjan%{F9-f7S${4+tRh}b_+7GK(^p3DuRH3cx*yvd_sY_)Lzwp101^#B z4k#LVX!|H`tl2NnJ#TN{G9Se6`GSt5%IUPnUux1^q5tRhuMu%=@BOHFI_-M*VmyH> zYWr~4;HGN0muPSYUqVURziHP}DPsoWjtbMRDrW;($~bhdZo^%y?W1z4PhDPD`Wh$# z!_MC}vFY6SDY(GHIaN0Cuacq?yLC#~Co1>Yt?I+l9`Li$b>U;-I6&Ifa|j4FUl5z~ zm1xED`uO9kziHD%ZD;rYgb6B<|GBTsymoO5YMn`YK-)*1J~yHjwf?8=tN|}6jrcf~Z~jv5xa}>^R_{*=jk#TU4~=|3wkN#s63p}_wB)1TEV zj}7z6gl+c&b@11ojqm>avTNT9wtWVGtp`H>N9g}KY|}20uYndfVQJS^F{UXp7PWmq z+BJ>MipL>g+s7+MoF9>l2EXVb!+k!;SlISzEXyt1brx9qCO)?I6MEYza?176kJJa{ z3ppwBnu|7JiaX$+UvQ$(Wh1U_Fud2D4mw5FrcjPvSb312J9|xZo_ToKz`*%bS?!YM zsAb;wQ%*lIa<8yU4$~)5g#I7m1m^$2^smb?UBICKyLPza`A3`tG`>fv_O?NvkFvle z+<}f8{I~5a90ybb;Gf-}$atn}Lg+t8q}?RCd$MvBtXxb-)9 zCfM5Zw;jq4(-&+lsbZ(``Dbl_h;WbMf5z|+d?AB6A@&CNC*38!+=sHpJYBx=fZYzj zKZ_pLdO&YvfypY4%OQsji8af&&T0&-`%!a8mx^hwIm@++gBP*b7 z0a-znhht6aUlW#Jq?qd^L)!AYRvck+EO-By__;&Z@7}Wm!>gyC};gjqO7?P#;Xsk3;nCW{LL}GXzg$~%jLki(9<-F@Fxv{r-AS=DFbvf`ViRf1Dk;NCb-NQWJnoH33VewIF`QI*n zhz|M}@ISx+filGZP35^ZmA`cZ{J}*;`5d@ab!xIN~_Ij71%dxVTf0v z|6dA5TLk^J2WapwiTEFNpZdp(9yWQ8cba(VG6DZMR?M`0?Az_2w8TzJn05vHLxHvr zNV|4tY3gaQ1BgiI|H<@2@sO|EPo!{@3)S_M5i6_Ynav{rp-Z@&< zKAecRwhvP)sxx-U&V-`w)=;_e|J!_UV^pV3-M3yi7^~k z)b=smcK;#Ngu$KrLT!N37ImwVx-{{%n1n99;)F_Evk!GMG4PAMuJ%>R5~ z(WSV@f`&7=ZID>nS<2Am<^shr^b~iK)$cTgapCRqKup%uA z#})aXA?=FkUze0wM}K7gv4A-I{Pw=xhs?whNOM_WnS6a6H^z=a+;4xPkf-;ZKWtcv z`f+6HJtHFblPSMg(C67IRq|~E=n>!yRW3qn({x^n(~=~V z=B)Mo^uj)tqwjvK7?$T^KJsDhS(yK_$@Zvti$~LIKc{4^qqhmuu5N&T9u4It5&y^k zJZEDG6Zj+r=6*S>?W`6xZ4+-OBS3RVr@Tj*UCPzC&7W`qHy^cTNTHzw+xw@u$_qIB z;=Y7r=@P3GmlcP<-u-M7eS~iY{Lh!*>$z=U0l+U$)h*+PzkNEeW7A&9uHH-);h(di z&z_jm7)#D{f&cl()~7AIK3qjzGKCbBB+7q9zEU?v0W!!zUK!2!f3UW*=NSHZye`Rh z)$~kt=F@ z#r)6yzeR!mwZ)qm)RVFCdydxSxL|k}f3T3I%@w;?Y@1k_sXTilpQrN>o=QFfq98c= zEklHVzzB`xZ0CywCcXWS|A$|^ffbMcNB=s2%TTnQhxl0A>)%f=9O0osbuLcXGXFnq z_mKaBw%3BQ)k6I9*r=KHdh*ilXu)vbc+ZV4G4%d17Dbd+#&6*BM?HBfXv|ErjeS89 z^wV(!V9Iug+U_m9;5a2~zOXh~h<~R2KOrueOA^Eu{nK_fU@xg?tOouc(b{1Sqb30I zU&ydrg9=43pnpNz$29-r`BwnG6{q3tia!5Rs(-A)xqirC%kmdCd%yo`v{MKrqn4-* zyk+;{x;Vp=uQ$I-nr3wZ9_09yFKoI#GRg#z82{5-V3LTFD<>%$hpCo*`H%nEd|36d zA&Qu!MMM5;^Xgn(hvgJ>X%Dt`IJQmBwQy|A<8_ZXS+^LxJ^$PG`kO#5!9G3LSl6%_ zMQBa_l$gVB0!vWAl{mq#@2`6#DJ2Xbq*mh^Nbo{A*ZC#MkfLI?p3jqTtYj%K7@6{$ z==q2Lk%x6xcCGg}u?%X-blZoJ|G9XXT1|qs4`@4+U>b92p8FTiL-ZOU|Kql|oS3a` zS$wP+4LU1oQ=pMmQk4I~{LiQT&y!*u%3i$|bSKK#xASz$@!a)S;155g#^&n6_eX82 zCr7EIi<1UVS9s^m5&u18O;Tm%A>nP-b&9LP{5W-FiX@vg&Ht#wkEoMT?Q!0aT7u!9 znf%wN48f5+4*G9@sY4h>W_KM*j;o2eaKdEuHNx4h* z)_UQ_2(3vBj&%7E2CmhP!O>W{GcEnO&4G_!CoehQRitHeQqXWbk)meiHxXl3kbg^s z$x-&xZGZ=Gv><${BGqZS?IVN8%G3?JT4R#n1pJS%?bX}#{yaf2e=YDoA0E}s2jy0n z|C(cnwSAm^LRxu(oTod3f42OFOVhJaMX0rti2jRzY*Om`d#K7wPYwno1iGj)k6WjL z=N0A6i=Wv*@9%J9{^JMym{y0+F54uCx~~BaK+aZ>y z?M%tfVID~3IeIEs+v`7T&*z4~;<+B$&Q`s}VLL*CYC0Z?Yg-mxX6fy$=4&A@$bT_w zbip1!PzOHASxleP*&cla%Hz4dO@+34(e?f8^^)PX6~g=%w)RX!TyfjIh@2RT`G3Ys zg1T;a88Tt*@XYA;#-&Wm|D&)qfC@G=fI*}@0AJ%5pOa+N8PhFO(HQAESe78yk%#XE}4B` zas&brIZEoxAAjZOB!tG%X2W#Hvc!}C+k}%%0Y4K`F#bo!snC*2K?i}1At$B?+s>BU z8}C`U;fgDVUFqugX~T+GP5|V;b{MKM0RLcZAMW#Ph54^xz78k19KI|p;D3&uX}C|2 zgcnHz#{X=2e3}vt)*0obG;t(aI~2B^72P~!Pf;hLA@KivS}(0$K7r^jNixRX6Fd*B z?X|le)AK%Q+eC91n_@xs+60F&4*bwNH#5uX516#V6YFuMZ&xTV=+kW<*3vLW>jOi> z1UF3o(!M*Iv**eRz&|pR}WaLDsftcZLcvFF+PZW?Zkoc?*aVI!)Ngr z{=w1Zbl)V&LEEv2|ADp-38yj~uWXwC!SIhPbtqu|##Edz|0OXPpQB(D%ka}l5XZ|! 
zi?!L>WBwm=zjx!L98H_pk9@KwE!0kx>2i~!^I1IWewDlWkzVYLUB|)jPmbm^{<#H0u1<=W7!s~MgS4iye+P za%Oq$_X>=P&(>ZT#ZMDxue^GBY{kUX%izanwXGdEuX^Gj0$4K6xvbb9M9hlga>OKE zJK+7nlu&3NfBae7ddC0L1>ie`dyfFgMDYJ;^nQA=Y%j;_%-WhXW$T!cexd*8_R#>u zdpU6b&y~C-9jrDzID1LYQ^oS{CWz^AYJRPKScw$!Q%ht0!~c9x>dX+=EqoAg?-dyl z=v>CnNYuLSvM~Pz5^8JwCoNVyL70Cxx~h|uoNwy-<0&q&TiABCA!sy5`&!&b#WEeY zMYrI==~XeWKwL@<_@A4G1qSekmL2EEACGGl?8N?w4kJ`2A1pM9z%Wd&|d1o(e$m}95idpwghhn|57{;SXaHRk_;wzI$BIhWx!3;mOSSC}Im zL*_C#!nNnfVDm67Jz9U;^~2K{SJ!#QsX>I=VC_)2_Kf5r(x&rYxm(DA zuk<7e7*YJ`{8w2Aq_K&+!WOJ+k~41YdSK+v;T#>XYn7rsyX5wAd~}UAZ94yMg)u1g z{rXTh5BgWdGJO-TYLA-c$Q8+CH(dwL^@YoemOnIQei*p9S-&PvSD62@i6bKje~v)@ zOQ?UzXaqZhNGq+rC?t(Y(R4a?Ch}L|R{!G*ciQbq3JU5}FkYDy9uwp&Nv6urLBRiD z_{Ww(#nG$o9Jg|F_|k^w^2;0!PPe_fm`qsWu(o@zij1+gladD&!uc_i@4ewWKdpZa zFciEpZ3&Qne>#8tImPSXy)L=3p@CzPXxI?2YHUjFZK@%x{b2d`%(vi&n#EhpjyF92 z_EzSiQ=VP3j}7-d)1Bve%G6tznfH6LR8JPQWZ=9(&8w6(-nDGb-tno)eZt>RRmlIm z4u0UZ21oyWd^^f4XGw0#BN|82gM*o_HeTLWjqXWpa zbD6qX%e!Vj^}qP6xyJM1=`~djHD0HeClNhNy=y%U>=Y~;^t@k6Yrj1drl`k+wtJ$S znzShY6~4#i0-c2MKYQUtzyWR``t%cX_b1hRf;u@K$F{_r8}K05C6RTKzIF}=_Vhsc z+>HMju{0W~jt)}F*2SuUkvL)7Yn%xGyy?|W^CUPIS#UuA(%ule-y)u@l#%FUQ^HVK zk1+nnHevJioo$}1n}_iJw|MEs^MbC#DGSPah5V0}O*xi9qHL2-SxhU+So2%NoORKw z%uVz4ouc?If{a2k1#5f#k*Oc1XypB@U59d5k^=d697{YgRM>XrPboKa$xN5*6sp)dc4hoQnFadNCC{(M5k}ooqz>W5 zu0sB6t<|NyBk%j{^$tM(J>4$Z|AC2jl}n9fR)(VK17pWbKF?Wzv3gJK!X=I3mM-+J z(=Y24=D((J{U$s|lbB*13_=8It-$9H)m8Kn3nHTz?xKGfPr5oQMk*7M3R(oIrwcv{EyK8?3~H5D~BT3tX^#G$Mt9TDjnN{ z{Og4Sgsj^~VePQfkC?TxG|l%}@WzZH51(RT|BngHDbF%X((%HoYWEt?UGGntJ~RfN zkBehjxX4kDdY#Hhs>J$#{N5d1n4@J1_~*T&!@;5rt1$et*w?!`;+fB~MG*+>_Co*j zk*`vySWU5wxY1UY$!m%{m*RGG^Va@`$oC`f{Hq+}s?F_Ikt7wZ!vF%&D@cmgw)qbS6)9Z%xpwuy*vFB{j@@UqZMcGw zBX+|4JI6EfkM6(zSh$Vq9l4?_3qG-0{W9AHSf(m0qn>R*x{a+pp9oR<^!VoC){x&9 zC3PE?xQy`4HVE-hp$6M$>dNjX_f~Y@p`dtiVcYA=c3BswJwbSJB#AIrbiQ!y$02ntVxJ^Cx~Vd0DuMUkQq|43>ej70pAt$nTKV|>_4pm90jNeN z_3MZw=W1Geo>;sG4T9>7;UE78t;GNF{~Yj7lvF_ezlBi}-(vZ%9%0+Pf8*i)H{(?~ z%BN!mZ>K&0n;>j^{p*8@G`BS@LCizM|4?5*`BDF6zjJG$Zs(&)=&jbTlOJ5AC>-W# zrvk04x6Jnr0^5x0cW?cbjGQ=_|7ZNiM+T%hl&Q;QN(MQLHqX+^D7ZNEaM5i45s5qv z%Y$~4H4C8iS~Z$XJs0epA#bwqV=)Q<{c8m9&!%fQGT8p#zV5eu1+C@3M!$Xf^q>6u zK|NQeF<+>t0&UmWrztH(M%{{IiRHgSTp<5mZX1&E+$Q$BYu~F;S;Jeg_55l6#~M79 zh?kbBTg%7vuNn`Z)>w(7qf*6|0oS;bzr>|yHS8N3`4XH=G%N8ry|yK6*F`1Tg)Mza zI6Cfz;kK9UyFPT1-Gupf$Jh9VgSvm{mdy*d%roEGUmqyA@8hO^apeQ$NIU&aGPJOP zGY|L7v(3^pUTMeL^Ga`}w8>IY{vG(AO>AyzapUE9q1rnz`*5qFq3@1M5UoRoD}<0juX*7iWo8<1aLkhq+JsQ)L85s2CC z74xVeio*V%`mqm(XRW>#>;3CHbn)!Q~Ih_W?F8vUSs5%tEI+! 
znmg~60HD>vlNyLCTKoC+eNAA7goX`B+POm{t`Pr7tIda;iRqe)mxGssH`sa~5ydAb zDbraR>Fr|Bc3TtSEgBzJ|R^DAhz2H%L#^a`; zVMl?HE{}|BXm^)2S%|Q;~tO5AW&Ipc`@MzI8OX=9XT`mmE~9m^N39iCZ#<;S?uMQy)x6C-7lqd70E zq9>-_1*a0>IR|dU3Q~^Nl2x23yiBH{hrRG(gU?WHs@)$-+%9VFt{o$S8nCD;^8vGl;i3M{6BN!p#SIl+@`U}5cq|R z(cc_2o&Q=)4ooGHt&XYUxTqsv-#N1wTl)!imbHX;W^2L?MVN6MYkry7MMZN1=Ql1O zxB*y<#fwlUM_>#kOUszTAIWl>3Qw|W!mGH5A!)KN66*L5GOils(!_ln?OA_Y4 z?$#ohdVtb*OVw*6RkDv7Ge!PqbDGt$hRp@1Pt%OTj?)anLjF@)|Qz1#+(rODh`KIe`FiljEg!h>@5D}YW7Q~Ty>I9|2rK=SZkZ^|M}~W^Nz*S{11-9!||FU z;W5MPZ|uA-qc~eGm+=bj3B_fI^CA|t%FJ@g_Z7^;>GYm3Sq*#YQOW5!>aI?x_$)pP z)_%VId3F^^$p6Ub`+_Iz0PyzXk0Jk_GU{7amKI8e6Fy_$bGnI_uaasm{z;~9;*39y%uNtVV<2Z|L)w|6t=PE;eDS&eAiR7P&}&l zZR5Rsa$!q_*UR=A&m{6~Kdk@I`wz0BMMQr?eESaxN{Lh1P{gk!<2<941Jwq5JsjL} z)otY#8|ylz0kG4@`VTSuGjA{k!70U19%TyK?&V!jeJG{PHeGW?6l!cwXjfDRx4s`r zl8{1exFMIXTtQooiB~wA5xH??k)Bui+JOqOo*ZMK4r1cF!2^@V0foDJV$Zj2VqJOX z(`0B^|4)_dwdYs%{(RS0=X@9Z&ma5yDpKDLw!tI)GOOU7mW@yGM|c~*g|#2GONztA zT~csUfdAPlij2hVfA5Nke+&JG97RXd3-1p#N4=T)GO_9GvF0ed0_C(s0P{RXIwP&`(na z{^!?SOh3t(z63a#&t89vH*b&p8#yGIQteMAy15>iM@{$NwPLN+aqEGBn~!tWTS5P! z%=hRm-X_bU_)SsoYm4f9PMeiA?p2Zo|8pDf3#NZL=Xvc?PbLceKb2dXC=ZMxc3kkV z-=X4G9ah?OptdrLX0mnQ%w!}E;b3P8bqYTBb&wF~U#qeH!?y%iHpBztdvgdgYtMQs z=!KRj04FJ{cPO)(TfR&J)4zy$LpSpDm;PYK%e`41rLnYl(6UTl8u*{8qqP_V33TvI zXFXCjowZH*r^FD^ZSVK@e@>J=BD?Je|MTPDw6$5Wa5;+GNo?&|K0=!?#QKdq@$SQ!6Yd~ME4DRStiDPc6s-PvAmEBF-b4(+O%_OMaeACzy91QnLOQpcvzNh1J4)NmxO9r?W?hHzt|SD zb=sM0C1Y$8Q(OtY?cJE^q|r5k81Pm2hEjS#>oJ5YYo4Ki_ASVMCZ>|#SNhHFH;)6b|kM+PT9 ztlq9fAeq4l(;WnQex4D zwcRVA5^U|q@jhGy?=(K$t=4NZcydnIIR;ZSyd$2R3A$gYboAnxb!R=+3~kOcQUU+7 zINdG-H2mk&{Xe^wF$Pe;CkV;{{xOSb7}PY5`e@u3-ytJwmC5JK>vDnb8XXG2_bfol zHjIX5`BV7pSr$JWJmgPx#p?t@{~_g%!Yi?p{Rg1ELK|?@QzY*tygT^xmDB#8Iz(Uh zD~VA#@-n!~v~=kz&ecR&!82gTH}hdr90X5~b&@-6_2RBVv4VcM>F!DmLDV^v5tRh` zmoWe3(%@~MN?G?zJ~>5~#(=haF|J7e>eGL(IA^V?73g1HE%^o|>_=C zfzKh0fxwM7vXxap|EfFOB`blly?)M1NY0tHx|)%$D+c{PS#$gDjjn{Qu2CVHq-e6Z zgPl}GXBpab{>yv|0|oRD89JFJ(A&^>?=Z-|&~{(3tm0HFsJ&M)8-`ouY+vw{bq;&z zFE3v%euQ4Jwq?PaZAJM>DWd+v)p1j~2BU}Ig<@W}*2*sjew~I=!Ia8${~@o{<5_!_ zT}K0^fAul(;*#o?5l@=D&w~zZ6!wUgRa<$RR#E`GfNyX98;UI(QVYwb^Y8WPiBLHp z%zr5&$}IO1WCBf-3bWXUV>t6p5mWSa5$D+r?^y);>T3O;9&c`F8OWw(+IC!=O>-Rq z@vkQX?670LCU%LT*`?PKp4^)0KRj5FT#5OAyg~m`0~Ne}682Rc(k%EzvlU3xoZAG|7@qHl(h6z+%q zv)5Q1SpV}=>Mns~&unT5tUcFl$T6f=u#dIIk3Zn_>3VX8nE85tDj0DO@}(UJM96=U z)yO)EVy*UR!u)$<5J5u|&gHFQE(GD)kJY_|cnzOuGsKS)J7~Bf-_^=arzedvdkz{L zxNn4~npLk&Z^z>@jJ?3|KpSXsshXD|Xc9I2OaCLH|$HOE53dm>ZkIdr#h2a~Qw=#@4r& z{hk_nFR@4;*!h7g_OlFq;o`gS zNK!#{z&4>HUt0^s1f0We3fjUbF)9R-X-oIh<*3QMTGl)H4dK1uPk~76vkz4{n&5?O z-oGr7NKvu(cn9!}6=oJgrngc8VrZ zl_aFJgfGNDn;ir-?UBJrZ-xHnj!-Yk8e`_~yFY|7NtDlnbCKNKZBLWHnKz?<^{sUH z2?#oY=H`Hg|GqMwpy3krk%b2LCu=B|6sp=1#M6O)vu`wA*m98e->=M zSGZfD>hf+*OnG3_ZJI8V(=CUKfVF3(VBg6d?-ff;n2>*;^yD6_-uC=rK)kp6vBUVa z4hD8Rl(#<7-PBpMCYd<1_N+3O=6tTR=u(lbTT4`J3f#9r(xUv=`eUZYOdUC}b_n?A zjr(A|f!#N(|2Ys9LjU0|VgIwmZCyziSVbDRdpOp%XUK)F=79&-bw_HB@BD-m>MZw6 zX5+b&%iw`m!dw=#ae09TYTiv^6G#f$77@?_BM#*Z(twV?X-(~e-|+i5)D6;@{*p0Y z@QfAlc)w=wn&xO;i~T;oY!7)}d1ux9c3ac1-!_q1<9RYT<2t5}*AouQh${{; zf4jV^?W^Mh(^Y<8SSb+oADWhX)p*u;pYPUmESdGV-tU}yQLUnPzAjUg|GH2sdG|MM)>bnH+` zT$Ufh5w0C7MI#VXm|R$CLvSR*B~d6`Chk>yN1?uTvTXjQ_s!yRu*c4ff7jpe^G3gack(FwZ)aOA?)e%4dEwknli{m+zH z7U6pC$N4 zUyTF(3o0hGFS`o+e>Ahq_B_9K(@yV%hmiy4P9lyMdu1;J`VVVHb2Jz369Blbs4o0% zO%5uzUFEbwc|{Ii-$AcdV~&~F4PKrl&BILuZF47icA@^&+K|{wK&gKEIADsQ z|8R#{uLq2oK@Cy(U@k#Iip+{`@IChuuVA=}gV>ON|L{<^d<>)xO#j+`m)-!5Ab96R zd>NfvS~F*C%HUehnHM(uD)|oJfBt36{@QSD)+w?u{{{JXEzF6=uV{lx2CD;*5l-J( 
zchuA)3Q_|J8xN{6G5q7DUk6XlHGE@X|Kaj;QCUYV=RB@ip08Lofc5`mrO`cVy}}+f z7Z`6k?wM2x`S;oTuFLI4+}lT?P7m9^Sb03t6Aqr0SpT#7)<2>voymxebB(awrDzde z>N4A|qiB^8GXSL(<>I)0NE(6wUmfr>{fB4PA-(d6e#ARVG1S8LFI1DM`ewOixK`D6 zc_W)lo^c?K`-bLL0x4T;?dLdkExd#z(kLDd&__zl0{`>6R5dmjng0MIb71PH(yMdU zyr0Abl3Du&V}0#OmC%34aS9~tTE4#4>B!#?E4oY-N+AE`DnP8Oivj=qoV+x8_DYS{ zv&%JKE0@F5XboWLj?!z!+*eVNZC}N2WBGS22ZwiFXCkRPZZA4TzKyN@Y#U0ezE|hh z0!3{L>+*s21pm)~y#^{K`M5#|?>6Rt{<~AHM*TNuZ2w|8)1%fr?CRt@EdPZZn>LS! z%vPi^OP5C4Rp41Us`Gh>zQQ8qI6XQD1x)AP?`%96FOdt1(N0bRz7_mGI1as$n?gnD zyM6ueC)11Mkv`#ikGy4=1gccQ+`vM%AAK*Th>o_q~kZNE>I)w&*X&b++8;?pB?s)zb=S)E5&Ie^)6Dg2?=d|k2skSf)m>hb_aix_(SpG}Y z|9s(KfHTD5Py-_h%YUVN&;?&|gL~xloT5~11CzwkP25U~VgCXeyDOF*wm9%)r>qUf z_cy2tpp?w_KbLEpXy~Sb-#~7&{U*NRa4fu^gLZw|5cOu_{YJZQ8jFm=EBHyZKTN2Q z)f{Q)4I^CCZ;cyw)FUH@dqpQn#5^x%wDm=M_Ta6T1Ml<)Q4 z$12jVA=j2LFUY~6F!T*AHS%e_XD&ZRl>gFSQQqRU`C)U>>!@BkxB-jluR5QZ8hv;q zhfMin4&Pi9b(s!*mkW$OtutW4+M&|JEOk95tUX_lM66_8TNFd_Jn51n&8}5?7ig50 zp~c~kAill8g-B)Z)rdj#?L-q0scR4&)((@TVC~uWbGAN2!(6@+@WB7zG(NYMNk}TX z<*r5Ng45HeZiif35PcW4Ci-LfuZtffZG5=4iR!nD=W+H{`~+AEw8RZg-`8&0;}~xf z2v^F+|H#M5OWErHx;|}51sTJ=0uGmDwySdB&G7O2mzzD=C$8+DoErF)Dyucyr;<^I zyME@y;(*Un1K%#1QfVpD*A)3d?;p+H{^~~htkF}g%AkMg=EJkuGQTBM_*BC3?-#us zYCKPQR)zbNJ4nduJ973Poe@ zM691*-+&bjB|0slsevEcxdQW-Kf4z2?o|~Qw>np78!QRZ@W|)MOPfBuxE9;Qe|oJn zt;F1kV}a{@M5frfg49$8#EE_E&yNH5;k^h!^f_Nar_s8)_gd_-anfA*l6=Fya1$su zR`GdtxoDPAHkO6jRxYsh3iCM=xpdR<@+2~)5AyHI$veXy)vvw8x6R7frt)wtT-Uwc z5>z4najdBJJQEqZahct>>cZy8Z8v+_V2w72grl{Bk;HA2Jzt;qN@-Zim$aw!8NYwz zrikUgtce6(LP;3guDXpVewTty;JD;=gx=8|GeUfE27ra7K9Uo330yJb<@@WADE}Tu zkiB|TOpc1>-%srCl&4wO`_97${2I~r0Ur^pJ@5X@=!}Cc3V{6k>kiESJb4)JcMl7B zNyX1kdd2(V$%PCC2dxjPf=dR6g@*h8N7b9hL%IL&<0IKq_A*K#vS+y^q%yXw*~gyU zSj$?`B8;76H&KS{TlNqwB5TbEsb~|~TSRH|d)}k-d4GTZoyR$y^Efke-_O_ex}Mjf zyMr4WOZaf%Ve^UY62AH1|0?6$;FJnm{ru&Z^4C*G&wYG5w~&o`gAW$}`DuA|4;K0_ zvfkB;8qi7G8$RG-l1j`{AW)7=X8KIrv2V~9!YQKt7X%K|ieT@Zs99*|TNQ_})*~V>*Zv9Uu*5|0 z9)-3;$bSa?4_UF6ZtL-oh39qj5U&+l|)QVRI%y(SI^ zycgrgY4RIM1WbcZ<$&>xyYQ7|;xyZW=d{WnR^z(bntYzS-v9Q4u{iF4uarU@2VKG0 z9ZFJ|h#ae}8rOV63z+#W;G5bdk2&#_U&-j|<%1fIp4)f6*0;YtTjfqUHZ= z+~11!f1Nx-rQh6tNf)8YVZw0m16&Clkf(s9H>V0?t!Vx)PWG~(Rl|UXS7JPT!)R3hL)hQfwn{X}+M;(agG<@pz6OM0hLC?KDXd={NY-re z8LKi+_?Q2Tq&g1u6Q5MSvVm_3_#auUR%y@D#bUx>s@`>dew_KZ9LaL}gUqD^PwS?% z2(bUEDziJRM?g-`OxS31c&i^Nx4A~A&xt9QDr4D#(ct9d(mH;HtLQ|tkMRC7i7oeA zn8ibHJxay=um2*CE_Dyh|K-p|hxzCK`o4r(V@|z3f<4G^=bz2WkM+UklY{nu-8;nz z`9FUjbO9)!>7NaA;(4WPSHXu6yJLJm0{qWn+=;X@^Q<mua;m}qC{Pvbm!9BOP*DRO@EJVwOpcgIX@j?|&uNb90LPxq!TbC0*AG1hsSm%y z5q0S!AGzHB5BT55r<3MOym^nDOQocuZO>u4B$MQU5LQmK|Kh2x#T;+j?*>O9+|*v$ z{%gBdFbmf$O%(t9;25rQm=!O)ZV7K_vVgK#zmk7ml>zwQ5k0fCmcUORntb_b{Ew@c z*{BQ`7WKa`azgC3=K}nrOMiZ?m`gI<&O2=Lm9 z^gj=c-)YhKA1x%W(~@4-uRD?r`o9Km3!g~k<;CP2x&6L}RXp@7^A)qL_t_<+Mxrqh zilBc6|JMl16%7_R)ON{7{%d=_$T~U~%_3`G&`RsSAPk=Y>( zC7Dh4_W=jVMOvC@KdalQ&nryR|2*}2rF`aX84&~dhZi_QYrNWl6p0ZuSv*c>y_GdX zSlxd&hp-!Q59VL~&!brB9LyCtB0~h$P!@yXH>2erMp|fSF5yzxKJK(0gczfVfW!$qy4O&gbv zwmtiuj2Adyz=cKoFFFvPoox=K;zBrX>)TG+b>2&6+18Ua)}@H$FptXDa(vUd@~*0y zd{xIL7Y!V@ZSbr?rUzLwc^N)owL=jZ@tN?oiK9&P`}M$? z{7!WyrtSL7n9*vf!yH{Hhm0ca73`}R1dpzhO>tOi#W@+%~I)&b|BDZvUtT~|K|Q{t{9VzoWYRe zr^L%=i01(l(XRAdK zo2i~YvuF?K|MC&a$HmG%mq0>4z1G&!dnOw)e{Qq_J;qS|vrbgmNR{o~ml{_11$55I zLe#PsUt933qXteS48dyG_mGbgQT;Q4XHxR)0w?f4r>Atd%df*G>Js?hcaOBZe^c?N z^$P|QgfVf!hzA<6M{F~Jy`V7e1c!zRY!5%*iepw&hPG!~{`11##xgTemZ-f)Yeqn- z#}u<$5b}SRF-HH(e~z(by+A80K~Dc3(a;pzKp_pZJ@;(~T)9|vW#Idcn}^P<|E0Q? 
zqHvw&{|Y)#Xfdf3Kc5Ag2{ivuTTez1!M9SD&{rK_>v(eXeX+5Fj1dfZ zQ|Q02Ya#Ghsy0p7U=&X8YG)vU`6{csBv8QWgc4op-xep z9lUgC|5qEe`E+TeX{8wbaUYDBET-&CQEZa%%SA9wX&Z4U;ZP+0L5ETNAO86&D2xIA z$KfZPs9}wL{IGM^RgrxswAQb@i-@DgP>M{!{}ulI)4RvnJ2q&GqYqC%m4aGf1Jl9t z7NRm9fKz?+*Y~G_mENFmdZv4oyE*9hkLhQgz@-=xaM1r1c&p66e1AxLCUY{Hf7q#I zp${l96Xk!*4;0(XhIZgI8^ukvGK{~NJ+2&+(jpY<=U2p~l~h_wnMMNmdtU1(H1x8# z#z8Hl330%6H7}Jr%ng&(ikZRxPB2au$gZT}ACWZ6n!No8s{ct6vjiGe81kR_jl@k} z-4DP<#$sV>j5Dwn64JbO{L114Y+h3KD0#b0FmKU5L8O;g#f%TNe~%L6$1oo=0p&XM zU+m2Wbr-KBz9KL6vq(SW--g~UlV@O#_(XV^Nb`RI{&{x&Bn=OKp8Cat95BiP8}sa& zt2&%&5m065#`Hw+-sM$mhcs?USXZrEUu^h(^|dI1@;_y!yy(c#!Igb&sk|24(R>Yr2_?hf}6(!A_$*iwb%>ZmAU(}WvF zDkt$FO~zy)yR6}w;5o_=?%9~%D|7J21xEKQJKVkcC^MbdgZjUyM_5}}htU3uy;7U>8X3lex?nD2@_HEv< zGD$jI{=mFimw^Q%b~=vp1UnDBHIGcpKg8r8kso=4ar%koKbP5jb`HR8i2(eAcOtv( z;1yuA9>F}=0%j!0f8Lui1Ni4({$UV`0wDsNeQnX}4$qIs8MSlge*XPZw8~-+_s#iS z+|=KWaNFPC>RvJef-odN{^6&+PQsQIyF$!~jA;KU_t7*Ze|tC=QT=m?g=}!cKJu3z zob1ImcSpVq2eTOIXXdz!dVk#RA!DQ{1RVCPnemAXWc9-60OY)Mcz3r&GfH6f8(u0| z;uE>B&8xPV^+kkNN2?#I@@))|Vc)cue>Ip0GKk1$1Tm3iG@oNy~ zD$>u(hk^X(WJbt;-t@m8{sarG3mWug?AYmnJ~yfvqtUlnXy9si+z+4um2pJHFmqxw zDH*y!;Ml$dVcuvsEf(}YMJ$@py}ACCW+93H@(;J$!}d-b0sog)&@7Y+mQt+m#_3SY zgXcp#%%a8IPUx40wFW(|w30qSux;}#XXYx-jf4GHdAZfWvq#R44c}r;rsy}lY%(uh z&J;!rsTR0w*-4l|6}HugtdA&S-yW!LS;IvF{SQf0O444Emj67^yun#!Oqd^IVA%eO z9dm&$&V+mG@i=yk#jRJ;hEno*r51xR7&QMmea8eO9~@`r#;Bh4&;YTVx>Ck|a-?g2 zrZ~#~B#SjKzCioGlGV=o!OL<>W9l&ka{0~|1WchvE`g-{IQrA6j-a)le~LWe96>N` z^s`sQF67An`J1bJ3PO9q|6X*S?(KasagDZOz)|4;`b`zza;GBijy~G|6~}l@I(Qa< zCby|@vCaHQeq^*xkdl#O$eEe zb&jVtbZKPNELumR&>Wq+M4fpHt&Mt0@_;A{;2(lpe0sDF+JEs_f4+ou2=LEjjlzvRbUq;q3Vqr>n#^!A@Hq1UD)@P_I@{v^;27?Ek5yCe#_X}9wCzVS8R^*cnhp!tV%^qAwNqh|t=`ZU4 z5;vhr2e*ooP493RRRzxjJXPczDz?7+vMEs;`tNiRlJ^VAL-p1&MhB$4%v!%%R=e~h z?NK{ag_kh|wrAL)()y`mEoG}^aD@Zap_H<}1Ceq-Us0`0eTkiI0mT$I^$>kSSsMb{ ze;Qg<;eU;C3;MrgNLp#CL>#xIMd9hODkWju*84^y#~#0gvtowX|KSb&dn8Me<%%)fus_HP4_FsDS2K{%oB%IoCEGPOrPh zLbxbKYE`h46q(<^YV1xxD=bKmn(AG0t`GOIazlcG9O3KyDCp88DR|F&BE!Z{{CX+eUM2WjbU3qCvM8RA#=5>uCSQ zYn75kFnrzoGWF-@7s%JXdGzA?=g9_qZ5VV@fbWFJlDwIJxcB5yquhgbx4{BiY(Ptq z(a@>{w6if^*8oLI>JgHdZTqcL3ld;Z>HQ4-1bLaD{{cnNaSnftI}Gx2(3gwmKWhb7 zPRMA({>v?1hp!DdjKNzhUfFF9nUqB>`f@8c-Vw4;MY)yytqydj3W_b;X5V#e%wUD( zsZQS)6+~7}F{93uWc8WKEHNa*g%@5-xLE{9eDQv4;2^C{pxBc#WzjcuE1OjYP$}jB z8C{R~M<(diTYy2x2^^=e|I!hWfO7=Re@fuPxOPDV zh9YpBk&(CHh>f!+v?$Z`Kb;qdH@34CTEMq>HJKgstoJeZLI2F75A#1_&yg_D$u@ZI zfQRb)^1Ej=h3lWcOCO)!+k5=tkL9@r-9%oTRf!=`UCiu{;2^solwHl=8ysYcYXBR1iD50Rymt|;%_et zE0=f)VgD@{`Du&3RLomtPGLd}_kk2~I4V*6)0t;pdiu?A;?HJ3fufUn9}4CKd%z0y z{&C{Y!V-EOA}+K$t4`b_q`dPC~i87(q->suDk|7;;R6>f=O6#YEe^?k=) z_4u{Fb5V~|_JB&k8e-Buy-^QaU7q^|=?40l62HDYE8X!%J$!YZsz_w?g#Fj~LWvbo zwycBSmRDh=>i}}tzpJjw`?q4C|I}C%x0Su}^3NZCRuWi@tm|Bpy`zZvStXui{TLe~ZL{dq>}aYwydOS5?`cXa$9Y#GukTBkI`pu94ihm$!LUOdl?@gzT00W1> zAs^UtsvOo~fc$5=h}%I^G5M^U{TEZ(d`i1^m)C#nzq;2lXXG5kg+3xvny&HomaP!C z|MSP{uUN%W<9ujthLduy`nkoo5K@)Grxv#H<3$7XRUNzZ^V=6xH6aD~C+rVvvk&Ru zkH4!$x>xzYhPFt>((l`-nP?3PFKA7Sfz90F?4N~LG1z}yafo}d)!m#K^v`AcBE{<6 z`Vvq%70Uk*^vRlMS(9g}8SpJKuq4kL)V*w~guf^>??~WnfK5p;zAIIT&BGvF*ob8E z!Yt;9N}yY_iPx~jz|MNNk!#(Bx5}T_x%POQwpk1~$e-;nhW(cpe2I4@$>%pD9nhzJ zyyY_M*B!s7B5s2>|-!{n=9W>nugG)rClOP%6bzIhb*pL?Oz`a`}f(EqNJ zZr9>(GyMpZJdppa-%7Cr{zs+QVLsFp$Gyj>I&{eqVgg}IF4yk!s#n^^U++{Z-cj$) zmaDL~G%01rciBeaRI^DpF3a%pZR-q2IwWQ>=b(OqF*+|Nr4YitFW#m8J4F!!$B=%u z|4z{VQ1_A0M-m|fPGgn5B9k~UhceB1w_uD&lJ?QS|8NggoJpWy*d>E!DLMt6SLnYF z*_NJsd*6BLTRab`0*!zw;JwePrhKF98l96g+W(%iWTMTQj#au~5?`pbm7YcSgfylA zf0_%cw`MX2N);Oab0|riTMgVH4~lea1*gHH`DfvAEi9LA919-<4ddMKEt@|E)jv-i 
zg9%FQD!pozBc!$b=hx#;H_CU)+8gG;;Dz@9v+v~g0w&VTlXrhyv*#y{T|o6emmJKL zEh`Ez%m)QAfPV%IxDFWekk*#pX!aAAd7%G7VoARRj_|*!m@)6?%cDZtqy*6_V!T>P?pWQcmoB2pI#)F2ezX7m z@EHqb)5%DTbuR~o6w9%U>|YHYep$iv~&ZB78~Ka$vNOW#b#Ps;}$<+gi-F7q{WW=3TI? z;=>}b#!XYBvV5eLjNxKUJZrK866Z$NJiwM*6;Rp}&%Xs6{0tb#|Jer7W2Wz{$hm0# z&p>>Y-!F)6{b*8jl;|?ToILleBp%|)_GzW=Gr5)IgeidjuR8&3??i6lGStJ$;viE3 zLln0-gt30I`vC)=sV(NXX2(DOyDSvvB5%8*HawxFR6XqT_%!U^;p_bQ7ba)zjn03Q z^WNo%jjgEv>#chD#w^)+oFb%Y$=Pv*&V*XX3j9yuHH(_vqGmL3fWmE6m={RcJGw3B}*BL2q*E3il=Q7lI$W_Pi zMZNoAmJC~rVQ#%!rwfB-%;I>ZL0!VCq}2i(Om#1pyzwkUZ}4-&OJ>nMa@Z5Il<7hX zuZb!9)&w3-bFChFRR6;$=VsVqNzg+3FOVX?a!#`IDQO3QV4Lq@O^Rg+4HA@$HNwTCx1O&-Up=m@{9It0NxiC#p+RQbI*m1L;lgR zGa|jI1dfn}5kKBt7)gAj#?)6)tj)9=8P)$32h2*6=jK){+TcRL(a{L67(R|;RMM)7 zuLR$jD%7yrZ&hDa=eus(v*;sj_rQ0ZYi6;k!Q)|^l1NV#|Afb2j*P1Z-bT(nEOzuO z5uP&Z)Uba~S;o1g_m&25*G>&BHh{<0W3ZG$4lgdtsG`(AiF;wOJjRMa`@gJYitHp2 z=>MwmQyXsa8>4()ruAPq$3RL66Ke*qGDZ&i-_Nfw=~sMy5#%1lj1edtKm0&zYs6=a zY=#rYiJn!2K*K-kzhVDXr15Bew_51uS!yMIM*&!ae}K-t_RTRO+s?Ix$0t$lZ1F`U z@XMWHYx6G_p!~ZRG#_)7o>%#>nNl`|@gsgfw?hgB^nWqI{)>799dB6hfA!-Iuo;;Z zXAT!%Co)Rja#(uVjoVrdD6nZ?2SgNFS6lb>GBy!W>JNJ%NKY;_tK(6vx@W1bZ^#iQsr(@z+q2LGl zFOK}+w*0mj&*dJSEU9z?)i6Q{)yf?G{Rqa(u>Yd78J0i+l9YUK{Yo`l*c@Xvz}W+3Y|A^B9Sq zKC*}&BLH>u_g`O$k7H2#FaL0Q=qA$+-BbZ&iPnF5>9@%^j;!Iz&Vxns4{7|5&V&S~I^`W)&)GhFs)@>Pw&F@J$! z!>&1rzrkZDPn}JY$F>pezaXw{!!~+8&C~Gf@tsGX1@;vs-%5CdU=)|I3>DknJ-3AN zN5#SZLge_w7%o-#7Tyje;>QjLp#JyY__P+gP5*m9A$)ixoXB|v)cnD$SKPTuUaXtg7}?uxA?Ou)r)`Aux2)TAu(mrl3>mVV?gJ!u-#{&`d>rYjBFY_ zsf8immR>dMY~mk(hdV*ZC@`V-c?lOgH<#?35oKVr9qr6i_~lbzE2ebYjvfBr!S$&3|^!lp@BQ zUzsU|m5E~h&+_0oNiy|sg+se_+&(xv7e^&rAIFShv!rts+y}DceZ~fD{-7Tjb{8ZB z@J}(z?!$5#+V`EP+ngE$(zhW1}r5U$F}Iv!~)(q-jV53dow5&^Ri zI?jB5)X{DBf1&;FFK!27)-|5d`Y)z?s>fK^u_gtSMd*L8J!LwnAEIeK{`p;3+N1cW z=$~e;g4KY7G$4m2pPg(&>%n)2Epg8Tha`Z3 zCzDU@H7XC7;vv67`%jU=wsM^7XV5>hm|y$_YD60UQ^kDk$-ct`%8xHmDE<-Fy>y|A zar@q*;bT?rM6_9XgiRiClOW?qRhXt zjST(o3wSu{$Pyrwk`At=+;(U4!nfHFW~{uQ*NlmtXrJH>59x<9t1DI1padYmBRJv0 z8QE7(uaO(99DTdCkgToi#RR)c}a`V|Mg~>0CogIPdZKi^EF^gtHz3s z$sW}IUVHl9C4r;ipYQfHFgXbxJ^y*OZ%Svo0J7Qt<)RhOz30i7XaD+7zx8tRfPv+> zjV3MsP|_g8V45BEe|aWI0@^XoSK&<7EM+#L6u=~}M0tDLrI%it&vwQIel-s^8wGc} z+>nr&MH_1ZwO3zJU>~3I?ELVVh}lxfeR9BG16mo)=dfBFv#H)yL--Z z0~a-ufgWQPI_4~U+VIeSp(ElwaafCRBKuIXj)jUT;GfVxFINlw-kAN<|Fru(W%&jU z|HD@%3gj>CJ|8&U|KdmAGqIC`NB!0oztDe7zOF{NgZ7`I{a+6T$~Bq5{~q`o6BfmI ziRORL-oXm8|GZ}Mmw)+(toEZuqcZ6JEBIWcy#&G|Wp;$<&qqoY;9Sy2rz}DmX{sc~ zq_t;~T=>>&plP>`+P$qk zjeq@LQlBi?*-55vILNIj(Eqhq-UcVO81!FEb!VDKi*@|`)<6Mdwf8R!aB*_$smeXI z`ULM*dh4nzF&N+sn}^n5j#ALf;tNFrRa!rt??)o-5gJ70!wFSP%?^Wv9Ivf@W}fn(>^7C|w=z8lRyjG^tn(*8Um zN9nA1Ad&M0bbD;SLH>_+?89beqq5UU6mTCHL7JKP--~A_3`{Oma?a!UjZtXF} zrSE1hw4d`PUvVsVXt#|MZwmS-Yktu6aU{EDiQ#Vrz$Pl;C?p|JZ|NoOJk@!@xmk|< z;OepTgI>eYbV&!K1s|Lnj(m~8;768oetJR4M<;1lgHyk+P^Hpfc8NF6gV=$zE_ckb(jKyWG2n zSNLt{iDDxyrUZ2VrBYvEH)%!gt2*B>V+J=r78%vXTlTO8(Myg2wL>xGe$8l2z8F#q zGpx8Thc?5^OOp5lJfGZ9k79(V8?P@nG_UU5#w%$QM{#<3!@<17kR->##wV2}W=N1_ zNEDOC$`hXYs)&>%y-=`2KRj`pS*^=5W(>uAwV^cFVhq-R9lX=H*zHE(|8h$nAkzHr z1VP|7W8Czh|2^>MnhY-%`Y#~=`I&p3mp|nZ%Lhgy@PA!lo41KZXUB}>{hl5%>Q4BsOPzGe2;`LB8|JfCy zKjB$;aD!81W{|JJ{XqtU!3^7rO55(R)kur-J++}Zl!EeNV?bzc;CF_PdVU2yYC16n z^gjeGzzgX9>-&P#OHhxB(EML$|JQ@4q%G%hng>!3AhiB3$Ultpd;6jFS6W?GV*s~_Sm z0mGFr`cu-O8d8G@X3v42Gha>^9d?7OhI4P4g!z8}JZeoktJ;txtDzcQa+{X_Bby}Y z2*a$9g?r5~N74B~4#P_3Hk;c{%`W^4mWJFVj>|9e!8MZ z67-)I*;oQ82>Kr>-PiH2n*9ic2DJXu*?3_bQds$#Z>;FRY53ly@ z*oXXBDI$xMZO20XGmZbzxKaStU^f)lq5gMq`AlWY7PSB3+EjI5CglI9nDMol6D%qw zGX43mIkhig%$8)eyVQB}s~rLfrMpHhyWh-WZZ%x6q}^ZA@(&fei?Anc!Ms_}9Z35z 
zVq^8LWyE9BmJ`lRSFQ7nwM}vyX?7V<6vkUL$liAvWQG#8lgxx!{a?A9Fo#f9keMKs zl|uh}U~j5z676BWK9|`k*VUGiw*vO>3^XAbgMU~uZrp86E> ze?b2{;sz8gv%Y7jhM#c_`Y)1@C1UW8!+a<_orwR@?dRC2KN`6uR zWV2}i0HNwgGd4oH9X$CTjwpaKj$ywKd(X=W#H{axz_m%B89sF*VsGf)Cl2R&R zdfOXzlm1)~y!+k&i~X!BcIfP>ayk0o{sRfs&9_^!mtC=SVtJcpGG6*YfblOTm+NJHY@x&Aoy*d@~p1|8OX5#t-tRlJI^^hZ656viZ)1%!sT+2O%;`|KpP(FO2uR zzPHl+{_AWt&oUbbaN{9^|0^yRN;}9!(0`$@75rZ%wn?3~?d%TFNm1g&>OZZCU`^vw z%RwEPs<3S&VhlTjV-*|)VgB{14#Mw9dfwE9ca971x2jN?u$yF7jsvxjd~;9Ar$K9T3RJza^HX0)*=4DC zR=7{O-m`}wcBYvO|4RS6pt`qc z3;wTLHm3&Ic(MHF0nGo^U!e8y7l4x${%4)IJ4WC$<+rvt2r_{Gan0@x9dEL^Tmm7V zGisw(&IZ|5H*;@Ori2e~ZTwz6bah7(2wpnuCLlc;geD1g6D|JFPh2`a4IZ(MJMMn2 z9;uQyqNbB)%@0(Ps`_&MUJn37dh2^98IGZG-}N5LcOZDd8ZNm3Q&C4H4pwGe^RGI= zceo^!L9w|F@>4>{`4&5t+J6QHM(6Axhn?Mlyx#BT-Z0%{dw9LSN3Gy5$|%D$(#qif`Wzst;(C+zc~M}c|6Ru*3bqV2 zpS&N%><%pcr#UuL5j$dNr?83GC5uxc3aT(XvaAP9*;kUPyWK$0ChYP~uHEY8Q=R>8 zTw^mlyQ+jyG-7jz`TVNq{DxyvN0)TsCuvxSaDfiY z-W!1b`SZ)~vdt|1ul~n4nR2NuA$X1!%L(MXP6*`aCTJFSo{=<)>!<)y5R`R6TiF#$ zd$ZMjAQ!Ud30R<@|KbMNN?k@h=|8xlg&}mSt^uFmE&v;6VweS7sO8)zZu`6ro)xI_ zpJXNxfQ*p0DfJalJBjsQgdVO`J)p9TS#i75B|KHV^aEbBeLn79H9_%gznRTK`>qgv z1ZD#=#iph?9iITXkTyv$@Fy7RkbN2i6;c#^uS@D1U{Qf>4=KPtVV~sW}{zvL}NoYM8PD-x6BTEY~ zz1E$5dc~Uhi$ElFZdn&m6A#cc#9zj99GYcXz=KUm^N##lwuM}6o@7@E9sytdz3p+c z=E0o2!2h(G^$wpV-^kP5pS`R)%p>_|&kGk})nP~%^nV5#YcOg<3OsBl5S~G?xkZCa zLS137H-$s^dx-j&?LZJ60vt$;mfbJf&Vc^s+0BEq_zXfB&-uF^*IfXQ^gteFWp|`;myHX|5o4UeNzw{;!Fa5)3}2 zV}q8L!2ejUQX7|dc|rw6#L=Y%7_`*3b*c(5K0aIfyRqdR>&+MKJm8p5iEML$+iE8y zPCoz0qs9hr&i*#~z9e#%+PdeaZ>4!@ug8^uX=y8Y>l#r7o2t7>1ksPn5?Ab!T#ic2 zz_PceayN{3rVPVGm`NHxaK@d*c^z4JFIr-K4+I(7S;}xJWF}^SAOrM2m#nf4v%@{A z-NqJe3DAEE>`J1r(FjyydsMkxib|@5EAMSuy3u(Jgw4N(Z#_-q5>{;t!h!#5N-ubl zwx#AeCN7h<-aYunSsDg%EB>!X`799GR~~$Z3HgV4-`-(4T3FSA1m)Vk7f$l_TPr-U z7*p*dP+l++j=Pn^?XVxq#U?05Ow`!{xu^=<%%MGPkb+H^ajA5z@cjl)!NlO-Pg6qR zZi0;GDhSjST_t%1@(=mMdWRj!S9Z67)hBb;HVyaj82F!o{lG7+4n`$qau`dQ)t18z z`X7iuP$Pz9>-^_`2mG`Dc_M+65AuJ0%;Em_pUSc^n^kCdblDE$e>|iV>!AX!OpUA` zJJ*b}5J?p?3T0OK@mTd=GzL_V))*2)p z31#n5Phos}u6D`Rr{1_*EwBUz&m9cgcIT!$(5G8IkI&S$T=OlJ0{$oe+9mr6UoNkt zZ~?}vF8gozF3Eo&)RjA1e8!Pc4JZ(FQQ;EnJtt@e%UHZq{VwR<0UMhM3=+5%mcH?T z{`q^bBUs%=bZAuKDtY~Selongzxrps-`zOKKa>K!&2E@~y|O-sEYG@A1KI!Uzc5SW zRCT9a8sh+Cr$H<5RONhtf2MxHBJW%2zZj3MEC>3PHiq5aYkaQm9#&Naxn^paVteUtv`A4hQYor^l~K4;gco)>rxwv?U`RgGjy(=YkiZ>xe(9AW(6EkXF4JwX)T z3H;A17+tE!!|?mOI5zd!$d_+K>rXrmc{9rB<5 z>VFhsCuu6)E@v-j0sheudJDf

    JRLvN|1+CZk5Qas`Uj3R?9B=+ zRV##&3ZC+>n7;`4ACYzM#DMYRmpTU*CWP9{lg?_xyqULu|Vygp6rXzPuT{vf~)5DS-x5I#q^3 z;1aBXO+*DM6X^RE`5mfuJw510@Z@&rOilBK?K(2Q9-17p)PGcvRJT9(j^F>|;kIl3 z+P9AWWM8$je^0y1Cj8eyk88Wz6JsBEa5=cb7g~B#u}_d(d6MRr;0~aevJIPJVAtLf z$p6_Uk_C*Wl`RAf?*{+7oXLAk|MQpsq3nQ`3X)zy*=q8AtpDQRuTf3ZlevMP3y1cH zJj^#Z^!>IN)X9dv>&ERD*DWwT0;V;V{--JI(OFDdM7fw38$iJNPur6cQrN5c86Mc? z{8iF0+I66Wz7)KUME)?*nuS03;ptT@4p?)Kl&ePM#ON+a0TUqq`1)GS{B?@gpI>(t zd4$;OMX%6rgUTj>pSQ&CSp)xzH$;eQo3iRAnFdAU!_`pcZ8vO6E zTtbSN$~ZAYkvp~jg7E>n08Eu=SwyBAr07(xLcUlYj=dE{Abk#hO?=1}kJ%ytU zK@ajwPJfR`l(hd|w1uF!tijj->Y5v;H(Yss=g7~cXBg77eUO)gPXPV%{nJL96WA1= zsKK`HeK$2X3u?TvrVz#EP)b@j6erGYt+l3Zz15OsKhZKa9@7vg@-)owvNh2q9dl2PqYGAF8510MR_%vyAsl&W7mi@mq)1?h==y z)I&v6!p3{?zfOTKjuY4fD8iPs{7?TAy;#OePJ#TN4u*gn`rrI3T`In>GO&z~ObPkJ zzzMPDUy;u{#yEXI4n>X(Z#gBv;(yj{h2}%2R5FwWr0|g;|EGHEQqsDi^$}QZvxEp( zg@PY6HWIBqY_+b537Zh{lBjk)$NY!Y|7Bh3!ZjhHUk|T^H-YZHYeK_o94H+jF^9@s z_oRP=JGO7;VQGD7+`@Y=SxTwn`9iQyG9c9+%I3FypBqiueg~K-ejS(p{O>Oz|Jl0m zwm!o2&nIF275xqwPcxc;L}1yigrNVO<$qrvHUoA-8D7GE3({TDT3(J9~Uz&Ji=N$;sWd`K4R(0bZBxdj3Lw2}h8y%*mf`P6k2 z;Gdfd7L_qv6JQ=sgSoax(sSF6n`1s-qaZbF_<0%@! zYo}3F;Rx?;WC#D($)XalXN1971M{yA@3+gtYLvAQW1*b^`G=VPhoyg3-9tqP67%yn z?3)4pN2z~}qbcGy%u+_2YjcwQ2g_Gu{;$9IhbpVPt&`KCBsiB$7ck0+7;C(A`F@^! z_&9{KO?GC#4;60NQB(iBig;qrj=Se{1hIm^{(~e`fAQEd z=Q3)V43$03kz)uxC!g;675xwKRV4uLBtzW^d9Kth$p1OP`z<1`!Vbzj0TBWJbE(89 zGnfx_*wm$G$&52j5(JiM&_5qf7lDz9#~^cVdaOw)-|GsK_d_%9fB0v`|8B_UGF&yxK}n`5Bj$Ur zdg%Z9X86)pXcZc&^V@~zW#}EAn?%rUBxxGXoO_MwpP~ONAw97MsKksM-Je;Ueb9fZ zU|p!QZmY62yq*9;C+MHgLTk1Hlqz_A5AFTbZvxHqFI$VAeBH^(fkKxMp2WfXYZ1dgdzRxb zJ3iEdua`@@*$HS8ofv+ZHjgJ|^gP8{AvubWiYGDCCPz17q#mL{xg2cL2cF#p#G(Ty{s)V1_r*w!wTQemH3ro~{$bKu zec#DRI%mXaJ%chexlivwwO4*)^gEI&N!g{)aMMIbS$YY$wH8+9Un~9Zn*NB@|CK6Z z!_SYDy`cYn{|q}tAI<;z@s*oWY@N@QRpus`nTc5R0@ z=7ja1#{bfn*fMxI>A1o=Xh9gP^x|-2GyO`X=?2O3MNrh{oxKzs=~24ls%khLNn`vEPKViwr_qW0 zs?l#7W-vY*K%O9oP8UR{!{NKsd1l66%p`5FHw|FGJQjiy`e zS!)=bh7gv22>6GO2lAgcKq2MqU>Ep*4d{Qm;p(`8e}40j9pGjnE<8@eUKG~<^($te zIkd>!HXpzi_`f!s1iTRac5BIB|Cdco_=mVV@AF!~|NiuD*@vyLCL#EoPjMIw>@)yN zWdm2X!h3!Nt=Tv^QYLf+&D9`|E$N6zH?bi2R=APbha@s~w8}jO*Z7 zMb6U&{FCeU#t8sxaCk?BP_x%5rsU>4_sRj=!OOMcx{lB|#qxiJ>k0rkza5ty%+&|| zUk~#fgDTxspg$=Ldt>X|OQHGMvKLOKqrd;KHNAsh$fN;5_V&s8h%*f`ZHb(C1pE)7 z0x_AQQV8;&f9J9(iXcRqY=y?25$OMl|1D$7hOR~ul8aQWPeE?rzvw2E7)BE?_p3A~@+jl8J4>Hy>)DsCks>4P<+ zk^!j5F+Yc6oBHl+aIp`;_#YPjS?`Cy|Fw#jO6L_rnExI6PwyKy(oU(1rHYxr6a+_r zz?+RAq`-a#1Izz$s*Tg#MJdc%HzBVwpcQg~jNzXRpPr?9ZqC;W&&CR3r6K<(R>8Hg ze$zEin2SLFJEs5n`Q4RSGYnPT&;@$dwRRwYPol-U0XCWGw#{tl`zOL)f%n#l>l@HZ zL`kEz($y)SDOCKRs3@rNv-F|=l+89n9@VEWzp~OaL5T}#1U$-Aa?VShlmjW+ivK;t zF@HlV^uK5Bd~`uDB26FBR{ZY@+?fAsFSx_Cg&ibJ>5CP(ubt;EZh-tBVZcA&e{Z$y zg_E}2=1P&K5Zj5}pnv{!B2RIhJG5q-1N_4Bf2is`jVI20zT%z(R2TGLVEtbSvF`6g z^{7{CaQ09WF*~49e?JOGgZ4Q^-}%L_jbjoOB~cot|1tO? 
z!iYm@AmlB8cAc(8f1KGNB;ao80s<+l{~hyx9XilLlQH~*%rczpyj-lyHOS`hj#TCdm7;PJGk&_|wT zqz}Sn-dEzIsdO$k4ixkC-Thr=eWD0$RoBK9)noZTgiz@jYc>S@Pd{|?(g+*lp#Lku zT7=@mOkU~#0{xE_UlHJ+D`BAwRCMr+td?aFbQIGt{SV<>(Pe$ctM1IT$1=-TGu}KJ ztI+(~1@uJt;^I`G;wr|c$#(F6Ip4hP@?McLpx{@%JGn~{KFIQ!=V3E=HApRxH9R6 zE~1yk?0qZ#FTZgehSNhW+wkVpTxCi_Fy#N_r&l-)T(-^BizJaJTa#9c{^?#8tlT=9 z6^I?DDwHRsp#S+TP`!g$@82}CCSMmSx=rwI&ws^A4xpNy_wM2^dmB4Yq!v;xB z;FFVwhVn5XNCq}MUV^9#%C05eEB-IWhS()qW%zKFxM}=z+H7wZTsi`}K>zbyeY}}E z8Xex^kf9v%*jyMN_^$@^f3;GM>w*s~tX`3>0{*X;9aO2>rEd?!Te?SUhPP&?kV0H@ zR6UMQC%doPHEI>ubb>Emch~a0pi&I~%stf)z*R)vYTe@ zQHD3Fn19VfTfskOZ15Lo-TUd|g`ZfQd>V`YVfB9KA#>VutWaOKJ)|B&Z%N%@zXRc|Hm#H`oDB$E622)&urf` zL1f*&1yCseI1KtVHCg^ItF=C|9x1|<2)9btt05R{-*zLQS~w+3!5(-dEdQBJ z1dX-?Kad?c=LLUx`&~(Bgj9T;bZYXt&Ed+!i9@x$g6E_b1`)AFO7|O+*O@|f2gqj?|*arDO@kjo>RA$?>k#9);zx*HYzYA^p z(gxxJkH&a|U%`C&mb>nS*8-KontvUC(ng?lr_1iM%s){9{ijG(9_#<=%hkvUqZdt{ zW^kjprC+myi7D#T6mgRjovV9)-m|H1g51FF#fek<-nxQtC23H9c&Nf+Dj@gOY! zIcJ%Jf$4ve2Fo4R8bvY}fB*Dy6f=v0gqTd&(sd$1NW{D1|MJ?sz%{OBRa~U`o~YRT z_DPBT>B45ayzsC=sv{oy-+{H|p=S)IV7*W^kpENmxWq2UCcSZib0z<9r<6SpZFZ|MA7AP~0HQ5@n{%YLHnOF%=3nK@if~b5>7T7Y|BU571OC~B_|}*v za;DSyWR(>WGkMDffp)vsUntO1@i6iMU*iJ zx+v`fY2Nz+M0fZ?#4l+n?-Zd@iLmg*ispFy|AE1|zf2lu7emXG{@{a=DXufoKdy)GMW z+6HpI`o9_w%PwKV;dUeq>h71A{#nVnFb95* zbacgI_8sxarV8`F3*1fKhnvJ|*$}lAkL5pazjvJP!OWqEH$m25~sHh_PKYs2P8{S(-oAu?v^?UO+cXDO4zOLI@;wmL~r%A7A+ zfw4`JQs>wHWV-8IfgW}D-~7W{LXiRN>)c^`Gt|WwWBEV*`3WfskvDa`vmyWI?6U_X z{PSY(8w2k|TfTU4y5oxDO`!ku#kKV$yKx;6^zqddW_ywdq+sLuLI2!(9I~=)z?-s~ zVhsHkfPXfhCMm??n*^5o@*%Vo%m4Yy|AYye*`HtUA3)I}m^tQihm*JhkeM^$pp4q! zXrWIo8)OaLa4YMVG75QIpB|`muFxdp%iD(H@^b~oao=gc|I|pD!FIjnmdIH}vm>CR z#`Hfh|9XC*nQL6jCBI0Mz;*)g&q;x`PDB8zT>}AhymoFB42#Qn!>{fVuCOZ!@(2En zn+*9skbfwy3-7SZUkgXmixe&T@+?8ApAEgZN9S05w+OmJyjy5nF#YqJN0)XQvY#+y zIQIb3ie)&+x$UP6yIo{ZEu2yeN0O(`@!6@_Lt|7!_jYjHAy$ z4Sx~76Y$TbB<`#7_Ib@NnToAT&<;UcwWILp*q2ADrlLq>(woKqY;U5;tl*#Fs^3`c z`}I`Fj&yta(eHLF^lf2tGz|ZM{uyqj8|?UH%oCCY1TD|<8@!xO7E+SuMw9>C&`yd5${TZNRR4Mw~Kr|D4hez&~FLb2bIGoDyqxI>YQM-|^zl(O*>Vq!e98 zakDUdppT4Mk^m!-+uv5gl4(a~&A+_U&lKAhnLB3c@r}rBsV}j8g`1Q=yL02+SXQr)PsF`5^y?<$qVB)%)`x z7s?bfk&jm|3;B2Sr1PVhfztO(t8{U8H2<60(TPLHahx~YikWAP4t<|&2;^`UGmcv* zX)W&Rm(mZPoPP4^hjZRW`uATd1n7SsT;4Z;U@sMtq%=;H;G*Y*7+C%f@ITk~SQRr& z4u9hohvbkPR{l@)Or}{9&_^1YaLQ&zOj7)QhupFaRF?k> z%l}Cii#*<0BcvUgZ`pwRis2v7KOfXl?#=QUEV-J)zRSI0hi4HlH(NOH_L*k2cwJkP zviD$#@OsmAp6cG0Ea_Vn){X0UgL?4)FXafG!OV^Ue$db?Uyxo`ky4|KP3{RFL#_G$%RYf+B=~($>^Z5*=3tFeyE(Cm$tOw zgnwrifz9x&QS98L?I8QAJkzjASjnAd3yt@EpMJs$oeo=C&?G2A|Ap&R32s^m7BY3{ zh+U3DraXl;{{sC_b0K3>;P-dSV@+PE%u&!k1OF3y$ARAV`iZg&EZ2NbTXA8$2mC?N zenclMtK9aM@w%KE_|1-|+2Zn=LkFnz45Feg8LHKf#GV0U2K>*X&f{#UEd9@npN$~Q zzfMbR-2Ds71;$p-SEZ{KaL{sC z{vq4XZ`?RlYG0l(TNQteC$YpKQ^c#TIc#9=`BkHJBirxRgU(CHn*yy=Td7O**^@YQ zW{r^^&Wfe~u@A|D?6Cg)G+Vq7)_+lIz2ERVdtbO?mmrvb1flAI|CUIX-U$M2bl<6`2=PhuL5I2h-r=3#Pl+E9eht}9!I)^hhle@u@~vxM&bB(`>7BW>v$saPDm&?t<%7 zBh@yaT@8rTVyjbm?#YfVUKsv4ECM%K#$M_>MAh)Ssn|m6VdubPfC2LH`5&r)T-7dXan^q)<2TKQxk4 zK{t{+l8ia{THo^%RxK% zacmJ+0?$_){O|XcPpp>Sco9|ow*(q+O=&RyS}kLB#5)we^uD6te{`y_{Lk7yNkdOD z{d3vYT&>Mq4uF4JEoGQ&4i71QH^Gf}8Y|4j@(*?1Rv&oZs0;lUDuY%V3y}Wlh-K_2gM#v%Q3F3)Js9l}9`x@KTC=rX2y9hAe;@_N&uR2r22HL=h8lvou*1$>fSl9qj1o6=7d90k~oduRGX0 z>EGFxuNC(tYqt1g%s!LYfrU+k69!~`NXiI%g$3R`ZP>B%tCdhuV3Z*ELap2&(eUwK z{^z4yQULpzc-_!Gs$=+lX!MF-M%(COoSjm45C7XqhxwNbLycHR%kAFWD@>uPUOf&r z6PEv-C=`C7=fGPs-KQr?q0GzJTiQGb5Bg{CdfHe|6ZF5E8t7&c%PKGpuQE&jTo!fK z7H{Dp1c{)H;m>-nRzI!Vw^S6CV7-db6f#!;`Omczpnt|Q?61{5yM>bV4S*SXagU}z z6u@{AvYlxvN;B`o2{^bYLnj;}3h<&(_B4gng<6NdylpKmZw`ZSecBiZ$%{4yPlJjS 
z-n<#Wmgjsk4;un|bnbi}vdPqP+9OZ*>A!sg^S@hc2-+oi@5tjui>CXZF8=FDZ4+9) zY`y3CI=F&W?^u0;Crf2JbnM9nr-)LhA-1R5*#7cA%Jl*_54?X|yY1vhVREtnPixzB z<)Ak7(?$@BWO-Kbk8R@?knXx4A4{y^$L3%CB?fMTnzq#FFQ0);!|=~41w)b(?0mfp z$bU{i)v6-1OSo2GgjSJS{s&f8N4;qSKc1Jo z-tF>U93f;F4gxIy@UDqW^!w>67T5a!$V2|my||Hk(Ep`|5SY?<+Y+()7iY*IC>9m9 zz(mB$ntx4K%zDdGWPi+K{imRR_GkmQy8>A^`1|M>VMZ|zk`=MUy)zez4h=Np0vD7A zN^Ehg{2%u+`Lp^m0XQ^Lvvm_uMGKEc>rphx-?anB&5O`n1#U7DKUjd8fNGTIKCZRK%2YGi2A;m)Qz3SpKibrG?OK%gdh2 z)&Fvmv_mczY8oAQ5BZ0Q%=!4CUXlW(t=(J3g^f2#l~_f?X;)#6nliU5+t)n1IXCtM z*1{^@#d)gvfeaFqe^V44=Fuz2KZN;Lf^DL@5aU;b9<~?$)jxZuJ!$}RRrBZKmHsc! zg3*xGg{BdcG+bb(Q?AzG{-TBi4tR%$2SzLRcbyf}r-nlQkB|}>8O45I_>sAjTgmA- zqQsF@5SL!cTbp#enL8qxc2hH6or0$!se|SZ5K{HB<4?( z#4%SZ^vh_gTiL2S{hhs8nuM-Z;uN?#7@umysGr^XEBuc(-{ZZP6L7Q(#)nmDm*n2! zkmczz7g=Hw9zp*X+hbXHz6Y1y_7p3zI*EU!S^E7-q&_N`N?6Z6MDGT z&Vv4ujh8`jD~iSu*hfh4je!37r{2v2w+~;*Cn{c_pS)~NH(=#I_vI&i#xm?@^YS$D z2`0Bd|9qp`po}XuMmK(dW^m5*&eFHH!@p+TIvoJgOiw;>+qt^%cj&t!T=EPHEM;j5 zarBv=0RG1p5K*d}9yek#1d4IuD(DE2D{2#;k~}J~%oS81av@$s<8q^)l(ajTR)oxe z|5^8LS`)4sQYM?fmTZwbxmw&bj0w&#rN{23%QRxxOCTZ0b$)lEJla*pG#$gm!|V>i z-3$6p?S|aO_8?N428Ui+kGu|2!{W{o?MilW_CM{`8_q+4Uy$ z1%bszgUYPQ!-G@k(Zx-NM9&d6imZhhgId60O}Zh_!RRk2dj4ksgcx(XEnq z?8(>{*FNjr=ujX}7qiFm4|h6_-8v5;SMiJQKHu`&-22(eRzukH#m$lK>u_5#(o%k9l=|eoCPmzzXlUJ=FF-VoIlNNjmMj8~_rmoaA#=(zAw=|Wfis>AC$)va9dI}UQ z7SwyRcXmq}+IJw3>%8-*@oBjGBuhP4(fOi|0-Vo$W5(9^S`{7;E;E^ju1vK|h|T6( zbMruDpeBBBT$f>fY5S(bBVmKpJ0I(v!VgYB{&Q;*$D>4b4F8{kwKVw859t3Y-Ea)s z3+KX6E^ZLhKR@P@Q;>0XcN=El}VY~hO zv?h@A486U~AUZ|fnj0C+^gU?`vMn;+KZ9PvhYh#gmHtz*Eq0lMF=JU!k0~SqvGNa9 z&ip*T{>8l`!yQwQk#h0QzFN>fWAm?e4^A80t5F`3ir;k?FykRnO3vfj_j9p+dZCwL z!Y8K!i{^ne+YoxU;LxYkIlct z^rEl3oDfLqsWDax=@}(U<-Rt0nEs-{?n#&oxdk` z+W&L!7#5APp>9}#^4I^)P$t5T^D3Adr{coNzf21N_R8k%h{yJmtDYG~KQfGry9(nf zmwbk1LZ>=L+Wwb+cqT140PePOLAiqT&%Y%w{qxas@I~`N1vR=eCC8a23Fj)-|HYeU z9Q%B<2WCozI+2fy!iwQ$ZWJ?Lg!?LGDq(aG%8B-KtnfciSM$?Y`On7QE+>T~HW{Pq zFuk@s#jZm^Kt6Yh8%lEuXu-uO9n~}FgCUQkbVA>a&IYpgi3p(}=}H{h%twknq3)w4 zpXRa&@DDBl(?8pdS@veBi$O+h#+si1ZAM`JWh&UU{^C0C?|**7!Ss(D1$3G$`~zO} zH``_QmiCgAiJX?XkOIhPO?Xmk@o)GVLx=Yxn-adK`jk6fGP`s9yDEwCu)yf>A6~=g zolXyB=?1@5K!;be?N&UWgaf4V_c78l75Ubh#di+%39kscTfc4*OWa0u_(SBjL)*XGIrqzaiP-}OoRJ7VC;1!*EIGq71RRl$pjQJ z!f*@h;g@%e>*JEQscg~ZcnpwB9pyeKYO?;*YXmC$={{FdBEz{)KkB+Nb ziB{_v>ORftnM5mA{^3Bax!HOg)IaWpc&Q<^w#25|ZP)`6c|Y=-`aN|_Eu_zIb4mw$ zUkLc$q5u766Lt|UU{7#>$YId`ocJAQFKBwWV=>pl`td~ri@hD>4C}`kim?HcoR%D1 z843n2h$gkhEJZPHL3+!+fUY&hhfZ{NKasP|R1BS)u7s}-KB#}np=hMBBWVF9oy3nY{m-jaidu-)5dgJ~8NttKwl|bkuSOa>@RQOw z^CT(daOve!n(|!IrbmdMr(hF$g7tqXRIv_o(_pwTy2%0l*{&(|>vH-8niHLhqzv${a}$meXfN;c zq*gQpI%O&tMKM9Vb}BB`A@7b7nYNkgdF?>ku%j z7OP0MIg9CBU|?kFpK~)GN;pA=*AiSIwHz~;@*dn)|J*(HYJT?BNr(`v_G+Dn|HtDe zq5KKc|I~P|#{yZJa@H|yt`u&{K;*?uHt7I&=23f^f4rB)x-w+pE5OM!$u9@ftY;L zSUX}_HOv3bcbbi(>_F(#OWP#EeUvHSe_!eU+N*sO-!7V34#P-JDRq4w5p`pv{Cz&q zNl=nsx`1C!k)8J7d|;nXhTN$lv*@&Gea-To3^N%(?L$|P`>>c@IPZ$ckZOhB@2m(>XDEO%)b_1uPdS= z=I_JEo7u3`-H(WJCF*(i^=B-HH;BQd56tN$xiWFu@M>Oze{ zJ>P%-JalFJI&7}=u-SpvB0@YJ`a%TkJ4_NkYDfGp|B!o983_p>2Kqbya;+8j;L~yh z`@C%lcGd*6MzrE5)yJDeGLnCwNs9~vldY&?*VDFSgV-E-uIUYr1?BhWEp%&xBw0RY zp>M3An2Xj8;_Fs70-}jK&+v}z!=cc}S`5zghJ;i6Xt`<&r9sAmKpSmb)4dl>0{&sm zzb5|02{;vyxMXoC3W=JcQ0Tuvf}1r^#VTnw=#qy1Q|$6X3CU9y8wl!36F?(Pd)CT5 z%21>e+3+2ZK>Ppuzwl$Xr?Pnc%Z~Ag%`~yGgZ>93IeTTwIFLG3ALd_}|6K}~MWk_Y zAR($Z%)bP&{KFlMr{uXD`Zwg7!bmC;u#&|oK}Xq{MEdH0j#M;HdN)Y6i0kVS;(VVW zzkde04&*m!RA zzxh|Digh7>$0;E(q`yCBi)9xPVf`1+ijra8MepIyo7daoI}9*hsmEKOJYb zmy?9N-_iqYr8h$P%q;M~^R7l59r_P&@WNUA z&#|=|kD{Nr$x!xsJfaYJ#999LB=(I^t|btl=;W=M2l%JhirZwSfHJ?yL68&K0VRN} 
zGmYt6*~16qthrYU226{&N)u8I4uzw|ANyNZDTrt(UuVjk5G0^H!*6NF6X54pZN0NK zJ5d1qUva-#{qI5d10GLVveT~aQb?u<;+JVy{tv#p9?`ZV;OAh9;;=@+F#zUFJ8k*o zEwVbganfC93Cc4a3?b#F5P+O9fPXOk&v5F`4RykIkGFYH^miOv8|9sOIZ09KIDL6G!Q&QD>L`uhELA~^dvB;q;7LI-q`sXfZNZfZqwXU_ zvH!=_cgHn#w{PDk8we1F8g>FMPR_Z$*L=1=mj|J>0lQ}Z zHEYMj&rin5ec2T{Ag_lcK|32}WYT(-gbMfHx5gyQY2{W;#9Ky|(59e7f5b}Rx@!uf zsfe$Q-FXd&u6?sQ-xkp^|E1=CbXd2279DwY^Cw0i?tg70-ZQ!Q88+qVS_Rfb9gl`> zW^Kc>3H+Hc|K3`&0QWD-adki!BT~-6ULe`C@MV!`ZHvgGOqIiTHilcRd(0qD7#zdU zu4D(7noAW93r>hy6{cZ{+=Off4Ia8w5A=31B8>QM?&iQd->QeNOCfI(G2qodjtbzZ z_rC~tjx(Xs^goZ{AH7?>1HlGbzS&NPd}%Nf+z=Nk{R;`O^vQl7pB-c}CH|q{u=9Pr zpH0Jd+)8j+(NU@&BIA#c&M`%`l7#o~gKgTmqx+|+4{SHRU3l{?Tc7*u*Ym%52FiKg z6it+$&(>A%U&uFrboa?@EhaRO)cytKfBrqQ!g0ZY0Nj5xm|`Euf_&Y*hd_Ru=^VT; z!yS5a`Oa2=`j^R+Vy{}duCKE-o2hRZ{wTh@zeP_^+c={abOFd?0E^gq~9;~8@AR)@2wwbU~t#Ua6uJrZ1E5tiSH3V9c{ z_sT}|Uk;1@`gz65TRzG1q*%|unXAaCI?jeWgJ$YhXg0G3<^4VI8+A%>yCIrW7lrvBBv zXZDNexxx|H-7Y;I7rf2LBv4jZhELIHt5i~I{|omoY)`&z(_eai%SmegE2Ulw93lhT zX`}r=H4j{L$ldM)q9#iJLjTW)!*&!4Cd%rF=?mi0bwRpETO~oj4c}Mg>_?0^CZ)58 zP$K^mPbj6=n*B@c!V;xDSvfi7sG>`I8pi=?G)sZ_RMN4>&N)D^p)Me(!rl!Z7U)>i z#=nO@BnR^T6__M;M9U!M|Jhv&x%v6J@X7ykI>#%ysUGc(ZpI*f77(m!YZV|a6W;YF zz_|80NGu@N+y-y{g5^?eWB)Y4$eA2_Hp%p_6fC$VYAZD|_j1~2RJeVYPZIinu6*Ku zqIe}owhTYQ6E&w3`5z%o8I{b${Zlt8|GvL<4CdcU6wJyYf?O(Wy zb+G^WGctkd|3Us|`ukj`T?eG{soBX{==M}t2bESTL`6(sm3Kb4DkMm%|EENY2(F;M zB)ckdXr$6FXDV-BK(HP@a%Iv;73Tl_9(x^*4B%84z{&Bl?4}$Jm?swUW{YyOjHkyj zqU-2z>vYQfOUs~{MV{UG`q@8!zn?ZB2t15Y@OF)Zt}k=7*1&#`;{@f~+D?tiV&>-9(- zH+UoRV683}_b;gZ(@Z8gbaw;kNaf$r|C1Bl;%IULN7TC5vCG2^_2AX>CwHvmOk)$6 zJLtTE0c!q-6CHrxb%#gNvPOVyFg6NB-VUA~NNI)p>IqV3R&8fOxi!;BD5~OIJuZ=M z)AtjlH#{BCs?eWa#WxXV6Yy0Cas3&riK)N2z*4G91b=v*Euyye46XwjOkw+E@A64zQms!oo8YVrqQvE-x z{xPNh;+W(5^ zP-N2RrX|%8ZRN|#@f<<^`t71)`3ey}4ejss0;%c$>CYF(aqr{KW`1uak4Ze}qjm{5^=LI?E!==+?1|5{9X0qJw$oDl`zWx zSemC3c@+W{c;yP|s(@k_DGUh)XqbQZFb*ap_3aqxtzm#XpoFgjXLEP;`4@XH8>r7h z|8scljLeVk&m6k#Q#Ug71FZu0zrOMUue?iiJeZGAs{ScdyHNi^|MSVewP6_%N2vEt zzxeaVW%Z7`ux#{E_J)e!1$KCCzyFJn49S~ddSMZPjua6OsQ$xeaUGT^f(6-eTICiY z`RPiKd_PEc*}*j-BtPVt;yS9Y(2v8lFoaty0X?5*=N8c3h5vU#hXv+AwV3}JFq@4A zH+BBq7>E=)9jN^aw$*u0^;2E4cgZx|JTN_Ffd3j**M%q*fR1cl4g@43NDB6I!4ZFi|MIGzx>PsEmTwZJ<*ULdnlV^k(V5W8Z$3krs?K2cGf)^&>UonM!ZFV%XLtq|5<+)s=g8;SR3(A z8=ofe#htc~xDmfujeqt|TQ_;{kxe7LSw)k#cxPh%E2rb+YM(8s|MfpJmlxq<4^bTI zUsV39sNBWY|DM)JXpnfDi!JKExc_B$yxx@{mxXX_^g!WJLFK=QmM$Zki(2}+rrgMs zEWa$FCp?m!5HXn?Q2L#d_2;q;pn-9nE9ChI{%sRa8grqfwtUm-<*$M^Z4k04%+Gc`CEj` z>7V@1XHLCA7T8{Vzfp9T`pr~%FXrE^O0?e@zyP~6EkW=l$>Q6j z4S>WzvdwfX#NRr7l_+3=U$J?DKN70or^25s$N(U55G3CnxV+mHnc{-SKTl7EY|8(9 zh|0gu5=6EXvOu<XLf2xmv^AF#A1`cUbbkk?iApYtN$%(jNqZu2oy@;eAK>rWs zzkdB=*4}d=Z$ECM`JZm+o0g~|JL&{Qdp$O7B#Aj%C8POwlh7UbfFwuzpF=|M20TY3 zZn&92X|rbH(`!-x^4zLr1Q4ex0}}TyDkcP-7tj$ltrYYnH8y4O&Eo{VG$ro8J5+K; zZW79~8=jBzM$SYK9!F2L(Mjcu`RKB13f0}cQijCiZ>aKeK z;`_pw0tN_Yj^~sK*@hx##VwSH^iAF5ak&50eJ^bHk&plU^m@%5Gco#~UseWVeaZxg zlU_5{;U9|pit0c7`uapWBs{u*Rp;N+zG!w_;1Z5=qxhSf$0OAH@4r1>?3QvJBcuC~ zCQ|#KqZ+uFn*#p6xS$FPq-_p3Sr-(Tdd6{X5-QBUD=h|jO)bGZEukL+Z>}#__8cda ziwb^u_2W9^e=z^9*1w*lJ!=_hXF|u9lULln_2&72Nu`t4@z~N5ZtAP{2hUgNn~nOP zz23|pdGX-gPW?{YKi!^*xBX6SctePah*qCb{R{Ws#ZgezL%XNOKeHL#QZ$~GtCAQt zj`*9={MW3Cq+wNbGn;2(1j{x3ht-&oi;~5$^YkI#(Bk2~zzTam2Z+c0uj>a4z3LnK z`uq&|$Y>2dd^sTspXFr}b&&u0KJhno|MNnV@Y+G7ZK(Vg;Ut6J$-o?^?KIo~aJQv6 zlH1oMYRxtXdG^RT_TF|2k0e#n<413%40i4L;zQ1gzwgXY_y0^{+U{XRtwP2^kQnQyKTrTbo;CE&(RquEz7SD5h6Ce9q4~v^W^piD*D=W zaElxP=37FB_hbQNL9UlC0nwEHH6GJ^bqt<1;Qss7yZWhv=QDg?GbNaR58k2PKP}TM 
z_S#T~XdU-2uFb^*ECW2FIUT+G+BwMojJ178&vdY&sXM=4$IzA^PdH^G-bYe7R2=D(gSQROQzJiJtKTt%|^;{fwt=RfdF!1!A*YAdbvNxJ^1 zgWeEoKFa?H&u+Qf`oWBi{gM+Hbpd5A7mL4~F!#{h4g0L?i>G*$!R3^Khxv;tWW5eI|KIr zQ2o!Sf8F~wtHa3N1**KaO?P9!w~Lm_Q4VE%n<8NEqiZ=U$AL$^KVMba**f}c}m zDQf=5;>UAaPPTq9VzWwiIx=wo>v1MI>@cuj>fy7j_{*1nA6vYSc45|-NsG^tXVm)F z-f|0iRj}MH9CPcK|Jvv0R*kYR-LC9F>?M~CiS$5q{yi4{~3_~IiIUF zg^d{0i;Fbf{cseNYkKw}|3iloOYDD!d}|TnAJcgcuOA*6yz%+DVm;M-4gQ(`DC)Sl zhB`l}7M@&bF(@Sv@18L(?oldHnbw9l&T)Y?tJoqU?bfa-104#l4Uf=@dH;%)>zcQe z{Tlh7Ju}wkzkY%akCBDgAc%RZNmy5Y*e~zWnBV6!Zq=f>()e~{hJTg|slxnw(Ndy< z%D>krY;$VSLGf^xi!HX#T|OA`g;(aYmf&u@1$OS8_5|${v1sfrQ18F5x`sK|R<61D z!#jE>O|I6z^qf?E{v2D+vGq~22N-1WSi-3td@;_0{14X=JsIV~t4Bh18CsA#CM|6$ zaa@b|$1U1@WJ6O6;-49gswjaCgT(!d6d8@4U1GPsaxwo|ePOxB%sAIG`!W1OIqDj} zvkxIxin|KaN|&TUEi=Om5Z`g5hjVio9d-SO*_ZxA56x-5Czq*9B$ZwB&0 z#*F{$+t6LsOv*~^oufB#dc}#j0~HC+UOcEch_I4$Onp>UAKOuB>q+?&Z=Xlv96eZO z)cytL-_MwOlh>;H=u*9^hrfOX?ME{`Uq1%D>#{mV2TvchpmvaB{tKO764cQ?ynXeg zpVGg8$%eW`(|Cw;fv4X8Qv08;KOHDC&;c=x6B#gq0K{s>4c-ZSuvj~SD{uBdPPu19 zS``24z%NFk^+~r6qto0Y66Tp>@8P>3KUoE0wf{$75P5>0$JUcpUPpx-{Xebi z+o3^|{|efv*PCyU`ggf+jqE zjew6Vs#i%ez$URh9!TtJS)XQ{kg`G#Jx{&l&C^dYNEv_a{e$N$_5SJCxv!7S`zG@x zlAPT$XQ62+t);h!g1G$(6&m)5IgFJkNOV%&o6Zm6Qlp< z9VQ6F65W%Zy*OY{b!v?p`Ze~#jD&u%%n@SksjuvBx>?E2Kc{(|l1B`IC267cz)H`e_GDSQ)XSQT5VGMWlYi`VL)N2^~_w(RbhR; zuF%RmS0MClq5;MK-12zNwQ=v-X89p6p9P1Bcf*m4V&6W2;MvXN-H+e7ClG-XQIsO*`^G z<;ER>*UTbnZ<7fR3=uE5gdJ+9u}Tg^$kU$TklzYZUy?x>GB^rlJ$ajSCVJKRuc=;lutZ2+(n@o*U^~!R!jLc>x!XU-}TrgTL0F@w* zZ8Qdb!AZS9wp073tKPWW>#Vl%R5Z5WA&T0+&{YPOaYtrJA>Japh(ovX!<)#a_C0** zy`GyDn6c??YW&_2QDF77%#ZaOnps?`|EKw#eRz%crb-prhiP?m1a+N#Y9rgySWB=juoC6WHGuB*t z|K_{&7jZCCTh7aKvLFTe7yEGWL z$~I?Am2uSl&s6^F{ucUBw{sW?wjRPjo>COt!kKMXh`5{D|2ky1O^N+KPoMU)FPpi` z=YxBZ3vK`zra2Ww_o{dz8z(r8`(MB80n<4A`61sF)jowhfB|4V3EvmD@pH`r3eAC> z=daLpEvy&J&Ti+z)ItT$X_56# z>Lb<+JIvX!pS{OuM$3D`L8|0?XEWY7+XU(I(f{lc*BsQ

    b)SH%D)e^ zmyOMLD2rD5>e<>O|6?-84z%1{K&7QZ=*{+)f!tTB4UY1~UC^6Nhp;_kyYR5RO)w&a z#muC}e3pfaUT#BjbbhwU6d2uqkDms@XhNmlKfSnn+OaZwUwF?ncW*!{X6(S}wLwJ+ z`&n@g1}7w1<@~VfTW7lKTg|=FU-S@|{~GLC{f0ksJO0vr>=DhMRQTpK3QB9Y2 z?e$3jM|$N5J{Q&dr!u*zFH84XU%&mf8rK>3xQ9Vb@jnw&By|ILEx1*;<4=$H$Bu~V zL3Hpl*Kal`1@-x*)}e7QA|-5Qma(TJ6)Eq-1HNf72{JiB9thv<;(*^DuI@DcX#dZW zGpivbhr?mOC;xK-j&R)#V3VkS-TtyMmjU$9#~g#mdJfqH#tviyJ-Ez1qO$k+b2~&3 z*#CU~mR*N#_Z)})CSx#-@;|@C{-2yOOmDjQ#*7#3L(^TA<%}Jj!7eNG1U@9}a>m~As<*HH zZtkNyTsD(rgEr0BAwns#Q822z15oC)5rM+J%=#kNQIUbV%19t;-jaM{50?R&nHY?f z;%s~{Y6s+YMb_g5GtD;L0sPrao^N(kLvj{_4nc&u1CWk$3~lvRMFFYNzkbzLs+a;a zq4PBTKUd3b4Ny(D0PH{X9Cs1FmvP1h`xqEkH%yU%7)Ta#fO0&P4v_=y6a?+{COBpK2Vqan6iuFR8(|%uk}tAKm}* zF!v@Ij=L-Bn|Z#e_gsUo?I*NFGh*UhsH1@o*-J)Vuq-i`ke!E~8c7vON!GpIL1%lJ zW~H>R_Y!uVagSdC7NkS=j&6 z{q+wre;Rp88&6SxQz$+C`ErL7_2K63@efIL|8vu-E_^!<9Pe+#x5I43gn!?TlKWTo z9tZx6jm#=)|7-r`JknT9;U6mh?v4-9OHK5)EWQUw2Fv2`vxGniwA>FfbITZRATW?% z=1mT(ocn`h$|o`R7$Xm};`GWrJY*&Ox}YmsFx3C^_AN15Y+!mj+>8ct3;KWbL>xf@ z4@1W34zv9kM#8C8DiHYQV^~)V7`?U8(nbBtJoqlBMBqCcIB=_VJN5VwMUr@T1CRW|V49@S(A;ad zp4gwi-A=r!>rm00*6nysLrmRl-}S^Y_*p%_+;7CM*iMS2%WLh~fd1#8W6pTX(DqkE zK^NtJHuk~?3FH~R@C-=jAP#%?S`Ru8>XX^|I=XtA{vR#jBvn@uKvOOllZsLQ^XzY> z^mYbN{fC23_Xl#PNx@-(A>NAhYTSc~Ufa%{!bDfhzuSfHo?hz}U595d)W433=d8eS zx>-A2aR2>qck=sZe|1w9JMlcuVuIQ~b%NH?&xv~!|MM$WDKNQ|{&nq0$Zy;wKASeW zE}#jU6~A6m?Z(p;KN>sA!!=)gO66CQcjThf2^#2$pI+T_9s4jo{5HPsfPdg^1#9$R=vSVqVX8RJig&M*2sCTKMK!{;_>)T?G~{#`|tC#gXWlq zk&NgES96*RMdss09?87{#db5miEk)X?47y>w8}B(TRR6QLpkB%m`qer_&(3M)=!<0(|1%YIW@fhwd}HXq z!iu_4{-+5056w5E+;#Pf=k&GUm#y1suz>*FKmB6{=df>9ONA-4@0={l4c_6k{t}r? z_5Yy%=cny>Aix~LQLKjGl~kkR&UcNck$!nzD)$VIU8yOBe;EF_pd!Zo3rhcLo3{B{ z%hPU}q*NqLYLVQH4lXgb(Eh#`;9`wc1|E&^N;37$8pbnnp}x7tmbVP;7eOI80lx*qAgy>KAv3=$TJHsm7}AK)U#kpLjgQbuIgdh;icp!@n5WEc01QPLjCHcdPZ!nWt7yUeZJodhe;MeF^qI zQ~Fm1gGelkmZH@CEuDWnj)2EwWt5FiOV3r_qkL+FP)|Z$I$QQUAHzRcG+Spv;zP2K z0zcbegWpg`(;C@b0tzhLVxW*;h9a?ZB|evuZQ4pL9YE2DjtDcqDvk%pmlaz3Vt~%k z$WF^X&<{r1cdeyu(}K-3%)g`lh5qNTudfd>ZNf`Y$-?F+9WLPh*SZNe=y==jDx;}r z+M)6uM+_%^+P`?bcmiUDBE)mXH0(cQd0}6uL;hEST9=F3v8|ug^A6FLQ~u}4u}8lR z<_Vm~I$I(E?0@HVnh!J zkkIi7K@-ab9p&aTVw{Q$IEuZl@xzQ~^tSE0+>@@gB*Y%*s1e$GWq=s-?>b>GTA*Rq zlofQ7lt<7dZz(b#3%Gx=(JjZor8uK&-#|9k4!G?NNrHk;kA5!_RPyNsT_|}*cVoNg182_HnE_k2YCYV{HaF^NSF@!Va&~OpeS zX@Rz1rGsy@psm~-sQ#aEB+D+evQRnHC&BM5jgD@IU;}M1oUr8EVLzTtKx!pO{_B7K z?*0{}BRe^UK_~>K7_`}K@A~|rZv$IM~MY-vq+Wnel7xDOeqT4RaT#Wh`mH&Ev z{|X`9t9RhXmw!|J&%d18w9Z_*kvkHcC2A|Mbl&LHhsxe|O!W;c$YunU`wY|H_&&GS zwIUNPkwgwtq9;nf*TQ!`$O82Ln1bwT-9?sX<)bJhk;<`qrbLAPA(HNvGRWIUxkjA{ z8SmLZ@HT-JJQ~pI1+?1c(t0E`vV^DUKSck}sQz_L5MBzs+Dc%tpr!bn(!Vv^m7*!P#yi89#MPCnck}Ie^&3GE`_2oInlRzM)42VZfgN%9+ZPbUo(E8Qe^yR z!kL2W{_aj7XKgc|5`SB%RT-$e*ITZtLK&l3q3@BR>;)CIf7(ItdCmbHT5#2@PzEYo zl+uP(cQz|oTw6a-bYW(+l*s~z4bK8tz=nY-#|m4iC}H6+mF-v@bM zfOa+XKbO_zTTho7A18YIbLkMc#GxEpVk@C)zyX66|LuQOLE1A$zo!4kaAKlsNWQrt zh?(+zZHDI3@Qf&04z1iSBnAe5q;Y#(GH-uRb-S5aVz2w5<2ioX2+#P6G8)`IyFpk| z=v*IMA#?$C{)=sE<%->KWEM~8=nDL3e@+EbX5tOc7%UR=?{rQhp4_gFd>u^zGHU-~ zMYksZ_2c>rc6SF@h=1}t#rPL6wAgWC0*}pld^j+fg9zZ&BXAHy{|C?EA@%-+w-@>1 zVPnIZCz||w_Y-G4P#|8GVzfzMXzT9+)Y*Wc#RFG{AW#omz2D-vL}Z>em`xk61Q4~; z@xF~OZJ^L@R+^G8Bvjd^Hh}#<-`V9DIN&9X`3eQT`t*3np|Ny)5i%Oq0pR{AP2A8( zz>!6^cnpD6QP}&igX%wA_`}MZ*|vDrfHP~ybtw$+H+UmKIga#2+{#&Ts1dQ~#jRs< zMv~uC{XZ1{V{Rh3*J-CpeWmU{+>c#B+56jygqEgrv79r!R%iv+PiFq~O5a0%?NP@w zGYRIu3YN+MHl+N;)N?uwF#QG7t+E}GEH=G*q&?e7nH24-YY4d|yha8KxS_RTr0~@E zXLSGl-J91}^c(Pc!H>u5GzPs4#{uFW=Q@sks2olgKDDl*_P;3n(~bu5D%S*B$tVBM z@=zyTFJq+)S``K%SXT%F+&=|}l3s!px_hiaH7#9x3@}U_W0F8lATUG!vxSg>`xhp; 
z4n=sbq5MBzA307RfgDpezPU=);;(BLQG!3tX#QQ}|0yw>utd_L#f8cVbay$S$`QCg z`p5SsGq*7+0hU?{+5T%Y7Z~;&La3T zumP?~wx~_PGQkK)Z?u*n(?o&0M{?Vrzcg{FiN7WaD#PH1YD9J|0Igdd#Ze%rY}69X z^rrd`A6zqz$jx=IX3Q+J4^4&kL}jayJ<=W;|MPO(Km8_cBK9BpnkW0wfbu`nR1n+5 zu;3SqqiM}#HeM!za4yNNYAv<$*W|ypqi$7WqJ#M_AJo4#;@Su<)}qL({Wr3MXw2Dk zQVULg@gyo+)Xb*54eZB#%?fiHJwcH#p~}*?nth$u=bKOYpXohi{#k{lVU-ymsHK-! zyE(YtB;+$l{aI;AAlTyl;jOs1M^@LLQ*IvtV;iY^pdbRRaSi{2e`apg9aHH*Xz-73 zzVXfE<}w>=sK@<_zh-fLU6n~LQ>gPi3|!x36dJJ?@z0p)mGM{iE&x$jF>`ePYdlul zf1{o{BPV=@!csm{JQgnsf7B|(O>{W5I#zH-;jg6)&KTF)*Fq;v!vZrT)`TRw8PKf& zWOj*#IRvm8l&u`06fcu5U~%<8QZRPL^i+gscspR`NDE~9| zANJpdI2MfZKW~y*qCyhH6#ls}26?R}7S|$Ft^#qb7g+w=|MR5po23Nlo#`Z}@?W@r z;Wz?XNsj&v-0CSSew-`hF-w{Y^fyo!48{g*Vji_CY*^%WE5|}vZ*(CS4C7LJvQUp; zIyg8q0CvCao@NF;9Y!j)tqopGg1BHbJ>&jqb!C>J0mL~RjwZ-??CB+g3Q%hDUl!{8 zdwjDC8okv0KkEF~=U^no$NZ^dOTiBFU%3B8^*>LrJ4+*D+oC@0U(DvDKB%y}TRw{+|mMHj}PZxghK6^~L0q zQ(TWa|85(;>NE}VOnITJh;H2f!u``gVeiGx+G+A0GGTu{)f5x=OJxyLg@Uvb-qn;@ zY>{=EQwO0Mj0Ah6W5>rLF~yE40Rf?+@?Wd-8vHvrz{_=bg9rrspMO0%FQiV&v5P>i zc9Ew4@bRbpi~dZBHG_`$XY%rz(~L455KR!^c^>`Ggq-NUiT)q;{`=eK+F+lbjv%U2 zqkrk(FKly|*(1w4ty0 zpro8oZ9x6&;6a5y-dzn94qAi?`wwydOMhR#mI~1h&nc>lC_;Twvww>IXU+asv8a@H z26`#|%O{+NU&LA9Du?(7&ulUIH0uAUnZEwUVnPXdG%OKwOjP&(H1kN5#&&rxQ<2dX z_o!py$rNTYT~cbdKFwj*cm7l;B>#G0AJ7<}oAwCg*#G>Kp`R1yFKT#WezR!sWht?NU?KbrloRX1#F@D*zEU;R4}Q4RGr z;oBKY<=@f&yyo%yVe|+ch-jnTn@;(k*F5nPVB5(iEG?q5!uR5?GMVtivSh2N^zk5? zBExk>4zdB6nAVH&;}lnZN)QVzgZ!zf!@QW5eDi#CkO>0HtgRtco&Qo;OZ9X&+FbAC(b<_8saW=AtaRlIql`! zF&M-75C3$mb|zrbVki0FjUP6Rn|ymb;{cXlpor2a?~S6`&0;PtND+)grriIf#@4ek zUB-)TV>4|O*W0207js+BG!W~`bGeAlGGNFQ{m&<6I5?I!O0e4jNx>nt|0mB%AsZ+% zG6wI#27ryJhBH&+Dg2Z6&&>q$o0bG7t<-kZ;uZ$L28B%Gja)HJ^o*R8#J;ZJnzzO%cm6GzJH+ z1=%e*x#q}cUEMFbXl|n8j?d&+Hpz>qgww^ifAKBL+t~50is0H#6@k0P|7>r(p-D7? 
z+`JGw*2!@s%`h-ktieB7SS^&28>$NV?n6Oiq;9RBq=Hezd}TGb6-5n*3MDZtrbB zx|Y~T_>ew|e|GoKsQoWZ{%iR07|eeaVym&5|5=Z&q%q>_-^_Wfo{7G7<~x(R`i@ee zn@hF$aIe2XF9(ijJq@EM^GT+vb5-v5e> zrQ1Z5$bn}lA;31|ck%3ILhb)a^=NF@)rG0%1Wj`A7aCYb_3$cNSh{+yp;e8b=6|UE z=ZAB#|M06{+-bRlhW{~}|1Ew9qx=u%ziKZVqW(pF2#vcx!~9pqF=Xrlhvbb*4g~bk z0*~Ky3cJ@C=@Qe1t}dr$J|X#Tx$*kybTKcMgGT7bUg&>*nTY!ri+FVb1q^M^%6c(K zH2CME3WY|%{jbULW{wus|6E{c0WADDa@|wtRR1C6e;(ccx)Yw$uFHm77c~AKyi+vp z#r=!_>3?pFD+0NY93!{%D{(;oGxdG!IOH5V&N1>wQcKG&q|`3`Mel+W0l`KFu%s!& zF}e*+g?a-c>CftexADb%;2{4KS;tK?7B&j<6!^`QY#YCB{I6ewlX!z+3qfN4GYx&C zfcsxp>Sl)AMJ@He{ZnfH;RRBa_rLP>gW74!asO)+|Ma8DQ_iB|&}#f+@jGRW zP~~Dy8zZplA>idx`Bp9pr-W?wl?v0ad@T0fAl9{(|}0a1n8|1(Y8%%Q_af(8=mrLqnF^Z!)3KNTRYfX;~1 zWorJ%_bB|g|8+t%7A7yP?xE#V{^#Vo-%~T*;w1-gtC+>zN!9g4YAbgTlZKUonc6`0|7?8)#nzsF?XuYcfpk#spYEJY@&c<0L5%ySJYOWF z&Q_s+$8>d9RWwNcx@23UK>kPL{~0@@IyIsD;4~|blxC{`5dF_HcE8fb{nJnQXKgGY z&Nz)%t9t+9%`@?QCEgcWf5x^GN=>9H{OAkC2S+U%2z=SR|3Aq^?pr_CL?U{8vnXnXeu6v>&$twu=b(!6)j2%h^(M=M(~y z=pZ$}s>VO=YaiZH_a91fZg{2a&(+``YX5zVrvLftU%!d^F5Hb!uKdhsk*ZLQe@-kp zfc9`!=s3qYXR-W(S^T9Ky8s_dxgTC(H>=t!p$=WWNB}v^2OIQ}nv0ND&d$lvw=Av) z)fwb}#uYnUE%U=mzzP$%mrY4JN0a|bTctG?@elU@Jh^jjj=P5+4gJrpA?89tsqQ}v z*-QC zvbMznD{KcmewW4NPogv_4)%*t_MymU0fh{vM+)j+f|(T-y5<~2JJ||5fAl1%2o?4p zW=)br2DImKrx*7i)_X7q9ILz{UFuN$L;Uj>`hPxaSZYGucNy?~LiaE3F`!y>I{B|( zzWRiJGB?Gx=F@6gA#u_dJW~pluQrI0ecI&HeC0OJ6x7dyaPIvYV;1k1g5+diA7KCPU4uSgD z>&H5)96HK`*ob9?H1Hiu(6XBur<6D0OZ<2LRL%c5bmdpr*i!o!Nv#GjihnfwUxSg> zbQ=j75AtWCSrulhd)$*^>FWGfXhNs@s z1miJR7d_)&d1mD^fa?GGX_^lIKmSi|7>p!6N@w!b{LiXg*#8+#>FQWxcl9%lj6iC;+=2gT9-X6y=OQAz)LaMc!U`J6wVUNVD5udeNzVn>^?Gtb%hu00sv#0%=l}e1 zY7U2h4E6r|tARQdYMJ@nav;?E zU!EH~X?Do}sQV9H{*oHO!jp>$JKVo;U3p``MWtP}XCA&(da8ETc)4moHd_=}X5g7C zQ-I*>xnSvz_~%PniGg0Cmm^I{axIKe|61f9tgB^-&k;f}a+J!y|9)=kI28lqvBMv9 z+>_+oAa%41^IxO-*FXp6zviFqYu6R%Q~2jD3*CJ|SCsnz`JZnWee(-~cQyZm`|qz; z0rnpPUCsX#twoi#MBSjdVN#xC=hOwo4js|^^2<#z*!KE%FP(fkfWa4;KMPOqsjdK4 zB;9P*Y%#en0M$43{)MDhvwy)M30)qTd*|{Etcz^I(b)9=^FOPG+IGaCo!@Z-9FZh!TMq=g2V?{4t?nzHn4#UhZ@M4|q*)eST0nE$#|K1TrxD*wJG z+B}&SM+43N#X>dyxsY%1&NbhRzq%4Yz|aERzj&M}XYq7#|01Z?9Qgn3pI#vrx1FbT zs`1Z=8Z!QMjMqyGoMB8`afuUs4V%C1U#4V9LRXk@ z@okyB)EoUj8vpai$6*5ZFOr!2918y&k|F7Y1au|mt6z5MV^GSy*LQ@*1Z@{S^pZ5t zpXmUyGqaazn;FK%;WIHv7pAS5dPqn$icj z#6LJai2moXGd8-8>pt+!Vl1Sdg~eRTD#tk@I{YEdMgG$tZ|PHW2Zt1RD@Oij0(B1p zeLe)R1-=EyyENeb>5QNBcvG<}%cs`s4gQFze^L8ij}pv@Y(kQ$?mtBS=imJ=s{auE zKj}G~e8VXqt-*#!mZ@uLm!Qc2U$m{9REeYMW!S=jv+62_rvE4SyL&%x8hQ1=jD6rB z=HDCR2uwX{iz)tx!aw!VXR8wK?!ynAnJO+LB(xHyWuiFJ&yS6boZYl2o}Ztt3F=>8 z{zQH9l>hmgX&kPJizpW@wPatjSvDgQl<5CK|8xE-Ra1!>9*J@Pl+wTaw`%sk_OF2r zK<$4Wu?LJgF$@Hf(!m>H^$cAB0&y!ne5q9a3;CZn$(bW{ztR0E{3AaBJ-=i!tNz{p z8g|28m(QM%IT(|6=3I^3&ndz18}46Viqk)ze3dg&zS$}q)l6GjQRH!S9B%IL(`M`1GN~g%U4wq1MS@h zr%>fOzmCVw7pni6!aob&e6Z2c@1*#j>;(j-)yItHzdk>k_8Ij4@!MoAyU@f8N`{VZ zDRSm(@Xxj}xrYq=Xn<+@alB|e2-96sGeBla5~krb3}V#(+}xUk-Z<9(n?M7C3M|7^PDYOy$Xnc<`S(bt8WSX7`P%5uzWGdEMc22T;C1dxc@%<^sg&xMy`Ck zkSu@k!k}{N zomFD}9&uAZK(Rmvh8~%;6jcZewd2uGh5gTnf0#*Gg%xISVD^#3m*cVjS+jr9b4;HE zLXPuWbhFj4Zy&+o>DuxZS0(4Pl-LB=!JtlWAQnFzCpG(D#pA|}W$Q1Qku0}E|Fa2p z&{R)*4*gXA3-OQMWjY}LbJ&8j!8(9l#DeABN5edsMl7K6@4NjKys7x2SDTXs*5M`B zCUj{&aFK3-j+hq*4rY)c$96{_9K821t5S&gNtOi|T(~5oQVL zuO2Pv*D}(DLc~8W{0xLFa7F&->ryuE6eBp0%n*sffOyAVxWfe0#Dvb*iu zOoB=&<}Pg&@}H=GnXO;_C+>fp#~^%CpL+ik{Xc)T=O9)=d-M*6n6EX{v{jT|5ES2TYgRm z66adVvHu78pBqOO5a}Nt&dcIhxFP;g^FN8v{OH-3f6?aq;wv-EWfYrEFJ>IhLjR8m z{>i>bAY)kKshEFP?|-epQ%?7_wJr8PYsvk!V4_th0#z^& z1WBOEmm*H4kv$yWfp!CH^zy1`fg1gb;(xAT|0yiKs2xb@UzmUYV;vJtEbGXZ5R%dU 
zpOw)~`8>m7zR-Zynv0K_P=O!fpe6_~cebXf(EnrMqt3tM{;8(_r`?RK1m42$sF8KZa#jPD935^FWi5(%)q$f<=wkM-T(aSk);RE)Bfdu zyw+ovynD1E!Z1T$0I@8G0dKjpsb2X1?f;?MR(2u()10qorQv_5{jW}r0i#S8&^@lj zV36f@7Ps6}0#HBZPjzrej%8_?O1S*^)4t6uW5tMnM*TlqVsQWTu$DM{ub-O#IW$4z z|H1vQGqdo&;RJ@JlX{Zs(DGy$2&!#5J6jDbjG*`O?%xnXQ2VD&J!`b=r=tEv?VpbF zKgkjhzF}~Lc)_C{fJb6NK@^No|9l)7Y#BM~G*U~9)WS#n_mBAN;|J>3hYueJp1*zh^ydeEs+9=MP>! zAAIrr-m6EwFYZ5j^8EhbgTEd>?Hzh@>*>Qge?RVdGI-b=%K7F1MFlZY*nQJ#eOR_t!0Tb#;68>^WcU{blXm&7D7N zxUzF&%jpe2or!O)UGdY=h39L6e%zLJ>{M#qiA6^bf37?%{ifFC+Y+DJFYRiI`gWLI zRoL&zU-rXRzsfJG3W|1a&rZnRygsvNS0?pOMx0_#OnzZfR+c_SjlHen+uG z=~j=t5|1rgC+2K(+OjP(wI+4bmbGbHHf-9QhQHD`uU)%2Gb1wte{IU#l##h{Q|87E znd#}7>(*xGr?1|SzHwdZ`qb5%K2NR+ODUe8mOFoSo_u}w(l2sWr<3vNS&1p+iZ95d z^esur8A+=%k`gmkC2dShOwWqm^hMmJgt#pUOHyNEa$^@|#^OKoHq2dCI7eO>@mb02 z`MHbbs~0XzS~O?#+&QZ|-M2ob-~T*x(_+cm`5{~81!T;dkv`8gC45#+L~vq+WOKM@ zMwr_snM-PzQ)-y#i-7gp{MQt@uiNUBUgE#9Xl8VwXMB-se1YHMV&AA@KUs++tZ@3G zV$XTS(#Xx;5gEQSxB2_!&GgF+^34wP%$82w66BmIaoZ+!TOH)I(aURw;}kckW4hEX zZHnV8sc4!4*)y&4a2$#fL z?PdLRRLFW&(Le@QjIZq#G@tki$Jb&nSzT`MljEqR@ZKC)1x_x|p?ZGI@cd>x_zzR=q>W zBI1z>-G3{M)}i;HCl}GXH{4ET1F-A_?WW|HQ{(JcEOZ zDqo_^=TRE)xl!JP%oN)a6`7#c z(!I!i`JE9i>mDkFbFL3Y*3VosaoMF*p2?{BYW%O(@crtS4f|=D@e$4V+Qql05q$nf zzMaud8cqV$zvio?Xr`rD4rERd8_H~_JWVDj@zE`Iv+^zty7u|{3}@lzD~g+YZ26y5 z`0gmtxmiY#yQasbRxJ1~zmqE8re9dMa-{32@5$AFng+D0%eQfS)R#F_^VOks6<+9rB3N%I2?Z*Nm^#o?88@N$Scj<)TxpvIMGp0LRw? zC(?TV2Yfbwm)M#3pYnlQC*7+KC8^5?psH6RK4y=2tFoF47*ITJ%IZf|&iOiEM<04h zu#N2al=w)2Pq18tSHIS-PvYacm2jfKDVmp~DId6z`S*^o(@wGeJhN^kZ`Y{!xZ$~t zBV|7SpcWt3K6(71slw`i%;(=tdxC%Gn9!| zf8S90o?5MI{hSgX=Nz8Dr#eOp%~xviQBA(lP9JB`0nJw@*S#A{l@I*3QT~h|Y38AL zkq(VZl2haxYpAh}30u|Rv*#%rO=^N6%pQ?!cdy~0Xqto?U)yPy*^e<3Y5VtSSU!N| zOR;q5m9*75I|4z+(WU&Yc-Z}VKgS}#O71fVfrnfZ9vwCDnw&sUs}F2XY$1wPf|BR{QK z-%DpB`PQ*C2@rf@_zqJYz66Qj^N%)?AU}S1|IP=7vpO&dqaL3XHW`PCa|K4D;PcyI zRO+;*xz|Yi1J&lMTJtkc?#xP$TYMhFclSc8#j&8;HRpaf*F!l(VipI>C%0kwz}&%7 zhX}M`n9duI=Bv=DpwgJHsPd&9-{@P7o5Tmj{38pk#=}`0A9;N16h9*!6qNYro`Drf zQA53rmQ{z)mqnS+Eqqo#%IaY8{Tbt^@e!(gY0Mh0$fpclbC=QbfnWuGgi;BuQ86WM ziMo6sth%JVS3N$9jpfbCO z8~Zvih^7qua{6fb}xJS@iDc%37Ej2^8d(s^LQxt|Ns9ri{}QT zr5XkyqMcUDP)enAN+rgwvW%so97mG0Q=QU2_OVn-b<&FDv?!ENw(s+r8J+j%_xrEg zxt-gM%Up9^&)4(uc-$YGoA=Rn(0h3WaXxZgfDr&wy| zOXi=5fN6$j`A7ievkm&k*q{%x$;O8NzvWxm%sKrq1c>1S-p8*I738PHM?r$@Tb+b7 zF?`^GvMqVdg85`TU@6StSyD3qceV-t$UqzKS#) zAP^97dJLcjW|L@Q_`u7daDBA%d+OuPCQVjV5-i^eCgJyJ^v?=h)bsXxY`<^{zI`S( zAD&|J=7w%N-CF}(3kNGO7Ms^;I z03Qa#^N+qmr);vSg5#JwywJQ5cglPMUh|Z-e;?;FDdh5y9|^i6^Vfpa#@r zLqZznAIyC9AAG>#%CD3@3RPjAA~Ih4OZLC{=d&VwKIPj_lJoyZd`!^81!#jh>s++v z%;Q7?58wk!#q-aTJ#1LMVdCSx)2Traw8P40f1A%?`4%%K-7zu#ZnpxxQV*7IW6q>N zF?>L1MTCu0Ih)4$T|TSCb?3XITw7_-aRJA7RRZ4*dzN+}P^c&>U%TkaRCM-CR zhKY|b|Kx-!A;+W7A##+~dIIJj1417Vzx$_UF=~X<=kNa|$hsj~zOA_x>WZhIhyI4< zW7zU-)fI)UE(k&S+N&Eo<~U|$;{iVT-~1!1g_+Oaz`XHXz^i?8FLv~6Rda(WA3h${F9p3k1@iC`y*m9O5>$b`3QL^^*d{OyqhU1)B$V5%r8pN$zKIYOr9Kk<=FMvgHqNbwOTYkmLF z_&c(X0$=Bkq4&W_$?`NXpJVWC0}p+)Ca403q-n=yZh2Gd0neAhe&LvTQr}zO2rdR+ z!p!IHhp_oaWWKtvuI|`VQz?cPik8w^(feF(VIj0$Liq057HfFRON|(v7`UP?|K?uAALUON zDx&h)EpIc@iGr_?hN3f6MCS9xG0ud-bXw2(!SRF#2U}qNxv;*W@Xl-rd{Sh-YFOHl zg%j)@`ttf7na6Mi^nFw~8?h%Rvyw&oIeAXm|9g`lWs5z;QM}6rUiFn=luw1Pe>M*k9eQWHCBe<_qh-oyw5`U+I>&b z8haqcWJxbH(Y}58ys$`OK7X2w=wkRbn6GwQzP%g0+r5ez;&D(_Rb$OaAOS1`2j4qekZmx(aO`a;Vw{6zhA_3xM(m4mU!F&Z!A)O9iToJbaSwC9QhH6;Cl9#tgqNA5XCK>k4LVeEg*9GT^&du>V=xe9ht|69PjUs~N<6F?{Lz zB`BYryo}gl^SR!GavV=2J{EfY7ru+!K%GBN1F>&jJs^vgoLl$j3!I9f$b7YLmyC%n z5(of&HaEiE_W8>O)3?xj*HKY?l5}XAdDxQ@4~`^!_fWoQ&Ot~d1AK%dE+07u^UwI+ 
zX`=EGFrWXLn~I)`;M@PR|H0-TC|^_94%;5l{A2xVBoHrHz(=al0wm~e%$Z&|0y+cU zEMjD2yymFo8WpYe)gMQ9XUZR&>-V_m45uFUKiK>uBM8eh)q$sgxO}9s{4zO1&CHWq zyC}$|zE@;E4>@;43W?2EM`w_YW}se)`FwQfLmV`j{Wt#re4ucwlR(ndkjy_1AvR23 zdu%XCcNRJi`ycS{Rp7i*arN*Z|5fe1Bd+ev{D>t(tyPZ=C%tu868K~!dPD=cI=)4; z|GD?z)7#xK=Y?qs8x-E`VM+Es`+k^D%|FA8-i=T@Ald%_d@{}i-klx&V)&%Ed_;5R z#7JCC0|7A?%xF!a%|qs^SobC>o;Dn$u8ErLna~Rs&p+S(9u2|s z1lar&{@hB&NDo;_<{uq}kGKvDzBDURy#Fa)kXhf+StSMKBP$900+2SeO(P%_Flx8; zm3oZT08WsQYqTVPVRRYN~t$6CZ6#32GYi#(X<#Fd&dIJmotRaA@JtuBd7kIJud4SsnQ1B9m!E zK|6o^db(N01fGROMTzTQ0CTwgOipRWk(uGo_!avw@p0dedepxg1}bx*X#VM{?(FTH ze7RwpGOe+HV)?TLq)2#JIP4xO$d^Lc{s-otAE~Bx_8MaIIl#9`0=ByX0`XA--^Snr zx9v%~3ShqaT2QT^W*TA459o+jFf@f0Z8sc2tH_ST%aua&ypWOFrO!{hE24m4|WRLBCPI(=l{@S_(&pWg1oJ4%Y%14eZk0VQ) zVu0>W$g7h8KKc3CWEGh#9}$hk8%f&oXynJfn%WE_$3n*eM6c+oB{^dMxZK^Ct*6?q zE}yI%)BkL+onv@a(>6kg!6*Oz^z=+Hn9q+(x?_q@XikHNR+?oF$()Xtqf*75Ywk`!wG%zeL#7%JHQXXcj2X_)zH z8aH}^s3|dD&1@3G2f%!;#wHj~&fNht9#Q!2PRxCo5NQw}?~CCBqVkbj&l*w`ykdZH z*BPIwfW{`IIyxI*6;CBZ@vA{3SOQB$Fkf{G&rf`*Ioh}KpcZm80`U>xOV%Svn~S@3 zZ_nM|%4eLcU-aiJZur}J2;Y5uwH}R;9>li*d=kV*3_j4)><^qfis^`+(bbV z^ZCfvxfgvAf8Drf{z>_3UY$Hqy#JAy&*>3GYy!zT8os?CKFX@W;IO?b#n?W|F)N+} z^9N;X9-{Bi=S55|nn)lA7~qu`EO=BuW7tb7FKAAk@1b8(IT@BPogkfM`B-ZioLeCtcqmA*(gwf`aU>7J_7rT>rk zh$HYY_&|M2qI8Dwl43`AN7SU~&H?k49R1Zj%Z`b~c=~bjG?T^dy{VUj8|Bg6{S71P zUn%*q{k>&-IH3;Tdtm>Q`Ctt8CM13MY9&!c^A83e`1!3n7`a~(?|&{C;iaT_Q|2q4 z{7PhpvznTJ?ECAp4b+hf1|Jabf6A&S$pD{p+G>TBuVtGST3KgxHIk4_j8$MYVtXG) zU&jyOzOGkNn4{Sn^^JRgY_hQUl@Q=dZ5oq<<2f>y~$~4w9cFbaxRP*KR z>|RyE!h|R1IZ<^^?W?Y%Jy&=>>L2u*dpiznF#R7U_MPW{}B95 zxXTZwaajF>udk3Z(x80AW>f8Rw~Pfdw+qVxIoaL?vm(HJ)hN=xR)5N4;d%Fui}pV% z0%>zQM45tb_E`qmxC=7zoDw~NhJb5dabvCYTe zOY72)!GUkABP)MyMk<0!1f; z!M8Enmq*ANN+|gvyEcKQFGD z$fCukvQMnS%GZ`pZW$+Mwyg9Vo=BHlAfA7Um!AYOk!{$U+5?>W8Tod5!5b}tPXc^< z;!tsuv?zRs?SGKN<(>bPk4W}E>xTp`NknvJIU#^41#WWHBkqA+NU#N|?;3GOf?3(I zzrKCBMJ7xNws$vdaS1Gphxe%-)9cr;kM7;rPGQ*rm4D^(?KcV2jMii2YybL#`MeN= zZ@+&S>+|vLIQ&1o8xHsB3SuW?vP?FL-~&q4aaWT0+VAEUVWtb|+-xR}2j(k$ z(V)UQ`|tQ3sDYv_Zi>F=w(@WS`jnX^qOE}yMUzUKw{ z6wEyvEne87P87D00u`!4$?u5dl62CXsk$QZ*A^qPd;7}+6UaVVEc4aZ#*}OEJn{Yq z^e^I~R5T)O3-AG#LwFqqj)%=ZvqkW2n17ZmKT(Z%7uM{z3XCUkWZ;Yd=Bpudk^+Sq z&I6&6qY8Vq=U_g^*~i4Rj*UuznVB-HXwdx;krz~FSZ+I-l36FSe=_FF$16s1`I7Qk z+k8tE*bDOsYzj+MK0C)Qd?uv39wjV*K=PKy9ghgw(AT|1$gd(8`@+XEczf|lBT!?W zoLf1Gr{5CKKX2yNsH&r;VNQCM05IawiA+I() z+7ar}1uh3wbRe3*e&?1!um19q@b&E_8RYds5O2`LSJ9{Lw6d-iD4O?=#Nqni>&}}Q zHq`6Eag*p@o}eF<&MrX4&TiuU&!XiuZt^rRU+q|~M^>O;-ZNNGrRQCJG7myZ z3kILe#)* zJd0!eKue;3buU1Mu>Zl#=ZB}24)U!+V8@S@k6`9=AEb4bs}zrr1il3G59nVYV;}_c z&xP6j1GNN1pjbsip|}+9atICdrd<|D!6yo(-eNT67)hg!^sGMJx3L|@uTt^6T4e=J zIjDpI9ywIs@~c00E9lOT`RP{ylZ?&34+`gA3gd)A`E03&9oPrek8r9uCy4{)v~lT2Yk>%y@I6riFH zLFWn?LHS7RT^vD*VP~9Isr2=&dc>p<*;g=Xl$DVp@|~nh;SpzW>{bejhYJ*Ti{Jxe zljw5^gM;mVq_U8oP*FM`^sjl*ZH98w?W4MHGi<_?^6j?^p?nS7{}?Yl#^u!Ec}FRw z#A7NYiL$C_hbVmKgP5h2pqMO}W~L3wg4q9aRQ^mCyuBcN2k>pN{*_z@79-vjuyJ_c z06uVSdfpnCff7Kf`BvI70EaO0bXBC`RcVE!<{vRXaJeQ~RlZ}QjyHVCUB<*t{}mt$ zu55T{0^bNihwxr>j@bXR`RS%EIAZj`-45j=57LReZEqPuiteK5%43ea$9(_0j@q~X z?G(CCh3{5dvQ80seY1%NCMP)|8HY*ev3UMbkBkTGBD-Na+|rWrHPQaZwU>X&5z)gVC5s9p4#o|-~)Vn^GbbGo8jt=%|9ES-e$DfWoX;=PuzdK zp#JMx&*7nu&BKRU){DzWH2SrePCGx8TLm3dGcr~L=OyT0Sos>MBS=A42bAtS=LKZ+ zV)%d&d^6dsZI54haKLcR$*LO)0-1GCF>A8os(S^jakX^#Jowdx8Y2^j zYso0Y^RQXtSi*+B^(w@|8lrbomLU-|7CRd5jC2`|MT^Qn^U+iLz(MI5{2(Z!T%#r6?Z%;%po0^s=ES(DPx zO3r+GF9PMw+Wj>eZy&P&)lxu*Gh;#d$nbvU_P$~|BoRNv1?W7OH_ZUj)|&Z7eh(-?$2ALa`~WtF>!DJO0?t(YZ(bx`I^}O^DP+U5F(?vrb!k- z`3Pn{-}p%Ne5(_V4*MT${<$!lbJ&~%G;s0$XWwZ39n9@hdq``31_qz3gRGtWpK%0{ 
z|0hN#KAnVa?i*g$ty{Ss@F(_}^Y;I;oD|GNR!(o{ui$Y6nwyY7I;|LBJ=rP?<#`8?{Y}hSg_PrILvK%eRu!nzPC!iHyf{6fR)d} z{PR?6fgG(mNlwcR&#nlX#?_v`6<)4dtpAyRR%YPXw#aBj{0BO#p=lFS4(1;nk^U99 zA&*rDq%`pV0DQ^5k2vGvAT1jw*zsTZ?&*a&_jvH47QqJ;3L8|=`}==Q{>WTjx!)Q= z`G{^zR_w4}Ed}>g*PaFNZSnj=B9}gDbMw|_f%#lNfrwlWnhOKcYLdOR9JdR3;_?w} z{xMNT3z252Uw&|%3}4;lyw{`>Ex+$M!#6qep$--qk=~yYBB($5qC11)Z{|vHM@_|EcJlxoMD{!NK5@ z3=~k`5>MUWz_Yr;(A$g6KRzcReD}vk4YcmwBHu14Mdk|lFazskpcy2FpfDpE{6F{N zTBY#;Q~RG+ymT}FWEWJle_w_nx|<;0|3LZdEX#no`-aqKc;&&&CYpa5-tq`Mk^Tjh zm9QcGR)@itYLd)*-ydRphIAhcGLIzifjMVk&Xt?a0!_>X&ofv!jT=y(>k!#f?8=4m zSqXeH`zM|U{vQM{&44({VddZcA72}<3KpJ??SH`kGqgFw+&`I%%6DzaFaxDu1IOFC zR-}I&I*M0-Lj(AK@?eISq6mG@N3lWwVkjtD<$p_(rXcRKg8W)2O=hRa&yABhq+xA~ z`G5ZY#2l@TtE^t|7G9ZC{vYlWA;YF%Bm$O~dVlpNc|}6`+WZYtnS1R$xmft_&wac5 z-rkDESz+ZP$W|Yfi}kOD4VMC&REbCoUy7Z?{LhKydd+IaI>?w9a8ib+3|EDT>bit6W0q9@ZxZMitD%a--g?}!*j+L+d8-oM*66jwd z|MRv34lZ=$*HS48-_82E{k`X5CB%B!`j|_}1$`X+&o{3^`KUlQ$qwoO{%i=uy8aTBnYAAM2VdfD6x3nG~0ytvE> zPHu@h-m`aV-~<5Qp0#ZdBv6D$PD%0xt1$nw8(9@HFk<+m1se&To|E?Sh4NWZ`5F@& zz>{@i`oF-GG39?A-aPKaX`l&FJp%L+jhFTz#KX5quyQ_uK!I zIpp>Jp)3)J{LeEH(-*art_AC%&dQy#od}kc#;S;FwC?ugNQUtoJ8B#}gd#t*Zli<& zd{XRxj@i$~;ZSWR?0;_Wk&@z~fQR#(b3ro{`G3Yo&3nh2)xW9>HjVWzW(H5eC!-Xl zd8ueoK^&(C+yB_a!%}?xJQj1o!6(Cg`i?AY{=xjuM_f_s`7p`+bJag6Lro6K*D(JN zQk{*g0KRR)Bh9$D>xaBC z9bRjof63NY)gWH&0ZUaru4CYx&f>yX#j;u93ZDi#S3hc9@Y!dntX;2TqZ9ff3-~)td?!Duq>d9Ur|MTB_)jlsntFOyg z!$c&8Pp;mjWZ=ntF7ZEa%R=QM{R{c}<*N`W_^JI*psZ6_>!b+|N#K)?H#j+_i_Wfj z#Nsw&7&=^Bvweq@XNERPUQ@*+HmRAfEDi{JtXC&*;kebMv#QEPhTymexmW1Jwnqb~ z6%*PpCL?MdJ+6ib@PQkfb+PM#^4UZ8=-_`goJYL6+pyOHR5EZ)QbHpl{|~nR zQKl|VmQ|0isZ2EC7>eLa;Qz@2`=(Mn@x&6c{BQp=2A_l>MWlZPuFA_c!ajq&pnutx z`4$;Z5WzPo(!XYhm^!(u885mx_;Kke85u3-y|!nTNXfu2Xr88lmufov_sXl(s$$nk z*=ZJj#qI}K`ZCj$eM%reNiXjZ7?$*+{s8F%3SPxy09d@tMFbfN;yGLiZNbY#O?dHm zX;8=_G^P`Ho1SVE=D`<=C z|B0E5FLFMG<5i32A5r*@5WvSt<(e&bM?#VRM}IG+sydi_M90@2xnJ1(_a>sQV;(=C zEBOFfF~YnG1b?Pp*MsO*{dDdERd5RhllwpGI!2ELA&O;TwY-*e zB0DZyk8~8jMkr9QIhlOa!bT=MaQOv)izS=To5gxY^0Wl9()5zWY_Gf-aBp^APIAM| z;99{%gC=<-)1{>wnG*QYG47AdQ}b4c<{zwlmX(EC#q$sDwiQPzNeY4gdHK!UkB_LD z$Ysj^?CH84n4UUR5qzN6Plc0c9vAPrFw?-!6iU;M!u*4n=05`kH91=Lh&avBuL`>Z z^)dg?yLy5Q6@zc{lol7i!rI8OJo5|H>VXY@U6b#mGZ2jPzoCdg`+kBg^V&gFX&tJ&h<>z)ror;c`4w|G9)+D!{jq*5br)B)DdQ_D%8yHR^hG$y7OG+w#_067QbM-3SxGDS!~H z-58$blAM-j(J|rC({NB!KD&C_Qef({f;K&&aFVI%_&{OV3OTT5sXFy2GC5E_^77h` z@t1O$X&{JHjca9GQq32^2QtR)Gn^CU?;PNyDjNG1T#jVcN|UDYUlaQX^gaBKY$VQq z5!e{H78!W{U;j^4g5FtkZ5%WlrYh;$X*>>YNG03RMfopq)gKw#h*7t4O_mp{S^?uF zk40)Jafob21rB6NNBdGiB2J2gm5&ILjh&Hs$W7a~_cTU&{U5=jINQt)-XIk?=Pb*$ zloq%!FWL;L+kA$~>1OJT$?gdlgua6R`O7X7-vnyD)y!+DjJD+ZG!|Di=KzkKczAwf z{B6ck48C;q=lDBnX3K`wqRw%>{h7)%(f;R8%_r8OExB>C>syAtgx=4ywy|*~BFKM% z|5^*P6Zruddr9Mq2#V-;{_D*gb|c<^fJEhMmM3rs)4xtzz6BuZta5W) z>swL&Yn?_wYf)OFjI@=$ZwKc8fp+YG0VYptNiz7Kl@Kvzl>b3?ILJLy2i`hh&h$LY zaIX!|?@XdTm?3p7m!V=Lmkyt^(l}Upu>BA7$C3b98Y^&9xDv>LXsn5%`Dal`$u5G0}GIP3W;? 
zg*4b^ZC}{qTe>jPVeOJ#NB!4@-alsjy}2bVUl~zEJE9JZAzH-+Jao!6>=^ zFLnf1M$<91m3|bzPRRerO29sOnJ4rFB|TZB?Z9LZF!;8<(~@ErL)ibAFTLAY@=^Ip zeoJoqvU)FnLtpOR8P!kD(`-Uo^488i=&{tuIYVe~pu$O@nNH=u&N%^x2q*SGXY4=0 zY0x&)#L8zy{^y;atGD<{dBW@X7v#VEoO872u=03_?xbSkyQKg501~7cKp^dI^v|kH zBj}lcc5#XSd5flW{?qe~yixUw&*rZ{n_gbKbHY>8QfgBwgX89vj$ixcd00+~;7G*e zu}Rqlh8TR{RHR935;cE@R5qW1%@nNe#w!RR9K67VmEeB{{p-7&12aMQ-id`R8G-oc zXO`*D9xP12KWBlRy z#@PG=`LDvc`#-g08OY`;iOOdOHy$)|Mz(@t11QOuT)YHXbs(&K7H2OI1g5B>DgO`B z03fg;xso?XD6J zn$4#sR@f(Qssz52QJYGGz{Ot+0KUCr| zpelJ|{{k-fe<1&*a+DpG?NptuqZCec68oPQyx`*jzKxa7a&I^ZrI6ho_eq9m|1+l% z$;s@@z7u=@>o{Rp3i4lG_xJ;>;6r(~o2s7_JxcP@p1(#WA~2~>*FQqye_mz1I&>$4 zW9Sh#gWDBh(vd`6f7cuus+^>7%HsWx-*!C?mu(%Ke1OV;%BcXu8o0vHqqcwGLsGHw zHQ4`bnVoz}8Lg|q^sg=Lvhj4Y#|ZU_;7jRr;a_x=ftAl<`=6sZl{;MBy2 z3MMBRvY*oq@s&6yCwcHJcoD_GUb%K*8(S&h)o6}AFRaaV$SL)LuP9t$@YE!6!lglHFaz`WKdgzw=+&DQnKKN+D>_ zpDQY#4LjaZQCpv5>}X7v#)1F2YrRl^Wuf+~yRi@d@?mm%WV00!d~==QceV(5{3OdI z!A*k&(P?a^I2YSD2~!j1{~4@#QlykjrCCm*8}3X_Xmi>OuCgH)+)IZ{7(#c<|Ln?2 zv058c->YHd%oS=hmGovCh8`16`G10AP6Ux8{+}B*PKv>9bV>fp<&57?n+4?G`LE?? z=3HF%kylN^gX!y_)FBW>jTa_Ahxum;zJ0V>*@WX2Pzf4b{ylR;IPdbL@@rC+t~|c} zaQeO2vB9i-FE}}|xY(W8vq*DpPq`Gm9j?DM{ls&Im-{MX5P6<=T5Gx2yV|Fw9D&`Q5a zSp{()lmL9mOAATLXaBS-e+BPCwv54gp}9c~Ua%Cc7kZtVeR%oZ*#2rL*vwF)l?Mca zET2q0ieEk0?{P9YO>^$N9KKUL5%xci{?bxZ($CDIpsQH^{njqI$lBF7U(El5`JZFo)bkm0&aQzoA*O#R zrEL`D-x0`w0oeYBya#RkAo4%sxX<}Pjq)-JcSSSaJqk{Pdoqz844B&$BNLs3>)T*; z&Dt;x2XXU!E1S@rVL4zxV1qZnrJ zfG=I;cI~{m(_`AX%90uIsWvL+-`+DY{#kaLQa3V04#>YhluOgFfg^}`{&Cac%7?3yu<|wW{-^C3gNWxT%}oaQ5(eK!EZat02j>4d zv2<$xW1~@>kf^FXhBU15% z=1lHjxCuYe%jE#*U+15P)K6n+Ib?ryJ_%n@y+%4N;xSci8rSdI&W}5k3`1IMb>bU* z*5BRNWE;211@y1z$<%7SNDRL1>0e71=ig75T9e@Hi{;;+HpP4Su^>k_bKkRG*duhc#9H6#QtH32kX9kKIm$4J)zob5Qk~0k!7WYEXdeZ5D@dhFIe`Z;( ziyG@6uO5Cl-GG%St7qbDXq|5%Rhi3xGN=OEO@rZ#j-<3u*7?DYFB>w|^0EAv;g?fL zopt_Q$D^ypRCwMBpBp9+d0!ou`tg54`N&DHh729nU~<}tkl_i36w&-MHsKHd!2CZq zt^_z4LitE{^Px$+6Gv7Ky@33eF3SHo_^+O{bzL5OJcx~+!!wLv@lB(7DN*1I<+JmM z>_E>#hsZTZkSBpJJ+aqs2DeQ1U>Nh2`H@ z33Z4jf2C{-4M1m(Zp2 zIZBRLH~mAmEH_Lew=oG%W+O#p7pKT15}!Ge(v(-VINz2)+z75LZ#%j z9V57FVE;46?vSF`|2%6ey0dTd#z)n7)Kz!LXT;g<=ePIfwjYe0TsZ`dWo#sv2+FXw zk%saS48D!Dc`ZL}P66^2@?RqO_K<23I}H9d@(is-D6txc#Pbi9f2SHUOZX5jXzimr@O5}LhQu`@a@f6lt zX_4_L73QBWFD^4~=h=``r4Ya;#rhZW;>O_v_Q)IiCN2x>D#r7~{vXJHeQ}Ig1F!nJ zOe3G4I0L zRp^sS>&f7B{JNWo(t4%TQtmollV!oQk>uaK zoA7{!z~-OXYM-!Rv%Xbv+Cim-EqNZnq4nvrmjdvyx zktqMJQGVEQPrL}ejp<(tNW5YNRdf^=uF+M}#5XmzvqDy4|MTe;`;S#;=qj+xvy_v_ zvMdpNd(Q4N`$l83l;#!GDX|(dkDgc@a7cp{K3Ec1b6PIn4x8Do{5i*LU%z0y*d-bC zuYa2#8e1ih(lu9Ki|AZ}oxz6t^l)ILzN`-UcRu5ads{{5F-n#{#gler;WqL71O8|0 z%UG@y+y5LSHsX066bo}N!8_R#Ob4`K`y_XvDj!{FpTs|mYS$I@_JL?(X7EG6|gZoy%L^_f2Ued}q z$bT_hGj>|1xGS111^OHkOu;8rdXgF5MS~x#x$ZftzKAN;zdQ~rzwYloAgwS&h!@VNL~~yW-8-^KHl7xGtH7BKYXJqKybwB&De5mL_!oiQwBDj`4{E3ZFi?Y>nb0xO1n<*M@qP zXv{%gHcsXs!?~ygC?}%*50sC*+LOfOmWuN4yI9+V5q%Tyh3YIQU(?mCmrcjpUW+>6 zq({vk`S$MJ=!W%R0RZ@9AMk%@82`r{kJz%o7CixbVQ*;^(!$bVMq>Fd(7y&Ntzob; z7v_rKler8m|IQ*Rv$*mhs|@8fz#zn;F*&4VhM}Bv)l)%ku$yG3AygFCq^(yUkIMI| zZC-J~vb5zuM?4ieFE!Z)Jw4Yb$RhaY&Wj%Ap2~_`EzfZHv%Abe|8)AX zS=>~0Bl&}ThF1#kf8J=b2kG_eS~|gzVi?+FpEUsf9}#@pNEhx>ORnAAeb-vr41Egi zx0@~#wlZ1Z|1l!)rFfu!U8X1SbFL;)PgxT}KFHBn$+_0Sp^d3Zve5PMq?HM;T6&AG z32On72m05|=&+EC40TJ^?YyP&1fDqm1^QPZO#=)EjQDgNUzR?voI?Rv$`t=+x#g=U z-^4Ug{(a`C;r;0re{}HojD}2t2MmL6Z=OXxO0%iv|CG&xlI{j>it(eh+*u6Q(=Gqf37dgOU8ToI;!Vem=HBa*5-no-Dmj6+a9J3pREyG+Tq z+6uj!3w|Dk*AL>!%K>}iamE^t)z?}Mn}t2FzmaXYTGNP?PEqQ5BJCz^ z$}aOW4XpK2(s1MXf$ro!osJ3tzCCwuqWUxx(8sPjcv#s0`IW24zloMH)`Jr} z&VV4vf6b4jvLybmpm6$JP 
zy4x%g>0c^_xA$HL|4)9~5sUqkyz1}WpJ*1V7i3LOpZUeVZNr0CunI!3>RMS5`t;z_ ztkcd(aM(_uqyZ5amB=|IZ+YqUxWr+#;eA?}Ytb5uoNK*pMvIE5G?}=fBqbX+robw= zg?d;Wt?S)uSuh`3di(W|2{e`f^V}B(zas#MPsl; z`j_syJF6$rwxai0hHP?%w+%in)x_1DX48fj#53!8 zzWLwrJdFRd*R`XV2>xeJ-wIaU!IutBrA3G>^8WyQd*QH)nhL@j@q#e;Hk7ZeOUtWM zX90YBD*wK)uoj!-JF}}s7u>WpV`2HP`+I*doMHcyp#yIQ0(+-Mpj#^FUxI|uiIC4n zETXP&^|V~imDW-is5NJL`(;qpyX*6;F> zz`Ct}sxEqaZ{K-A=je4%O7{Q!R>52zP>5-`I&{^wXN-ES8RE4*{3wTJ$tZcx!LM#s#z!2&LN^#@L6`k{s;Jn zS{BEL&Q2=Qx*M~!UUY}&Uxm$679)6L4m^t4*rodG={w~vwq63xJn%m!SaDnvm4YoL z`FAu8f?Nu56oG@Wi{A-n7YY%H=O2bks>4}Z6p&RTjWlQN=zabu0T222Y}Erq>3os@ zS;RlIRvaAd^K}E{7!c()j*LHmQ zboldyatzgoAploj4-jAK>W6F8m>joki`CgOcUU3KD`>2EvK}r#gAI+%82@m@I^PF& zgZq}JQhQo53g<=|e95^I0&lSy-CoCReI^T!WHsi71+^)Q@q@>{^SSFPNu4`ykY+n zP9q?mv|GI25;qd~pXdDX=G)Kjr!TC&4mU7-yce93-tf1ZHXga)dAuP1a>yaC6&JY9 zs9y%)QsVO26}?(67fa`wxsYbOnw4MZ5qT}lzm&rr*}LO!uVYueiZtU^9r<`DOWCc= zZ?ZTZvAczoG>x|YYClCu{%eqAw#*oEjfnl5|C7wnMo-)pONso?-|GhuyT4;zj4|ZD zes0%q&f;&M&=%=mOHc3YTMzr6>wU5dq+i{O9r4Su_DNp;Yw&OG-&Oi4_YJu9d|C}L zrPUy~Ax(!v>Q`oX6;YL#u>YyHgh90^DB0hTh4Fv70E3E@$y^_Q<3JJ7^=g! zV5*@Vs!VgR31$8EKj-w01G?<(+YHq8;`$W&*}PT#sz6f*SKqd2zA)*BeLs=%J3P<0QGKMNZ`b?KhL= z&(9wpGib+eT$pXK6uageUj|&W_H2C#|8T{i?H7Mzmd+f_z1PBDur@T?&4fk15^Yg) ziAQ8eYTM;S1qW4~e5)yZX!{r&J5%GQC>Pq2iuj`P+2hj{zy1L5KkK?tGj-2-YoZ|0f%sSls3n@Wc{ElYK}6NL`EduU|iomwJyrNo^VPd1&Xb???Em zQa3FB#i{3;g*|qR+u=&eQj(*W36Nqf42{zcX_(=vscNzS-^TpU9*Ab_SL6)g9Zdg9 zGl?wq((-kK{Ih^f$)%zgfNw7=BOA_(aj)dz$UqJ(bPfq1$cgzskbhsv>Yr$r-G@n2 zKL<~|9^SSEoVIW3DRGHZ+P+BvE`C@+J0JW%0N)nj{mOhF1(d4fWAN>*RGByTntsXL z*gF)i!el}DEXktrh{e)tUf8#n`uQ0b8dbbQVgCc=BM-*IUR+(7A=KFMcjIBR4G#;| zm?sO;U~19~$-dSC)!IcRH^S&ab6{||n?Mds_Dobil1886AF43$MYY;=^!1&X|1*YL zRpyLmJ1K+v$hQ*nKg*-&fNv+NleK&bT?VqyB)cKQT9~Lx+o25j6<-PeklQi-?)tXB z<{kg_t@GAyFvdoA2#IpA_5l&*>-ebD2jHi0CH%v)Fa{L`h2r@ft^oYc(-s?s4NshU zJ?nnH8Hd|Z5Bc{r{ngjJ+F2n@M=V&eIPfIrS;?{_{GWF|4@(`)h@aXGU5=NAyn)lM z(aHyoYA?FIR)wuRE^o2A$1TM`&(#+6FD;}P%vRK}UW#XLgYsFj`b5woY2?%cB_`PX z1NpC%MWE(?mM-c=<(U8Z)m-E|z$N5h@_cy}Y(a(@e16va_45p(Mg>{_hyR1|4^>Rf zUC%I1E#WfVxYn@CVR8G%&xtS9SBs%-lS8DxbSLsxYEiBJ04S2)6nX2EmLK zShWY%pkyuB#!;L~ulKz<{N0$tSoyGU%c{iGwkzHb*M#hS@Abl`h<#t<5rXpB z5|y$%+yM5jS$qo1e{DM80QIdE)i}Y*^1!+bwc|+8ennLi1-!hr1u{e3OL#1zh=2HI zIgaj{D?7i~CGeY|CXRHGUs9!E`7exr`0vB@hKQ~cPtA{>tj4r1 zbqW9QxScE*Fl}wVCt>{OM&2qz@5vX2E`{qLJJ6^^`|#)#@Shu>{js_IcFS%f<)}W&6MIYlc-#1DT=SR~S!9(Z$Jv%1`5{zWEG3QI`1quJ~ zt1G^vZ#|0(D7B-RSdK=_Kjb#Rp97&1nwmb>F0yB=9_F9Fs20BlTiQ$ACb=uFZ2j`< z$EO+BLfZ;;z6@>Ibm<`MBKK9RyF4jYvYdW@)z$6_N3~ThJ)-NxWKjU)qINd;0ok2wrP(JH&K;A*I z5utYnFz45Y?Mb!LNYBPL?lOT5`7i%#V@gR7H`gfc??*^Lw&F$&bvJ$jm|w@B;N9@U zbtdBcJH~%r9Gr+Eewfl&zb0T|(Fb?CxP10PI~LC_dfY-)W<`E{u=c9Q3ziF4^Tx3r z86}#ie9iHBmPOqm^#zlWV;ZYuQI3(%$hQxWSvanu=Be+w{QSm~C+nPDNFx5>>ExLf zFRrX?+j!4;Fs$8XW#KySoT1U(CqkNo!2hG|utPDl%%v!(6bJct@c(R`i_3B_F!rio z;@P78&n$&1k7&QTRP;ic~(_kR+`a<2lE`5R~p-Gd=3cPt$OT71;xks=%UnK&@4=|SD1t(m*MXniPy9>4@dvS_d5`J zb(49~UH|z%8*&BNP}eyfHLD)w>4ESy(-TX-ZzY|PQ$7AM_4AxC)IYe!4zSfQ-u3xuj|qPSCQ6fLw~L_8wc$s*+OwrF-a z--ckb*LqUCW-I=O>b*k}h)ArbA~zTK{}@Hj_qERsNv_`Plr=e0&V_*gGk1eC;L4YF z7-_*tdb37cFp7BO^Bz9j6~DoTDcH%HQNMmbpBHR(61Tep$Td{wwiE1U&iJMR)c=r+ z9eImtKijK?@}G_QD^8qI%%T75|I<{6l%}d!lbMGPv}TCo2*EU&zb#i8}j_qE)~Si6b(n;ot40ywn(DR2zN;)#v*KIxTk&wY4$^Mp}zwUg2c z{;>`2PsnSt&T_m;Wz(7W{wA5tVuS#w-QE5=9|=4dwqJVBApZ^wRc>*5<3; zje5_MPx4COVLo#8r6`ESn7IrEpwSwiuvxwd_F!r|m;!4xB;?UC>mS(Uz+o{6mMC;i zvMcoa`QsA|nsc7Q$?W%^pwU<2wn18yhG|K?wV2|z9wmxB+ltx24} zTeMM*#XQoBIsBinyHw=alD4-Ld47~r|G>8I|6BpDSWn`h0u0Oap5H2Vtv(7w z!xzj;9LLGOm{Oyw)l~%s z0G&I+vq8W;OW$rDtKnLvK#%6+Uu3%Wr^RU(UA89wz{8~|TWjuzNC=*MVW+sG-d~Z8 
z|MEk>%<2@VITIGLfg5dnXo-ch5k*8;2v^jK|MPiO!mDIRcdxa7g!=zH?u>D1$SN1g zUAOC}cY9g+DJgA-gUZN9`3|(6BTfb=-4>DXf~<|dYr77u4MX+s-3i5}8w6q%zib`E z#IxTw3f!K5UBpEBSFwb1@AEmfQL&61JHU@|0UsE8#26A@~EWt)Fo>&;Otqh_zo`?jxMzw7GPMlOYpudhXHwjh+pR=H}X`7aPqIU~FRz>=V> z146~y>CXMVS0cZZ%7UkWu!1DurM8R~L2MPxZs_@H?Bjv!`bX%OP+kN}A z&o$Q^W&-arw#OzLd^gQ$Voz=)EH8mWwH3$z5X{3ua+-sRnOdK}vT*BB{*URK*DeMj zmqm}INxgfZL}0ts`Cgw=!)o|VbRF-{zguv{Tmd;tihr1D=$FOpItF3{-H55?<+R*G zSRAT?6SM=H&e=7%6?fQr#>YG!2{Y)~K~W+h&6 zVG8+Psd|<`;F)TP1ph-hN{YHW)Q3@E2B)9{^RIJT+PT*5y8N11#)YJ;!<{w51ps3F z{&BlYNH!0Jrl?fpfAT6ky$IF>d4~OG74j1>Z095@>-gW_HxPNVQej4-uaNQu-qUcz zcK5vd1^2FI)bFK7dq}}b)|(RE_W>FGfyu!|$Af!r?_0a&FrcqAT$2E$XydLd_8=oB z`}(mTU;n`F`q%$CtvL$@8$j%6LKiQU>3{dKn`W8kh_B>lqPAD96Zu}5L$RT=mfvLk z&6M=ahwGhb3hMY<+{n{zaf1dK$uKPNf8=Nuq;NN7zelk&ywVZSzh{dtcSva$RY9E# za^SnEDt36JbRW_urWG<)8A(V^&8P`A)7li%iV;A=XW3MLst^SR%KwGW=7a+Z0O*{& zF9M2q)c=_)<5|{Ce#+;U+s~r-PdtLz8H@DH!oA=kUmwpMf`;)}ZSS97dw~^J0Ny?$aJMr^i<6B_t*0NG z9DFq5Wso#W#@|uI{h3JYguk#E% z(wTjE5D0@08Fn?_DVaI79`K)4e>>Ia8W9r>Dgy`aN^3il6|8|Z2mU`uF24fFuZ*rLGTdGAOL&hN;?&5?Z3g@wn1A{CCc_DRc6-JD@cXO%o?`t2qKcsZLMG|l zlvQH5YQ#J#@u#>irU9 z@`3E-=F@9C<6xy~r0a3U@xT76KRC2z-4=J7vg3+SE!QM0hi$x5@W1+JNwW+p`6DL8 zO6{_s(Jk6|%WL&Mo##r;?Ofq*cS~gotg|m<;awXWe7?LCmTL`fB%0za+YbaOBL*_g zVz&e^M%~S{hlXkS^)}?4HICQS#9*ZomPhkaGob#7R@xbNccR4;psL|#{P|d;1~bI- zNpB`q%9RMv5c>3rS9ubiv+RZK?sy?=nH}8;yc)pY240$+7PZ#xo!9IJo#5;t+J@L1O=GcKZAM>aB;Lc{Y~yrDAaIKi^k}Rj%<-=6E42{l%&b?RaVma>YMWhZd!-99vT3)ISevJ>}4O98lK- z2ep2_7}}*s2%ESX6b>qKk1Xh4Q&KUIe<4@*Iqq(OhTmjEozS;b4*&TKx7D4@R_1go zPQT2q*w8hLiROPbRYgo$?cELiB3l1A*L2Kl7FCD>Q6q|{MnKC>0nAF*5_u`lthBBl z;JKA;V7D)SR~cN+z<=ga0I0W@*Uk!wLj4ax{{{1}{k}{Z@8+V627hjngq%n6B%1$) ztlJ`%tT6YF0z*chDIw7m1VnY=>mY>T4=hB9CW^P-th;ec?pEVf4oVsMqggRVOa*6r z_^;>gOKJA@w$GkDx^wM=-@UWndv11~`tWq?adzt8nnR23rias+X!vZH>!tn1+Bx}y zqPRM;>=7^3yOMYm|553DEe_j6p+9XY42x@zdA0Rt^1GrdZZ-7*<=;~6f&Yw45!)z~ zrxTe@qO+F?A-lQ-s8D;Qtfu+pJLxS`-Uf8VCP56X=sJd3PruhpJsyU3v56 zunJY4i2p@CWR^7Jd0LOXfrEH8ptM=W%j61hVUSee{mK_gtqyb?Wo);SXy>>U#X%}UN0YF?v8f4DiqQXk$ zGP6$Zv0j7npX+>8cvi3568~fA56`b1e%Ft)Up($0t(M1e{Gabg0kMf8MZcL|Fhp}943nzAsOVmo2@u*X$I zHddJ*H`OwjG0GP)-RfLU+=z$#i;p(V79*pTcRlcgyCs`XNC+2e_%(PbQ2dp>3}Eec z5fTCZ(U8qBO9~nkK(P5UGL7V9+jMY>0sQA5gm;4r5a$AGc_|$IS5-qP-&$FyL7_WX zjiBMPRU`d*;LwHo|J=^Px)s^HHVd#3N6`7#t{1|9|AeQpFX#hl@0_pD;YUrY91Cz8 z`?7%lyi=Xvc;3VZYAof$zk2D4IrCt=iQ+%P`C>I!XWzg3b!q9T_U^P5{ku=wenb7; z1K^a`O_q&3(29{*;Xm_=OkE=~jnn z4d*gEyn^#|HRn8hs9It&28L|nb&J%n@z%&rcWr!m!loEPO|$uZY8vL-n+MqCUh0r^ zt+e8d8f|_+%s>VdfQ1U&Ck2d#)0&|zBMK|z=WpV%An~sF{}?;LdBkQ&a)JL3%Ky=s z@mCR`sJm;~U*P6qn^}YZb0S&qIhdb9{@2W~BFT>5A;KHSO;JVo*CQ`jxI`eKY&*%X z0!l_Hf_49RX`_|hT8^q?1DgNkx*nJeDC;`n$IMAfYQ(Oly(8y>_*S1Y-MupZ+Baj# zD1`Tns{7QU{v!GV=)b7a$I_N+hs$=11S;Y;Fh0MqX{=usBP|x`I+M*#sWz!$;Z=9q zKgN%N&KmMB@=Z4{nH;><`EtHhMCP(>Iuj@bTd;aS^2j_&q#lWxPf1u7dMTVQASyoN z$<2Mo0ZgKA@I=q?^8+&jcoc-Y3>6u3n4@hw-<<$}wU%0-JEx zzU%tY5tD-rd#)_D*0t)mri1@;-R5lF$nNHaFk)rfr{@>y`-LR$jtU`){<{u{*9kyc z*vYo5ym!PzWn|Ml+2E9sEhmhOUIXmy`mZGssEu~^W`j4$7f$^nKyf+y`@)8PKA5Zh z&;N&uOhI}e|4Xj4Phkwr|61EQ3^aSzc0yp^+efHgP@9bEzhYEt;N@=#g~>#lAvyV9 z{~zE#@2muPugXd87>sm9akaqz+4xmss)j`MCB*~Cv#XiBt{PrttVTpPk$z!!f&E3d zspg=u3Ws7HXEN^^)h|_}Rc>tux%PBrPVZr!Nvf0GXbTY*$4+us^#H)+KcQ;i|>HfK0i2;4t z6=GKY-bcB5etrKSnBhsV=(h)r4dj14ePW96$cpkF;WFzxyUEoTd3FYl8-!McMZw&) zm&1Rq(zDD_bO7$ia1w|A91Qb2I~)N|dbn^46u}_NL-S87?)b5n8au>K|I@vbeDL#I z4*%IAMQrB)+#MHfjZgvu2F*V;Nt29pQ3AyJ7!05^JqcNXKD+Z$AGUbXnGKPhA#R3| z9hA_qN;?T9!qGu3Xdk2-B4_x4xu-4D;B&JzI((*0t}`yXvu@9|(-?+Lb8GZmYouF+ zHQ6fXc(ZM>ZX~-KPY-=p#47WgAXax6su6hm0!6jsj)O= 
[git binary patch payload (base85-encoded data) omitted]
z7~!fBycqo87yEBjf?r2$v%1%(aY&H8p?dCpNvgk5BGnr5pRxQy+s{)j&u*)FtxTvT zqWr`8SIw6aT~nZXd=BM5U;1nE+JHiX@jtYxU~*^fhnceXRoctU{T)}ar*P=h+EZxP z%Yxag`+`oRJd8Ixf?>SLDp+Onc=vcmve~o|NsZ<=;Gd#rf*Fr9NJ0 ze0=Z4)z!c6pA5;{w_KV{5>yKOeEh!uY#gMkC5UJQul~*7a;SWKU+}y6Vd$QE{f78! zEYo8eB|I+9{V9cyt_8X}2+?eFXs3bvpSbC_A1?j+8uI45G0eZf|Bkme=EV=+J;q3e z{)>6d^EgUdmN17H#{VQt_rpyV>Hs4T{qzO@`@YLCR$Fo%(4wr(zg`>qC`F<9*E?4+#yZnBx9K4htjwO54kCKr!1h+>=`5v8j0 zE4i`$_YPV|$;Xh(_w?C!@ArBkOJjdxe;T9L{O!+bPw|(xdqY;17JsF02Y1!)zq8GS z=5P0%|2_|~KcHwn{}cTa3lQ8cwvN3L@b(1msy81zGFhciOI6#x<(yZpe)=xxzX1Ku zrHiYbeo`Z^_nj->c=Ok}?f9wSUrR%=o)9j0?$evjf%JP0njl~snu>JY?M4V^TW-q@ zu65V)nS>$lc@@%LqSw^ZyTTlB)==4eBV3{XbRGZrxV->A3i>aW=Z5rsXroa7`#5|W zKt}&udVlHauT*A^^WU&V;d-&qui+^mJRqY3(r|$%8DmA{PgF#6O$`CdZu=-0_GW!S z1xpguX%P6IDK>ruy%gMz>(GCpGzZp#MCVrGsw4Tnz;==v#Qt}BOn*9Ve0$$2NS%z| zYMwvzakSbB2|+(+>&Cl7{Q3nDPXDP-^>}&dQ%xjasdEn`IL{4^R?+uE7TVn4Pu1^l zL9{mysor2(I|of(eyQ^5jprBbq*T7&BguC+e(Ef>>`PS(S=>r_eqYcyTPF;40D!xvoCacrq;vtv#2I; z$bWtv>OawP>HC~vF9@mc2MeywR=a)%IpOL78nI*l=3iG|WN8wYefea=o%OQM|t?@r|VyYqE9#UwT{^x&xRh^d? zzs~4M(YjXYTWpat30dR-{pDmkCM!y*05s|GK<0Z%;sU>EErILm@120 z5JJkAQ=P%$`-kO0(E#+JLs!|TchJ&}$H~(@t5)|ch$-7$xAnaAJKp!s@cU42nu2X_ z@ZztRu$0QSkRt6#^PU5(%F3t61&TZIo>Gx*b|Kd&}Cg#5z|jNA&S|1^?? zaqQRhv(2GbxRz&2VFr9Z?(>_*k2B>=p^Ut{L$@9ck19R4V0V;k>ZHreFSWIz=$Id9 zVTjhAnPJmd{};pflnW<*?^)FU^;LGT2>f4XR;EX5jEQxq|5Sd&16XNU4}c$rZ(A)u z|FiJQcJjU(`P9queBbwH50a+@fG=;rR8ZsPhFRcwvjW+}iS{dt@aA9cbm|7pDsYXv z;EJm(KW>0W_$Q=t>D?F)Nw4+Y^YSyc4%M$)FFgPFCjq`yD$tie{k9U9(_VSJv4d0a z*S9~VUVoa@2xIkAqmv;$ae@k6$)d(#^sFpWkbn63uO{R_fBaMGy%Bfu_v*Kn^Nbvh zgpgG@R$aleAc5PY;&l4cap?d0jy6|Pqut%DXJp}VT$50O{GY3g(El#`>%L%QVd72g zb^VV~mPXLb*_H2QWj*A-dbJy7fZp5>gERbwxlf}gTlE|jnJe=R{3 zqi1n0eX#$G6le!c=+^q*8IwLAC;CDD`fB05o;U%;$&pRnJ;R|9`0{xGY zxLb2H^1l<{AONM{vz+~AS9t&lhI89OiX`EX1hrU}H zr$}KkU(*zoTq+zZqtKhzt8hF~o~7Y;12+Ou3b!HuaCyAi3SKxjE=HJ9D<`XU??IQ| z9p{W=*#xcU|L|CJr)r#E?hQ)(zWum{--kDUBBr0(YZ3tec)u;=Ai>RK0_Fc8{m=UR zi;n(?A3qb|AISf?u;+-aEP+oEq_MMTU_-e0dz0QRxGWfu4B%oSLr@gX8VErAP1zug z*~_A%6);3^kI6XXeG8?AIQOqI3ePaQRPS?w!RNlv%;XjyJ^GbBK{n@@b4zo+kqCiU2QU0!<)no5()((w&!CK6{r0Cl zE|UiLG>azX?04guy&=E;LTN|v+kNLgOtdvaf|@)WAEWIN`B#cA5v@~-A`YSc(?v*u z`5*oX_A_}wf{rkx{~2#SK4!_%kMuuQkom#`{im1jQq{^ZR+%_(q+RH4q%zki1s{Hl>oXB*2XxvQ%V~@(JzLc1($91Z^Xo^ zb=jdtr+4+(ySX9Ac5)h>lVam1f&LlhU;eTKcLHa$;^iz#vPvwzOgt`G>;H1k?su%Ee^JN?+vgH@PIW>4VS+;APFc79 zzY~wkfs!B4NhM8(a_fbXv8F#g@ z8u?8cmQ5qfABSR*2PsBye}N|GpWmd19q;nIP`&h_r$_&GtKhG13uics#BbvuD%)eQ zB4?H1(qBKEZ_X)3Z2l!}-oPm?vVntOrMpnqHg0hs1`4w!!(q*-pO_Ma6~4W%APRZoTf3#&?( ztd;Ie!0l`Or^ifQ8DRS7SGsRO3x@hH4B&hwSEt_tB@dax1)^0o$LZaawst)cb;p}9 zI}tEQmkA_Cwt-f46DCgM$%?DlHPB;{>r&Tr%;V*~6AxTmA|3*`x-c57?D~}bt*R&8 zy`8tTe5UB!B?}`E{^`#-l%8-I!bMaoTM2|Ol$rxrpxQeU%?0blPU}k>V=vM(|c$|me8pGBqi~`vatL`RDhs=lju`4%WLh}) zRJr#(i94p`;&}5aUr(7rwe#sSoE5dO1J^&fF10_pH~nsN9Vj|#Pd|+pR&fW@W$4C# z`9C|Mo6##9*8fds^9&0&{-Ki8qWB7{-qy)gIjg3pZs6sQ>)|xI$3>dpC;} za9-buj*$Nl3j`YksUQW8dT4F^X*IN9O}H@{xBT~3elLlU;F?tG_=J&{<#+Tw*d zD?en|eEm=V!>zCtQf-7$@~aXvU%h9x$q90bEKh;H(9W<8QL;Qm`pQfbvTjR~PyXc} z8bG(*Gc^A~`G-rrKMlr|{^kFG_(O|=`rpk7qpii{Q=tDbnCvKq{2vd9zyx)`DSnP} zByV)HWfj@scvE7?wA(8^--i6HYEHQf-cwhc$j5;BP4IL0UV6e zPS!lx_?3Y8p9H8M2K?jq`BTfU+xmGMD6(twFVH_@_-Cn~HdxwFh=lcDd=?cWd5nQ3 z2j%||idu8XU!eb#@TOxN=3iUzpmsY${=x-nw=Ih)HRwCCCMqnP?B1A`Z%>X~T#}62 zROwKAMNq>9M0+_pA+ky(3dim_fC|bC8EOE=$yNs|3(yEdH~Dqvo}oK1ZGByFVoDc+ z;3Oy~EFdp|Q0Bl@5yt2Z&V--fe^1{?(?;jcyUml<=cGCj`Y!_GE^Dz9VE$EiVRR|q zHHMA?{qMm4fD=z81o|(AO0^k-KgXgN&EFwDs(qNe1onQ|Nv`FSx?~BoLa|dbAduO`C$>*H9xatahwM&+@gnm4SJUqv>aeba-JXZENR)6RNBm_76NdiNOPKy=VH$F0 
zp^}S0+~G`{!JaG<9*FeMoQcqLj`}Z-PIg=k=aiNE|MGuSz5Y&UXPRR9&#nH4k%{;pV!2yS9H#|rFvqRKQ_ zbfUd0UM+U!{&gvssv6aXpCKpP)XoXheV7OnvBmU1?WFyVPa(jK`dcLm<;$S_A1|~3 zG5$w$MRfla&V{x${L^xY5gLp{RlWX~f7r|+8K07~mVXHSU!XpsxgY5&2Kk4Jjw?|0 zN`m}nBv0WZ#qfisPiKWZk~#$ibRjPMW4zZf(hv5OHP z{|B3Y4VSHM@`X=~g!GyU=(?X?j(+TUbSFm{Tnj`oE++pw&5aBOda9kp8EjN0`pDH>Y|aK}3C@ z+H)>-^CFR4-Nk3(j)G62E3XhigPM@Sa-+V*>{J)ziqj^9;+NVLETIHD-Wn#|1_{hD@DGRJ^*f-&~QY|YyHF)8UZ%BZkOlNg#iSf*HJltLxU znfIiPH=jFUG579jdgX&InDfwiWKsj0WMi8xJD0vhRrlY6(v%R5V zFxWqSpvZ$w4F2*M{vjDQ*Dbt){GSi#FaHn!fIlWroaG<>=^i)@um|wZ+XRszRWoe< zmB@G<6Y3u8$G?EFfjT8~pD2tCim5Z*?JOedp${bxmzbp<2*2m*J6YHcnyo3%JM=c~ zJV_=FEq!G|`9I1-lghm#k59Hc226Iu!oO4TfAr7qij@*YvIA!z|ImLc>OaNwKW32s zY=q_i5U!Bmuh_jd|5|Rk%ZvHH4tmVlst}<6D;|2$R1Jw=KXnes+U>DRqOx`67Qa5B zPvo^KiP~rnHhfqn`9|a0j3KjY?fKrM62hHJ;jAf+8VaW`1+g4CrccEs4patZc(-uK zyMdvOI_RJO^`EM~DSC1H2r9CN@w-xO>J znL3#|%FG(lY`4{wmZFg)8=RBemIlVUj(k+8YIq09w(&C`VkpTMVF520&dRx#x z<6A(UV!cYVyys>9DL{r$wOFws8XhMkpY0B>)z4B#L2yBI9_c$yv^fUEL{6*u6H@jB zK~jb(WJD+gob2@2^y38C-GZF!X0^kZIQ$R)ynNflHwW>f;D7JVWpPw7AC+FiKhXcB z;{OBnzw^ji)?6`3l5mU0`Y)jW3t#Kj?uYW9w_1ekA7VoDFYtwafNO7-23WVD_ClJl z|8g*l_pP=N^o0X-zVq_qi;3zBriDf@@V8a0gq?4bhCQShf&N*MWK`?E$gRTm2@WrH zHFCSVKr>{seO>+~aMrOcg86$(fWAe&!5aSQHdiql*+Hqifc#%K)v*4HnWvYiDeL&> zSbbp24PJ=@dH?#~>-5k3ieMz3rL>1dtVFZ(;NSOTPUXDo15MgMvGHecmxC|5VJeKb z6xqxlCBLPEh*6tVYE7ZJzWoKuRO;ZRcaTXKNd8ehK%bPV1(}@mJkgvK__EyFfX00; zZ9N|j(HpFZ|MkD?n%A2L^hCp`fg9^T{lk(bUvwse3i&@OcaIs2KhF((CI|9=pzsH1qqC0s zn6*a6XVPd!=wnQzjBlRNoZQEgLrjS-gf#UaM5jUjSJXl{ELRzQqK#QNUz|v@rL76O={Z|AIuAHU0Bq!)%1i`uq#w zpKoEDnr6IAiT{WH$*#*>%YSBG&0a}k?>`X3Cj;j^0GI?3H7|6|L;f>JBhn!ei!_9vtm}Py@{L2{f50{)EpNx%iRn||s?}m!A%jye$b`IqK zS~;-X$BXzMehV;CBK?m#4B^pAE_WD;#8z@^csw1eh`N0iHT&SlD;9U{=iUs@)f97K z{a?j{k9iaV=Kub+u!4v>GEpeNl(Cm*9si*G=WF{!;7*=Xd_J0sdK@8`vMRf0+^GKSK%zD3Ou= zrx4q2zkO1r$XPw*A+3B9X;Z(v5j>kH{~5%qGSRq`%RDOmqI_`l4H z(_}-QHBj0C_8|Ues0s3a?j2v;q!U@hiLXZbpJx6uyH6*t_g@$wBA`0m`)y0n{qgX~ z%V$rJp#lXQ=su?iH_oQYgBsDfhiFZ+|6GE_7U+ME_-3yEa$5k*zg)T@5zD^Tz48vq zend9A1OjK$;-7KiUA3#%(p5 z99>%rC!cf-09Ny_l%tG3u9nF&%nA=YNnd(e5v7KXfr$e2KgVWGtynjyTP9FXulv8+ zW0qqc-;jkHWRX4Uzc7A&=NkE6|LJ&7{r2OyP#UX|(7Y#zU)$=?yFUMlwmW=HLYoNv zU#S1IFAoS0n16wVYQIlUbRqCRcg4q-Am;<{k9X9sgUrDH5LI9;l=G0L9xZB-12&4N zTSLo4rcq1N^2d?lLny5itSzCHtK|Ky_kVtw8G!y%hd)vt1!eG`e$e{o*SGE3@_g)x z$o~cUA1wceZySN>A!R>~O?}>aieat)^d0Dbx)Y*!#m6GL*Zf}xnB9P%M*UxE)m$oG znOOeeG{jZOLH|X~3;;xR%89X*Vre@_P$QcS$&n6ch34#MWD|f4`i=xC+hG{*G=T~V z^0^g3Zu2zX7Slg3e`s;)OR2^D?{*o~{L}f_%H#pye;#4}uhCuanZ$9PAMOVoN>#q_ z<42p>*jKl~VhiZMSg8>|7z2Iu_L%<1`_MpiH}bzn+?Y7E~- zpu<4_^AdZ{5Nsr~ua#$;J%}eGpq!<^FilR4Y=-*3;$c!~&MKk>{uh)b3;2g9m%kJ_ zE#7g3TgDXS|BwuZ4UQ>AO(XsDew`SX>%63Y{15b>p12t5i^Dy=wa3sT$kzqx5dN3{ z9JL|1fa$sDy8qqX!t|9i>c5a5xtCGld^2)dVXVegDc}n~3*a9g#V8&DaPHj?{$hhv z;ILsa@JM?W^OX6)-&upvAbp!%t+ZLeW>)?W*0t`lI@3Ruc0gEk(zf_+*nn4ysTE#W z|0(#tx|C*b6no@@{u%X}m_EOgN7PmauJFooM^3~13-X`SIotteqyE!=QO7OjV6y`Lhtg*L=Kcxbe8FxKqi=)4QLtno{ZFP! 
zz{=NIQL_R)`-n(J56C~Pb%?Q)vk?RTdz$jP|EnNsiH;H1=r`-Wb3Z-QawGm{4gY+< zXqxxPM2rwC2>j3QO}3_0?iw8EAD#XCJBZ)D0Vc4QKiLH?jP?Xgl@UT470N$Me7L56M*jB=1LkcsVs{Q)<$YAP({zIto#Ow=f1cEi zJbX=zfch_B{-qBm0+_2%{*Q(QC?Zh*sm@NcC=mWR1x+C=iG#)FFew3ESCRnyPbck1 z$H&SZ7RR6V<)7#|LJ~;JVy=%=ewa z_O9*(Ca|&`Uv9sKf4NwjE;m#{ROv*w4h218>&2+Z zQUH5#PDzi*Gv;M=g>QDLM*%cC8@Cnr2=qTVu0k@+#&bO}47j!acbI=k;V|(>?32`b zgOv!B|3liB9P`bS7xI7p@jvAjenf5>96-)mHs3Fd9Nv39>MEdd@SpyN@KfqQG%pR} ze~|v!YI`j}deA>dFmhalJMxGA+(XA@Z$dQ6oA$)jy{UW7=1?d4?}=V2v~Iwq$)~%U z<(k7p*sLMx#Qw|gPolmo&_A=-y>9AcTHU-w0QcYg>u6fWPFiD>|J-#UNERBgld10p zG5^~2 z(m2HblnU^HH5kXpudr#L9?nJRKi$M3X)*aQpR5EF>629Ga1huCu>!ZXFK-iwy%r}> z==`XHyfrpXKmzMORly0?L)%w36CDmh!!Z1_IB~E>(%6B~7@L13ZhR~#>ze)#|BTa! z?}}XSKQ-nEh51*=k21;>qwycj3)FWnAN<#Uis7G7HgO$uU{2Qh-*=wOJ^c#2R2cu0 z-H@{qf%qR8i;!k|W|aSIU$7*N_LV(;bAy>IC~~6)3Z^#Dg)&+mUhh9Os|^8jgNhQoxT53z_|Pi?jJQYO|7!iSa9Yqi0;)N|LLE7v}5SRxuO3Zn}79hWD%+{ z!1~`?@5_3XP|Lvo-csUSBuB{s{^yTgNyqq;HUAeQ_hxH4)PKQ)$B9R52nGDJ0X3*J z*iIOQZ2^$;C`nwFxzIA3#j^`z-F?~GUf&uh5eWZ4{_~lS-xO`o-+3%zdQJa4ZtE^AO9*G= z*=9!s4{A2*zZhARG=FQZOg;tur#&g=S7Y;;QUCj)bkzUlF0opFHjI_gh#_>Oie|Hz zcm!9&>%xawC!OBYgieqzKXB}rNmKQXt4hL6yjG*<|4;usR_`J3E^bf>@_#V@SFkpa zS8Y>B{{#7ljE%W-p(=F+M;zUO1f7j8+?Fjz5l~$LMvCG*X`cH8} zWjmVqHf}}_nu}{Ejh1!gsdm!}-<`soPC(di);Y@pU zw>xk?_GN{ZElYUNbUR`haLE6~1OXa2Hra|1@`l8YtDyh6kLCaH(jWQtl@0XISpE-p zV$L3jL;5%W8k+|NG4vEtA^&il|M~N?n^R){6}t8OL-Rt1aDGJ|jQ?3y2ZNgdWxX4&D$|jmUY+AHi&c?N?F`Q$C;zbE*Tm$HT zT>1{yoE{CAsR8~+|0cpe?gQbs!hEA;>gGp5|J)kyYJ6)?)W81s2lW%c{{$_ebu0Ll zan1jZ>7Qre+3Ntw^J5YEzX1Pm*PKp5_-9}11v8L6ul0YWRt3;*VnO(aJsRq*fKvqg zQpwks({jP4&Y2ZJ+YC3p3Df@we1=>}X$lR- z|HPO1L_{81JlPcwgQsCbxU{D#9&fdqO>3}Ch5mPT8&;g|L`N3?IlX8mTAW=ogZLJG zenH8txQAJ`-@V2ULT@jR9n$|m{twCS=Mp9T)-_)h=6-+7|3wlph91h52^!S@we-HN z{vq_g2YPa*$s$;ot3kFuu!$vlb#%t-%p$klWiTtLIX|9I6vfR!!e zC_c5P!{JyUGTwXf)7#dN6r335|B|q&@m%A7Q2t?gh{pGYbkeo!DoFnf_{aDj-uU(& z9~S8Uy0rJt%D1WXq6k;$e<$L;Q3e<}ZYz5+6blYA_KRwv{;&1^cYYno!>dQd2y6Wp z?+zb+UK9rYFU0>WYE1^-<-$Y%`-hrLSzCZssQ;bUSPJfN`6ZE;W$}u*Y+ZV>+&BA5 z4?=1i>^;E$G@Cl^cF1cs-&~jY8_L8VMIhkB@(;oPW#KM%Ac{?VLu6qo>i>#i*N_#e zH733&LHrNt2pEdj@K3-_XXNvE`yiC|SD$|mgqh{YrQ#ko>K|Gw9~B4D}@UE?=Rjx(!FUC^d&Hs9+$C`hHx6GJ=t;i`$TP; zhsN2sx(U>OdNl9APS9;UHb)=iTK@&6e-5@ts`EdS?a;=n#ml>dY2pXsDDP2ssl!wnJ^#gTT{4z!@P{wVDs_D9MR zjEzAN)Bp58yQHv1K|Zyv|Dij`4)tyr|HIo4J=!{J`k%s#rVyZyApggvR)uUvheQ1r zF;LOOf#Dx}TUlEb_!~kPWbBJoKdJPvH%Z*MHR<3%<7TzAseoOlz#{Z~N0slG#v2{AZYd$sZ%VR+{O;SvyZnt4J-vQTW*~;z{4T5k{90r7f?_bAH$ncp)=xbRUL_@AG?TsSoUV#{X2uj_w6 zCw)`PdA~BVvFEz}2k=kzcFixEbN#m=(v5dJ9{y)A|582o_x?|)BmCF@j>B#JX>un4 z3G&!{r(6tS{PF|r$S!@bI);mmPIdJO?eUnHr-A;{|M;JWI#*bLL<9dj!arpra&I5R zms5$T|MYNTK|>}vVi(N60&}Hn;SUUrKH%gft>GV-e~pIm z^1(|;#Qg93yzHPssJD+2lu$U-Jp1VYr7=NMBM>sB!pO8J|7U7vz%Li*pbGdwy?gVr z_Z#)Y;QwL_-M5dHJI=@u$CM3bn1HqXLvmCODEurQ^QxGqQteRwVVB9U*t-8+$}pTI z@=*R0{ak5E*v}(2*}8djl(wtWiQ)&Z(9x55%#r>lo|}F5VOcSPVHWB?EuoWI)l?;g+1v`-N{=(}9N_7{zsPdVYWk=gxIfeIle4A*CWdw>A@jnRvJdEOHhTX9r z@Q=v8iw4CIRFEnQt}!({{wWdC&Rr8p@&uTF?TtTz>Plh$C8D7s4c|z7d%7?xVWVb| zc?iTU5^)ZjWqjw%(*$sO6JdD!6~I4>PNvI>wp&e#T8km%8R4Iv{Z%e3{lVWB#=-x- z`l1>#l_$gx>Wx+Fr_<4}eKiomn}W`Z>nk${98`NLSGQvLXFE#_&mm3>|FAK0(_ChO zocto3qi&+M*NbeQfy*2>lwqG*g+C?UAZb!?zU|R{H}?_~CI!D1aFEvV&)k6K6&CSb zyzBWt%X2JmpsV=lWKjJ*hnB!(>ccZqiNj^b?xFlcEAK64ugjvynEp9xXYq;(hiGfi zkKw-d`lsBaC#k~D@I6AvRUuUDW#?~P=YNFE>pXLI>|ql+2b>r zlx+cdncO5W&uX5R2E(gsxEOsC)U6ciT-qkC<8+z>=3fZ^ID`QHdGot%3xFY zbcq#jk25MJPaEG>zQAyoqVx!kXLqp@!FOHkxAzfuoP0-HxN3Y`^8!4)HU0C^4LXrC z$#*n~j!6IG(o0-D+m#!6*|q=l@TQ$Nlv8FDbg}$HUgNE2#&%Gyf&OQxBxjg15aay@ zO3~$@|D7K&(s-4#*-%j@8v*s7YNT(&A1(lCs1xWp%;Nv$KPQ#j_dpvV;2+38+$?Bb 
zYn*pWla_8Sv-)Um-Zkz6?t@#w|26f~PnPqJ4$-kYP4i~p!q_E-ma7Ad#^5c0{!{i) z;^L%0iuvfnoNiu81(&ZB*k53Gd|mXsJevKcvKKW{V3on>w)R;Nqr=82tVNtyn}7Wb z=Z0-^=3oBLVTijAUgiGa0r+RXNsY^qTZcV{;ivAJ^!Z*s)oe-BiRqx#d!SHWsKNrno0&VDfs+TTn-=DOhxYE#rec;~9zm18Z7k5!-U^mBch+H#4y+IT0jur-8` zUlRrS&shH9#?3_cH*FEHKhZ`pWBnJ%|9+?A2<%2ZE_!@*p4n^o=SVV)&#J%(ub%<) zuUkK5%)|)kB?zc;Zgss)WxB4?SF{^_y)*~(IPpcqZ@k=SUb^Q08j>H@j9^%9GaE+v zKYbe4N_q=V#xHP|gOn1=DM+5;P82(E zg_rq1{8J-4V+i0n8tXqjiqlJ%3&_qpl#16&I)KADMqKBW{_zz^tN5GB9)n310I_as zdxMpBfbp5|9`A{V@Hk*^2LC(YpL@sm!yyWc5T^eTRdM|U_&>#_G3NW{r3dhu58hPn zxt4dVdaN;m5zzMF>MC^axI%l!NTt3TE6mkDy>2=;6emBWCC~fx=O?fb?2O}q3Is|pJhQ)j2!qq2QnTBO3EZ`R`GntM*af$ zr}EU}7{$mLQ^0H^fPeTu+3J0m90jkLD8cX7jlLUv@y`|7!rdA@Toc~z>s;y0TpbWd zg#4fD+ua5jSd5YW$HU<=$L<*)qL4k=NlsYRvp|~S^G7JqbF^Y-&XZ_%)e@5elM_<~ zEIg6~;D1y>|Gd?sz`(vIk}>$}495S6oDVoWRCKgdS8=S+b?{bvu=JT5HLf4?)p#Nq zFaG`!$_CP1#!D9={~7h4CTbBvv0W{ZjQTIknZyA9Y)%oIZKKtSkrm#9!<{`5BMdm| zhfl7wv)&%B2{xg@%l+Bh;zboFXb)`^R`GpzfAk9@_`g8^tSPq}_RDC1z_dvJW4?IW z-5J^*?=Fv>c{OY#+Hwr?f6jc{es?V7=Va$E#?Px<(Ep|FHW0|1_@*UTDMyap-L%MT z#3bIf9yO}Q2`fd@F(~Xf5`i^f9;Rq%{!6|`9B-SgLb)~|KsCOXgb`w!cb zBfx>Je}#paOuL?cczfU01o+MW-)kM+0VLtmJ{|*Eif_Yz)xVrkf zHa7k9+kL^+_k;60XDsBcKD_QMg&X$1I?uP1Fk#4~HG2iO)MTseB!i-cySC6GOMrO4 z&APWh|3k<&$stFhWl~giLMast6P}|cnY;2?_*6Vct1>sFGWVnYuLsfNLW8yX1rV#6 zY+L^c0?f5z`PpsY!WRem&zE_XO_Qie!C-ya?Uqi0=fXZ%PtM+@S8FIN0bAH=3jJTXT?HU#Lq$>J?#;I+xWfnaO1kA?LBe-#QyyKnOt%sc{|&q z`Z@Eao_#c7( z>Avi}X#NHK4<7gJ^Hjh0@vGONZ2rT;oDOzT0wdQ(U!~LRqI=F}+~U0c;mAfMm%y?U zW?iejLVJ8B?$1I_Ao#zI7MLG1MeBr(at-j0;f)(#|0vb7J7|Kv4~+eDT=-nw?XX;B zgla+Cb0AfZ#ir(rRnEbIrlA`!e1Y%_$p5eP`SEP>rm7dd)#r6NES5XB7LQ<%-) zw&s|CNS<9-&b&bD#FI$S{~-L6a7)&_*kf#mE#&_w;U%8eo!PP<3hYh~nrLiU3a6*% zzjvbkS#GzWvg-jR!!7%DVK+&lPQX*(AZgt`EG|dKWc)=lno}GkJth(dqBl^Uuj_w2 zMCqo1P=fp)s|R=v9P)piyS;@GCuy8CVM1mEujML^aTy34PEojxJpifO zCo2Bn9^YM0uF^6={tv+F&9huw@LcGS|0};IZQ<|iEAT~Yp1X_m&&kvIrkPYG5%rWo z6_fny(#jSkgS8&Rb#BeNsdI!2Q^OSD`i*a!K=yM*M8B?VwAw`&n5DYAuj5)+pp#)D(xHwb^G;(AE;Xf8v%kg}_j9WKKl)0fR{u;_^2H|HDG`;V8$ zHTp_+(Q@-`#4%AinZz}1PQIM=o#B+wj%JcY!0w;^`NwC~h>7{OcEtZYTl{}qy?0!b z_xk^T-PweZurz@H2^%UBMiCS=5M+rMkg9bCPyr_>wWwHcAWTt%Vns`7Kt#0G;;N(8 zh9QDQ#ZilOGz=AIU&H?AZTEL4{INg%25Vzk*~ro1!JxqNgwKti;!|$C+#pGv4xPKl zec3!R^)iQ9($m{j?sS>6(Wkap!SRXQ6f4t&=9`H*c-T(rH!crYY4E1ZHxwAcIZJI+@bIh(`&3HZcdIxoK5SmPaOhRgrkx}IVFGv>eanl>Du`uxH&GUN} zwUg33=B)Wm6FQjLul0D&x`27}h0k-HD5|R=|7FXg_FwGjzPn@C?7+Ym13tUY7L3T& zziXlwU(}4rM@xTSVT`3{a)a~13}A2aovdn*rWBgy1gyZ)ZG6mrFyy~@9I>Okz=GL~ z`>*EwJQHT~b3LX1JnhtIlUeqP?dX3Npjj06U%eg^aS^u7Q&L_!;_*&*wEbsXTKHf} z!|LbPP#540?43|~V9q!H^Z!`wONgpDkbdK*r4=S4EdlvI)=U0iCpXNIgm2`3dUQV3 zzDm;)u=3tlnGyY5iNG3OFd?t2+z32#Bm_k8ol-5D20{PYhv8jf@olC>8;umFUJ&F0 zQ2DP&2W4`-iM80xu~B>}+1Pl5-@>d=bO$^UV*eNA{{h^88BgiTk0fBE4ZK$DK35Sy zzQz3{d+n-6a~c$TYr_h5xJu6X`{X-wxA`t?s3>nKoLn_#1=3DdIvD03;}}LGURCS$ zg5YBQ#ob{f#eY7?Axq#$Fb2LiM{L|J>v_ZRb*{z>=X#GXCSMH!QLwygy0~5(J+sLB z-l=aNHqQBXHHLqN`oH}5Uf!MUo9nrIsja_nxIG-F_=i7OwkrOO*=QnJH2>QmC%JQ> zaeB(^U=ZgGs{O;jUo8Jk1JZP?PHX%<)ks01tBIbac|ECXnYsvI+~u*xXZF}^A= zH2_|lj6IUpWu7JVNvV%!0OkKN9f$j`A8qxD_!?~QGOvj3xKz&*W5`i3VN>cPj_2e~ z@m@7Oa+8}~U~sp(wQLjH^R`K{Qz~9c>-3cq4E!IGFY&F2;#V8^&r=+LV`}4`MhFHj zI|MV0x13Z(_JkE?$DjaNqzx}+EV|m5*@vmItv@xoAH;{NllSbK#DaZSDU{SH1OJfP zf1OH13fR)F1IM#LX3&3VoZe31r}Y;af>yH53qRdAc=do{*WE{4QgDHDqEbI`rrPH+ z3B^7v4*bMJx0~)xSEUJ5iQXnN&|Jic(=hP z)wAe-9_l~s3asc?93-?%7f)EyWH}4*HaYKfP0xFc*w&Llhs0?%-P%>GEB2+ywtkx5 z-`aC!&oKY6`r9=cYHQGJIsTnfFx7uC@2lTbKJyNU2r}|RPVh!_O;G&9;N1-bW8$8O zjX^61iao8*CweTcU361>xPS)cmY`&4n7p&p{>$M1*`$_jyIgOvQ(yse(f_mZ?j%-n zL(gDm$<#7qK{97fc|>uSWYN{OU~&H=V=TYgBr1>K5a3j_3EFplmb*PXE&vxjPI#HH 
zqp(YnV3yJW^Ix{d=r&gHq?ajXY!rEV0u1lJcn+i0y>zk-A0E&}iauRD{ofB~KBSsWS$gBx;$!&lZ{F_BUZ|t^KTmscXLl%N z7t0R!Uws|U`wC6R&4l@XRL?QCr}|GvPN>dV{XF|f)S(F0PJz$_S`Gf^Hhk_C7=9Xk^SZvQaL_~ZO@{Ausjle5kJ z7Fqc_T0+!!=zsp=0sFMTQU1fYPs1+MnGbRuBTRO9kIo~DeR_;*eCGMj2O0jgCl*CP z_r>)S_@w*TQ^J+Wh%s^tE6boc=a2NZ1Q}^g(@rcC`b?Ac)x`=ul!=^Z#|0Zjt{|QP zT{!<#N+`Cqz_)*_DPp!sCuM6^%jhh1$bA&A~{ zg7lOFSm3K2kFSQ-GNB*(B!RX$&?xh+LhVy>T1xfy3fzRv*_UeiVvEgFExzpl9r~Xg zKB@3<+7oLLr0lGSPKYqGWW?`;2F$<5RZXrAjxVo^zaI;%eclt4dT`66gqh%tg|uwlY1Mhj%U2p?H%GDKIVoca^f}FRFC{f z9>h0o(r=nfY>#I4H+Lr z|JPm9`BD5m>)+aAAl|@#9*K_}A!&lz{>7FtW}6qD&mG8gm9y<}LG^_r7|Mc}I zL&;4e;K<@D*YemrY(%Op&r=QapC{O#st5?1uL)Y|>dImQ_Fw$E>6fv^;$%+wi7C0LCOvdn>V+t}g*YC#Hv{OY~6A{IEZvz6XS7IrrDmMFDlsz_sODh`(705=SSK&dt43< zr4ibMdI$t9POdx{bf{S3p3y|WfjK{8<*IBH$)Dnw)|6j#vD(cQW^|XghI2d@tx-hh zr$k!fK`W#Ek_vrzD^Rg}zK~~6?Y{~|CwvfE*ZKa&$iL@~pPn7Op7s6&6D~tkr+njb zyVI+w{gkz>46klU!OD zYa&juSWP@V^IMHrQbj7Emn;uY^#@@~>u)=ZsQJf8EN4Oh;XgUk&Et z#qK`z&9;7?VwJ2m56?xy>O}Lff>OR{asbvQGibJf8QrbKi+j9kD*tZgHS@^N>)*V5 zeAtc4dwA^t{e9CF8Y~6NYdy2}*rMu%{TJ-<_l4+Kzso1VRcw7u80ZNt~)1;^oXpYY}wSe>Uq*5d-+n0FCpR zEc=nr|NGA)WoG!_*(^ofBx}zpYy5q7)t2sb6YcBIKC~;^%5REUcsGa*V)iLQRYc7{ z@q4DdfAZZY)8F={H~W1P@HnJ+bWb`#j`{nG*CcyFK@su~+x$W?Yi~^0W^4Kmrl6#g zu#$7Li$<<}Ws$8d!iLJ7IP)FZcJc*#$`Jo&@GD~`<^Mtd^8{cFLO$POWj|NQ0t?K) z>t$*u9=L<-pd)GI<0+My_x~oCSX6Hp^J0Ms_IsA)?8PsSBbb||%1G`f09}+7LYijJ z#qUD=aB8X;DDjoxJFu2^){GI~fM)!o`+om^+Lx8reQLSZej%eWK=#xA&p}cDE(0`P zBrm37)Lzgmo5K?&h|xdIw!r>(dcCw(UTS6?8?G&PWrKBMf;8%>DrOTMd~K6jgh==u z^8eucQ`LKa<(&tAjidT6SXoB=>JhNw-a-}cuTx*_iA!phpE{n|w>65{EH6T?OV|DG zN_5P-k7g`OBedVHEg4JY-+4TvME|sbnmI%KLw(<`f6+9U|HA%vHouiGA?H_>ln`27 zsEYL||BVHfF|$E7`x0n-&j?D$z@R?oc8Qa;?Nof0yE(oFT+^V-7ujraZO*>W0O)q9 zr%33!a_?fb<26$k&-`eVDI7;}GHJ46KGb3U{pdcf#N09XeiF@Sn;Rd>TxPwbS9k<- zL1|3lEt=W)3ke8IER0vS6uONQ=`@#HRV7#!*`RcyB$-_O65M}1 zM~ld;>p$N+ck9l0Zb!3A&sMvaokec+{fl6H`76GVHZ=8R#s>pf#-}1qxVdk+FOr5f$!s8PzxPQ`w^G|xv zlH=N4>^x2g4ogZb-aX~HFL-Z8Psb+L^w>(x3Zi6(8Nh(Dg=JP1ih7iR|J=D_&ifk_ z#c?{-|25|%lSKaEQC+$r|JAsduT8O?Vv5BMq{T#S;Rt9h=<`^HK*Q+u<0oPkaORN& z&1ke18Tv15%S*8ku-a=Ps15xWM&YgelVQcN*!zzAuL*-2{Cz`}ng%?B3V?28Q6;?_ zUKSuvF1wEpp{?m9Rvt0^{pSS&C{uL+6pnO@B|&PWEy3T}44z&lWU)JZ=5(S9?pAP) zFT8kkBLBP*z^X^X^H2F)&9il;W< zT&l8n1y%Ot8oIS#P#}=fULeiVyR8;dqtTh6{MYNu1M9cCO~wANm3KS6a9-cfoRW5F zy-7LMfAL__cKwlMb12x>;l)?A`EFPIXrCSdy(w1IQ~u|(etYru*OatqHvm;bmhVJj)%VnE!hFoQ;Re@F)|BuQOadwjT8#Q+J^9U!)9d1tLj! za@kh=h-rF@{;nDyEKdt1bU+7@P&%L?piE+dc5mDRZ*>*I@tpgY&jjbZzI(pKjZVgW z8(oWQQmx7esQp)I#Xzx}Q-E%0{;|k@_Zr`sr7nW({m`gko4MGIcMok&A;_akZk$}O zIC-!IXNeVc>CeI^;tyudHCqZdDg$yT{!ie+-ZLBe-miTpp!Q#%90O8#cm(FZ`c7mS zZ63I!ru8(*7g>pe_do3x%h;Qj*wUZrj9%=IF!h2pXg@W~Kly_v!ZAmXwmT{`f5Z%PYBa!G2n$6)y$k42%5Sa;vS{P*-BPju)C3c@fISc^SSEBQw}@ci=Mu2<)5TjV z$1J~fa`o%^XsNZzV}haoD>0qxX$!#xHlKMW@&>2k{nMCNly4tnAsy(EOr*18DsY!X z`JaI=&ixs0Y(`>ixa@K`M`g|y$TYM_X;r8p|25-j@0q(Fe*gZ}-}d&{|21zJAv?U# zi<*Dzc3yRtWygLOurwb=2LG^}-GJQ zZLjnxYOBg6loGI8UIxXSCQcdt+Esd*hDHOY3svPQ4ARm&sS2X@u2>K=^0~fAg}NQ) z- zLqWWsgmr)3w0*d|CODonS}ipg*o|JQSjHx`I`LH!=|*E zIM+L^G5DY5C&Rfjk1pz}EqDC%zD2{7x)|!Qf)~YZ*_=Goe~|xNw0!Tb1wrIp2rny< z6f#Id|NAC81_y4t3Gj|pZ}&?%#kk}{z*?|DwNkwPLk>$AKwc-b&1P0f&`$! 
zxD&o3WG#!xYhfe*FtXhBvz`98C#-z(VMmJ}#sBdN%jr%Ntf z#`eboI9+)68QUBB-@iX*UKfp@FpuT+r<_6EsV_HF=O<^wqk5asD~G{O`YhUNZ=TCz|CH z|IkaY1PmX;z(34%9El$ztxAq3oXunKV<>9P4E-0gJAChaH%Su{`u&KFRt?D5!u=Q3 ze=+W1vBWvT#0ZA^PdzRRmY&vR_`0GbO7)*|l?=WhNv19KyW7gQgUhsTl13W#UrbGC zAq%pfKfwI^kLZ7XHG1zeI*n{8wnYBVYyDRHC`a6X31vlgVV#vt<^`ssZx&vpJnjE$>)_jf>u(|6u-o_aHip zaQ{v54|%-%Tm8ZYHe)n_;y>&A=rU=WQL%T561sUh!i0?VM^UFs94eR_Hz1|S&+@_C z$+z5s&`ty<7j@iAyNbVfeYeNSgRaH?3q$|uok`m~>Jxhi?cK>$L0>#MrBli|);utA z;fk4CI8!bwq7Sjmz6;Gk-||rZ*TDz>-t)YTXD2HE{u4fk*{4+3^UhNJ?|0gEPwivb zpUUFg)L!}0jIf!hH*uYf`cG?#wfDlZK>prE7dR!7OwbzgUw6xd1>2=5Q6f;-kcB=; z{-C_|=NF4jlP*Y!t|SJ46z}d{6@jYEP7Nq1{*U=Xz1O_abS=(5yHWqS_4c3ppL`wr z`9aW9^UvmygRKm}{?qRxx75X0VYI%n=VSlrUxUjXWRsu1IMJM<7l*0vRRB9$gh@YP(9WtC`xuvL8E}I*r}K{tE;D@cp>#$C}#E zwGj`5Mhr0Uf7boW4Ryr_sBZj+cdxz>5IP4j~O^#awG;4GZ3S_WizU^v#;!hv>Ki5P9LigZZ^zwaoJFg!e%|!hN{XdVo zM^~vizJ&xB^qaDaAaBcuVAAi>D0Pa1joO8^#Su=Pj7mU+Dl{7 z;h%?p>`~OV=F{Q%y?Nb@IOiINteCAJ@TitcHxF*g1hUyKy+OCSy=db%6S6&r@?WI~ zdf#I$^1P6EtQ!+&(yi{rU&?np}I2tr~s5vI1W z_xgwZKi|*%rS#|nm(+{CgyTPWE=jK&jR!~r|L}%QgRXAS6t(9EfBv%Jk7aB@&c?tb z3+M1cz7wP=F;$NK=aJssnKTmTAD16niiFR{f+7F@CfvrD2OsnA@hLMB7IfhEwD`&g z=@|xj>(qogEU>3pSVmobp~w}7aiDW$@LnBxZmW4do%W*&Vu8A*G(1&zx%h3c-_GBNXU8JRnt=NTUAg1KFlo5H-BqueXp}de$O2F zKOtW+4EBUvI%aueP*>94`f-oxy^v{h2h47WIf*CtX&kQ0+J5zm6R+^5elfga5g? zfW4<7;Z6}9Hrja)5EDBH*!Ic2hC={F9v*_ky}kwV&2B-XprMOsqI)jv)J4geScIKV zmH!RRKZDmEKfqpu!4GGy;MZYFTd5B&GFyiy^WQg${>+d3_ob}R1YHJce`Y@`UdFG? zkHq|!!T(J4zh}Glym8)mm9mc#Rj&H$btAycF&6nha?^QjDu8~J8T}eeZrAGPcgMOwVi)vt(Et4OoyQS24^10u*Jh)sH?PMjVlT`kl_g6^%CNZAAg(=}@1x_ZE{_|ZE zOOR5Cn@a|V#QhhIAIO(6tj0#nSFb>5y6=Lz_E4uHM}ePm6?qWI!Ia3&L-{Y9e}Joe zEiGUJm4DA#Ex~CpWVCw^zeZNz{8@FwKvFGtliMP-qYpZGzQMiiF#mb#ocC*sBLA8Y znyvj)ti7{Yd^nZ<{t``0v-B=0)}jArbhV7szca^Jt9>Yr5>0-F{&#b?X=SPS2ZtHq zxATftEck7^Pu~vzbJzW`0ww>e0JIFI@7aAH8V&ic>Eog+aDO`{sZuJ$tQo%Y3Ds*F zJ&FNLe;G7Z1(EErj-VbrF-|B=&_$ zE-L>HgD^dD$00lw=5x&JUxP*hE{eoDZI9+`F&9R>&^*JnFFRy~V(*s1bA_Jho6W_T z6z1Ql{nwS>Pk)*H;yqopXOAgu_Dkn2@*`=r@`Os2&|+bkCS)xw+AG`|t**Jq|0x}y zc$v+mL;jz%L8%fnkLgykT+6q(QX6h+{@GA;^~kT}p6HeM;r-K#&FmyKgqCFoj@W;( z+Lz-!Ru)(iN+XefD5uMM7@VzKd0il(wSh7}C7Gpe#uFffT_DRHU1;QAznZILT}gD3 z4O>1L%71n3^7E$FaP}efvg>9VYW^Xj24DLy&v9Yk%AV=9400{lX5glLkl11m&&D-^ zW&)7riB*~{t|dD|mtDhb`*8kUHL%FxROG7<=b+4Yiw=B4)F{Nca)1?>hQAmQbk3a)QleZ0hsF|_-KIQ0kMoelZ(Hrdl3Q~jnFXFHQ{-A3Bq*3}0EpSCRDE|#W|MPNupD|HXX)dBG z^60PvV)nW3pfO9gh(hZ7ah_bGvU1v|*!v9EDdTfkCona`tA6o-;y;i7<@L28@oymVHFLET3PPPcTLFuskZ&}weIO_bLVzh0c|+{ zwcTX$+pLn#$^aV>`8X~tt4+V09rw-4GE9eH{{{2K{T)ZrC+!K^>Uti3D7CU1d$p|t zy$J^Y&;CD5=3aok`hpG?Q28%oS)_?;30VdQoy_vt@cQK)+priGm@5YaG{{T_<52^H z9P_-65)&R@noe7Y!`2Z0@JK!fqV5gU;Ge=CW*l2}>(AU$rjaHGGKTW+Q+7BKvWy%Px< z8u&YYK~tzN=&GLASg5sOVE=m*o>e9kaNirfaN}!aB<+1uzWGJm+fw;2i+&gRc70`V z3Fg0Kb)!RU29^!IvjL|$6| z`M33Hv3!hRBmY?kr9MeM-Jp+(XJJZzwV#PH; zq?@_4W+v`Ev=M_NbK6#JFSeneh{~@FT z-!>zzYszW05@e5N97>5Ng)0`CZ1qbWi&I=U8w~mPS;v|;nZmmd|ICu0s8V2x{2$bR zTCFSUy0xI4e-2G$c>Iwu-R&WVWjFIhT9=|e5Ohh=OkfItw6m3|=wK6!5s{)T_B?gs zj#}rU0yn2oikiz#8*T2Me{%{)BP##(!@T8R{Aj-?dvHY$F)ttNvHxN*p&tK!MDXKV zeb@5`wrFJdbs&$3UyWY}9LYEEpLve4#cm4Cq(UUU`7Q00gq2vV%UsdF!9JmG^g72o zu{#gTvZL_oh(+)0#X5YfOudUGFKuOm@&1#`Yvn0z&d;7{Xmkcp{qMOQ0w?DSWto== z!1-ryn82J?xZN*RtI9=h+;y%wXeNKZ%Iisrn2(ZXH)?Q{ zADVxj_Hbh=g71S4A0x;)Kdla<0ow>ASTD>9cuaMM3|$QXF9x)m)`doIHinA29nX-c z1;!Dx8yj^^TiwPbJklf-T(rTsAuff6`cG57yM1nMNc>Y2V4O}Pj2`n}f6$cg-+CGR z&lA+R>VAYEXEtwe$p4ceMi~nApX2-~lJwGgj~IWSOGo5I^)4~~g@aC#gk5;sP_NvC zIzE0wWfdCPdi<*1Jc?4AxW|ctvZ5~4$j8bA@fB&Eh}ZW7#mPjEVrhz!@+Qx%fJ>4#pdmh}mWZ3GS=6 zIMJZ*?6yZyp?5ks&YyMZulq%z)tl0P(Et2$+N&ogzJGPm&i*+DX5*3nYz}|_bxdpM 
ze=o4;X-YYZ+kHd-JMw>IvOF#Yc}D&r&s(xHBJwkYN-vy z|FO<3q=Ohr*<<+JL<9nUl^CS!EBUJN!3^AEpSzU}Yt{>47O zb~hwSnY^ncs=PKm$P{wN8s#R~L!;wWH%Klis zU|w5Q5TV8V7bpUWcrv=yPhKU>CA3uk`#-hrmNe}=EiOpJl>P%uGif6bVJx8dhrvN? zwll6Ci>YIXe|ml5-YpC9x%(NKYc9*9dG0awIl3M~yOd>`EyMpr(ae7Q{tVC#zJAJ^ zx=%)Q-F)+$#)jBbQYGh!%pB3RryaM&)4?%Osg_4uSTbkoldzn~{w?xFMZ>k zVrEA`a#JwMf2pRetjomw%h&(zcCNSf57s#GiPhIl@^tQ-aT%|B z%)d{MGOvl`nL)$!A2%^Nn4-Y?7dkzo!}8O|Qzkx6_88`SEbQxVS8>H{3<6qk;Zt;aKJLK^wg;42 zg;>70w^b5;k=FCZ$Z86D_OhVMho#ysb!(Q%_%sIHDO3g-D+XdK#&c#(pdnlbw>Y7wU8kq}}{$o4tD{B66PTz-bYz9GM z|ChXj5F`Jm#yf{>q>&W=@bK5tJ9xbuaoH~e@t4cYVx0BuHLVOhoe)+7_=AO+Q$X-u zTF7}Z)pm;ccSHZ_@_S)Qa^utfj=vg1oz}gzeYCuD^@Jj0gvvhZZpxo9;ai)40o_pl zDbt7>=FBn2yr5{ji4}%iyq#b+13w{H$YuC<4$Eb^%~hU?e{p}RB$VV&mRoTy#_n8e zwc3biDcLlM<%`O{2eP+$7T_C5jrlKo2MAuJP9}ibf3;P8o>$~C z0p}lm(m5vVo=or7I5Rifo5LxVPDwBF_-suMu7*N4dxq?zra(+`edPc2&qhCYy!>s} zeL9P;HSiA^i*kMp)u6GJ@;`4L&vy*Z<0B@Y@;`sYS+^Qbsb9f}X2`-VAlG&I1&u6Y zw^}ms3SXE|$0@IwvRk@XyY&NC* z&;L%th~}!P?l|yzQ~NL4hl9!Icu+`0%_s-xYXk0VX?zcjJhyGK`~9cRrg+L6@2z+` zKL(@dH+9iJVE;Sve}29or~2Oy&(gJdDb4=2J-xh}V}FX3*~{4E@FPw#=zsoB&QrCk z@hHjY>8K7-mzl>F0>|54n`<6X761^W%)WhS5x731_NDo( zP?ECPNvsgkCqRu;V3%5{)vqWVL_-Fp{}_oGmf}oR;8Vu}d>nWa((X)jfP0(6z(Auo zoB_MK}XZz6GjQqm`AuE9YeOL(t4E*P>AG7VJeg$eO z|78{v2*<@qO>DM#;1W)5s~nfRkjP0&jZ|MH+@sCAXEWHDU35h??Ffc0p{O}q_R2D(Ey}w269OMi9TFEnGvGy2kqfKQ@%6DcE>;E z_}vZfzesER@RivA{yW`vgm~ampL`n+nqOAt^%=2YWOd?d-vy;Rk1e~Yf74gUtNwX- z|26Lypc%KxG5>DZf0f<4h>yURRxud2svhUw>`YBo_bw)#mhR>Eb{+AP+M~aUS?-7* z72mkj)iHlY1>Rz3YQj^-f*`Icl<(#a?tkmfOv3TBO=aY#3P&@{;l)4d?v$B%O92MS z%!ndOI{>S0Ajq|XwEp&yc|g8r8^gd~7x8~g!iX8580J6Aw$Qcz^}mN3S;G>Fe;BE@ zU5!o6T8jT1|COSdh-3hWUbJWG!U(XfPP`t!8^H?*4tbEDib3uF`Y&RM*xjWR|8Thf z)E;FLYv05ug{`;rG!8;&F~c{%>Zt`y%Wy4f7CAnJVo7KLUx;tP&EkorC#{Gs0jT}r zqQs!M?n{c}@DC(wjuv=FCfK8QJ{X z4vEAb(jLBiOm`Y*zr-DMPxwKZm8xt_3r7@$r&hk>J-yH_woWX_#Roaq*5R{E=Xk_(~^e#yP^Mm z0DfBTo%z52MJQ`=YF9*Xew)?k%t@Z;~XO|2t90JexP}-5g!%VDv!g2Y-s)& zwEx)uj+MAPcunQsN#y@9UG<9Ge6IUQF=W5iqg0i{>N#QHABx7Ud6%8ZBFmkQ>Vhc! 
zXB+aLml$y<|Fa~!yjDUllgzajG&jkia&KjvL`4Dxzi5N7obW06OOmk$L;W^~(+XY&DK$=bVMg52J|9qT( zIx2WS&NxIP@16&~UaRe)lgK}ut~f+9vTzCELbj<+#&foQ_qyr7{tHvlD@>b|EN;pV zKJn~M=Qrcle-M$_e<2CXq%k^$PC~pcE$}(v8dsSnIFwXu22}q$@4FBa?;210NPKsNibabYZCdOd{jSvwhL@P4#f1cvt@xT4od~5Q}0r@}ge^Vjr6vl?*UpkC0E-4cOUx>>p^bodHU4O?YEH9?7Nwm46@Z|N5&9`G-%gnc4u@i;k;!W;TI<{Aacc1n!>!aMsEtMg-dG z_s#iU*{87o%fLT8zy3Qy_RCwx?3>8{q4r<@+(Si(;vc??hZh!(t$1a+nq2Z>K?}-1 zg`tGf5pZmw!fPf1QyBl1D479<{tE`FX~Dh5+-2@g>@zg@=>MVSADXEr@(;fee4Py% z>_6RYAnAE|BPu4^o*J+B1ICwDPd@=&6asH{L^q(6)r5=M~+v&!J{)_X-fBx&# zEcB33{6k5SFW!8be3q-MDzQ?l3}>PKlX78p4?fhHRqeTi(*Hi!8Z_Ad^@Yk3eakF< z7?%M*eW4BYqXG6`td7K4i}L?$b~5k}srhHZZSpL7oDZa2-1O~Po_&ZYTT4oERm_+K zd<%wlFyYBxbnOuT2lMYge)p9z;Qk9V%#Z!2>ZIloFvLG(@$wZx-wFB|>dtSO`VzvA(IM&;k5xAc$T0>wYHkg>cYdaL$=rvCgl^2+(t{Db~yo)kCA zEE4lyo^#Im-{(k$mF+t`_n=*2#^Hq_No}3QH281-Md?47e?MgW)u^YSIdJRe?8l?2 zf2qnUc#U4KKqZAYG680oz-EK~Qyq)_UuVCFnUS^y-veF-|MSi)Du&!Z^`9>OVhqY;qQJXLdQKn3njuz%^<2y zIdAM%^jF*m4dws2HL_dk$Y6kwFPf17xPhPGuWJl9 z;<+d+Ko2hX*74Y&OS(?wzo`C;>xbe>Xd*|zD{yH4^%%R|a6}9fN-T@i zQ|swK^Sh;f79()@62x{1R(0*XqF0sYUnhWO7H>*v^#bcac}H=^=ie>r@m zM1Ra+5sEwjfm%7v?UyF$YS8~<;2+*w_6rmAx}!SDQ2*EOKaTl}rd|7b?72rcOrKUu zBVbA3)VqX?H8uh;li5lK?nvMHf+UsNcY#*apm8b_d00cnqk*TG|B`&!?vcKbvB`IF zT_xxY`>&LRvamB{HlHqB6<%|hVYI~=hqrD;TNxMmKUDr5k$)q{FVSc8gcWdMxc|K| z`9#c&L*X=%;y=&XK+TscLi7zd|1dHAqyIhcp0`d*@t>Ex&d`lO55Tp&d+7f$=s&_| zc>RK?E0cytCOc5OhJ?1L=z|OT2k^J&ptnDayw9g`NaX+cbxC%YS=l+oJ>ulE%qq>` zBmXe0U$LFe=-l~ftRw0_@5jB?_k2+iZ|G;^5bASDjtlhima3VgvW1XQ{O9Rk=Qzd`G9l2=|AqPY ziEg@Fd5ML4OvMXD^R7tLYbxF;5<)w;z+Y20H2<`e@a8=o(tpN$c|nw4#H93}1I#I_ z9^W`&@c+Di>;)CxKQgC$oPSQQf#*c9Ad|%;=_5!SHX7xY*78IRvCEOc_b>-lU`sfg z%P@f%rv%A#hFff%%A-$ZROzeC2nKR^{s~s#+0 z{;$Nn!TVzO`Ytdkq>G#(O--YdL;S;+`!_pSfPw#f0Q*k~3_K0xzi|IGW4SS?jBsxG zIR8`$$bQb)b25D47g@Q^^$hO16dEH8S`Z)u(nf29Cf)&najO_&rK$m@MwIf6VNE1J z@NonGkbOZ`DhgT{2N_*;9Y!7tBH*|||KY0}KhLL;L;Pn${=L^>l#awhm?8hZdBWg( zUND|k4gP1QseO0@bEyB_(0^L!1+!1#KZ*G0|7jCK>;rJx-OOSME6Ma{_+&9v_D99n z3|pz&}>&U?L<7X&5@1djrXXAK*q^`tDx}Z`@ zv@m1^95|3Xtp5-ynbRIrJ{DeFR(vR_)yB^b?cASyx$LHE;0Qzig+c!r%D?CHUhCs2 z{b#8EJ=`vsntz7+Prd)}Ae1lCy#Ck!^;AXV=~|5-Arabtum!H)ife(PAY*T$bAUs% zV8mFupiY`ZtM62sQ}Yk#GpfQM&LbfQIj9?e<=R*)e<+Z zK%(qabt}WrZxcx*svT1|T5<5TCaVd6qCmvF7(zPK(Le;vmZDw^dIy;bBvc^CaXpcwwD|5#73U^*nf)r zAD;uSK~qQdpY9x`ZNZzH^8Zl!4|mo_|1(SC$w!>YL z$RRG{QO6_U-2M{Bi5##NLc^RJs_0^_3ABGZ>DYleC#@^}Ki`#1%QIV;F$!cK^`HH( zi7mc3w^WXPvUAShT^UE5P;cO(7Dl02>FrUo;ecvt=K!@^Q4)JX!T%3Ps zO1{fE0*tIjQ~V!!v8kUT$2aO{{QNRxXi&PPXGyC)mH>#x=_SY@Wq= zs>RC;_?#v}`}aI8_FuTp4$LTakG~qc7qtKNe^LDxlC^k9)uwjUOx*loR1*LI5voQ!mTq+qTUC~Jh|qq?cH>$p;iLYu zyt>226VI(OC81sS4)b4Uf7Q^nY+xb(xl38>x@5!(&>8rLL;YWEWfl`;ZwM;??il&i z?1Qar@?-xOY4OGX>3`q9mx=72nR-M1o$~+Sp{3Sji|fL39HLQ<7mRytIOzWQ1eJY9 za9=z^OTcjd1!*#__E7^U!-ZBjtr_}HF(B zbTf)-I_U0=bT;~*i6!lgAj_fpzm83r zP|62a73VsrI~c$)TEQ4Yn&1aV7QrG+<^l0HFRu%|Bj0x1j$A zd^sf`#{N@qRr}+=%#eS7Il0+2V>3p5Y=`n+!}`zsGY(%Xfebo)7x!OlSzR>De|_w~ zxaXoRwG{JTbfwafM)tIP=8XT(!}^bzos3~U0%rVbrfi|Z&7Z#?0C7&X zC^2IOkmITT)8AK05s+lbh6gu(-Aw5}|K-1^`KMCR6UxKW3yJ;j*njFp>&}bB{;whZ z$N8)U#O|zXWM0Jni#exN!solu|19^fLGAd$w;U8OZIJ(5W1Khx)QR^K>L+iTsZ5TD zDzzF6gV~?Pw=ppPUQh-6X-kTIqq&CpXXa1o-!#S_&L{qV^N%R|?;;lg4Dk=!Hsa&W z^5gtdwb~cwADkvFhv%QsKOk|+E?O-DzM=npL0@;Kquf#iIfE?w*~kAjP3)}nnA&Dy z4d;WLucKnZ@f>UD|H32BEvovE{NQMDgI9h8}%PJ^56XP(4Ee9rEqwUuLE52RG8W6= z|H<+IR@k}9&r|ASE4VoSFqKWQ_+CA2=)d^g8CV~+kGs{hM$`X~Z7_Ge1$$G@9#D0z^{a&XAW znbDhahHHem?8Fnyq5iLmAj{1s){l8u(tLPp>Xh%YOM&ls1jjfk|8=q={mO3azu2~@ z*2s{5|HbAG1E~IYCSQ_YYUgxre-nY4mM3~x?gP1O&Pn0U%5)m(4Q7*(7bVTkD0Ooh 
z^8fT%E?k9KGwlDO^q=AVSLoW82kQvH{nvMGjq}fJb{YTmJFPAydMU0JQ2&`V#(rr2 zsXD&=%R%+oE2^T^EAL%=S^?5^V+*{p8x8)SFaN$V3-|{95AqL3E_W;ydM>%9@Hr?} z?Qjp7hp|P=s%71;Ph|i}t=bNKYZP0&ygN(}6 zG|hkW&yo9AsQ$0_*YV!R{CnXo$Y(fA8oyr+nlU8HA=nvYn12sO{b$(!yq%8xLuU0u zs{j4MFaN&8g!6_$hbe~sQ$mJUm$r~?%M}l(osDcIt~-(c;~-;_3mO7J!3T!5H4O6) z$2DYu0`;H&XZ|@dpe*50^H1#ko=BR>_^%M#taSyGNngK1E6u%&-({=N|4j8?L~k3x z?7Et69Q?8WeIEv=bbtPC=zlN0)76h(hm#o|;55-fiK{C7gqo(EbIPd!J(b!Y|Kd&w z81nzL7W2ZP>wo>HL;YW0<>-odfb);pV=YQ$rqlKx2koa|^hx%po*wl7+&uR5(J=p~ zfcMIj^8bv;uJBI1eRK75TMDX{)Ym2&opogKv0c069OxPDe}BeHs6>oisn9Pf43x4n zCfoeu9YOckdO~KM-Y^@KdNKBYjkFKeO2P^FZ~tX82K_(CKP(%c1MbK_Jh$Z&e2<|1 zQ|K^Wiu|9$uGLjzzkD{7{|YAfUVqXk|MPa=IcxIaYD2UY6 zlg<;lMGTyORw)1;Tpa|FUJtrY9|6SZ_AWK}pK<=#Xp7pH*Z;Q<0{VHt`%fR6a}=C( zZa+Oo`qI+tz;%TT(EVR?s~d$qCk1E<1BcQXEY6s${NUmW+;#Tsa{UJ7v%ndOZ=c@- z*Z+X_e~HPQX!H^lW{?2g{{_AO9X$RcH2nv#{qw|~2N)a%nL$nbQ@Mfrzg_|vJPdXp z7y_95f#?5Jg4;hA<{C4InBHRY2hV?{Rspx9n`f^$6bYO@o)26pBEt9qa{lu))@-Mv ze8yQ5-bhST0xpE(_Ic7Ekkl@z1zi7pG|>&%g@fGxW%!6gnu|fW0C@i?c>D*t|4S9v H39tqLG7xkg literal 0 HcmV?d00001 diff --git a/resources/nuimages_demo.gif b/resources/nuimages_demo.gif new file mode 100755 index 0000000000000000000000000000000000000000..7436fab01e153681c49e039d8ac2a2fe010b5596 GIT binary patch literal 885289 zcmV)NK)1g~Nk%w1VSfT?0r&p^N=iun{{FYOxvj0QQBY1&Qc%Ib!u|dIP)|)(R#c^? zs7y>tL`6lZsjIcMx5L84#KXqQ$;-^j&C=1+)YH|~)YsP4*xlRSO-)NkNk_uL!z3pr zy}iFqPfgg@*_@o6M@L1it*yAYx^QrCSy)tkeR`v$rMtVmOifHbK|w=9LS9{3d3biZ zy1iFdS5Qz-AtWO~LP3LrfXBwjN=!=_92_t)E>u%cSXfn_pP^b=R&sQ7udlDPw71E~ z%DB0^#l^>Xd3u|ipUutBJUlzAtgTE>P*qe?xw*T%yuP8MrAkXmsHv)7VPRWaSm)*F z>F4SS0s<}R#I12R99A2Qdw6}TvSq7T2)zG zSXx?HT3A_GS6NwBT3T0ITUlIOSz=sPUS3^UU0qpTUszvaTViBgVq{@sV`F1tWMf}x zWL#}#UTSAzW@cn%W@cz;XK86?XlZF@YHDg~YHVz1Yi(?9ZfkIGZE|vOc6D=gba8id zZ+&uSf^1)nWLKSDQmkN8zGYU*Y+J~0U%_x>vT|skcy5${a*lp+hkI~;dUbnwc6@w# ze|~y_etUv|e1d^~fr5aAgn@^Jf{2ELh=+%Xh=+@chmDMhj*W|ukBgCzj+By(m6MN_ zm5`N{la`m4n3$NFn3bHFm7bfHpPZPWo|>JWprE0nq@$pwqMoUxpsS~%tf{4^r>Cc= zsj8~1udlMPv9q$WwX?Ifwzs*zzrn!2!NkPI#>L3T$H~aZ%E`&g%gW5m%+1fw(9zJ- z)6&({)7I70*4NhA+1T6K+1uON-QL~b;NIcl;N#@t<>lk&=;rC^=<4d}>+I|7@b2(S zOGr>qP810VA^!_bMO0HmK~P09E-(WD0000X`2++q0RI3i00000e*$R%hZG439UvSZ zA|N0L1|lXRBqt^(5e+9QCMPQ^DlI81FEuO)1uZTwE;2MP9vd$&D=;!KF&GmvH8?Xg zF*P?eHaa{vI5s#uKRG)$J3Ku*KS4e~KtM!BK|w!3Mo2?MLPJeYL`h6VMMXzQM@UXk zNjo}8Nl8gePfAQnOHoryO-W5nOifNrO;uG+QBqD+S5Hq)P)bNpP)|`&QBhS^QB_z{ zP)$-@T~ks}Q&Uk>Q&Uq{T2xtFRaI10R8m%8T~}9CSXWk9SXWqDTv%ORSy@D(6XJcw;Wom0`Ydt?}Z*pxqIBrf$Zf|gIbaZb(LU3_%addTZYiDwA zZE|>fb9Z=ja&L5Va&&Zbbwo#XcX)PbUUqtXczAbud3JhyetSnpd`wP!cy)e$eSch7 ze|dF)fPjI1e1v&%gn@sAgoK8Kfrf{MhgDUHhlGoWg^P=di;Rwrh=Y!gkB?bckBo_t zjE0erkdu&%l!}6tcy5)Fj+c{&hkv%0#zySB2tyuZD;w7tE*zH?)~ zzQMo0!@uq(=jG_>=j!X}?Ck38?dk6A>hJFB@bK;O z^6&KY@b>od`1kYq`Skkx_Wb<${rvm={rdg>{Qmy^{s{j7{|ThEs^F`Gu@J&)Ww4<` zh7cc0R5-C>M2i?HX54tOqsNaRHHI81lB7wFCsV4N)$3QwmoQ_>oJq5$&6_xL>U6oY zr_Y~2gYFtiw5ZXeNRujE%CxD|r%ur24ijeQ)?gtw>liy@^Xe7MrVZS~a|`zstanPDwwOzyH4f`2@J%fC3VzUx5Z9s9=H%GN|2y`N?P6n&@JSFQzEtI5di5V>meG_zgHb?&#x>-UJDxkVJB0VeNsXJ$zm7Zzly^#Vd$Cw;Wi)L!fK(@ILWA)YGGD+^6hmP#V8M&#;?v`S^rm?*O%iGx4 z`ckIlzK`iUrXXkc%jj-xcCGMmsIC8o@S}e^%G+pbzE&}D1Z#SBrou(cF~@*5Tv(*>yZsiZ1Y!Lpa(GK z)j@ui@?Q65(}$1ckOt;@+uZ+vgEompE^lj5O`-Va6&#X}Q9G>5{iNeQ#nsP;2D4xO z{syux?yoeKGoX7|M!*27PK>2fU;@{u#s<1Ebq+kE1W`uE3{5bDc*LL{^BAoSdeD!$ z17WxZNy0(~4_>P&m~CL#mKQQ|c|*kFnQmCawF$8Xev)MNc8DkK(a>>qP~yZk2|vO) z?~+nHSjL=anJv1qV*-Pm;RHd6aXb)>w=`oeVUz z%x3;kkW~|5AxU`1bHPSBEOa6Ace@RlrGv-8%we@#lArM%dp>X}Nr`sSXp z#3w5^#!u*6F`Y~0V#L;3K1nncs!z=ndHJFRLr+O4sY 
zwWHEFt23A}+e81}Pn2wBUz*}7w{EVJq|<`|I-iJ2w-xu4Tmvi?-|0N=<&!o=)l5=< z3e?5^Rk{>wf^!TL8h|1)laej$Kns(J%We*rnhmXlKI_Z&vKO?Qo#SXpTUyhqw!Zg$ zCTm^G-?GN`zq6$+S_L9pwc@smMJ(rAeS5EXEss*Y-78~N`dhy?*K8IpsdIPAVWNhT zyXIpU-sb1ZppqC2DXc?d-}l`RW6HSh%R_k~nBE%0*S*2?2ab1~WBA@OtMjEVe(l@U zA?r7+VudY$1?&vlst>UkmJ@H=##~l9*pb0C=k*jD$>ZJ>kzZ~ia)T(?Peu=JD;Du9 zC$*Ll-xdFd*iG2OvRmCLw3rYlqcM7Id}AKJ*Ux`eF9CyWWFqI+&__E7dDUJ6`q%vkHn0PYI?u68$c0Y!p^sc-Lo0gGjb?I_O$g~w^KZ*pvYVaX zn$k!XV@rj54a!Ka=c6pVYyD( zt?mCiDyajT;laK;r7xe2NlVSw-^TC;36~;UL=~!*})Tv%|h+F;g zS#RxYx!!eYgA~_`kZ|(MK6=5T`pP!XYs(pov745Y=Yy;Abr;*$M9IAf-kbO7gVSo# zNP?Ut>-*pBuJXa-8b^lz_j#onA!0lJ`HxS$uOk}u$y*=umS3&4KB)QbbN=F9|9byg z@2scLZ@K~+&sb%#Fxw135iKam0` zm|%NChi^M4YiSoghmdPgcX-ODcs=8M(Iab^{5UI%uR zVSYxC0TQr&FW3Mv$N}dEX5mh-v@wew^fbZwH9G5rB(DfG+YsYti*$JCcQ3xO}{}fq(XeO*e{=w{U~jfoAw{A()10xP}&Yf*JRQu_%ji zC^<~f0T57!x)_5s*nW(Wew9XYoknYF=Ti>jguB&$qlSp|)N;93U5-dxaK>V9R&!KH zjnvqLm#A;tc#WyVBgjO7p7@ClB!;4Rj$s%~dq!hYmx|c8ifEW=r2IQ;5CSKE;@fB zbYiupjTd%>wRUZ4mQ;k$4WyNPDmg}ocY&b@hA^3qRTW>T=#G@fimm?$k2>jno5zN2 zXphB!k6fUS6rhKCxNR?p7XFw#WruAA7#2IlkR{h*Ksae(HgiXqT$w0y8aXJ!Se3_h ziLgYB%J>8DHi#p62u*S$gtn3`nIAHVj-|*zVnl}R_<=ULlX|I_J?WD_36w!OezTO3 zl(LlRx0GXHluD@6UN#l5!cB12={$d6#(UfqO}UImwrO>6a)7n8gs71O|xtNQa7f zn3$HF0?8)tByOITNv0>6fjECoXoM2!CC3PfU6y|ysZ@|@YK;E~mR~7+JjE7TWR{R6 zm8*FW%P?CyQj@Sbjx4!ev^ksi*-!&ym$;dm{`ro)*_#47pgj3~!8wbvn4oYdn7=_e z>0$wfIfKi&P5yY4y{Ln3B14hMo*%PfFZDdrsBcOLDG_OzM;M;s8JhVMnS<#zTS<4+ z$v-892?p^JD7hl?If1d6pZGbKrpTZD`Ja`ClLV@DMmmqcd7uh93`{zlO-e4&0eZ-( zehR>kFF2uxnWb9F7FxQ8c$lHtSzwPzl2eI|qDeh!HliBljNR#xaaV6`*M!8!qT&UG zAeMJSCq8(P386%zERk_H3OicWiI~+ZJ$k4-s)5D^q(T3Bld4#xM+&LGnWVzmq)vK| z3<^dc_Mj+r7FGJGCS|3d+NC70KBT&sa0R9b_GxHii)Gq>*#@FZfOkx^6~l;Tv{#}q z6^%sz2p>9~bXuMoNmwn~nV|GVdP1U~pkiF$2%kU+sK5{{fh6rXpL|A&F`2E}il`R& zqwJ^-c{!v+8mZ)3t_5nK;D?|~8Vr`ou29;dq9g*o=&1+*s$0N*pDL>Q8Uo0Os)pH^ zc%`eBR5+6*X;L{JzzVFB8K*D$nIU7EUX-llvkA<)2nqNZeyR{w zgGV|_sP$PZ+nT7t^jY{JjyW2x<0`2jB(vyRsqO#DuEX%IGN-AX;wI=WlV{$>-R$P7jATwVd^7LxXPIryLt#~iQ`FgZ5Ej!N}l7ngI`-&9gB7xn_?g9 z1(XnWsbCQBDX1muvYe%|*ebW)%BYPhq~f}p<~p-`TeIrAt~ZObI-5@YQ?_Z~v-v6k zLQAxXOSDG|Nu_$B{)&|**F6DSjd_)(kl1FtI(Ha3k!<)qvtk9j^jMF73%G%ckFRS6i0imt;DU^+ z0Y&?d1o^ZBsV_>o79(JT%{j2tyQ&%rmD>N9jlT+6xtg_ZO0Y@wX832mAz6gmhPs@x zx*dDAn4q@F5rLBM3DSC2uKAi<7`JqbyLG#}x7oYC8?(ZDt_eK6#A~xS%cRKLv%qL3 z6~MTQTL2i`xIn9q)T^}0*{_u}HmAz3jp?;*%DLfFzA8*uyxO^SN>l60zNvSfR9J6@ zOF3zK8p#`h%t{KIPztQcEx2i|V`Pqvcf>54bhb&rylcRDtG5fRz)1R+#cR9|?5+%Y zTdez_7renoSFyd&JH{RW!jiFzG{}0$^SEXla#Y4i(A5Jw@(bk zQ9Q*B?7)F5xLGWR$ClZ=v}ioWL5l>*xv!?$k8B)_-TSoY>95-)bCdx-tfI^-B%P;=dK5yR<22wz-MM!o*F? 
z$-n!_t_aGae5qIL3oS9Ywu)y3Gqh=uxap?`k-81x^^xc$P4X^qHAJ*Y@S-6%>M_!bM~dsoXGSm&G+lbjO?tWV!)g@Ax1oGn0!#& zPz!vTzdu}~vIx#ZQ=i`ov!4HK&Qd(7p`6a^?9MtIbSfavyZFi+0My!xn6S*x13d-# zEYS6eNoO38>%s;gXBS7=oRpMP1Q@0$Y>|wpwWfQ_-wDVl=9$f$SYwGvhwRbOyvSj} z(*1k4Z+p8YebQ%SD%ze-0;9BDgl1OF$D!65ZMO5f$RS-{$TmmKm%F`Ifhm*Hr4V*cmbTO# z34QI?EbXy;jotbitwsOwJK#9jogAQsjn0X!*rUwYRFZ`Gn{JXl*+QKF(fhnA#oCoU z+MvzU70^QYtpN0DucS(}ZH&sOz1sNg+G-lxCpy+KcY9SCP-9lUBl*=djE!l%)@M)% z!mZtcjoc#*GbKI1^f@2b@Cw|G$c6RMuv^_H{=+QptoVD~kSuxQUDKevbvSygY%e3U3s7tYqhjkX)^*Fpj#9v+UW!rXK%j?nGV_q3_~)7C0pxO)xb zE$*=z9t+n%(>DKp<2bG3>8j3F%+8DL<2jI-oSNAHKI8*!=}&Fty4;UTj_FbkQ$5%c^#@Q%kY+bhUMQzLJI8a~k2{TeTG~hviqlKAapI9o!vV+<$S+ge}Pv zb;-9&lf;4}CVt|;9ejYU;q4uE);x!P4$qyC?bANv;$7qAt>}!dq>s+ykS^)jyS1jC zrLyb+Kpo`LtLgO2838@2RxPjjT;*0O@0czEQc&vc{;810E-Y+wM@8#cdztO~zP9en zwZ8D_`H>4KDAv8*(+upy-RAHd=LcM^b#CX%zB+l%$lRUGVo~kV9_=W<;=B&T(XF=8 zip||#&gB2yw}|bx;y&)=4(UB^LhoC9Vv+9mOw?TJ!JB^Rp#JVhf7 zPYKxd;@kb_ZGQ=Y`u235P;y_u{T|)jsIc-c&z6Xu z_~8G(_>J%Q=00gf&E!u1W`sfhPHy@^P8LaTs*7QvqK(v^pUQM)2Sa`O|18m1Ut$Rl z5JPs<7>GsAm>)@q6eLKa;R}XNB0dZvgvY`yH#B05Vo>45k5e*s3@LCTNRvnuvN@QC zkexP!FsbokkP=HZlq4<6v?*0jK!-B9M6*dyot&61aTC{++*7FK%uS_Q_1sl-S+%C) z+I1Z}u2ax@t`654+-umeWry}w+xGu$ zrnzM5I8uL4yYp@H=FX(4YeQ_7Gd>$KRhp0tEiMPXPf56v=`D39L^+3G{;?LHi=S zZwdq#e9yfTOpt-33p#{wf(<&dP{IgFu!DmS?K`2Pmd+cAu$N#&XhuF>OldzPj*5uJ z7E7||B#(f6XhxJ&lBm2QlSIYDGY_d-x2?`B7T@wE)Mx6%z zjJ7hvlyp+oDs}DBFL8rZ)80DW1~}tF6&1PVN|lZ}RI^hRyzg2KZ@eX4RL`L!F=P)z z3lZXPS6(ShkwFba{8d2*hiwoG4@u0mjSNGO&shvA{OCbyM_dub6xpkh$31&=@!K(C z7KAZH>NhPspmq>x&eUe>21)FzXJ$Z^L+?l@Mcd#uT!Lld6c=FOxFcTg# zQg!HjGtD*GjQC+V<#hOBEALFS+aRC()38fGA(T+SHoj6|M>ZST(d)ism{Npe#&lDf zYxeYJ;XoZVIi8#I8Jg!#O?6dQi|%ebc{)ZfJrrnV`a<~%TyY^BdF}u2Kw~>}w?bus z9fLk&sqR`r8kn^f>Zzlqz&)~01lB@r_Z@c&Z#foMZg5AUXm2QG#GA<@-xabTmgrR% zUwKI)S6`6jPMM~F3mz29;bewc%$F}#bMrRIl9=L(E!I3vJ2f^vT`5r4Y$r#JA`0=p zV?tTwpIVnmvR^7+c=yXyvpiEyZN_={oqIOk)S!hP+Gy{Z_YUcjWKH{N4&qb$SgNbG z8X_>j4lqErmyLG(Y`@N0?6D1GBiXTuSpS0&oOMC%6y!KzK?viHbt8rDwmWZd#fWdT z8ere}bUMQ&5O9YRT)iA<9znP+Pge2^ce0gTCUO3vq3n7zJKBxx5D~;7DPA$1O=S**9}FJwi1#U-<*azeLtgTh z$2=&$N<0rt-Td-XJ@_a~R}9hKuFNpL@M%D59>5yc*rzq~RZEO&MBj+Kw#M|eaX_}( zf&J*Wptx<1aC7q;0K>(s1P-o&bz>m|?`22@u8Uu+YaC8=;z-Fg(uS$%h7o=D!4RVG zb0jRG2~UVZ72@fIu&c@KR&h#_6|#n_t6|Dq>9{PRLY4!YT`XFruptt$h`T(Z5|_xt zCMxfVQ8XI#6sI>9aF2S{Q{&aT*dP3e!FvF5<9o{ZtLp!mO+#R-z!+)LtoFSxeG}?t z9PNm?4 z_@N${ZmJ}DfjG^1#&XtY1bOg4*E-~^JS^#u+lo~m-w97XE^u9fG+gZ7S~$9DaO+b{vVtCSwf;31n(bK9r5`bY}qDK+^BZQ@nrM>sPg6gQnLQkf@QH(2*a?~NC(mEW--Vj!_Kp0 zZk?~8x+D$Lo>q7>z3FOM>)K#CQMR*n9(Zc&Kr3>{y6&=`GhG}ls-n%B2w@v=t6Imc zW{q0cz29`*hea8e(5(yF&V33l-c}_x?)@HKCy7 z9%)kemXw3|&98nL+g|`XHo$5=M<#T@T+|V^r7L8zya?l`$DIt!d~kB?IC|h>lsUtK z-mq#r{NbGPRKz3xX)jI;-l3`!2;>Xr7@tKzVOjcFQO!tL%Qv4Gr|$qZ-t8JTeQN&{ zw?ud_GE{Ydha06>($L(|cK!ThT<5E^0IhR*qfBME+BI-@g{2tI5Z@&a6evm=tY8nt z*;3q$(3dnVM6K=LG-KPbZr<{Hako;;98<~-7HJ=}#8L_OIZEbMluBE%i(5`xzo_M~ zp%D$@5HFh1C4L^PHWq2YLG+^Fg7ba0D{$2_~(|6tLhX#Atdm7t)o81?WmZ#de z_;w#x>un4&yurJ<+xVTjZG;~qRr?OyhG;YJ@M#bB!H;bXTHT#PyUjCV_hdQ-mCTE4GKH0Kk$=#xI`+oC+uI2$84 z9qT@9QVF|TI2r@Oq@pIjvzF^yxb{;nRui~YOFDaEybXaoJ+i+exwZZi!OQ!9RV=gc^Ln)4MjWK#mHuz}Azf)VPj!1GmhGG`cgxRSH1? 
zOp4JPD4XCBFgrR$tUCT{K}C7NtSdAcoI@MDL44ap9o)gP11j3U|9sR`_R z8(-PJH=;r(e4nO?Mf@?pCX^Nex;PNXxV*C#vg$aqI!R5Q-`3gmc&(b^Ij5X5uTKSICkvBVk7L=S7ePV_|Q=|SAOEhbPnxHFL*fEwwF zJ5l>PS5(DP{H_1ws<9(`!nunNkXV~glgRS<#X`)g!HB%>EiiBxQsO2id z8cL=~tVa#G0^rL>uiVFKbT@bdnt=34vt&NBBuLnLJ`yV?w9CBtFedp7@+!bV0x!5rE^q$JC-Jj=9fPPHsZvWw1DLCAExqFXT|B+EtZY%%Us1MUPX zkt{;*OvS^~v=I0)k@HA~5KqJmE`?MYlPgAIJPF*qr}R=U%OpeTdC4fVMv;(-#o0+7 z`iVtw!AWYUp9G=NkwKy)A#SLS)s!Im+naPsPnrBPma@HjdWrNaPCuJUcbvMeydV)J z(UgKg?GP2_6iDa9!L?*d>GUb;G%B{+&Ky25+CuTNs>cJi^IZ_oKRt8r*pwf+jPdvjK{jM$pBReo3pGPA}pFp&ol(Q5Hp1PRv1r zoX)qTJsz_H!fdth6Ga_WQYtkU`GH8d@hS#Ezr;M!ibT95Oegq(AKf&?+!8MMln5LJ zwvS*u+qy#gj89eSwqvX{ms6`m;=HIlw)cXtK3TvqT{Z+Y)1Qlz<7m)!s12mtK*?;9 zC<(v{jYVrsrdaHgK+RAAMYvJC)ax3)5&g>Dlp#|R)Icp!usqioT+T$*#6@LP=xfyN zm{Ba0OAwhAO9fXk90-Nn9va2VAdIfFvIt%r)%q|(u0oqBTt8bONzG&@$5Tnlo5}xv ztx{XP$3jxcX5_yT&5S$?%|T5oM2t2K1InP36L0{H28{)1t)f@`qkXDYI32m=YRBGF zL__?pyOdI&bpbFSrW}58C=nK9mwZ|(H*qc+nQ8c%uas2SclBi zX97vXd=Q6Kqw!p^h!iV_qob`{M~r>bjBPTv($LAlUDZ4T1gx&qNiM6#k-CzY zAWf8wC~>F-(ooYj+X)7$&tgrw({>9{ z+=EB1rCHdCC4uUa6XjNzV3d*dRxc>jq@6JNrN_~0TGB0D)0NuPU7pols>Tz)8#@rD zqBXoaFaFFft^rK?h_u%w%-kj0RJ@k9yVP5S)bm;=j{Q>OZ9Opbz0=nm;>N4f5DiX9vS0HBUHo-l zq$|PvJ2)MGam``h-}Ejotmz$M`*7WrW)#snzxMmgcn+IBZAFg~LyHVHl3#u4s;- z^@*`m4WJ>=5bmKU9z!;eT*`7{b0uUVuF|NyT%UNyWQ?#$PUJZBV^?-%%ROak^s@I| z%`~NAEB@cp&EjI9T7;XU^&8{3ZQ$AcOE*^IQI-L(6|Qt-%4Oc&x2xkK6XPcxpqO1o zK3?1Z;Zs0vRT8dSn!6z{4c7lOQEmo=Mz-EZ?nY~P)>BdratztSf;J?cyjecyf8}JH zg)eGVUr!#ypjBd{sAptU@2w$Nbw*`MWH${l986)&a5wJ9}2a7GMbJrY*d zy&xJiF=^-0$+CEYIdL{KwzxD_Ps&!0#}1Fl_k%2K8l>-@;L!UR>3K4z#sX%&cID*R<4 z6Ko4W0qUCOWJV^J?%`DZXPD*>0v3`?UTVVxout7scWi4dCnWWraZ4A06wY?0 z1f0|+7p>-I0}2@+{;0;K?WU~UM}*u!^vIX4vd!!5gfimx9aqWoHd980&e#RKP42W_ z?kjfg=Z;S4ZURu_T^mafpcU94v+L{gZW91oIW_5x8}DU4MU)1k95Vs?F-#oO>Bvq` z{l#-yvAc1( zR;q`rsv`8uf#uW;mm>lBZY5NY3D9U$m#)WVawXr$5a%_Uomh;uzbW6pAeQny;&NTj zTm7y+;-ypCE0;ZyItxehoXE!qYh^hUqNDtYSN}iEt}^}nmM9)QTF=7H%5M5TMhP?6 zz?|xB$LoAfVu7|Wr^8RG_O{(?bYyq*b;q4amvKt3bW7L4i^f*Mb|Z@9^d+n+WHIJX z<;(LaX~7nO0`WVE!^>3%&%;LYBDIgiPR##cG_Pw{NBZ{j#s2b*&Gjwk^`&VpU$^oT ze%v|-nfmhF=q1i&Hx1eVnnIcE{G3&32VXO1?L`-Kd>`xZmDQS;SrApcZy!wcjRJs6 z7pE3zn&4s*~l^j+845SMd$ z`Oph0aB^2Rap8rBHN(EqvmBwLV#w5tZ1>N%sezdI?A-E=Bi=Tu5Ih)?I==iN|!Qi zTJ$N@sMe+utxEN3wV_(KI_v6H8rZMUn29B8HW}Kq#nu-4=N8^V9w}bPje=xF+$mBL zasWYs0|NwsD|oOwVX)u^8WR5`R&aqJ;>eBV5^h`>@rf}TBwOg4Aus^}0Y-=B>_QD+ zhIBXgZL{M7h1i8xFKn|Iv*)>Tkm&90WC+16cJ9hBNFz9L42uC3xO$3q=0+}#EYRdiPT!wL> zh*uLJ4ro&(5BZmaiMZ~bqt$qwe?nO7wQP3UMYG2!C)D1;31xtVc;hZ zSfKD(pAx`^TNH*9{emX`18iXo#Xk~FKv>`_yD3<49vlXZwh1LzH;)Quk z$6b2TF-a)E|T{ z+;;r-tFYr&mSk+_2D5Z5eS3cxuZK@kY2}p=e|d4nyKnqE@WFpPyqO`dNiyJ+ zTdZtnq250CbIo4>ewd|+1v*y)MPG`VrI$e(snB?4Zz%XmCvCJ7imLx93btiBxN1wb zy<_C~Zk_)%twlaJ9spy<2iV;&VQ;hCs%rJOd%4P4CkfL3ay6qu7zuU$X+xCo)~x=# zs|8-`Alf`rI84n?dLo3)2)%{47vx4lf0!UB)TNQv32H-%ixrYOWTbR;;u0J@kquo$ zxj6L=N<5)p4jV_J+Vup9G~CYyQ*uEiHf(e0s)fjSXO!Lv&tqLAUh#}4GUN%RWXg+G z)t-kw?pcO6j2YU_h{k}JU5|R2!p)x;#hq`(Mvj8m8uzVlX-NoV)4m6oL>g+4 zoVr%H_{Y2{MlgT_wB#i%R3e34OOu!LTiRTgq`K`STK!sL9yBPnA8MzSAgs$lQYcFl z#?t?UvsB>+1i~mK7r2=8Xd_+Np}ox2qHP(jg=47B>x}rUyiKqPjB8>N zpEyl~L9v%Kx*`@M#>H`Zv18?&20Fti#^aT&oo6&4<5)(uMNWnX2N+*th$bq^QBN>* zyp#7F^GpPVpnHJSrwSe8PwOGFkX&P=`Wp2VG2+~6aLQEknYMda2kX_?bz>NJH7GGgjhLOUjCOAb2RqjC0w z7sAE!R8w0HPwDr-d0{hd1bZABKBrYHs>_Mnl;Xm?Nu!4CA}d=wD_Y5U&b5MZon-%P zXBkZf4^E~rN4y!GLAAGodmc1svvC<;^T^kKLUf_Cx=lZIs@Ga7c8v{1?4;y3QS)U+ zTNt&h{+5KsmBNi|nfz?q8aP8;VRAetJuT@rdbYkPkX3d8klSW@!9_&Vb_Ii}^=$e< z+8nO2C5-A*af-{{4i1MwjR;8?NG>2I#Hc3Csup-tN|ylliGn-PMuKYuF{lJ66$x04 
zPLe^$-Bu)AWgU67SzGgZ6Puri#a7ZPU*EB{zHJ@jTj3gv{qFajr^T*w@Y+Uumak|! z>k~c=d%evBR-lJ%Xo4X-xOM5aeLM?+WUCiMIaPGG?5YE155%65h6l6-G%f#h^;TL> z&Zo1ReQ_w&DbcQ|$=uw?1wrWA(Ht;o7pN(zTlp^VS2f^t-HWc={h}-LRjWDedtbNaH?I8U zuYZ3<8I_sHXAN~DKF>#3%lLJl*#hW*7YA8CM#?Xg<(h){CP+|Kbkc#;Er)l^E+B9D`{Q3s4b+#W~j7(ic4;f@y|u(T((? z2H$rgUzB0$)j)n8-j}AA8{$t(kboX{wY#+H1m~{U#iX_}t0Ot;PGZp3@sct}kDMMn z{F;!D_3_PrtD#^E8#0gKt&r=78fsZ|+1IqhO`wg-lWcg}Ob4Bozu7O$9=FyTIrouR zhic6p$4TG~uaw-|<)2`Y-Ro{Q+T%U%JNu&T^nS}S&31$iT-c3&%ISdv`p;bh>Ro~+ zIHHO8@WD$Id(@^}r57c5R9SAf6y7n%PabN?r(DG&Mm0;}rf2^Vf0chNZaL=0AlqBN zb|%LhJ(_u)^IjWyemnw`YzPij0PR7^VbRQU7|H~yU5=J?#Pbzv!@;2YL~a$v!7?FCazpWeWUvZ0~~(;yiUd*VSxFb=!X}yGuTTs zJr_@SgX7WtmG$0V<)6}Iujtc|SGA0d4Y0V))`1-5mD;R~hmB>@a@da%iI>aa+Kh2j ze}vEK;ankIng+t&aGlVC85yi4)$J`HN(A3PaG%_jn}GDvvkX-3G29dkAN0*2MU2*s zan9XfRRHQx$9Wg=jh(u^o7u%#^`(;$suMe5Uz@RAFEsz*_8D4m(A_&Fl36BX6v5(>%M2EFj8;W4#!C}T_+_6281QwtHV%|on z+-HH5X$6ODO_vjS!6}In=@A#}Wnf_?A|WvsBTm~5kyMGXVE)7p0de7;RSvD;7JJ!9 zhOmVGI3fx%+z$pKOcmmo?adCh1hMRd^Zj5r2_f|rp)6V-ektJ+W}o)K9iQl)8WCDO zB^KcEOhlyyke!C2xt|!qk{->Ue8dsX)Dxmj8l+uZdRSJf{a+>W9yhwsDyG=yDIWsP zO6Pe}%s2w*)Ebi5R?L|mC0XDF+T0?>qs~?1EGhq;vM~|TC7F`_h(UCeKT4Z8f+G}t z2qaQU*Bss!R$Go>2@NI-HR+g%P?dR*mbs}H3{}Z@IZT^qQby7N zE?+jXN;#S%)GXBw970248&4RbOxy)MZsk07;08Vo({$jj*jRSf1+KV8RU8jYvqm4uZ~(^dbYP@Yu$N!b12k^T|h8)DQil^gBx8-wAV+Xwj@24Ga;0$wQO>R8PBoC`SRMZ{6GNN_kbu>H zkQXGn-W~>5b2QVv7!||x!Hx2({>E-1*av4{Zm01*~Vb)@Nx~F{3XMOGhmvZTS z)?8hQK!CN3Y+mPzHqw9+C^R|>V;Lw+I@VW$<T_;r9WJTzS(6g{<{g0ni1nV#Jt`p@#szjIj5ZC9)}xH-+v4zP?KRaB`P}(ECXmA3 zz8FmN1?E2DC3_hQRvN17{39iv;tCQUVZNuW!Y7ty=`GS{mu_ieo=jwpqQ+Tc${ieZ z(vh2H#+m}nvYDktu+m%F>oIO&mU-PM1nhZYL=dFG>RiGbAY0UB zQ?VrJNZk&2uIeg8pPAgM#p0@#`f9J<0>^%7N=l`NNTsF~tFdNWvI3}2E>yEV%Ck12 zpe&*lu3`GvU;QW$RQ6wLZR@qh*v_s7tc0sGZYrUcth0s9k_Aa#OjSEtU2(Q62JV!d zHm!qZ*4}N`!k*=`5ti(Aqr@F;dG^pN1{DSQN^b&zRF)DH9PQDGCl9u&DiTwahHaOL z6O~pe#%8R>a;)BVtYe}``F&z^`76ra2wvb@C?hOZZ1f=CAsLrcAI&F-~Za&7# z2y9}ka#D)HWvGJerX4}l$?aLn>*R!D4j8e+@4J)zIrf!Tt&Pd?{Fvdd#XleLjOB(JF1YPt*4-Cj8%(AP@G8iLS zu2|OW#%)_2hM*hj$b_~Q0?)v|oLI=3;puLQq9TDhDzB2Yu8;bKQLP` zpfMr=uIJ;O2(HwITQU#uqUU;$M810BlI?4ck?9*E3k{Eo(c0+MM&vgh43utUD|BQJ z`*6E$ubt(h5D#%J6fqQX>d2z*fguzeB`y^w!12DU`&OYA8=oXrfiSzl?jdiSC}Olm zT%+_H;RdUaqOs=+t+#5c9BPdR>7_rerOk5drrPnax-KdG5H>qp$^J1Qho}K|ACT7W zR!UFfB$qDb3k{wB<8x(mOLY!rZQ~Ga?LuN_wa&mj<8U~>Z6uu6B+Kyhp4oa@ktw6H zLjy5lCgCdc>c+Y0FqZ)_r}U7l^kHGJ;3QrK zDl5!9(&d(~P17-q9q=2cu{HM(H_J-JWyc*q^D{?5R;ud(HlSZDs5zJOIg{28^w#^l z-a0F2?{){j7BxMSQVp9|9=mWq6W8@@FbaC@CVyNv5%g3obPZA`)@5WWaBP`8^eJB{ zF1E7wUbK1~@z!$mtbj%?+frbkZ(ikcVx(&rsI(p=tYTkiPt(y@E}ief%beb*o)YU) zLUrX8&@{jQngfq4RA=x%?lesiLWFEvLXVtiEwygk0E$twIhPG(zO@fRU&Yq-5T>#!GhszncV71`CKIqmt8w98 z;Y+6Mj-NizYao;JGUsz5Z#RYdwHw!FCm)bl%{)BY1v4 zbvi4!$^CZ=z|A=;xK`C8GE=KHLuhxExQ&4LWRt2wBA!)(Eube~Cn`}A(I{Gnr+YsT zawE*aKHn=gH1G<@5)gm-#wTc|o$vy~4m(Awa5#@qv2HH3%xcc3VGg3Z(6^EfxP2Co4+yV^Cc zB~a`6C2OkNc(NYD^(3P-3s~}=0i2Lc05ID?yK6hly!v}b{6;tE ztP8GdLbEIr_!{f_&@!rKEA3_@%$37-P)aaY`zXpEBE>6vgomh_lR+9?(o%+{$2xZxQ6v`8Kihin!ZGPWImbsP zfJ>g{-p}9San}ZC*nx7|B0EnyJErx$0be-0dNAVxdqy6#$1C~^dV98V5?inT_!(Cl z=B3JVORw3E{@1E*uWyqnt{J??bz5m~uGTx%=dIPx{k<_w$|}IWKkIkjKBa{E&DHep z3feC#d8@NJ3@rJRyFA=SdBqnne%tzhV?Al(^xpHm&r=|M|%|q2AUi*trCss>jyLkJ_YgzI!wczP^JRg_|e`q_ZZCL^YB~qU@9jYU#ASFR; z)Qs^GU5@t+jGil09cJmm{ zoWX|m^vM%wFQK=H7X5b=9t964m@-X*WX6XOJBBbp!2m&41zNSbAcD33Kv=F~wL%3u zcI?y&Xd9%RqJeGP4nZbNuwljuju;|*94Wy729*qU!+?4EgQ`OshvLKx7n;&vuDerB~QU38gpsMt5tJOe7JMfB1*16ls&n|;@qrx_x2rlMewMf zU2vG%C-hR|a*{IzRGY{a=1`xr5h0=NS?yaJXwX6RLinlCzc7?@o=p1Fuu+qCzZmXX z>>J`MtpHc|!8{YoD;~juyyfgL4+Xvo1kN`x^y3eP|D<|ww=Up7gQ4Q+^9~6#Wat3G 
z3kTbv1~n{1A)`JR>+J#D@ka{O8wJJM|!wA6&4=t}CIDpLU$ePOo?#^5@3Jl^Z z6R6 zKm)vu(?}Ub5VYow(~`NROjE16svyM!q7SoE^Hr?wYf!%VJoFBP)FgDszu}rPP6G_V zv~^fwjeCvP0loMyz~lOJ(1Ze^BQVfMBDwaD2@7@rYg=HLA$75*6Yol^$0K3CSb{X41%l4JH`>;U=Gql3^)B8p=wf!XmZ2 z16Q+8xmquPZ@yUFN^{Na)S7c*IY}m~2Xf26^G-MQq~HOiSg;QR@MaCSsrE7z4$%m8 z=B+}Yop{#JOEsO8hh#lXy5gdZHbRI_rCvHzf%ZnTAD`<7K6TurOvuD+|! z*tF3UHi)y)#!fPg%cdD_RZCP>!%^uYn&)+8WA=zQ@Um(+Zg1wmw(~*=mhouCjkfJ2 zw3rYOMHbO0p=@icgIX0aO0jbaG0%~p8#CFLR~bp~5#WIjc0J+OVUJzmhHYQ?c8ISm zSybm#I9yAsgCDo?Bo>FY*ep$+6Y=^^R;mig^_YJDdLJ;D3s0ASm(}JkW7Se^y^r2B z-vobtY-rBC)D`Lc%AJ(grTJ#R{Y~qS?tb>9M^txduP@5k3I030keFkC`vrNR1(G?VsNo2J^fz^?+s6OXD7@7X!b*3@aQPlLFpmpfPgC1IyB)(|U%# z@^K1u?~9Q61j)3f4YD+hp&Hh>#7O)J?U6wLAfQ+Bu&7F+%8`bAO>Q7%rnzNjfyevg zC=ZB7;+chRUlb4Hc7Q=K=ml~htlSHG2!+ClOM4L<;e*IULJRa#ID)g>=sHnMP{>F@ z7D47kgt$4-ndfd}P^P{p!YLOH#9uQ!-Msp>icR#(7c~**IGwmeazYWEQdDOYhjJwa z`q4ibxR`QsRnK~Mkx=%e-1hv!`Sbk@qVk{0@o8jxKUBL4)KzOFGH%T{19r7*0c4lhOe?@Qx6K z+K5P5L7!?-W8gCuCDA3zSu(SQhCmnpfP87eT*7OYO+_QTk{QILYEzc4gyrw}0!@&n zv|b*>p$%_1O+M5TnpWMWkFxpAYYN4jiAB#Emb8dX?1C#R00KCepac!VWGSqNy*Sh{4@975 zxx!h(?hLeUtD3f6s=vP`Ds(3G&xuPs($=~-lFu}%TJ4p?COpS|o5U$^BiB>4$w!sK z-B3^+=&WT57OEGr;R#d7dztxy&Zkw|IAkzwQ?K&Q~EIm~eVM>KuIJRL>8Q2|6I>=Zis(HD#3(C{;wa9oX`R_^WJAqT)>iFd z%XMn7B^j0*ukMVutvQF#M=rIRIIVGdEZ-Kn*`)+BcbyCEV8;_eF4cCGr7&0#rVrA^ zlTLR}Gp@Kn54~uvcE}-ORz`;V%+c(ix0SipBGOf1)zO76F6Zuczq{V=CL+DrZtuQE zl2d&aEWfpuaexc|*y0HoxMLHY@Csi??{asz$et_li4IUjrUh|o+c)B3Q~b0YcXe}H zJhhIRIaNg$F;l}UdGMZqx1ZeUgT5Dfof4ljf zTYs3b^**fTjH{ob4RD-K zxzY>hj*h4Qv`kUJDAcmd=4b;2#mo!Fs?*Xc+iGY+z>Nnf;=0gFt>R7Ifl~ z$3q}!SZuBYXRo>dak+rcaBQ#VoUMCIs4ARewqyyZEPy4)B4|)S3gapq(Udw&m2M4M4AN;Q3^YG7=rlQN1Sf+mH*sFb5>=swY}<7Cyl*F+nGb zf;3CBCR>4C2BHc_6AKCDEucpU0us^8#*Bu5|2PmW{|U4zsKI!LDI1f8{9pteQQ+ba zD&dI8`cNFb@X{z#E7S4cNKUj2kmF*k_)KhS6cF~t(%I}J50tK#WU4Mh01tdgY8p^- zJP-mt@TU$3FBy$kxKctYP^7vX0yO} z>r5_-Y+gmE1e1Z%kP0d+I3Xbghf~&8v`6D|J#}C*m(#VJ)A-KuogP&n@t_e77wjxwc|Qyhy_ zB#|^pU*M$LZ_}PL@j$JW+LAh-6g#1_#SFnLxhqg|b38>vOzjd5$h52fI*(Kr(LHJO z5P7afH%k=j&_NgiEm!c0%m+Vh<}PD%a{5tR2E{Nkl+GXlq_JhHd_@yFDyuV zZ|A@W%d)jVDic^&%V%;`SK+Q#^R4cXZb;#<No&V=Sf-2huW5He7d49=)<&T`5iKa#ZDsAKNwQx=kE~up=1tW}Otp z9yL*c^T@dF7@aV$EVK}0au^TR7*ldJ88%Z_(_y(X%Lwu*&2TpVBa8^Hl|?o7Z*c`< z4X!B@4nP%wN0ldHhpY6+@++y79Ramw|QDN4Uyw8a`roo zmRVDGZ_)CMOyCcU?&d^bT3=UB2M0No7L2_0J)`OoS<6U?gB;CZF)fl$?b6AxR(S)< zfIKlgSaw*!HWtm}bEvXd9h5DL7g2+d%IubHGs0kJGH%EBB@6a$!`EPIp*3BjbzSC{ z)-Wp5Fe~;haOd}L5g=noMuE&IEB2Loa@18>s5dc+d6uPsA5ZrnPm%i2I^(B-@eZnB z)>efzJ&5&TQTI{!ATsQbx(WeWi~tb6>k)slE<;%Lt|w^!&9gA@0)*bEJ)!YUvzC{H z%pQXmUoCQR8`3U;X?cmve;iOnbIDx?6F|_G$_58LlNftDE!6TAn^F@abA&U4BF)a1 ze6JW%tvG#ik&730ebJRv3KBOeH*qgN`xpjiP~(u*W&?LLn2vK7c|b3hJVuulk@QzX4kn}Q9pj}Q?Xfw2 z7KPD(EUz|}1a#KYwALgpFtxRW3KdV=av=$^Z$@^8Q~B4vC8|us)co_kdgNTtO4Tfo z%m%fWcbS=>_?v{Wt`34j*;a11xQIZ4nY-A0XCc7I1|H603;2D3bOf}p1)tIU@Q*GN_=2gkO79eiCpcJtpn_>ugC!V} z9oUf0YJOT=PLN({5m8$w^y7R`CY#X^0RdXfx3z`2FsF(SfhdP>Lq5M|K zA?1>c$2cz2xB{MS;L=zOC8JE**hV)Xo8xa^&zUcE^i|0yt3kwlnYw6)hcuS91osoE zLq+Jgs~(c&u@*>bi{C zz;sb!>QqeH&AZ`qK(_dk$boCce(H4FUF*s6T9Hr z#Ip0(nm>8IS*ebmixLz0$5C09Rd;ebSG?&Of-{*7qxDOHwp56=t2DWy>(lgLxGU)U znne%ETW~+=Ii}R_J%q`xZEKczaHLgt(j*wX8F@TRKn(4A{J^awiWOt*Q0A%KyOWcG4uZ(l0ZVnhW5$?EOyl-n@VzRmZ5?w5= z%r_SObwSC1X_bP)ScV5Vv1z-=SACEf%*`M0vw#D!nIp>UvQ<=OrSH^EqUW#STjy9E zqkH$1TeOz3`{?kAp>Mj?*4)Vdbqr4T?a~>Xy_c9?(^4a|$%0W3`ihw}IddW=oWd)7 z&<9q~)BVC7c5iST)Db;K$6~5Uyu`yUA_}z(nQtUBp z7H*B>BzMuc(bk$Go6NdP$k2QEAvZ5CV?E`nd$IlFRaIcjwdSd$Jmd9yU2Q1@kKNdh z9pp)xzezgwBJ&kzw@;JI-A#Vlf1u61+w*=|PP1LpV!9RIE2gCwZXrBi)9ugA{oK7t 
z3DG^!)7?{JIk#^+enW=vLO{gf4?b6wSYqy#>_Si6sAv1aqoZc@LR|+4-s%OVUcL7Js*&mkbA*rlz~Jd~Aj6j5kr2j5h;5+d-SNpVZf#8dAB8=m zUESh2I^q`}R3INwk(=36-t)Uuk#_efz|&CQ{LF!wLA#sR$YYsW*j}-Ym|-YHE9b(? zo#~rC>T_TB33lq;9o~Om-o@}#n_KI*+yVd>=Gs1DmER02|J|1x(wV=MG*Ir1yz(huyVVcyD_hiW7cmYW?Uw-kKi9r7fAJ3x6i*(@?_=`&9>Y+2j`(|j z!r+|&Vi6%DBj6MQgeOlOgGzA7P$44Xh71-cR-}Lt0|yTsEMD|zF+>s~Bp+(bxFkr& z3onM87{S5{!#;NZ78<%KP))-tQM~m02~-ZvF<+)&IT~~eA4-{owsAVt37bPy6&`ei z73-(1TqQA0b?cYdv1EIhJzERmt6gl{x_t{bZrHhWYuTNJ_io-;eAo8<>o>6AR{sja zV}WAg0|*lmEG7W?vE&1h9b2XxATxr>1v&?iP$L9nk%=b?(5xZiWz3sbyKX)CH0#NY zYtyEUf(L8VwsC*_t$+c73@$hb_gz85aScc=GT4{`L~IG+Dg?G9!e-|vL)(VMjTe^SdV^US^#klPN3s-)n#W{2o-3z9e0$f+M%H~B8U{Jmm=6* zXfKubs;E%{QG^tNOi1Ccy!J3ef%$Ft;HWz`2nSF-)n|mS84@%rtDIIVYmGBdTkDBG z>OkNB3KVYO!j-5ft8K2S<=7*MA9A7Jg;c(=haZm|iLaQKOkzpDSl*j2CRL3|@V;33 zf&@{IEGmEsgP@)b|a0GMHLd*P7jyJ2fYIGwh2er`S z?tpd7;U1e%*b2@>FTz<)*zB+gNtA^MPlTlRv z#gwBWvd43Y9y^`2gN*y_vy?mnMmzV(a?OA)I^4@C|IVAE);5-+4TST&Y1XL@;qpF@)~qCWiRu&$ddEdnc?x*ZB-GsMHV8MU zBxlD1qoqh-J|>!Ro!#o11EJ$airi{_sd~)+&dEmh<&mD_i=?a8rN@?d?+EGx#2*0~ zh98wrpD1!1DEYHcS~+r}9(3sZV5q-VVoqVTB%2ARB%nX+kAw64o3rTuxU?IovMB3n zotK#OLXZMQO!yDT&6O8Xw;HDic#HMXYo#8PU=CE zG8N%s2i(vINZ9KUWvFUXuNSCvs`HFvh2Qs5s!FDX6|8YI+&$NtPKtg_Iupzz9u5jP zv*xV_`)nS(2&urfdZa%f1(yUJR8hGZ)+Hj;s3=_tm5iEUMYUXPaVn}MBIHw7Dm~;i zDR2TIy%Z@iHBe7%$WflSu7o0dT~}U9Q!%B2hFdY_P>YINqw1EKcu^`hnaVP1PW7`* z-DYvg!$m##>qu^R=I^k`nT1kytj9HH-f*QxT@7fvmt-V88JO4ql`SW%aP8-U(lyr# z3hu7xdnb9V=PvVDsDH%TTVOlLOOdLsu!=n_W|bC8$mUSMk9Fn34E#v|D{G#9eG0fh zC0Wt7va|@J7-}6$iTp-oNCX~X<%0RkpdzFcMBS~4k;>a5`ZhLAbSggkmO&FdH&WEq zCcOa0RpGfAs);^O7RCw(4u1_F7Z)M?C997xWzwSXAG)P2658=H`C7JXN`-tsT*%q3_1q# zbS<*ZN|$rTVZ?5E87gL=Ee}!l?Pm%vI+DqTxt*F^2-3E>WWgE|fkKEi`}SHd{hoQ* z%pM!v)NEw4S~QWAq_dpoeC^1JG`3}S_Ltx6m259%+=3poPQL=DPxxdMSV{4(NSxwF zLpsta##1yW^;Mkp&wwSkG_tZ+R&p)|Qy2jgz?!ShRWsSgswQ5(*$T5UF1(SmMx*#3 z4srArGJ#NTbK0UU>#X4yq&13T)d;I>W~+SVn7fb#!z*pbaW{k>6}Qb(ZWWy3e1w}M z@K5jz^r9HWDHf*fCPpSYbj#%sXc*!V=bhWAuf^W~z^yuq^&L)5_q*SpX0>^jdxLXXb!{fFdK{3dkZWzmi$e+n89&L~Vf|k}f*g*51*NgMEOP#gn&mqmJmKw3 zZ2!tt=HI4y&2MgPIQtT{fz}~{9p6*S3G;_QMN^u*;Pj_MJ>I0hlQVat8GNh$HPqBJ z0x#WTQ>Xno2IrRhKef&v2y?cX5AJmj_Qa2Tcddn}zvQ|yl==SRuqLvu za<9JN4_|o03j+I=N4$mP8uJ0htFaN*_Tzp2=4<1WbVOopx4RU~&AX-Zo@Y9|ML+t| z>!0eYPrd)GUK#!-fCA`Tvgcx=wpF+XfabLSfV}rxx))ck_Cyu8V}1j0nL>9?mVAKs zUXU~(eu5+Eg{=^<2aUetl*CcWa0(5Os?bXMNh2hb{<=eE5sOXnBA2A^=2- zA}52vH&73>ST!h9nCDR*hB1CYe{SLkT0jaXc8QjlRGD~+UwCPQ;1J=YRpe-j45*0~ z0B-ivAnKTknz)JKNQY`QgD%Hr;8%j#(i;#|h8u`paQKhB*ho`WeD`&Scqoj-xRBk) zg4NN6V}>gPxsMvxNtgAEe3lfw6mExrV(C@|+h|0d=Zz()g#1U3sApB5s0do6RS@xo z^!R_Eb%!&6YVsJ5GKrJb#flLTjng-CK?#&XiAfpxPmDo*WJGHM*MVEsWxI%rtmsX} zQGsLD0}0uXS;>$Msbog!l_>Q8a`{MQ6}gP}#f&k5gh3RNC)Nr3vS^f8k|?Q@s#XG6 z<&t-aH&?flO*E5AxtD)AmpSQVA+b4VHk2B6wL>m__-OqbZuC$$V}lmSb6#Mv#_mk&T~VlDm*$-^2>KV4H%`jg@$k z;0Ty!XP0-GRWXo`;n;r;7n8(UoX6RlzjqRF^MZ>Bos8Ldh&i2Zhm>`sfy^0l69|x$ zxs;YkT*tSDSjm~?X`W&uN$EL=>S>zo32pD`p3VrCVfihgR~N6jTaqw^umhJ;M;N>r zn97rBEa{G_h@1y18wra4pn@5g=Tw*l7M&4Fooa?QmV`E2^Z=UDb-UJN0N0AynS3N- zJa6}*1Lp(ed7denm1)K>Ey|vy38OChqV3aN@oA$NIY9QQmPe;sO#x2)NpH0iq*G@Y zPuQTP!jhj@g~@q}MjD_B`lL}Rj_6Zhsa2g43Z0$;1&Udr7|Jf#7 zk&F7Dh2f-5_^329j=%|ukrAn3$fV1esi-1~omzG=l5JRerH84dqk5s+@n9P&rj-eT zQTbLb29^B;r*2CBs|?9sbXuqDIitIpr+Uh#eOit{N++{P35d!Uvw1-G*P4A%TmH$Y z@+VAkd8wQFGj&O+FS!~}+N8PHf75!T%DJFgG@zl%r4?$bU=~TDDgxy6l-}vCtonD6 zIh7QlhGTeqvpS3lsjs#Anb~)%yb7?tDzLyxuyy4S5OSz?!gMhZ35r@`h5DyH)T|2Y zu$%|27rS|@Ag$f%t<^fI1j+<@Nv)dNu~JI1;h28!cRHb2uA^G2qKZZIAv8Scp%F!q zvHFVrWTI4=O;NcR)X}f}8noOOu((>ZM?0{5+H@GZ3WX}H54#q~8nJUSu@w8PLxf^X 
zd$Ai^vi;}(S%lLo*krPM8G8(RvML#_)oM%R3XLrrOJXFd5_g^ND4x(qnXCFLu=t%l zW0Td>oiPivLMym$YGy`jxL}L4iQB8tscM}D(0(l@?oJ+ZjxOtMRxzdZhmzxTn`?<0Knc3RCV;jDr zOTOZ2bHf|A5$d`yOQzygw@xNpWI3jK8>ao(w`OXlyPL1)_F=dbbjmg_&*o3_>21*x zy#8wcjKxd5#=E%6Yp@Rt!4$enTX8y9A(ETBs2OIxRcp1KC$ZU!wVTJi-Rm|6PsnUI~H5jlp zxrhdII0T*8b~-XQ!*KeqhkM0X?5kP~%6_Ut6#M~VV8Lg+#IpstbpV8^oWxxr2&mlu z#fp|tSh1N6u0ZD!z$crW%{l_jHb2wz9!np zWWvS;0|~B>uonypZJEZC?8HE+gMFCEE||cbjLx3C#WJU6%Qiy_(jV=H#*j=?L(;#E z_=AzC!LXysu1w3X{Kne)s8`UE2;o%%&C6yxj|%O=EzFMR=ex#Su2dRJU0H|BT(7Y9 z%o$C)wsu8?EUPsnQmD|D0b|m4$!=MNWBiiRO*~1Ecfi8S$?2@l4jjr4481!iwH70} z`Am5+Jrp#NdHky+K26U?jU@qHvNlzU2Tj5WXUD+Y&@9}5Q>?mvjJ_}1wjVJ6J${Lk zcw4hThQHy7$RZGA5-y=)=W#B~kSZ#vU7-N2vB*L|J5)vR~5 z_k%n1EH)jZGSZPgh15T7I!fKK+mZ(2bc<-K$EKUm2yNL8y`>Oc%&Uu~$o#q<>s=t- zp&d=T`i8sp#lvr>VI{2zp>PT*?b>V|3R#8L1+CU3ZP%)`i2J+SEqFhAUDFI~*oIwx zmQ!Wg$4Q;UW&^CJq{vf~_b`T;XO8{YUVYh8J=F|N)mFFJoNc9Boxbh0)!Kobi@bY^ z4ALB}eDZ2DMQp^mP22K4-`xz$udN89Ajt?E)I)98zfFw7J=}X;+yqB0s9%a4WKMUTMC(-H)$kBNn=Y`(&YuX## z;@ZQrX_&L)^>_j!+xHF0{A@5q4d6oU-@WbIo_Ws4E8xUE;D61c4hFaD%;1Yd;9hVO zP#)nDuCG5?97?^*m0eC+eu}zEJU5VQ<+cW6z6E(UbV8B>oSNA{<~bw2vRG}dmszx3 z-NVxCiu8NuJKL@^zQ`!h+WPIyV?59O{K5NsTIBZQJlEg)E7L`OZ}g`>toIbH=yR-ExP;W z<|Y2wCqA8Gi-8yot};H&bzVq*%ieab0%r{i1{2AuEXG3BY=7owJ+6mC?&yzR;1L|a ziv?cI({T`7P>qQmF=b^bYwi)2m>luOv}XmQPT97e(9D78Uw~Y+H}CTv=J`%qwGQur z89ABk*#VzH$^7M94eTIJzu0YQ5gEHrhH2X8&x8)bc0ui%Uh$o|$=8nUH;v@na%&2e zWc4-hT0p@4rC{X(Qg)`~GT|TQZqXx5DD^_eQ?-Cx&Xih`>gN0EJD+83*K^0R;z3X9 zSXa&gKi(LC=}IhD^{Z9I);u+U}qF?#5~AT7D{% z@^h^uaHt;ZlMWg*T30B`iWzN_=%0V(s*sO!U2AG^yZoD%tgTz2vdA6~r8 z^+nD4%PNWdJ;9nT_GKR-XrJ~a&-ir4_K53H$gMf#cA8EOyzRI3X5;&0Bi&W8NPZ8X zYIs$7dF;kXD*iW-cx;@xuO0c$^Xf$&a+r9LFZ4st84qgtm%r`kN|O@`BiXOaQUA9` z)~cOV+IdUkROvM+k9Wa0T7_WE0I>?yr-20xmP*zCik8Ax4H*&wgeT9$h=v5gktj*y zMn{Vv9SU}a@uW$VD#7&75mV+2A31E+#Cdb(P8>9K{&W#k=+G2Ji5_K< z6o%5JGMqlO!;umusv6H+y{ba%)(exoDT{d^+&fQyA#6QCD1Oy;ZAn^gl5F9t&&^Uo)2^cz1R&4n~1 z7oX0|AOi!*Q^>qt^BTcvEU}~B)~hHDl_y2o&cL{`3lS2tQA<9aymV^i%vnEx?ig-x z>U*pUS0iPKlI=#gclZ8%2yyb|$A17T9{hU$6q6@sSUTakd;0YsTYxWL^M3T4Mf2A$ zz|BG$;QeGT|qm*V$>pB)$YRM%XJA4bKo_zeVN1%}MX-FcULh?wY3^9?!kT8N`1*US6 za0Id7n98iBsI#dnA=v{FFuU-|3yC1flCexPUqYdZ#4)zz!lg3700OZ)&$c-1G^~D;_%5{Ez{IN$Gl{g&ipR4 z)dWWb`h-Iv!aD0i9Qok3kBh#g5!`Uco$4Ti#?4XOPt#p@xFCUq_e&#{ERx;!j+CH2)eG*-lNcAo0SCI}+ z1W#!UHCCT7y*LP~fta^D7Z-7=Kd(pd1IC1k4p*$N1+EZnwXs&JVFbC7gxX@wK0-L7 z@7^|VcFSd@Z@*KSifp{uCH(0BcjL`BYk8BzmvP4*cd8H<{e7BE#BI3MHw`zGLBZ+v zfGJm8Zwj{bRaw=TBfi8WSOqgEX1&W2J_cFNm_x&izZm2@?9M%V4>0JHga30h#rQ&W zIML5u8*-?_9R!MYwJX`zT$a0$^`?9$ z2myh92Qy2tY(YgtqTw+Agf4=>o~9aK=9} zTEbjdd_)>I2Eey@Nq&D@;N0#vM>5v2dvO!UT+}AO2KI4W2>BbeK4`t`InaR;L}Vf- z*f=Hq0Yw>vR3tlyCXIx!hwH)F=F()vO;(PR;WG-+; zO=5~7#0D7AmCl(>@uWx*SVhs7DD&N(q-Z;05)lH@LJ=Bw1TZdk5moK8mKf>iHY;5s zkfk_ZHnmBnH05BGnsFo?!I!$wv67m0Y#ueI)ru>!v1;o&U_9js2&z!)n&TuS9}W3H z8uXKbiu`9l7YVulaAL5XkyK*oK55Ad4s+R*OBL^BV!qO{j20f?;%LF1!gsSSzK)PvU<>LXD?e>&uX?&Iq?T!NP1G%#xx5KP$?oNo6^|MR;851 z1swnYyq*>Rl|r@*T5px=(?fItxJLEB4u-W{*O+OizC9{$q3f)rHZfUEed+>*BStWF zcdapTtW{~~O_Dtim~Oo1SJj)9Exs`!*d$~$=Q>~dvZ}uJ71e$9tA~R5PfRzms{!X( z(7gJ0uMG4n=-4LEz*bPOg=JhG9;qUP;zm*MNnVB@M>Er=JOVwVz`GB&MK$#e=-Cd*PK z!8OpZo=c%5VBrNTi*A#3dCMe^0K3`E;�|X6{)4IEy8-Tkp$X{(>UCPE{{d**xb+ z%rCzGbE7kW3&_U$O4iSL?gT9Q#?870SilbCYJv0k(MGV3uYFCh7&zQu1`pQK4rU91 zF(_XRZ`d`VxtXU$U1gLC8KyZtb*fQ4Vj-KN$EJR%tLK1=9pfSkh@kZeY@FLT=ynjV zekm?o?cq|xb^*~yDU1i;>LS0i%eF(-ak-o_>JqO28EAtckf78MNcopTnN*fZWqQ$u$DJN!}QXe1t z#jrIsvRz8+EJR}tD|NcF#i~_qmoyPKMtj<090!*MV5UK*?MzcmQxdD)q;Z(K+}W<< zQ`+66__lZ81>ejLV5O{~>eRKWv!aZWHQ*DSL_W_~aA>NL;0edPqa#9ZJ%e=d61V)o 
zML&9uZ(MH|$M}KuNqRzA?$V7jyWd%hcDVaJ5NKa@?pKTphc|cTkAnH{M_p2_D}Be6 z5V|sIEn**sz3U5*ZHX6f>z9UorAmK(1Z1&&l$v0q#&>&VVXJfi++4)uXXzdPKAeT? zpV+~^V&1KYulu?Wpa6WAsfmR?+1tDH!4Zt3Mo*yP}IV0AxRxk}cc%F!Ce2 zX_Jq^qrehSu!Sof-k_m}J1sL%0?qTN#!J1&lf16_2z)ZQJ99Lf*tZr8!F36=b(1r@ zVZDkQAO!L^e>*rxqqvR(xxt#h0vxftV>O+Vh3I3g?K3h{V8XBKy51YLm;<#?6FySY zKArQiokKS2L%`}AGFQVj==-`@o4%^^xl@}1CgeUCV>(?^t+4Y9A|o>LGe4wjEmPyd z0B{vfp)Z}$pZS|H77IjW8^G9_x@%juU_&~$8vyJ>zT29<*BV4)lSBvqs6+#tu?LvG zQuD$qY_?^)KTc#R0-QQQY_SqR#Z*+qo4T0d@sfLciPNbG!(%k`$w5^x7!sVg%hM|q z^hFQ^K_`G0K76aZiNPyUCL1(8mY{=XbhtraGq!5RX&kwWxr36!MzmT*R0KdZ^hPzT zMjHS`92>w&G)HGEM@t+sq&vg(JHImgu_w%qF>pDV3qC6Ry&&AVs@p;{3`DD2y9HcF zrdu|F6hIZrx(FBn8*4({$}J`wKbV>^R}-=aU<9+f17=exTl>3?W3TRc9F)Qar&GE> zSik902A`X?GekZIkcFRHKTSM31Y|_=(>lCENnpb}g5*D!95(0wb2?_*KJ&9aSaZqh z8^cC?$F_UPN=QnPbg=-8G5niK97_P#Ax67Uj~9fz5d^reG$6|R#f-x|ICF$404%I& zv#vx&jdM$7T*iw8zuB`r*(>_sv>F#J*GWM5&v<58wyY zLOVu`JC!0r*%U&=_{Hn6G^m8f6H`8d?6DI=zJv@;NIc5&>%ZIbF=fC8b{xa1GeXil z&89<4;hawD(?}t!Ne@fO_VZ5ToX#No#6)mP^lLh)v&kO+69oe}fHT3bK0~-pIYz%R zOI`HEP;iBQa|N#CPyFOJ{{&FY!p|3UMreFS18ukjRZu`p)V}}2LX|X0m6Qc-n1gBzLQh0Gtvk+v6fqDp(Z#e*umiCW4Kc18Nz1IqP}5A# zq{5ohP*boquDb)#+^zoWI*z1+iF7vZQ^e$xN?g;w!FWyipgY@iEn}Nf(ulh_AKlDq; z5;MrEoJs<$yHqmNLq$|W4Uh{0%gAfg`s6Ip!cR&6Ev-oHEK0T1OV!lS+SE?HEan8& zP!-isjRwH<%ToQz&ss*kiO@9czd&$6^mNHEd_==^fCfBG?0ZTg8^}7`$zFRGAHBwT z0aN8;Ov$uKnEFgX6vafGPFT~q>)cQqz0oSW#~aYm**w5)oxAVsR;r9CDCGxD>^p5W zz*F)JnbOiNNq}t3IbE^pjLOoCJrhke zUJXz4R6@zzNIm7&{5!xE1yr?@tw1DCRN4a>B*94X0f3xPv@=S&*;9Q`LozY11G@g>PRHcVmQvHZL^e_XOjhr-y5j6k^&?t`64o!#M_D`3Bvsbl ze9>IpF(P!@c;vmyjKZ6_)@;?*h~?UKj94mlsf8>IQLqhzi3W0OsEe@?U=#uoIzjq* z)X>`vt*{K@qb$1(L*Pu)1Oz~Q6|y*8(imU_+w#8#3{qIV+m>v)fh|8KbjsNFRTksb@bfulTfbRzfWy+os~rxIJ=FT6zk(G*f&@FK zgTqvkO!Dl;-R0fhB`sAQ1W^dls$JC`iBRhlL}Yzh5W85^ZBfX?P*^QK*&5oSRjZ?g z9FfG;!<<8=HBQ5YUcL=6M!31Y?F z+iu|&T)>9c%B5Fi&=p*kh1o%NWk!L4-Pi@!PxRb~W#AASMTo7?2ToYrtGTAFU9(ft z|LZwiZOQF@wTqm|#|2s-70J|vuZ~q;c*WIQ72c=AHJ>yw%6+x+H8K5j#H9003GU5l z)LkGJV)6`xAuiAx;RL>nSqD1aA5$?;BtKL8Op(>#n+(7ScG$b@HR}}$?9~|*qu|dK z6k`kK)~%h*$dx;=eLEpdVuQS5d>u&1BuJp^Laviy zltfA%M&%(ZRg_X)7Us-jlf=lJ(WM;7s&mn$T~hGWI+fx?{bO{b&+|VHM;m8j+uGRP zWH;W}wryJ*+qP}nwr$(V%s>17eDB{qr%s*MR9AIPS9SOGV7jl`Ab~v6#W>QVIO0;+ zBc~w3H(T`nr6_T1d#^gO)eA@8WFYnzfH)Wb$-l05}AfyWw`RqAi-uBvC!^RX*sT-iOX=?>|vreK20p zItea1z+2k?3}0*4Xi^Z~U76Qdy!b(QDWx7jrMmUU#9LL<;?Sf~I*wjxu;b{fq_f_* zJ~!D>N|KpN6N6Pu5CK|^GI^UcaLNeG_Xw(@EgWK6TKW(i+R$jS5`A4?7d_l63Q3ZE zzl7ejl-~g2v%!AotU~(OQQO2c;n)@Oyhi)XAt2DMgx;iti*smry-}gRlR8C)hfO1*U zX{6tC)#$Lh1vRhdT1ZaJGMQofvh~)My$UL|X0MR1@MfrbsQfVPNlmYi963 zVM}0#Of(XHdXeHP#re`*`wBg~LDcaA9O5|S=(51p9EC9~S0Q+Z-|mdSnxNRayW#8z z@;@GLjgSCfu&# zZ%U6!CXc^2hjq|Xq_gtX#_f91A2o)ZbRqBBcAVIs9!a7d$Yt!g*o6VoTO>_(MYgEP zMi5Yy!deS=rrY*r$9KR6=deC!%Mcw{tZQJ-H5Dn%fq#q`HRw@7{KwL@?(-~ZMFscu zknS~`yYq8@l6tWAMeO9kYQGd*_YHoh)du}O6#MN7zg?i4(*Yx674q%P%k}2!=n!>!ArPnL=-*%u=E!TNM&;82JoHoxz&swg-cHG&|n+S?C zS{Gz^wziM(QfDp|LFU6zc5no);U-pFu2wOacR|;;u_o`aHep{ikQ zUpl%yqBvy5-}y)g?PEpn;weAkR0ctsw-X0MJ}VVXlV8((JV%P%GLk*Vrk#uB{j6Nl z0zhdHK;FuoV)C;i31nPgP27t`y*LH#Tm?Q$eOws-2m@GdSBpO>pRdzby0T&zt@d1| zjwX8Pp7b!iRVm;1wp}#@TzgaiVFG|Al{?)bm=;2l*6jDrX2ALK`vs+$DFhLo)C+ny zcnRf8ACMBbUTtkyjQcrhpHlrX((L(c1AH!qrMJCCoS=Cd^07nA%Y@1?4$8Yh~?GUaONNMiDs-?ZZ0TOhO; z>C7pJ25C~|&U8yn@Y{V_`U7b572GW_k(;4+15;^wybJ)t$VT3IE*-lMTO|V{s;4Kz5uIp3GfX#lR1o@>2?-zK zn>^c=>$w(0me8JEF@YOxZvsJvN7k{n&mFc|wsa4p3n+WJ{X1=hCdKi z9)~a}S|a7NFx^;1C0N|*ui4)y3AVC)Q?G6*b&?#SmlUybGIdbOZLLb%V|;E z=5cLbK*fmrar95(q=du^>&BH=EfZ~ZQxB80+^LNbVzpwHF~pJVzz{K6Yrql329gIM z%=mwXP+?n{BmdgDeMRaB>oTJhcP&#i>A2u>5A0^2Jg)GH!qYhM&{uOUFR_hnV^lUG 
zBcU!#WF9Fk!Zfg~9n88@sVylwN~0OckLx(AV#&#<8>^{{=&xXC|0tWFo9DeBWrjGd zgylKUsGshHq_v#!L8>g6{YIKzF(=7lfr~SX!$AnQ$c`zDu&&KRzqDek1tth|^uOGX zTqE+|bYqy5Ebctp#kK3SxGq&j)>6f(>5;v6K@awA;ZALguutiI|M`%^!wN01Te|JBxMpX-$8JN?4 zn1vwCWSy6~rbk0MMnU4WljIcwUj&n4f;dx|@2OlgF_&6iHVBt8*bWRbL=m$oLwN3t zqy1fNyfsYA;R%AtLr(RFAd;#t9DpxS@*r980=u zGh)MxDmPg!ow8p*55wI;!sN=({w@p(!}l;sv*D5cd_>-P8_Qtbk|*SOKvIF!#2|MQ zPgo4t@BaJ@3e{>Q1oVgs4@E}MLHxn;v1H^s%Vml*uON7AK2{7GKyFZC#^`KaZ z{*H)Y<4%+j=c!S!q>oPgp1AdM&M0Ed7oUWCVoEoD`$-7o9DkYxN+%I15^`M|Pf5XE zAYf2Qr_Mou|77&b)RCVjKgR}>Wc~@2`rCwZ)pcmtGA>BRy1DQI{~>H!k`py$5#*1K{10x7e0cTmXwMZS}5go z!%#vYWtFv<+lyMp?rc7rgr&r>;fQTlRcd@EHeF)zNIv~cO3+&&Z=aV^Y1T|h%2|&S zdKE;4Wk-qefJpKQ9&~x+3=7M=9MVXCQt7=_#3!mUC*M-Mvu#VNR|EJk%z#+PDjs+Fl8$li#AHbavp=7TqCZys_skCg0&55lg5}CHuxd)WNv9d*M zWeqE>eIv_?jVd2$!Lj&!soIITeT{s?d3t2?0<8AeDlLq$A@Nuc$!*oaY51pdK!eSh zRZY{xp$LA%$^ctZvSvd9BRO22us(ZFT4Rw}`=R{86Ki9bc#XftxH=dMXP7S5 z!rZohth?ALYvLt$e#kKl52qHbuvL3pp6h^XEKeM$W`rXg8!%u^+cdERQ}P}!z=oDm zYEw48d-5Q@q0g*gG$=`W>bogh#DXZ(!sTi#S?#fGlIWfnu1lR3y_U3Z36GkPG2@WN z;Y`3D&=T1to9Bw#DPpqyK#EUQh0&^#TaOpU23lX*YR;dv*=BMM(pu-U;Nq@ghcFJO zMlR$PaW(K5#fBC~>YJzI9jewfDU#P(S$BKP#osu?)Ehe3Da2)6W>E6^+*vOVWMtbk zzC=f6VI1Fro|+?sR<_Hu?Z0)sK0C*7`G zhv!9)f}6`+4nMWVRkT@f(#z+Pr)+pvrcQl5T^EkqSLCn=T^H5oliBZ+UcX0WOU&0R_I*z;FAgItNWMfckWX=cwgQr$QxGy3%VBGXtVS zzh&=+ngg$18J>V38#ib@Yrqyp_0f(o?>ix4rdVDJ^08<~`zhgrl*L?#2){=K4#pb^C5)M{5EN_%O> zz7yYo=KYByQOrfx*P}DfEqI+N##i(b{06cmh_yD|`>;5JWPqKt3rewb@KYB;Kgj}l zkMK_FP?HT?;sRK7i$5N z{Tmph0nW!5qd6H4n55{|0I*n9kft#Nr%vnKND(Fl4M@ByQD-RGb|BegcqyLGbw|wU zoG;~WK-`rBPe4*g}5+1U9_QmeyK| zn;MfoMQkV}PyS1O`ID>QV00teZdda&`_D&MfnmW1wa_e9zAWrv`H#UFQbez6s zzFiaJpTwRi|tj~;Xu12u# zjC5fsb-y3k?iuQB9Rcen$*ky$^ushW1&tjgiVY=99?10-?hcHMk4)`Nxb4tQ{V9kX zDOI&|ZPH9wODNtDaJlBrBn#hJ03+%3ntq3UX0hA=)b!3MM zMdOqk_>LSYPW+XNo%BuY<4Aq)$i_`gDD{p1c5b-%7f<1*LA><3#ub|>V;1|`;c24OnsV)!?#Xg*-zu{%(9K~bgT=AA_yD^s5a3{V@b^OhVtXC4D*Fb zGWcoYNmkCIlm1-vF_d$%!JmC5YZSP_?aiiotL(iecdx{Z7M4(I&Z}oPd}oLFo&rZD)(6` zubZQVf7`8w#GuXEA;qu=A}{0NFS6V%uji+&nlkHSE^Sb@0PP}gY&&ceDQ(L2%>~_U zo+@S1P6rYaDI}Sfl{A%)#96k=tC#5GL+_b(%;l;Sj(18>O~9h(ua(y~kq>}vq{m73 z5YW`VRn(f3m~s{OjnRx_Sk;eJq<_u|NLw)RTMkHDF3A6A+C#{}$l-KL{Gn|+8g%&Q z?Xt187f8@p-c@gKRYp*bi2j+oh=EL0wNo8^UDow`oegw&&3h9A^tpL;hV-d(b;6zv zTzk#USv6FnK91q`8l>&mk419Wx@7ut4YxR-C{ zw;1_0JAKs^4-yJNwPe)v}bNQ&ew2_BufHt9Kz=s zq-JVJo@_>xobJiOy3#9>`0EX3bnsY4hH|=N={jZ>2XY1nOq04DYi3F)@dm(kPMob{zz8l0W_qkap6; zWmj4k(x-J+q`ir^mA$w_gKB2A&@&V|0%KzBXTbtz{sYb?_REB{->6z<(XzVA z=wuU%aNKmVA{T`PeK-eex*AsU;`jR#F}!V>Y+^=HPlK*YM|%>}HE}Mvw7a_2)Z&ay`CwDV7N2)^j{O*56jV$5p6njKV+`f3 zxq!LG8ie4CdX1Gl7r8zeB7trafk zwWp%_=2VY1q2U&y>Q-x&GinxR>f~E?9;{VomA~DwM1Xfdfkyzho|d%AN~8mrM(QE5y@EmDS{LAn;b7TD z*xC-&-ZA}N_U6_R;5w{>7_-bfd)3ej)miXd z-^bvY&Hhnr!V_U~?J4uYE5e6J`)mbLr>5aK^V25n&TX0DH3y`VY~QI)vSIqx%{ZHP zjFP>RQrtnld1~2xy!sOyj<=WNZ4&prYyWdS`*SS)BYXH`_*Yvi%S$hum*)D!91EZ0 zbkl%j=YBSw8R+=iOYJa=djPH7!j@DiiSuE->84KWdVh25ICosOXAq@}&=V&_y?1Ru zMR)gyNq2i({A0^D-_}&e=6>gSGO_Fp#6>!w4ayU^g3I6koqspp7QLDynZf&>h5ta* z>tMU`+QVD*iPIXV zZxVfImBUoK;dlI*q1hb)XH&m@Dax&$$I~@STQZ>zpVzz3_5O8Bw$BJaw-J`Hrdpe3 z=4Yxgjf5Z?7HgRxR49%_AGDt7m{55U4r4z8+XZYt9FO5?e=Gn?hV{l?T&@M(?^Svb z^}MaTV}1iXq8}GD!>08tfklR4qe0;o^STYrO89mpeOB~A@qBi4I3ql66oppuw%M#? 
z6h)G*d4ZglJaXS|m~j|w{?AKEeH5E8(D(QF_@!ew>J)>dM%)DJ?8V;1+sgpU^atQx zM)nf`pD^>w;eoO9F|A;>M-(~3RuG!7z)>ES`kR~rF73qvWU;Ck?1HT54nK@7df$3gKGM@L3byN0o8t?`b@Xmzv7y8)HM9~eV&V&L;j)Tx(?t0JjJ@pT(5 z`*Z3$6}xdPia$&1S54f$acuApKh9PtYmv)q-NcYxE!#Y9#BbPvR=Ui3!2DjA@qDce zn-=v6I?(b3jcS)Bx)_Kbm9h{@SoJcms1_EeuD1PWd0cX$-eo4%$@8v* zuo!(>Dr))CmD46UNX0mQuo1xs6&Rw_JWkTSo@(Npn5c4+8;ZfLs~1|y&WX96*Vw`n zUZis1l}^Mp>s{G}qcfllro7_^1NmYh44;IhA2uzGYt@L$-fq?O^>5Xc)L`1gK36B$D1$}e&4l~adE>qdU)0TD zvXf^nJ_iWtwN?5!iL;F~Sf_GP+x?yAVhx|pOj%os@eO2`9_iR4!wxx?q;D*;~8{*vuCYKK3 zyX(5i=VRF`VIT0*$(N~y6&UK=Qm?fPYuYnl`P-$}Vdt6%t9NHM&>#Jx=Nl6_IKmu) zBFK-whJgvZeTjW$7c44nV*N?4=u<|QV{j{1l!aJnvUVQn5Od1V{HmQ3-b@}CG5Uk` zV}&Ie9vT^GhAG|8vy9%e#6VK$rYm zxK9j^XeMS>Il-}7SJ346s}BW7-XrLO%qs0f^bmV0XgOaWT_t%8Fl!gCUO$D24lM~W zgu-0$Clq^?vx8M#LLP_}XaLEG^hZx!H>JBH0Z+?>Ejh&~Hs3H6_4Z|AJTf{yFffiX z8>+Lnw=`CmJBg?mx>Cj-;cr4Y-eE_+xI)x?+J)uzY{xTSaxI)%I&a?gP3pwEBU41( zfys(xX$ip&b5#4nA>_L1AAK4}Ff`i}A#B(Q?Y7Cpiu!$2mg4@T*=ySc$77UKtT8Kz zeThw0e9n6qc}wXzjJDfU@mc*&^R4@Qn#H5dEUGS{9ls*-`V)b7m~jY5B>~KI(u)WQ zSvHadmtjqsYj141P>ID9X(Jja@HQD^X8HUDo=nkLRfXC$c>BA3nm^|pBNkPSne61} zWLjZUae#`f^9!JvhRN9yd!%AopHjKR!X5* zxO+%V19`;}0N9q^w|k2`#w$BB%Av5RzX~FaSoF0xKffQvwIvo>J2sZclr7ex<~bJC zju>bxd81_Y?UjqOa7l@kVN~yX#ff=%ES;t%6lA_uB43vUKkczMR0rCy9KVV^=|Z)R zqL=l+YU&>9%jJklT@)!2ahzzQwR9 zT~2odvmgV$hVPLjJvUBfFj!sRt>Hd5HZexkMi~8&H|>Jwum53bhibV4=C{%-g|QRQ zq)9xatlEawWFL(Cb>fRj&8HGg)*82uBExxcpaj9fe7kIq=*l&Odoe$C^?iTX%z=I@ z@w#d4heDSXdshZurRzStA#m$YjK)y(a$H4^5Lat&OqnDN`rU<4BsizYIzD zse`RRow_5{An)%r6dsoAcV1}&zc(IBv?}LxRo2%&h8|Sn@U6s16IK(Pn*=#DjS6r) z*2bM)iu|lnDwl9OipU(DC&v0vd|0;5_+8q5rH!1duBzf)w+yG6Cfl~ZPj^f`W7Xo@ zr2^Y`K}4T=ifN5f|D5gcRX+rLBY5y-*%${zxzRLwnMY1jhZQNmwQCvOaqPD*EBQE$ z8BWgjif^tn_*BnRPc?XGY%Ys#KTi;AJ0fmpTna7g_Ig=sowsFPIjOvFPIoz52Hc$L zMH?@i&OH#seePdwQZIqRyABQ)GM5KC?5>W_@rS$_7`QN`F!@+DLkP_H*m+iSXP1Zok* zxZ`K*D})6gV4P` z3&4rZ=I^Obb9ew})JI#?3-3)xkJaH%14&~zFB%(>H<|k}>u>lqpoCLj2Npo8ZTjS= zYb$I(v1h(%Fs`4_eK4OM@G}kxF`i>Y-$Ech+gF2;UKe>IdLD&AFodoj*l>FTFM*1( zP~U9d4Uy(dYarzq`cY;q8)DvT7_X~huA3_N2&%g`ETCC#Iy|G#mtw#IVxASHAp|1_ zZz>AQgqyU-Vp|GNOQvAc89>agkcMwSXp7+LE8f1-z~|B-kJi9X&lD4Vz5W~?W?sf) z%cy!p3LG^$|IGa6OcqAr3M?8C=qR;F6oU9T)9e@qoUnyLxc#Oli&D*S1&_45U|e_b zecW~y!;EQfh7k-@+=w&gdp*n&Bjfk;3i=!-q~e%`gjj@X`lUVFJ4L2Nh}dVbDTrv~ zKPfg?91L&KDY2$w9|NTb;9`+%0-DoqvBAxdRAA2;fi<5i41}K& zsO7Ub(}6JcOvK_9JXhgFP*R5tx9wZMGq(y+&TH|2!+5*M?<=#b3$^&&jAK9G$sA;{ zDD5!h?D1l!PJfFNa~*%U7ox_K5%G-uwnV`;L!tDmL6f@WGM$(@p+qCMMdO(8ym5}tJYSa_31#t~l;c`}y)Q6u8 zWs*)*MMSKBTprTKWDZ6eU~VaXZ8M97Jje+&2zezuU7W%mFGxHtv{qb5w<<_Lp&iSz zDkeKBny@km$Fp#KXNb3z$_yKh(<#GOGSqP#1?06i^WC9bOeb;Tgc&cML(KqWyw+H3HBs|+j1fYHXZ8gYk5IE zK6YKDCgY`~!vbgUAe)GvtoA>V(ded4coy{*M=ZKAEWwjahLtVlMjxX>1n+C=hw`<# zVvS&$1Vxq6_7f~KogFr*S$-`Db~_oX)iy0$xvc;%jc)7nvy2tZK-jHp^`TWvqS5V8 zRqU+T?*R#hTuY8Lac0x-=BpTH(P&~`F4l5M4$;k;(vUg`Pu6lAIJ8PmcZZL%vs?-s zfH@2)Qx~#%3g_14V|W)2i)a1U1dIS>okbHpmhxarhI71Ro}?<`dL`?|Mc3_6H(Hn9 z0(Kl+H7+hQQM&Ao*ev$1##*-xr5!HL=uDQ_FA`SGis@`-bj`wwer}9?rU5E03>=kV z9B${!0hiBa3PWzf9F(&TIP;40PfgKR3O=vsK4l4P#L9k%m|7O9w!@gVgejg%bC%on z17Ux?bRI1~;kPb^WllLlCk=!0+4aCz=#7AepPp=u*2zDI5_CV6HGe%iwYvm|H`>~* z_ztRsc(Dhnpdj8t;|Wms1jz@0QoAdtIwYnz99M?!R{t1}WX9`dC2tOE9&)`&j;!b8 zK934VrSL=K^c-&SbK{gGR<+npl-yjg#za;67V*mp-R&LD)g;pAmNTee#gGNNzUqbR zy2UtsJUE+Ga6``7#x)ju)eqOrV*b0fkL#1zi+ZR_!r@DR;0rM-wfW#N1uAveFSP_O zx1@bLH-TrjSCue%wV%k?5&c+6qpGRZT$i3He+8GL;_IS$79x0B%=&3qG85wj+$a{g zRAXLaT~b1;pA&Eq)4q)<6t5<_U?^$;m($F!(`y}_Y^~%tRLwWz(3h#B249oSOwCV} z1;s6LM5%9YYm%-TW12#PbF{+4Kvmi*rOv-CMMoH28i%R`WRuA?PU6ertbsvvW%nC^{{ z9$A2+303GQ`xs09{BUssO7oecICJGGLcN>;Cx6|Vi3;;3Yc<6!4KDsITsW>2wKWZ$ 
zD`e8WsQJxPqeYO%IpbEn$P6a|Pu-BT&4z0TTMnz{kTw0&!L2(jKRqgi+%KzAs318l z?R>53)2%*#t)GO`F}E@3V>uQGT+2j%Yp`ky9lQ!vd(p*ZtNDDXg1SdI<1{8u;~@8d z6&|Q#(n#Frt{+sbms2mYXefkHUyw+sK-b7nST9#QXwrCZb#ZI;c`OcUCs|@k$&_U0W`_?Y};%FOlb$mh+*?&z(|xGcF3P+sHoV*UGx(|@WFH-C&i7?rhv z!>6P)Gy5@yZm=cYF)4*N<>gd!_)(ddHnqDpb)7NtOS9=QEv_HE3)c(pnLPwv%SR@x zII(@<)-!77wU&5&lKL}0(mbfuV~Qbtev+q!kFQfIPLp+`l`RVW=TKh|Z^o~9W|2Q1 z@|y|0%p>uiz4&}^`0Ig2T62{6#)^nBF7CXJpUoVZ%UT^{^O{4`^b3?5R?(TW;gCF2 z7Pysxg({zaH#ZjrfcQ(z3yn3Oys;T`YWpSWB--B@He9@_JT^CU__ndzMAtnQ3Nu?R zaDQ=lnmIOa^`dQ;W{y06uJ7{B0kuZuWOp(nN#MJtkh?Zy-j-)O50^D!nl;@cyee*S zvIw77`xE=GN%qM@V!jDXV=a4 zGPNcU*LkVa+m73h%e{O-GNRw`4+O9e{4AI}-<=WxJE>cz=z!DfHbx@*xM;K(emuiy z_C=d6PN%ntYrZoMZII!tlSu-1gzgi{8osS9g|)4(cL^Ns@?Z65eeI0_*Im^j?Z6uT zb2%#)9o&m=K%blB)*Bx8rADpW>$w%i_gmubTNUjq%q^dp!4r zpxfu7-7m?FLh;+~P-^Q|UfFs&8)_hL`2u&S{7+t)Z^D{;E6iCzsWU#?_i&k~+9@yZ z?H`0I57YdFnQHAW021GK;oa|0Iywrg#&6d?ZzkI$xX1#Zw_@N1aKAniJB=3zgwy&kW!oFge!$Ke|C8OG+MJrZ znc4_!sp807&3kw}*10S09`XA$u~KV8zQt2dgfRE=sZ1%e-xIr=T`~l^Bn~BY)dU+S@tm@c}NqUH~?9 z&vTtBqwNo)Y!S=^)rm25x~z2g%y)3F7woN%;l*{kxDv{>{a>u>x$}Sf@&GVseoR0< z2^KGaoc7%<5a8GOhf#_Vjd)xS`MnuD@=^AqT>N1?Ta6gMJ6bJlemgb5=R`Iu?caF3 zHsrLwq6;V&XgUStB4Y5LB2>2V-hdOXKoz!*XH-(U=Oy#5do22#qaB`RJbWeO*yl2z zEA7$kjkeoUp{?Z@rnr_6k1oX50D1)*-~*1`7Oi&TXP76mye)Zh<%@f0g>>~lt3sW- zTc0PRSI}qvqi0)IGeE<%7p+12{_Ix!V4L~sTZ+wH6FMl{30li>wmuo1#4!Hhu|V26 zA#fTWS>f${4JACZW)ocJA(SwHw6i@LnZz*%~~f6bch z<8c92+SZAXF0bLZBZG|(Z105G?umfw{dU|SOX#Jtrgjc$OP%as^e-zkC>_GXuZd&i z^JEg$#GupGJv!RnesQYwg0204c(^v5I-xY#E&ynkgI=nUuJBN~lj~}B@&9MY=5@KP zqfq#@1PE7ZdAoB}50ZIbF}pv6f&KcMsltH$RrTngu1Y`LL!E5>i4n|v$Bi#vU#Ttb zzVpiB4}a&7^K4i0x&Lys+~Ka}x-RUUw+iIG#2307M+fM13u4hqu;3pK^GWnJ1IWAe zjdUIwldglkPaNj<*Nw8yiDvxxKn(cM`JWokF`X z9XqUZeU#4|RTJ7A3%K>oU^9I*V`AB-9n;5<4EQ;W4;ak+awB1f1VHDM_=-1e_Jf>y z{;l$X1>ftg>(lW<(l^)YdaSkD<`1O(j<4;2<3IhcMKPF3we@dCywLiH$b3pRZMNHp z-3AZ>4L)?lc+u<>Vl!52^bAETPe^?JI_>2B0|B~p5 zh2>&}>HSB!GvQzOAI*gPYx+;~O9=d@@t@u%-dU|m?z6cbr4-~Jh7q{mN z=>3xq2;%j9ale0jj*L4kl(*fzv$~n{6FwNg|F5&V%Wp5e}n%T;a7nFH`M>f|0n+n z%0J0};rIgog#W*QM&+jeFS7r~zw$x+FHuonK<_{P9|-=B!+K$TQSiSUV!?J|1sD9& z`F{!b-X#PB10jBe_{I$K3GxHv>*E7v1_}ZV1ObOmc*@P`4S;r_`ZE%v+YtPnSR{kf zDt9m(jYWT9q&9yz5?wG1o~!P_7m3oXL^Cz6P%?o|XDouNzGyO?)8TAmw6b_Q`)2?w zzC=UGY`&Nqjn-HLwNx=kQFov)S;=lJEFwvWDW-QYPlCV)kZhRTkM>nHn5dTUl*tx}thQxBFK#WTIUi z`1eQCyKX7`9c_2#E2S361l8|BZae++KwO<_-q-u1+2ZD5K-T;7>)!GCG9n=U{pIpF zB(J&~2n2zo+VQOiJTiyynm&V!+-coDM4A}^JA$FWC zHP}O_)&F|7j`rNJrJ=!qq-l7cwpKSwQIX$fFjUDbO7@kpj!Yf#+`Ez_vdNn zfJ262P)Th~F*FIx@-iPxNAoEtSx?IyMh!B@OF1PaAVNgc!mLi2A~nLXDc!iB(hbVo z`t9g1k?WM(ak_8Rs|H#9ty_Ao$62UJUIzlVKgSfd=mvM3h&gp=vb;L=-|L8x(qeoG z)7=v6q&7mIk~0??{)(ovvvRm0U>Z$D!xW80WtsViUP)WrS$S#a70q68KMD_Z(QsA@ zNdyBkPk%kT$oPH(AEX&wj4%O=bliqDE_uR^JE=9tfdm(MB1J(ud5T`}?4VW@*%E!4 z;~JNGo_XBTQD@1nhjF*J{n9a9ZE0{xo-ZDbZO+r#=t&MV<;O@r?Deau^QeBt?NBNX zoO$t}KkScSvg0$&gU==F|7SEe381y3<*{=yg7t*f>^v~BHS3uH7S9*P#tSG7c z+>Y1>9XYsf%n`sj>;}-Efs>SEsTVT=(>R#gd(+&@6|Pg{9Dus9nBfcNiFiqX>j5ie z)$36=^VIns0WN*pj9@aoV7M?Yg5aD43tvH#3Rp$WMrO2daFb>1&Sm2&3PS~UF3h!T$+nZ^bw@9TLiXo#y%P!qXIhdC;3SZgp62Aj z<%;ttx!=a^BXL^b{xj8~s{8o-dYk`k1XlO$PjcI0?7lCgq1L%JRQn3ojB{f<{?&|9 zGid0CILro0!obZlsAnpev|N!te)+;&vPxq zSoFcNtnh{;gOt8B(oBwWfh4HmDFdYP@_)q|4p2B=xy1QzQdAj;^&S5bWD z+fD|d{h1<8>WCgLUPRcljw}r73JJua#e8m2qAkX=h<%!yLlEjx0zc*$A0aun9rIFi zMsmNGQVGj(%bBEf7*aHjPD->z$LGKs0sBVl&C}u&3V(!f%e3|=mf0uPSsc?`czz8eLMwzA&Ao3h#C8Np~`?h&+e$O@|;7~W5OY}hU$n9MX*N` zyf$tC&CFlGQ53Y1>Z)anY%oDW=_u1C_hBKFQw+HG*}${e^#Zj?#Sr$l%r#Hjz+Hm+;6+E4;_nm{Y^Gxen zL+56k2fq0Shh=3l?rmpc8Tg1f%F(As$$;pDlm3~sIy+<0$;>|sJkxY9%M 
z6DJmX;(W#`AwFH$F%NUn^q0>4Y+}=0L9CRSpn==+@eC_X=Z2X~y6c1jOk+F!UjzTV zG>e*}YlcNmT~17;7MJ+j3To$#;3B3v^vo327$h>&=eFJ~2zK%yz6)>E_LU)H=Xxl5 zTX@usRjhB04JzYnOX}}y^7HJ?ES$H^FiF}Wmrwfn$wyovNSkX4E)V{Ai+lnOHk3e0 zG^X$RgN^#DA-nJ|pD$APY47c$Xi08Ux>NUAh&a28f9iS@5gsJh*Av>d8v2O=Moh&6 z{`R5ngLc?=0?0+py`P+RKv(O%+j3SS8<$LQ&6?{!k}Zb*i@@J46_Vszclg~jIw z>FXO;T+bENc$W?q7<{Wjgl*f$(k~>WT19_tTpI}iW*HKmXV+`z_;xJV74>g-jbye_ zZW&iK(V0QP8D&ITCC=BeBv{ryJe_)H_I*BeE3VOATPBE8+0U6LKF>~xt17Oo&w!JV z=+ZgtZO5YQfrEX6BYyKhxNd_+|5OB;su}`45qm}D6 zgT|;}fJ$tq%jRAM4QWxqn{hbqjm zr697!0fXBDjyD2->Q9$S5pZo{d2irPDdwTy;^05*4;5n5-fVfNqBafb|E>~n;S?|g zS z@#E`t7e2DgMAOs73Z_c&e*q6L!U{Nz4}N-fAwKc2ZVBt|jM@vq!Pn zej|bbXOD@(`5i^p8kjyD<+-Br?a=pTCB({D?BV|dVL+b0h=g*Zh!1E~5C1iR5fuaz zrGd=#h{(uxLQo|?@`eP|dn1yN3%Qh^D2=63e-pWexQC6Xc##%KkyjaF-?)(+IaeMT zk{?-)A!&|roiP1d4XqpMAUoQz@4i%T(#z(@WP-}^kcnL9k*_W6hiN`3AgV|g@$(IDV zdRMuayb_sA37OL&B z4_TR(*`U>_o!L2|5{jMMsgYc{bg<}==V*?o*#+a7n&oMZgC&fuDU-ZJdCk{(%M*dJ z<|whLo9byd52F{+)Ss?VoIpv1!1<5F$qYcEaBL`D%gLMxilFT0kPa%H4+^0LTA>yi zr4$;a7@DCP>S>{Qo}xwLKq{n~=`+8Uen;AV3)*OAxTI}3q!BrlGygcHQ2MBlnw{dno!#l3 zWaObAYMyYBCL;-^r73|k+JO6*eM!io>~vd}yZ@)1%E5m;s5WcNzrK zpgQ_zemW;JgX)|IB2o9+E&S`l~r1)ub8EtY8ol_rDoX|mdC1&HL8B- zrfVvfOvtS-N|T{ldVU0}IVz{NdanLC0(y$826>%FhJG{GoL=aZzov$RSFBIEs0atI z%F3+oDy0m^Y3)_8SNdhpim77>qA8%M1=ywSs7qqHkJ>7q>^74qNuMZih#RYq__?YE z22Cq;dN?|(>q@J7nrp4-uUAE=_Uf{sIIPsEulvfc5&ycZ0SkxQ>8KY1wVwda{v`wY)$IO0csmi&6Ua zvYG{owU>?gO0$28I5*3)IxDtu`>&7cvp;LFpn0hxI|inS%{wRGCGU@Nz~nu*!bt43P3XnU{FX|r!zx~5CFnmf0t z3$Ph!x2}t~|0lG2Ynp!Ru!W#v!*!b!`$D`}pW>Ra!27Ye$+&43Z#-$Ig^9Ub+XVZ$ zr`o`Oej2@s*}0w@nW8JN)tk1&s*N?)_>`?>@RyVdrcp1K95QM;+> zJZu(j?$)aMIlQH+w5Te)penUg%O}=zwIuts!@0HU>Ijzmyj-TMK9IB1q#fBys3Y}N z4MHQs;(q$WsBG)KRq3zeySg1rzU3Q-=Zn6srM|Vxu;iGj3k$#T+pQ&8^B z%eW<)zlqziHEDYLX|8w*egcZFQc_gjz%aVLBmrX^_GK|Cc%)7VzSjZbq0$sdlM;il;xzwE0 zO0CxRhGe4~f9f<1lS|OY+s}Nx)qcFy13b;34a8gx)_t1IWPREktk#>R+N{miCBwQ+2WEPz??%u64csS6ydb;NFHGFJ(b&6R-z}{qCOouo zox;(&rg(kZ(}#!cY0D?#y1!*~QR-X#NW*p5iOc;$`jPT-@3+KH+jc-{WlGu@h=hXS9dG7Czpdo7M%d)q-G7|CCL3A4>UJ9m8=7mj($B+!jFbv6S;>+Ob;m7933g>U$$uO?zbbi(`zU!`y=Ps?cs)>&J z9q6m62R1!!d{u!u&8B#h!$hv+sv7E$rP!kG2Z`{^8LbJhaown{a+tn)x}MTXyyfcM z<#6ob+`aBFs>i2Z-m30)x*)5BIgmn0i7IZ|wjSsFZs)!3>ogAV0?*Q&X3A1`#xf1D zq}FjrtB&$1LdMkW3BJ#Z9?K(l?M{B&D*)-%KFL#ltA1(a1dGtZT;T><>xL5!8UN39 zdgw(h@8NgZ9RHu{@g5HPzVH1m&i?-IBtP&x@AJQozOjwl)4Jzrc?g>tLue-C-dcz- z9Y+E^{G;fGzoQ$ExYF7D$_?k69e{krljPwmytm)5-MFu$Meeyblo z>#%O~ZNA5IZunEr_y`X9 zq$kvcaQV9|F}qCSGXKNj683~#$Spqh=1%&Na_#*)&(19OZ6CQyzOJ3k%&ecQ#Vpoa zeCs;z_j@1kyC3u|{nqR&`0PyEg-`jpc#}76&tVtbfbHzuN=rH^2yp-1EWh;{fBhaG z+L;de6#xF_WN-Q!6L0+(`OE~(tS`-rl=hXI(Xrn4Y+p4*lfie7`?=ry_TTgOpZ_*q z%EAs1R|F1(K@iIpJShqq%(AecmO6sGe1StJqMbW=GFF`9$VdzZA>+syS+ZlKgdA0> zgoJWq%9nFwlB7A)q{W1Q2<>bn$ma`~I57&PfnsRTqaJhWVfu0DPmP0kHbt5z@@2-2*=^R$8MEZikHbkNjn(v2vRs83Q?`2b4o{2{p)N~X z_W$h?uv@}z+ne?8t-O^@6L5HfJLHg+ANSljv-9VOqbHVby!v(Q<;-nA#~rZt@8QRd z=c_kwIP~e&-(}x^{dxH2-_M_WqP~0j`1RY5+g}8@9|8n$Kmy@15Uv0c+y^Kv9Aao7 zgko@TgN7!=X(BK(6yv6(I9#d24`*@-kFcPk>L{C1RFNpNZadK_5K$ber59l&F##J} zwC$;>Zdpq#8FJ$U6SUMKtwMgd|R3l~>K>zH;tzn1*kAg3|RR7dV zMyYf3(dn*~^w93SqZCR^%hR+;PMz%Z)B8LHRaF0&EVaM`4^(i$kScs|p$HFp6;_)t zq$msxIhu;7nlNg@#t@?twnRFK9rmJLS4=j}U4fNxMiXPqu|yuptPMxB2HA7at0ut{ zNtD)FlGON~%#=#&uI%QUM#W4tOLxQEk~3qm-J}#2-V7qpHu*FWP7}c@HZ4gCz75E3 zLAhm5g@;sYmtdG#rZqMT1rWP;;ca))NICY{(o4_XwA@k2CAs8KQ&!pJl}{D$WmE}V zwS!hIq)^saXNB;>TXV&A!;^xI>DOm_9dT$H!>w1?qCH$zM{1odibiRTn*X-i8XYzh z+@BT>POBetBWun(i);Dhkwu=e%5>RgyPJ*Yjd$+J>gJLIYrEoCO*fCGYH6RkJ*^Xg z$DWg6fem(8tKTMmjd5=lr8{m&JO23ckkyuNbCl2SoMq5IcX@PEWmb^piEOrEbqbl9 
z(AAv*X{hI)Y3~VWpKl_XSKXCzI$**7Jyuzfh=&SrfSWeLtgD4rJb13L%J=ZE{pBTG zQpV*^9PYah9ZM_S&WxMNCpSMHdD2JU?s>=a)=xG;2|jD&>x7=Su(Q{{VXZYW5;iqC zPDuO0SjH$k2l>f6D=nNu!U>)sp zce~mNO?l8l%I~^1F#o|&crjTP@0Mr3s#vXgJc8EqutzkuywEIe(o=A}f<65qVS;Hx zm-wDDfCZk9Zu7VR71h^16G$v#f&!b1_8=!_h4F7Vw9}n@!?^w7?|+2rlb(d(HCren ziUvF&0-ZQV1?JI#nqwjaEx5$({ZWvCBpu9RHlPe{5LP906$nG9x}nT2g|-XIBsJuc z6lAh^J=6&9JgGt<=4pO_v*8M%My4{Qa)+Eu*pDdTCZnJyQ!DD?_v|<&n7$qJ^gJQtR9#%4-K^=lonz5{(Z64+e*Z=Gd7z1p}8r#UfTdaw3 z;n7$*f0;l%rgM+$d>|kPNyvA?Gner+=0a;K#xI_-nmZ|C03XLqF4nS}ivy={95;{vz_j& zCspZr)p=eJk*BL^gBa=0)=5Z&0d)Z-c@`3b4ispuB{+sj(RNsruI+NgG3QE$DNOdgH2;}LooQ^or~=Gp7MhyP=@_jA z#4&n8e}k&n5iy}SlJ>NytL=)yAVyhcz)f<9YOGUj`&6h(wU1VnXK#J`Ti#+dRrh3- zBEQN{5Znx`JIj#l%<9Rs=8miuX)9jg`a8VJu&(u^VRkWE2poj>Z`T^q5Zh==ST-TB z;v(2+v-+v1-jPxLIVUh7D_I5PV40QO-6l8dU;0fGw6f{xPn+q=#Ia>tkNYNTJDR;M z0!o>(y=_xfn%k7>cDKTPFK|2T;SYyctA|V^>mUR>7&!7lv2yNPbH&!SCbYVAV#x|0 zdSe%QbGxK$XwypLO2cuqL_kg4dh0qCVSpI1@&A=CqfAiWU_N#)Oo-{;${WokP}jwq z3~&K*8p8sURyRPNYbzNhy^%K9v}-P^Pn!{qZ14a*P`)q&H5}ima#+NB=5tm3{AWN9 zx5N(0l~+}~NJ%d5xju{TbwP67;pGs!Bq5FQ@&M)||8SI8>v8imP0=8fc{ZX&oWAZA zL{t+)$x~%=g~`E;CfD}9c%kxszj9@?VhhKh^sFNC7vVI&sk2^eN>_xnr=sNA!OK>2 zkk<48X~@OGuEs5f?@XQz>v`3N<@UBm{Ov%4+nzljcex!fXhU1FcFWaga~F+JomBUR z0PbCk?dsi5n>W+DNpP2$C+SjtcY4w@nExS=l-m6L+S}%8^|U3h48P2_vFZDUOaawQ zXFbo+^gebZd31F_v{LdZIRUu zF7R40BFkM~3}~CJ&Xcz`&21h!aqB$XJI}n&fByMz2%X$SM|uY%^h~dwIMK;oI@5K< zdWB*#>ho67)P>fpe_u3kY)<^Z?EmU}z223TXBqp}iXX&MdqUN0XL!wHZjLM}R_>7f z0Pe|KmXUxnuG^;>ft}5-fgh=^oK|HjVKeXKUvu$8C3W%%dq#;x-muiBeC3^g`I~oM z|N9><==~pb01UmLBRvrct8_cSpFy40tAmZ(vfFD4r*Xi(TN+m4y|WuUuS2C{vA$vH zy7US^z$zBAW4z^SKAW?=!>Ehv_zy0cm;fms`Vn z7OA_pj(a629K|#$vLBtv851~VKh zI4C{SE4c-1z^2=X*^4x4lsdi(t$owFJ#-#F+^giFCgW=@HBv9J`>1i`DX>^OMpQ(- zpu5(<6R!KT_R_{|bU9A!2v4LpWqU0#vpeuoIi}G^7E!q{( zJArh@;iE_3tEj%Zk&NO%o&2GlJh*ctN7LY@P@Ah_U^Sa-0Ernh?P^LtTqCEX$4&H| zs|dx}fFDd0Ho1a2<9QmQ8pU5&zmR*!hh#Q?^s*$}#Ik(3$fH7ww6ptTK2q62j-<S{)_GL!9l#s`F`dTGjPv@RSArEM(2n~KS4kw?BV zN96k}A38hn1BFD4%XRqxhAhXZJWb35HBnN_G5Zkj<41@&%Q^&r?h8npB8vC*UM7L@@&#%Ie9w67w;sV4-DFDFna!=l(XCLzuynS`#54(=(6np9;M~m& zea%>mu>Z*UP!L5yjGRl)D^WU4(dq0b(r?+$57D9%=}JglGKGf zII%o1V$(4TEKXnJg$!js)IrYEsPVxHGal+I-Ybl+}5?MK6ubQw>g`>PKvf$i_(qU3Ako&9Isy z*8h#X*f}N9ygb%r?O2c1Q|oNjed^9=MM;dH)|IeU8ePxcQ&86=yil~71btLkC4$==u#38umnGN@UBZJsFF49o zqtsRXia!x}yDF4EVZ}4Kv{;OdPLBOpwO!jHXj``x*}yc(l07|?UD;}_2pOYMfd$ZR z1X?ds)H?)M+YDEzAy*4D9KX^!&GcD1{Mn!#+HM8ao}J0@D+z+bRb9;&|7w+6@li@M z+_VHBUtlefD@(0aS|^T%Vm;<&9aw6(Vp;SvnZG zN8=wD+t;DWr{a0H&t+7gNLU}_&C?Qv(llQ+)tifhUxfs(eW{q$HI(&mU9o*#(Tv>@ zMOy^MT?NkF1-{*Fc-!5LR^NT3Gz{L97~Xo5H?BP1mlQadeOwV?-fxsr!L3#kz$Gx;B+)$_>9N~f%;i~FV8ca-okac=#MAm= z4AsyI?NZhCUn(ns1~?A^9^2-O(*sW6+HGJj{^AE_;3D8tu&Ur_?OX)hwf}6Dz1Z79 z$eq-9g=21QsNDPB5Pn${-b_OjwZnVfs-eByAyh#gWEr&;LLR8@ecDVM#UIurr*&G! z?WE)VO8K=;PfFuMHe_j@=4hsf zEWNw`Qd6)jO`~`;X)b1?-DI>B-EHpUlGDk5C1;@Afl95{4dzIOC}ehDZ3S5IL?Voq$&~HrfQIHmvt@-6>8{r1E5*`){U4ilWDpS7GX3e1 zYP7zlfg3elf~2K{ghezZ%7ryp){rx|3|pf<>Qgq`6Wv&)ZfgAuW6u8Ur;cD{ENGmW zz2|~l4el?l#$m`@P@~BkVop?uj_3g;$Z}jOuQoz9y2pwT>Hm*LK(DgBbz|s~2yP9r z<-dWynJyR|@@ss=!GOIfE~8I%^%~34GH-Uksw9b`O;8Bquf>Mdz^27ngw16jABrsM z%8tv5?Ubb6Y^8i<&z5ic?rhL*YBHAAKBeKS#_EcuF;!(_Ue4_;eb?D8C5Z-H>BYV} zoXdm`L1svA98KTZ>pYZ(+lo`4o|&}!vJ^d zW)9yer&J()Y1};AsKDNWGUqvFa60_&zU^(0uJ9Q~5&wc7iElpURqZO4jnEXvL=s12 zjcYv#r=+cIasTw(dg7;4`;_+bCET-Td*ru(jr8`P>M zs9aC+G#7PATc}zMKcQf=K`3scyH^yyb6`I?4&|mlPv`Lpwo}{fCqCFnWS4cM?Dghx zMaM|ZZgi)X^!yTcaxeFCxAa4sWs*fnIe_0w342R`8lw;dvYH_J?gPg>ecJU^4aiBI+J+yc! 
zo=)~YbZ+lAzF% zPu2zVrg2VnNNXpCuC>mQCwWswYWGI@lrM6Zhk3ov`@L6ye@6JEQwZ*)Wy7Q-{=RGP z-RQ1F{BVZ1QCED*wWcdqHYn%xe>axwyAha`+Ip`jsF!8XAL-9mc<%G*^7faA=X%vP zR1*h8a&|@3N6e4_c>|kiBy>Wab|dj#HU+R^^)7h>u6w(`d%W*^^=_nzf^#sqLlDDw$VW7#Kt zmPUHyqt|*>@va-`kHG(he|D4x2wwtsfRg0m1ppN(7K)+M$Or>CMtBjcIE-S!gP?8& zDM(62#}dIl66B~UlgEq&2yr5rvXe)OBV}S7NK%>xbm?~Lzc$Xhp;DFkR2;R05}I}zjfo*)-Butap%gdYqtpA zyn44W>8tIpg1~|Yn;=}+Fyh3A1OEqTysyI?Fkd9fX(-w9Wj%GmVAkR}3ur8$?bz7> zL-dx^swZ=P$oevD)`L37w(N*T2Fh`ESMD8|^yc7shT{QFTo7{Mx{34l9eMV0-qN>CM{`n64ImO%4u&`pBz2>^xV_OUoU+2XD^?#_ujAHGanK8 z#~FKISO-Exe6$o2NFtTAQf|Q6rc!T)P)CGp0h$NX2nK=Y$xStxbekhhX!jb1nK5!8 zO~zal#DOm?=->!JH29JvDTQYpN5J6nU`!$kGZRI9At4SqM=7-=Q%gDZ6qHQ{^<-34 zQk4^yS!oFtmtvXq)8;74%$u=+U}q9rF1%8mN=)ieaF>{yLzlC1KQI0!tv6+@h1gl&L~H z2D=bTE_tZYC@kz!LbfJGm{}*irYM=W2DbR(ii&77q>J3{aO|k8f+(*hJ+@0^w&#AZ zOfVmy<&%_7P8o2NS5m1Y!UapUWtI$k+2vOemvtmsYI$=e#%OBHmzstZY3DQ`*C{f| zCF42mt0_}fC`70MTK_0?%^E5*LyfxHX{es+yqz3?+?*Y>qW+A7h00DkbgvF|7fOdM zwN$KlCDPmKr;+AdYjEA#n(H0;Nyh7TtJx5m7kUm$?5f>T8Uvz-It!D|2Q7f@DCLfu zS#BRTDjO)`GSoAr`tmZbC^<4sV~sXFJ$2LahUntGKMu1mzy2~g@WGiYta-vocJ6S) z5Q{!K#bUm7G3u#jym7~V4Q5P^%j6I;?IqiOyT~U)>L>3PhK+a2gVKB(@9Zf`;G&5- zdbH1!I=?gYmRe2x?n=8?{qjgh|6jVN60|sY3aZ4ox6D^Peca7M5*gWG7cchMWe0!u zXVE955Z-Rj5&v(zM2`pW-hQP%LC^+n!^?!+_8<_*Sp-K@E04NL0VMuSa8%mr1-q2- ziw!cPgG1_u!3c)A&5@9ERytt_Lr1z5N-RvKQymOXCqve?j!l7a(TZ#&joIO@hqW`A zV^H?H{`?GijB=UGDmA>7u}pbPgjM=fb-gL(tam}f)b)l)pRHZZcQLSA^8!Ua4IywP zWek?INQ1RNu+eIvY9FCwCP4U|4SOm};^C0Bwq;!lH*WhQ-#&G>XyvU`pU4*jeZ--T zJaBH1F-0*Vd818`s)7^*Ng_4K!PJ@1b0{ofCsPPI7Cx+$U@F}UV>qT&s*;ATd*uxo zDZY>Wu>Y1mv?c9yw?sH(Xo%5iU-Fb_MC1j~c*iTy6R{{oy;1Re=<{NQR%J(JUJ+tSJ{on{7VWrbQdrYP^XEaI8u#;G{nPU^{Rx?M= zD;FHRNG1m-PY0qZhnSS4A`NND#U)N~{h}SkLitHhN>r38B;_eHib{)dw3QxpCC5T4 zn342q7ahbUN>ge}UBYUZsd*~nPKbiY$;3HbHr1`J6P;WI5KP&|wq&8|g7{(qh2;0OaN;zl zZU5uu zspT_rF;yZ4(d|yp$WXiGN2cejDFMGqNBA`>ob&@K^FU-TfBqE%5Da5ik9)_zWv`M8 zBoez0Y71-mC~^%IsNvE(T*v7#adOzFj)Z; zSh8gTDJ)@BPx;}3a+`hd4kO%IU$#xPqFpWEN*m16?lZNuou-9V2Hp2a?VH}?l>Z;? zh_z&rIDNpS%x}LK%LgV!9C7vPgLAdYt_esJ~XMbo9de83vu037O>=nTLS)aT-E6^_$VR4GCSGTMROFP z#~LKEQo0bk<}!K7Im^0|x zZDTDr7@_d(ANfvVEN08ZgUl$CwW+<|Yhydxah@~Mz1`7ogB#CnN^NG`bW3)pyXsVT zcf7MM8m+$@Ar!Y~h?}$TnPE7rW3Bh2W97!M13R82#`TJ=nML3hw7uo)^20ZMV;q}W z)y6&b#+{qav`3oc3GX8&S{>uNxnQq%$1or-ZRI0tIq}x&vc1;p<^P(Uab7r4P92;THGeRV4LuTfO#G$2!;Lt@ZiMd+SJC0@&4lzK}OHr$xl~ z*+mR++M+$+ZVz>sK~7KKY6k8Kwe+pk6aE_=u^!XS5Cpzn zWi3Ovyj?-f9_`s)?crYT!JBCWj|UFQypcw!>6^`@45Kwwzad{0xm~^a6wWlC^BG>N zYzkO;oo?}#`!&*dWFHPr-1l*uatRUnWkBBrTi9tCgrJ1B4F4J9 z0G3{XnX>WOTmcXOuGh}#%MP&x!wAd)u9*Ugp0+Vy1Gb(UN+2A<5(P4XT_qF-W}pV9 z8wZ9U*J%bHilFe!K?%Z)z}X+B?F`BBl;8X<*d`vDFP&JBAlnz>ToyV=c^OM}u@|rXlk99m0-E6& z{-PS5p6WT^8^+;*9i!9PP=^Q!s@x$S=HVW8VAg@)9|qzeB1+8cl8PN4`aKh~7?t|9 zpxiAVBzl%M#af3^pQTY=3IL%sCP-EdB2@{?@4e68vHw-|AVQWo4t1Gg%fT8ezFdK* zRiY(G-Nlu&p-V}`;`baRC1BYKI3!K{AAAj#F7hH80;2))*DyX{F&bksPT(9eBPf{L zGv*9RW2zJK)T1`y$AAD{ zC7MSr{^K9iofM8`Sc;|FY2}l}S3}w+6V@V+xR>3*)r_P{1i7U{nN>nUj$9xh>20J& z?qy#_;9rJhV9KHYMVTj<Bmc!XYLip*hu4Jvd?Hio;J4TBUYO9fu`^k zWeRede8Ql~9U^}gQr?A6fIOu;cH(7nV*OxV6LOQ$q)R219B$5(rahHeVo-u!p7n5E zC$vahK3UEMXDsI5;q)eWfnG31=LlWSbW&$^UMFB?rwzSmEKy!JdFOYcW&GXLLD&gVA@C8$N!;whCS0;tGYsc90MCK@P! 
zoQ9$GSm1qPMABFv?GGWy&&a)}Q?Va2)!}banUO_;*vTS_y~|7h*@w{LEB>Y^a3^qf zXrDG`BIU)KnJ9{;sEU3hi#C9Z!f2digJo&Nx1d0|9cFmms7hj7k7ivl6Bv<>`??_R`zM-`j9s)WoNU>Yx4<(#lQihaM;S*vl~t z>X{9lbmC=+_7^cOYNI-;yeh_J<^PtEMUtgnsvhEKsKQ&neyU6MYh$*?a+GS6wv|p= zihHtP^sQ==$|_dg=WTf=ZW(8UzUr&} z$;w)V$=|q;2aJuYvX*J%vCCfhn)UtFh#XsaN!{laMj9IGx(+R0Dk?H!=c6v|$UNPF zoD&~pR=!?pVxES;0xZ`JY;6etC<^-ECGr%0MwgTNWX#Z@g^=rqs+_I{ zii~7wACQ0_NY%%B>7op%n6iykqRp~eCK!|v5!}H3rA=2#+$2^js!7ePJZQ9X23*Q0 zpu(a=Zs)NyCr8ki(XuPiqW|8z!Yk7@E$LF=R34}?=xdGQPBfZlsABD=qQ=c|t*I_r zeOO>-itT0MM#C=L#p>t8UK4dxAn&dtF6NQl6~ea4E&tVRYQkiy1*`b|3HWiGnf8ya z?qBB#S%RJ>RaT)O1>~|Mgc2(2cIGLa#-j6989-5&l-Z&{35mI(Yg(Y5=h|qMYs79a z*@~;h(%ABnRo>>P^nNe!37*DDC|$yD_nNGMsutug` zB&p~l8$VD*#_tX9X^m)-)FH7XA93gAul~*}6Wfv}Or;{wXz50=5=`+G>*$V-vfgI# z#ab8yZ;=;Q@Mp%S7{Bs2k@0bqT#vYE`z_%yWB&^={s; z5#rOxpps20-H{m%+f{o}ra_LG7gTTP2^fv{^FJiq!r> zu{%T9|HhhwW&iS>@T)KzNXX9QXDt=&YVmSNumlGwW@?Ilg7Fu-vhjMWG&2a#?i%>e ztuGe&FRQF?N)VeOz>>+IbYQdlHL}|}^-2ejNFY<|vi7k9O7(5%+8| zuq>rjkJ(H|bZ4sc={|4osYV?8bsuk6cFhf=S+xiw=*qHUtcugx5*HGd$1(=D^^NZ# z7&2mG;vymTV&m+`KsM4bgPjfo8&ore;kN zsRlFRYqzq$5qLk>wzSe|121V-6F7?S_6?rjid^#=CjuE|<=X;i^d9$d4>KTAHzywW z;ubbpPxotN>D>TsiM*AzI;)=b8TAC|d5b=>r!!q)KkF+6> zyZ^U3Cv7`7bq|feXXC4U=QnAaw$}bPr=E5xk7dHHAMRpxeiAg8clm3>YWmW)t2TJ? zMpuN_bzB#7T#hMP3pQYVK^!|}Wd1F+NM-OHqWPM2;`U%)8#6R};uo|SHvV5}Vr|Wd4v;RPu zoWwNz0I0wFy_a(wGWpU1L-j>Nl=rWGhia>1d0QKNbB8n?OEtIN`c~6hg17cn`TB)A zsKch|7#I7GrelVLQ{DRKvb#Cp88=w!d1%bIfKqH(pGSogl1(A=p6||Xdu_P4txHUE zKmPD7Zi=;gq2b_ia8hUs)8Yrckb5Ho0dD&Hq!%t~@`w zt@oIhS&uWM66(EYy3}ceb%yTIj-k`%yQnWYJI_As*>}}9^}x#ysJgBK6Fk_*I`ww! z!fHF(C%TfF`KMx|n7_T;Tl^jHx*20U^qWY`270p!3a=D`--A5dy15JN7{}@}ffD`= zMres;YlA*6*(-m|vhdC0yh)e2Hg`n!6=q-c@X%{bS_0s?0K~9j#s~%j)+3q2YV@nt?U--h=s}^uvEqw`#!Bh7yT*X-!8!>BRao)sx+qdclv3+e<4HY855Uz2v78u#tPn?=W7{Tn!MNtqaA zSW?^*X3E4Hdr9PCu;&DZj`P7msE~C(HyF|I^0~9|!ifdrWgK0yl80D&voD}Ivo}iSn$dXu$awVL0B67MfI{6` z+Z44HLC@Hzl6eU3U3ay^bwN5WH$ zFZ1FvHoasIeUv@O7^U<-_y!HORQ&+$4!>0^E9?bXGu#!z8J}t|RsocAKvD+okJ36;xGi>dyLC75tuVwgXp#Hqrv?TUXmg+YMLW58=IWgn6UZ zjzJSW%n_Fe`R-T7czwj2Acnd$_~2n)Af`vuo0ORK)+fdi$`1rG`Spzt_W1VNK^B>J zUP?|mPT+%=CWn^mL)U>t=$yBYURiX}RhmS} z1D9NCJW@SOwyl@6FzJTN#f{#{z3zKJVaGm~ykoaV_J09n8+iO;zOZSIaR5UgcAnQg zqoKzU)+$Wm6qmKefoMhf+CkJ9k)isX4_1zAQRX%{3G1XtfdEV0%0AY@DJg6wx^vwO zWoR)T-B4q;!(HxlSVP{83{7fM*${D+D zbD-OvwZ-RYaeVB%ny1KEKT|0u1MrKVf2j2=Vi~7fwCac=D&(zh-O*C{;SA53xU9i| zF^hgwlpph0#{!1pUk@?`j`n3g(qNH$T>n&5swT(@oS`vqoI6p24x~s9ey~;L!iB(u zQA$Rp&_N3_$SR2kyB@x<12OdBENOW|A-Pgd&@hJ}BoVm?Ig%LvuWZVkS`L|i+0eYh$RwSe6H-1)dH^ZVJ*9;oL zcLmHrnKB^=JBT%pZU~W(yXYx55mF4^(uE&85)EnD(yQ2Vb+}BaE@$e}-UYFkIbE3$ zjLFl59Fr`5W8yNg*H3ApOPkd5p8pL{V3)T@Drn8j=QX!Uu1f~0o6X5t_rfU7Yteyx z=B$nh2~#%tbikcsrRN-h)gN;1ij9UU>e{MG&1UISta!vGwH6A8O(n~!;;W=)DtbOl zXs{Nx$OYyq_c)Fq_LJ_4tOxmpfIBj%AgFWfC_#i$SALcoE467aK^uawn3koyRBbMI zS=!yDRw=+tlTK;t)1UqnnNrlPRDmW}|50(N??GtxjC$0p)-_sC#j0DonAL`QHED;; ztE{#)9OxoyI+o3@9U+LcZ_;&u0(35T3s_E}hHtHTF-=6D8_s=>Gk=&uVImvLS&Sx@ zq_DwUzBZXb&2_YqyaLsL5C03`i|R);5}^unVxic^AF z#M<80wm==rWAM4#5}%m2z-`+e{hFxXI z9K^Q2OL=6T?Ihq{B`IU^Hus@~^e)c6N8Q07$crfO)+ZHd-}(;JT-&W9)Z~V#%~FD- z=F;FtDZA$RHa4@;s40~oT;UA!(iz#X^PTr>VGW0j!?V>hn+W~k5HG}dQ%&kne`ewo zJDRDBF;5u(IL)Q{kGOU1t^a&HW9RM|)OO3Gdmb6k^}dc(`E5^DZm3DDRsUSKv5@vWAtEA_ukyhzPrLsxxS?98v>vBuUvsk@y8P!uZZBSf?kfa6FNilLNpa(CUT z?DLWw;~HJFr(!*;x&a`{C7;fCdc{|QPL`vfG@+~+t0K$Mrp=rK>1$>OB6YvA&V4?* z(%X%0rl*_Tuazyl<-OCMhPd92#`o6es&y-x%`==fOg6t>cH`bvpHv;Sri&YDFd_zz zahEt;=T2Dc=Kl?UDo`k4zj|C-`#RHF9Mpw$Ec0x^adD-{uY|@G*zw+xfy3I5%@s=E zd;u8SnXkMdR(|u(c{!t!9eTG#=jW3o{pnC|y3ca!EZL@xFt}D^&CRWrq 
zx!3r=E4uA$SNoI)UflCwa@VJ-4CnWb`Pn47%*+l@#WTtGT4lBD3}yRX0krVy10Y-~ z&vv%0kMKy|IDPJ=yy0kMd7OiCyP-H~(Jx2Bzzx(ztW15DP`_^1{~rKbZ}rxsDR3&@ zVsG{kEvR0`_HM5M9Z=tl&+&8(_$1Juw6A|IaK$tOy`XE>KBL(-%2nhAj>O|`D5L}} zF8VyK$^Y&Nxdi0XVlFyT5IN|`pxVk1GAOw)@YB*St!D78f=*OWt)z;L{owEBIw-$F z@AImy%kB$FO3%;wZ~vlB0Ie_p2e92>?*I{S_Hye1aZe0$kL&Uc?C$QWzAO7IFvqmd z_}Z?m2IxPO@A|m!$bJy{Fu>%fPu5y)xe|^C!9(uWZVVhs{R$7t7JzYR%=>oDfneni z>oD$w@S=Fd5)Ei?JPMU2#6gNK!4_aY`^+kTyh*$CN}dRBY_=~D9}%pkYY=he z1^*~z*=%kyjBx_n%T{i3JR71hwr5AtYXeF26!C&aOlX@5~gla1{yg z9`mtGwjoTABp6=G6+O)A<}CqZaTae87sC)C$q?kUz-NB3$sm#jfid8GYdzvni|7yr zeJ$45Ng1uoo1hT`+3LMMvR<@?Qw$06Z~(`+vAMv}eMGSSp0MXOQ6?*A5&>};$U->VJNNXL-zi&_olh*2cPt`RXnCjTd= z$6T@(S5gR}Exj-a?i^0rMr!fWQ6;4hq8Ly7Sj2O95;C_C9SzF}Hv{8XaLUYy&AM~GzC{v4FdOd@5et)? zywf+H!{^M=UW$^71e%3XLh7k~LG4K2g&t*(7p2L=>-42WTfYy;3%# zP!`2DBe}=ACbP|wtn!Ew3UN|K4a-PBbI|rP z>XPJP@Dnv(Cn@>UO0RS^o%BDq!gK-(Nq$H65cKN4QbB9;K^+uAbt_GU(IPXnO+U^} z(eh3EGC7Op1`SUQbF9~7Fi(H4u5vUO*`up@3zDF8WTq$6;uBRPhE(> zd`hVKvQgFTiXvJJ>N?Ju$&>va@31Uc{73)iq4Bv@~xPROOY3pp+x9Rdw{0 zRfBdx$Kp%{b~jH9{X$YsXX8^^^ei)I)!iSBu5`Cbn&a@?yUhY%MnL zLQ?wTPgD~X^{;P&krY(@HSQw8)!q97HK{7X#p{79abc<^-?&OODS! zSM-%WlNWH;JwcZA;v|BM0TdE&Z)sL%arWEvvt9l3 zWQ2i9dbR+5_Hg%=Ohr*~8`pZza%j|wp7v03v2z+%aGe&D*P?85p|)U$wNBGF;M`I= z+eT_D4gBs;b?29U=a*Nq6A(f4Y-jhQc2|DeHgzqkbLiGUk>VB-WEge;#0rgXZFW@i z(}5uvwft6R3D;+ZH+m758LIbsZBsXU(^${x4iyu8VKQ>LmxTXM*aR20ILnv3@|RdQ zkkxRNYqtpm)b@09xPEuIbW?2yr7;}0m49V-h(T6dWl1VQk%d+hN{g3*kvA#yHhD|+ zaa1*f`89F>^@BUuaYNW+RhR}-Sd2?Jj87PI%{OY_m*Te80#mex)!1u!SdQmdhwnEI z$rhl-D0cU^hy}P@DQse>0T-$iOhhw+pICzP5s97n0IRrBLI;bbSA#cLi?^7IbMf%d zXGh1Fj5+xOJJ}CuRg`(_jh~Z+onw#Q679~Iyn2|9Um2Fg_KrnYxWYG5H;<1saikzQ zDFzt}4S9kS8F2fPkcIhy1J{97#*t%5lABqBIrx%0cn1H!xSl-ud#xD{?-Y!kwgNHq z@7UPMMA@<~bVbdusua*^Cc%lmQQZOPQP3n1)#yxiWU0LzSU*?6azN^AO%yE3P}!h$hicCL7O3lf?=c%f~v z?x@)Y!5E^&7zCACG24`2D->Cpm4>x;I19FQ(N}X_Se0=YbwwJicbM+N%Bjoxojn(Z zuWy_X_03+|-H7*}p}2x)qHn2K6qMnBA33NGx?lfstCAJEs83RpFHvKyIigb-4JkTu zp*oErZFJYSPbJW#A3Jm5)NCrYoprK~!y2r|nyink8bkZD?-i8m5Rc`#02KHt?Q=C< zOF^5|rej-qb$X|{w0RN3Q-OM~`*o;`T0#fgp_AII4V(84Z>g7nLM{5DH5#L7c(UPA z7omI7NP4q5`+u){j)iTsMVq^~8??QfwDET%(fT`7yO$9lN(b=T#;vyHTA<(iO^Cs9 zaGS4p`?ZJm34HsK1sk~cZn%$|JW=?mmAji8duf^bia=niH#%3}$GRU}ojrRHyBoYI zyu!o#zri=AXF9!CFBIko8Dtx$8JVu@T1)>E*k1KIdKV0mce}R%yFu5KzpI(BGrWu? 
z7n=Zl!9BXN5xj+`S~+;!z=d^a;uyk@Ty-Vf!Y`c3m;Ae%oV0r(WzajP1KAW70n6EY z#AlnfJ~zK!9<(L7+q z&>`2H#^Kz?gEhxVy(5>Kz=`u$f1K50ExH+;VTqi&zuM2S`;`lQ(03ifd!5h?9c9ur zw;w$yM#NVN*wJBI*_VA9O56+Ggn9q*n$j!12~Nb!H63_Kq0_nU(|x-|gB!SsTgOjb zvA;XHFM8D*(#O5|xw-nl5pKF|{nqC_Nq^nmOC8_y9p8PqDne>2hn?B8)YzjU;EA2Y z3qC8{dR3*NrwvzV>-E~P-9xtB({u5lM13>>eB8wySfZN1)qUNW2gp-E)`zi$X+Yj3 z>w*LdmB6g?a^v1jI=pLLMgyJKV;<*oUe^ba->>3w{@ui>q7*vnN1igE4<5c3-oqMR z(-SaYG*#k7f#NG(3@;wzc~s{)d7{fY)s5WD9Xv;ykY!mZ(>I+* z7zAV2sN9dtMfDD(zt-mUJ?sDXp6@BV1cWI^UK1Fucq-kc%acCo6~1QwGuoZk;SG9| zBHry2SnB_p>K8H#W?a;-nd8AXvDZ4ziOZWuACnDkl@k`^-2x-^d>2a$?O)Vi2H(s# zYw~ZX1T;HPHGlJUg?H0A@B3cwJKxv;8#Y3R@?9DiK)@FadUyt2iV^&Tw>?EJhB?yS^ubT-5lc)Zx_iF zc0~V=!S(?H*s^7$J{jDE(38SWdH^|$!2m@QNtq^Dq*xIX#U&hZT;v2Yq)1H(NiGce zqJv5lEL$2`=@O>QnKA!q)}+z#rp^UBd&1PXYZ$X(1#RIf1>%>qX*rrQrDh`P)2T|T z0@KuMpwz8g6=dt`6>L?pWXqa8i(#Ss$EHpDt?t*bd&{vHZpP{uU7;cTZ_#$&KIwx9t zmSuJ$jsSiZpiZvw=oxa&$v9+@j|Jz2ki#h{Ba_L=$kTC7K1n2vR4&;8eBhC%9hae; z(j}N)c2`SC-_=)9L-Tcl9-D2hiKcz%?f2h~blO>`en1LHia=Wy6@x$+o|s`-Y>{Z_ zEk+f3A%%`UYAB&0S~%5JYI*33q#&ZGjESD^l_FqLv`A`07VAURk7ZNeVmckWRh;CqmnO`JH%u8Wg6NUvd}Tvu~>T zX18&AD<}U*1>i~Ut>)%=r=IB61nr2R9F<;Hk!s49phD2g)S--0x~YWKMDy>y&>*_h z!2McS-lcEJ3-G}O%h2hkpN6_(6sTT|v8hL@%Id1?!V09Vc?LUUYN?i7tgkAkT&!ur zkqq<7#G=ORLD7Y|VYNC_H=ZS8UXqJ5Wp+ERo8Z2S^wHrio#w|*KMi${0}_}poK7VZ zan^n&tjl!3l#odY#E?if!8wTCu)_j-9r2{mcu_^%a?ee-#dhC~ciwvM&3E5?qb!&W zqGqfy;iqcxQpY+j9&6*eip&v2K>}`a;gruTGv=DN)>6#Gz3e&WHJ4P|+MKbDF68_vK!HKgx~af9OB$}p-jw1EtEqhJ52 zYIDChBF;wo<6jGL5*h*Ok%cVro&oLmM+Eu`f`l9n10zTTi2MdwL&{Q{H0T|PfhvPz z)0%m7XOa<`uy&mMQnN5ALKTX#cYG9HDIJ#+l6Y-CIPk*M7_mQ?NG?Dsz|{?INy>ML ztvp!aAo6?(y%HXXlZx8F`l1*HuB3U0V&H>rR|bgChpQE=nQ{wPPoK*o-miKRU)W6FH`XpSQjWFQk5I)FY&Nh@Jl zA(6w#WoZe6Ml41VOLRl68HjoJ`JpH}xychQjg)ufXh%`m5m<_=G#nd)Nl*V;OI*z| zrA5o(o!S+{ULJ9JGPLL?G17?*kU%^ayIABOO;)5lHS*i8VFafJ919Cs0DC;(>TjihRpe@QsBDY^GjmJEunskDZ^)wEKKnhi0&=^V%U=)MdPo1b_EVr%qANjr z1=zfvx2{_NLn8Oez!XFdaqkTld*2evM0zN_qN(p3;6OqO9jy|?AX{n5~B=m zEeanziOPZ$1DBn!g(K2m0MLz^cT)^1hFd=}ETV9-;P7IGYt3nL6RXdC1uR}nhAO}U ztI@os{Eo|pSiCrVUsbMfZ@k%Kec7>Qx!y3V7nTR*@0r~znMV?F&Ip#+B_|tM$=-C6R@QQb z|LoseNp5Z_9-mSTEg126F?=VM8>vt{)u@V3n`z+bbzi|wa;E?DxxF23ZfQJR+isf2 zhyHP??Z#Y-F>|+FMXnlaTx52dwT%_yZs8)rIG6gQ${*zz4oU+{QsR}%62vb?T4h-I zs?@)K1w(0{?dA+OFtF{VcD1i<=7u1L0ds7cAoLK1GROI5bS|*D8;aoP;rXKLhVV%G z4CpJ%n?KF2@-U|14)G5f8qSeVTw&rY;XH2SG#rX?T&a!lzM{`o9DUf9fk0g@F^{8(@cJs!g~}A zfzY{%jek0+>)a_egIaQh$9S8QPrS=NuJNO4bBk1f;&lHF;!r13j6#mvb+x(VH8foH zrw^-jqo7SMzP{n<^P(C`KZe%pUT-#dy&JxmWctDL)`$gVv*^UQ*kWehK7lLa*fc_< zyR6wjtRUMzvgg-QVl+-yHUZ#%Njxgsf;lt(^4Ps}?`u}s;;uLQo8EK`T#zlz+f`=V z-EQ*}c>i+WJO2F-fP(jT@peaU$2xu@fcoW9qVfL=!jNjdMR}XoYP$t_+oWmyhD}Ts zKCy>B5~zV)C1Z$JdssASH79CTgn}tJaw%v$S3DUVCTU?3`h(QxJIpDRx#Fj&;)@l*l?<_YMl3hu}4&qH)cN5)qbR*R} z3OIk}7EGxahIvMK@rMB5*B9a^U#F-RVz~bTeR6(Jh+(3|QbWj#zW9r^l!c<8M**l& zWcWhpmOZLyi(?2`eFutY7!zNpgi6SUCq)24=!POFT;oJsM|FYIq;MI?Rk6p1CkTR` zS6e1#h?MqJaRUdja0Xn!1*z3OZlGiIsE_-Y3abDMl(3Kb7z?RzJA)8zUsqCpKw2)v zKfCj54MAljpaXrdkh@Y3mq=a))dZ}Cf1db>dlZdsVsy}F6K5tS-kjew>I9n?_Q zD1E1~2N$GH_lQ32lL}RZmE?q#ZqWag_c(BAezu{*?X>5dx?g7ufPWM z7&gA}VtV)r0lAMa)(UNK2YTsZw6Jyz@e(o_U=1l?u^4Cf$4WX95h}tWn3y@$cW2+? 
zk@uv0LU&8^j^Uzo41LZ zgM|pA$(pZ1luPMuqj?O&nKenNlw7!XQP`B4xh_!2e@e5BABa9CmWb12osOo6)5&Ow zR)^e4oztm^H9!Zh&<1ol9e3$sdZ~{tMxJp1n6Dt1=%-mCLqdpILJo15*tdKt0Ga=U zNOg%I?$(*lw>Uw^oXsbS1FHWnG(j~pHAJRanh_cbJfxh;=>$R3gasj66sZHBpoTVi zeitQ?6S<*&AblK)ODT$?E2^RuIif&Q7ht2IB&vPDX?)+36~u`(#c7;)hn##@q09ME z&dE>?Mo}8!N>)j6&(vrjhI5wVd4*wUOllYwV+8GKp6HpE>d6J{IhXIb2vn9UFQcHQ zMVS&xqk%UO{X=2>sb+fBebGi=%@}|YS)g~ipi)Q&nrR#sF_MyyAMrtc?gF6_%8Xct zs9{ZzOxqr-W9N1A^(iY7VwP!H9k zr5Za2Dx?;6q5M}#Mr!{%eUPNXfStLTk#eOmeiWXGF`i#qkC)(&c*&(*$_jm{o-P(z z>_?wwntj>$se|_hgJll~5CCHUp1PW=h!&nss;j@ctykbf_*t%a>LITBcWh>+K`N+o zW~lLcs5J_zFjbkCfUluijUf6poS2k3l&^F;t@T=?Yq*m1T9l>=VW^s_qPnWA3MY7w zr|No!6gy$mxPP?jsOE92%u0`JAeWems9ESsi&d#pb0#l(u8~T9L*oQ8p@*4p2Q#Z< z21$={sgIGs30iurUaFT1Fk{V%AK8|vpZYE$$E`_AT~c6)2k>!du&q&xXxQbgWGNV# zx2>q4m2}7kTgm@u=ZdH7YO$f{eUpJ}BGP%hGJ4% zV~$6HH%3%6*Hl#SyK1z;D9mHGcLr+N-e% z3yn$~jrzN%b#QhS8$cMluVUPgC?LRnxd>p%$N!7KvqrpwJiNyHYTGGeX2oJ=#W~2D zvp7qal%R(Q_X;-qp2`ZsF6d|+m`$3z8k6T~lc-HS41&;=Mu8|+FC4_p1c)s+!xQK} ziRb@mnr35pQ+gA}3MmJ!)Kznnn4Q%r7zjF{x@Lbu$u74zsB-tPRNTJeYpL|{p`=>3 zQ!K4`oOeN5jEPlXse`6wjEse=s%l)ubt=cuTcJga&grbq=}bI7qhm|;Zu=>*i@=`s z>;`spl@=$%nzmH^#(G1w!yLDjyY+$NG#IS5M#7a&6}+YO2+6}b$pieCmt3A8Y{ScY z$W$Pg;-rDtw8NE_O&cv--ML%WRe@17PA^=JXylJh1%aoQTN^xMrbf^&h`g%CMH;=s z$5l<##DP1V!Vn13QUI0~I4Y(@xzlS;gjLP=TnRw{uglELSPVns3)NW+n<2{0!+igt z@mkgJf(T)4qF~FY%*ee0JI;w49kQT?ckIq@jV;z3#&b>A1Y50ljn{bn#Cp9aWu(AK z%Xzkov>PVagxyrF@W+5G&zI4U_56?DX13fI!jVUDZBW^0WK<;=(BO!10cQndP+MZ; zVw7hE|CNt)iKSdXvvMiX9gN8u{f)W|+diz&>GVyYM$!Q{WSN%I0r0e>CdBB7RWU71 z?f6xK?2Y0>f*4qxTQtzeJ!*Di$goVM4eTP;En~6p)KM+3;>>P?nUs_|HsEVJY~!zA z4ZcO#-K(hBS(m}@E!LWwBWhi$FQ#K`EerRJ-}$ZI{q5iPZQuVb-~!&#$y)#52cE2a zEi34g+#_7Oh34QHC}|j2RMWZB%d2CF9b3LOu$v2W9bK&9INX?BtXpKz=2%sT+*GSz zX~FedaKHtp4PXzw+J_ycu8j(FkjX7)dAU5>B`sVPSb26x-9U!ZF((D8Y*x$!c|1kS z$`y0Bm%t~^1}_I|-(yCn4CJ%C*(hGiM&9BZ$kT)!*p_YB3B8tNwZRqSpzAHwYFr?0 zt;Nb5hCh@CuJXK@ndgvguF6N>rRu zFz|Y1#YL1hMgE3*NDg~4cSf1U<;i4@Tolwu^^RIpX@(f#92dMw1#r}L+bP$VkpzT+ z+oOJ7qG7wC-^;IHZ4iM#p{p*|MSFuP3-Dy!@D5+s5%0|;F^Gcx;KQ5H8i(dr+vFT4 zyB$ZvOeXRn?{Muo=^C9>`s{)_+;NGg?e|`Cq}}PSEPA-LX(HAOjh(Nk&gZI*c#3?7 zPQL7;7jo%twL*R6np|?4XXd+J$-nOFr%Rp^E$kERrE*}=J(d5JK$Yx3evVr2^<-)0 z(IvYp{N>nc10p;(jL4+o-n&&>P2OtUBCbrlOie|`hg|>c7no??$m9{1l?6;mTl}yF zZ}^9g2i7y?c<2n4T1okpZ~2#x`Ix&@n-9+k`(Vt4R9@-i5NP5c?Rong zRk>AUKOE{=*@0;Ew4UW~o)slUCF#P$;9|akM(yLyF4Pw2`?5>(X%tlc$n>s&^ExlG znZ@B9W9_SN*{F90>W=QKSC&S0^tMOjJUqcZC7v-33xDYem^;T(Z{r4^vo}lZ*I&XA z&b#iP3Xmv1)jp;2Z_D-%KU7|7C(NV(v8qZGDO0AXDpUV(3alw<1Qt})Fd~&SYN#k= zD6wHeDz7fKaplI8FJUE3?iiI)?Zt^{$Sm}!=Iukc z3d>lmTjmw4i5txP1sr$`fF(;DwgFL?24Tf^=q%}37U9{9ZSfvN`?lv@g=7tKRv5Z$ zar1{W@|U_Bmz z8nK)>Tm~^!v-ieK=GK|-ZMBArn~MaA7c_q!`Og3J?cKi*5I`QRGg!*2SDsuws@>_6 zzxVI0SdJ5pATs#kX+SAbiRGv$4(tmCSQx74xQKKL?lLJ3q|c+J_;Q7%GY6RX+bOSMd^Hz(Vr3amSS%7QdP!Pp?2hCqk%YZ8^V)1n>UKrbou27sjC0D#B8zicWy_6V*9gYi%_LN#dp-KRVBdcS{&!g2xU>o5 zB|$#D}bE2C=LGt`|UPeGQs`m@|bS+K=czSB;^e4?CFk%c8t2I(NZ zNenHNrp`r6ZF+zCHv;Fs|Gm!$%qb?s1`-s|CWs@R*}POLwN=LmZF|KVXahI7rA%RU zbIRUqumKo!5Nm?T;2APFE)tf|gc&T{ubdPf#odL4iem!|0T#o4f#xUG>mUbns6!t1 zu!p%yUlrEy7V8x!Morp{r(pjO!=MN$SlA&^Fh*C3(y0h(F1j9PthTX1;V5=4q=>mN z1DXoiB~eU~h(kiuyYL0ijcy!X(^ysqI?i!weZb=(R2csv%5l z2!S*s97Vk$L(&qYa@r_9^0jP^kP9Mk92CR_5baS%16L?3Wk36Yqz#tPpa0&t%5)77 z5q`J{0^>%9v(X`KA%O#`WaTBdQIHGZvEVB7kgBGbkPP$7A=mWsMXNx?na@OFe4cQ) z7xJwlG{mM1vl+t?R?mfJ+@|9i7cBgQ>OAYC927|+#S>AnirymSi-7hgAmQs>jhq+j za5qNF^yXzV;@u=!Ine)s#W4qP!*(q<~^CP-2`Nj{1bAk%i`YY=miG|6)*vD%7xrtsf@KT1c`|&M#LD z-(d72m-5A_vVQ}BKm(&$&O)}cA}J{)P?u61)Mllh6|C>DV_BBf^q0V@TC8*`oxbUG 
z4z~p=cytR*Xu|)Bx6bToaQ8P{;u2S~l8ukAsw%^3ic_2a>gr&@D%P#87I3ixSaG$x zUG8dEYjjemT7SXTcdF!yH&GpZa#V%^Aqx#p{7i$;kk@$mOm-Vu#m|C9&(sQ*uyRCf zfEUWt=_u2{2S%`gEjrm4$gjZ;q)rT$DcSy3*mtN1?RiOC%I0p?!X7Rt8l+aIs5(=s zwuRs|0XJA!m0?u7_3dwAJmcwLz>cFtf%#a=+T>1CjVk^ZcBs46=>B-f)ve|?Pw8AH zH+gRaK&g1w+Ex;|m9&Ri?6iQWD@A_zrT+zRn8jS!)D%~h{{t~<)!aW^g2BNMmfwW) z<>kOW7sLOb9PNf_W6I60tFwM4*oym%QKrrqnn)U+p{L@O5022dZps@B02>(L<`~D9 z&M0^R(BtKL54u@BGFQXnP3g`s)S}+gxL$Z_RtrWr2#E5Ccro57mp2r*c34Ukn`P+* zanb|@^gzK(+A$|oghU#y4A;EuG%q^?$If&e&45{fKG)jU#DqESfit1>@B|h=i(OK9LAXUjv|^p&aIgOR?dma|w%F4%?B7{LjI*cwjTwa&EujQ62}m+2!uLng`hJ*j266 z^*%$!o%H4@wIfJO2;`U}T$v;{^J=%gha)h4=Zl}|b|fG9!gjZ~=iA`YA|2_VYx?L# z|2)xE5QC*V-RY(dE7ZTXde)0|;DD)m>-SE>z>KQ^3YWDEbidB9+ZwLm+IQL0K75P+ zm+gNq^W2r+-?}3o?|GlS%Aec((9iqvy^V49wZDDtcj>okXuKXAAO7)Ap7P~~Jh}g? zkMzy&@Cxtu!stQ&VEWJB{!1vmRRMN-ljF606TobGJ?~ky=wrQRs|kd9pAtZk z64@48!?lOWgPZf4%vnB|LObHSxZpZI=c^&lnmg$uLFPh?hbtz50W<{sxL>0?w-7zg zySLk;K|Yu%MQf%U)WJndKkG9;^sB+gGlI&JKN*Cx_H(}uiou3@v-(>5nC5v;v7Gr}MQ!b0pW&w@KdpOR4hOvE4Zs`MX`D^%2O)`e8ApwI18LZH`K-9 zgTpu^rDIzJtFuEdyf!?HrwdfWZv!{}kvTzpL?4vIX#7F)`-23aMrn-1Y}7{e)48O( zzfa`EaC9?qgvQ3`JfX`xbQDGGV>;newo?q4f(yV@l)Y6PIC^|Ve5AcEoI!PBficX5 zS|me7n75fo!!_K+g#0mIJjPrc#)pJR6U?KA)I+m7CIsWWVsc*;~7E*H!? zuj;p-EJ~r2#{(?Nd=$W=1TZLK30(NcraVlScqLq{$g8x7-5!)0bvrOv3v=Dm*gC`rrP2XhDx)jcDY|9-BI^;~w z3SCa-Y);F1PN%p{+f0(38PE~~HCJrMpajqGtW3*fz!3pEQb^DAM4k2|O&D4PR1iiM zOg+=2PlM!2*4!G-I!k5h$cz%XajG^WT_w3x(j|>hw(P_Qy}!sagtwfx7!1t5q|gfm zMGSq?-yBTm7{vq4!4cI-KSQ!>icEDu(Nt8?rYX;X^aQ6oLs9xFvaGWblptETQ5@w? zr&xsFV@NmX(fkBNnF~?`8`37dMk<9%1y$4rWzt54)ZqW@(n~AODQyu;vr;X^QVZo& zC!|zJoWU=R86+vyQ$^KOO;rw6%~adTGkry(T+=KpJIH%el&CT@RL@nAbIX%rm?1}v zFDC5NPd(Qw&Ct9IRZ$>zygWES;u^&XKU!QwT=XBESQb)mcr^4K%-7&DCDjRf08G zPy$U-+f(jj*sYqgVl~!B*-;+FLOb=(!gE$>wSn)_$Vvmw@LNl4^;St8*=!YAa1B@T zB-i6%7)-4b3{BT8Mb}Cd)tcqjcO}EG(^+~Q*oOb}8bX!Te)ZP`w5Zl%(OvD?f<0KT z_|-afL5Rhs?O53O%(f`8l8FUYH9XX_eAO~tr9`z^cJ0=09ouU?S+Ygju#Lo(W!ttL zKXX;rxQ$sYh0?Sw+hU^uoR!y{O^Jl5S6lPi77W@h^w&a#6FOQghl$svZP|o0})p5-~-ro}+7q}r&hiK)$|!IMuNWypk5 zl6Ax0{OY+5gjv>w-_E01yOrINrQI^D zUE4(k9OT^0^rEn)hdU-VVqqe0l)xVO7}-}sf=N^I8?j$KH+-~25vFy-GK z_F<^_-xLiv0nV4cV_^h7-o_QP1y)#s;Zss;jC{q@#iU>XUL0pMTM?efEw08a{!AS( zpfDC=SR&&w)+3iS<1}8>HP&L1z2Qr3V-rr{1;y0$RbrQn;XIb%xSiwr_2V1PVf_{4 z-L1|-He^~TNldNlSe~Qa zwoG3zR^vfHWn_ipE^g&lhGjN(HF5C$pOM2z0Qcw|UUGf5V$ByJewDK<>r=H^Wot5HJnqd_aK zJ@>n*l$|^aK4Vr+<$2~}7ocTi_Tps5=Y7Uyl=Wviw&QMQ-ztUSVct|ej?zigXMR@Z zH)iHRcIIe)=82~0q@Cu?eCEK#Hd>9WDt4iiDV|CGW|5wtOHRT4Y|Tv$C2bD33*F^7 z_JakETqjs%G(Kf|wr8AX=z9O&>4xrS8=k3N28%h(P?aQRgih%4UFdx7X_)@$hlb`B zl|YG>=10M3me6QM7SF2m=#brFk{;4Y@r4FJ8wq}F!!*lpG6n1<&vw&}ON zX}PZJrl#wq*5%Um>9rN=Dmk7SENG-QXc_Kfy}oOvR_w+8>Cc7gW{&EJcIJyd&mg$! 
z>9pppCSK`X9FVr_fClY73Q#%{XN#S}aP!@8BjMH7V8`(|eu?YEzURE2ZJx$!#y;kx z%WYl`>lW73!Upc(erd%1?71%P+D2~T24rbE8i;mo$)@U_y=uz7Y|CEC>by~AEjwfU z;=dN?L`dCTmI~P<@ACgP@AGCy^xi)2E9H2W?c{#%+m>(m-tDsOW=eKp2^|dK=I)UPDb2HOxAb8@_D0WlWM^AtXVk%Vb~6w7qmy=0ul8$4cx*rTgg13k6nqMcw2MurkCzpXUcYmaHya9sF(Vy z5Bc5GdfwZ5hU5B?cXz2bLt=+`r=Ir&HTsz6_I*ctmX+7<#<8Nl%XkCzx}Wh(YgB6o z_J$Ywz7P7p_xrSWd9}x2Z&i9%e|o1^d|G7u#>aZ94||f2e6XkduK#+=uY9rhZMN@g zmgje*m-(6J{54K?^kmzd_dK}=c)N%6PfhE+$8^0X`qT}4+6VmF7yR6RdC*_`!$^@S(j&=`6eXHmQW5~m3@%?{Xeo06O_eo2 zqRgrD^U@jS-cIRH`YiTE&`mYgMjZzkvO!wWT4Z zD+q!NYE3rca|jntCnkp{|R*jxBpOZPl%B*UoKgcdg#Mf8YNK zj?1@j;KNHGH_Gd{j9txh>2fYTI`!($j|;5p^YUiSIKOM;=+tv}@8-|X9Dj3tdGvJK z+AdD|ZfEvS$BWHBR=@uA;2n>2(O=pPn3sWxHBq2~2o_cdgNQi@VT2Mk2APF|U1%A1 z8@ktChbVyPQD~x-R-%a~t`;9|E9%Cgiz>Dlqj59Rm>hA?-DsVT*Yz?Tk3YI5*LMNd z=b=j*b!1aaMcPN6lT0e<-+lN=`Q%}v!C2yfC{0-F}VM3$ocJ zfepeL=b9AKS*M*9;(4ZqMt*1|4H=AxC5j^unx&yH$~Y*Zi!v%yqVhq?T#f%e;z%iw zJ!0z1FPcu59e{j786pE(aFk@3r!MJds;aKqq<^2Pb?78{wdy6AsDhcRm|=M_W|y^k zm|>d3qA9GLb0X`evJ*1<=AF$x8?CX^>WSHQeX{ygpq_A>l%ldWn%lSFhFk8VlUABz zx;t9hZl;$8S!}4l&g4a@Q{ifBw)FBFS*+r2neV8#zQon91n=rA0|$e;r@RFvd#tk( zQ@pIj(qc?)o*G+Q?X@3+j4hShcI$15g_4`{xSFhduFEg0yRM_`(hRT7?{GWq?UCMo zciuk+)3@KgQ`c7JN<03i>L&eLd%SEte0=1^OOCLw^O}A-)t3_@dgt4Ju66gCbFMw< z)sK%lr~i8}2delOOxwR3GOdFnS4_ zTmY{(y7Gn3fe`#2_aaz93WhF&8gv->I>^EE9q$d8OW*t4SHS=Fl`w^Hn;#2fW}3Re z5P$mPU+-ubFCr1oc*skfaY_cECkak^3WOR1mv*=hmd=9WyC4&rxIrf>@rg@BViKw7 zL7o+E5n8MS7cId>63Xs`VnksW;n%`5=14^`T%!zcX2ZSx4R8%Z+zTP)zs3b|VFw!` z5rqi7K1mRJfsEq#FbGBP9Ws#9GWkk>KtAp zeQC&dO0k#n{ADmtXU;AivzW)srzO?i=1xo5 z+sQb|nX8Zt1S!-h1~Qz13{55FQve)k>Kw_=cDi$(xrC?pR2olup0sqK8UsyJ#ZP12 zRGS23N1q zRd0Y58(#hBQIb|t>!Gxs-C1c$ZOPJ>!nLh0RjW)XXT+T1^q8pFD^8cR)1Lwss6jO- zQ)yw?!y+}Yid8CO8>>~04&$hWT_|OriZo}g0g<0RHam(Ihm?HZ-JbpDlLn&Md(1ot2|gQd%MwIv8GoT(1A~D zADCPFzBR7*-EV&R8(XvSQ@FtuFeL+5lHwj$xe7L+QK1{%2Uj=3622~WqgvS*6;M0t z;cU}7Ygdv2GNYPGZ)!{Y)vl7Yto3ziOl3RU{^FO#FlMPqrG`pL{+7Uc)v*>u_&)?E zc)<#GFocV2#&JOX@hwnN@baGoFLIXFtO_ z)<|x3>}cx(${1S8lrzlYcq8B4fcevCZ8XLNGMr;**~BGguSer}O5F-u(a|2ld(-?TQo{eo>}iF6yW7CUvBdkDYATc4+~(HejhvhAkMG%0 z9v^v%kc;V&#FEiHp@b~bgj<~a}FNcTvNKJR-Y;Kj)* zuS&0Yk7M6-yKbxWwAV{XZa)8Y-S7MMmjAuV|K4lqC?nbGT|M>XUU$%v*z?Lh#+Oe{ zdc3~}^>FX}+*RBC#phl2+h+Ru(_eJX9Wg+rKCHKQf9l@HU))o6N}#TIK^joz zNeK=946oE+10@es{Z9YR$Y4d;nDafF*byQ5J>mG7U+~b^AXO2|yF^#%!!r=cA*Kn-xt20Bev7gnIVsj+9#q~`!Qd>xM3W2hFh2j9omeFL0Pk~{^38iBQsWEChj9aPKzfd7ARID@Acv=79g&4<2L>TPqocLDPcL9 zV;-7?$wFR+C>VWK3*FZ_F*taVLJ*3BaUG~-lRe9 zk~Bis8DiZvmgE{P-_rCSEiM;^trB>_l2BIJMji$9*;{-`*UA*w@=3%y8KE|L{{wCF>qdLl@X5J&sdFEF7=5iuqa|)v++GKJn=jQThZQm1ptPlGG7LWF2N7x}%0}==tdvfXSyb&Y?fr<2~9Ze$wPF?&oDz zOnTm?Ov0T=cIH@2y#-iYu@Wdu@d5=(1q!sZNO6igY_a0*?(XjH?(Qy&FD%aD?(R_B z-EIHvz4w3boo_bZ%p{p4Gqag<@|}~6kvm5e`TeAb^7{E@Xz$G?8I~X1yLKZY*Ddx> z-`)C9f{ga#7%36-Ozbc=o%)I|2a9!#{@Nr_gbkJ?>QP2$X(!Z@MFt&wdNoc`%4WEh zLd2mk`L?A|lq;*XYRYH=_2(?v65KXAz>g`)ODxmpVLfomTXpI2Q@uU^wo(B-1G72G$ACW;?NrkV}_|+cXt|ddGhYa4PajJmr7j3^nkqfO=$wH0%6!RckCB1>DPMSAuBSSSb{mh$tj9I&Eq?3q@&Pf!!=Nz z1s$(3)abM>k|L=~<63KDueM^rEp0T3?uf?-thCq9a=aE-{h(DnMf`1-BjH^jStca| zvcNGax=5=W8P{&rULOe(=-jvQMCZ>15&ti^eoLHr`g*I)>02xKs|*nulS9U13cb^o z=9=feN+=RJt;;uC%1&cKfWZfO%8%9;c~WGbuB-_fXfa82VGqMgYC1Vm*5CcSYM{qnMu9 z`P5_5hU9};eGS8XYYmUCUBi+U#dHn(>F;y83+ye123}jwZlJt9226$Ht z^B}1D<%KX>oo*R*3vV4qbX{P3UkhpF7x4=wWuxVb9|wJm#df>=Omzaw%x0W5zmyZI zRIb+v1v$;@9*q77H;^CQ)Vzi{2uxcivQp=@|JwAbL;G+|+%i~0T*ZoD$G&VQ3OXmt zzT%cwu!)?$8QZD+Vl58v(_?m0yHmpclQ-j5XH;yCIy|^Ft{bSowpYg+q&C+xN|&X^B#5zIg{~WWB2twKBe2F zUUPPJX3tmv+Z40Empbt$ITbR$S88E4I5dZorxHg7dwysN_HNWSXKasP569~HgC+Tb 
zZxso=XY6KWfZQhzJK7}My6u78i`d7f?o0t>XittY?nHs}&dU0(g6S^EznY-y*Um$| z)KS(CkxWf`WHdkxfWO=mr< zAL1FGirC)OG?+WOP% z6^z_GlPgWMQN?c=u%kp}7n9AhOYPPOEwBv$5kk{v* ze93PDaXmKpyEkrek~=dNWj_(@j0oXywJ_}jwzhOlbGRpSmII!8?XjRggdJ_cZvEM` z5MwU863d3^$29x;V$hRgi)-aD#@ToGu6Vg1Y1BBjRx|an2omm*BM)C~%krn^aq|r| zUMt*FP!EEbdLxCXJ>{F1x)vU0STrH^gzkX{GflQD_sp^(Nrmb*jSYqY`D%ej5VT?d-hygU2Edmv3ZX8)4S z=CX>{$PDqkjvPp&k4rz)XDPa<>v!Rv`wI4uJDh_K6KYdP=E z`>3Due8>Be&K9<8CU$gd!gL#BPhf9E831r}zp_6AwLe7erIU8=B(%28B@ zl|98zs~U6>XNv2H?PWLK`y3rAFK!+h4Y@y~ zyb*8xD$lbqfwDUek-BaAeB_jj+pq{Er9HbZQoXD{8&Mwo@ue-QX=K#lE z|A|L+x630sX!XE*V?e|-^!cfH>j>-?4t`&I4Kbfck*?W11Z8&LoMwAWS3#YCpa;N= zBI9X}4s5AnPj$wzR2^?#KPOzD_l1dN0rj}Vx#+%Lk`s6VpvgA+fdAzc+C2^l7H}0FI(q4@C1&G#AuzyJf><3DRn)Sb zOnaZfYQ{s5Nwly{a98P5F5yHk_SmplQzK|vpBuQ2K<)F0xtpIGtgN`}9-ko7-qBpI z&dva1VB-h^&2BLC(fUrPFxn|oy>VYBW7^54xCK*c$n>N4{n^9w)~Q|e%ZoO<8ms%D zH^49F+if57Y4i)Q#q*5qW#8J%Wm4xrnE$6w7h9(K2_d&R8Li4~VL(j8msJd0(q%8+fucobzdu7F&(kB zNm;WAS)Y>__)Tta

    vlB*%%61tjVTH%jpA413q$x?e-{w!t@6z`OTUN2k+}>-Zp2 zvGyA%AO&i49}bZUrhdpj`)&YFvHg8^eBjdEM7JjGvTpNAzx~m_WKbT`x(40eEGK5= zdYJ~i%!dY2^Zphi&hMj0IiuPo0`OR~tp?*huJMsNz~@#o2{0s_lXLka{0d*N#R^o& zBJ{r3Z|}Z-;ER5NzF0q}+WlC#^ZHcQY@+Th5l<)H;(1SSD-#D9f{Ezs%<%1^N8!V^ zNp_<3uKo4o^p;I2>(34mOvEb#rl;*X;1#zv%Kzm~{S3t_(SW3Gd(Y85T9z$iY1m69_pCYMc!-BScmZ8@d>P*{t>!>Bu2lM7TyFZC%o7Y65JU*Q{x%k&W$-)5? z*1Qi0rww6_gd?gyJw6D$1ysXiA*km?LNDU)R9&3a*P%g&PF8Y8WH4u&_C`@{+}?UM z2xd!hj#&9$Vx{wnk0+oP*nCsusBc^NjjiFnyKhC4Kt>xVcq(CV+1GbAdKt;|b%`caFG^h72KGHRS2t;=N;~1vKXa+^La<%wPA% z&s1IRUYs9Zq?_zPyf5Pb&6}o#;DXO*tflMGiFZ{UMOmBDHJKm>m_~Mit(@NGuEdxd zun?FJm7mUd4EMe&&1;f-C^@=KIAb+k#)0Ydx&OZ00%CSr#XJ15L1v^@nEI2(bdCR+`rZMnst!BVpZGrgBjyK7!eN+&Ei2&xG3Rnyf#0 zE8@B1!>+lAs+R*yR3gAsm&hNdCe$6tYN`_6? zXW$a=ChAlCBhc$JZk{~)5!CAJwhCPjOO^WwLG$FFi$XHvUE&FDos5}N8_QHkB1k@> zaaqe3L7=tq{PEGyeE}y*2*j%R(&`swhBEIiOprZU9+27i7u_RJ7E84$tr*{1Z|c~`*uJ%Qj|k0&Z2g!~kTROUm*s~9>wTqF zZ15&!xwgL!G91K-iciTTl(^c!hW3P|FHkT z|H}QBgq7ywgmH4Zv2*;fBD&wAb}Z)eEU7;&lKZ}a3>=b-4_70?zf{Es=^p~){2%B*EP>VX;l$wM;Qoyh;sck%119l> zL-m0Rf%`{>_@JV~fx~^^V6Fd4eNbV{|D-VFAE3hFeD_6#asJDMReb&rzz|qgoe%2w zH#q+TKJ@<=srwE656y>O_xpeFFZ(~T{%@U5tpAI{ay#Gtw@e+Z)&Ge5KXw1h`A5Q9 z{f!RG{V!4nhZ4|@kRZTWf`H`Wg93vAe{LA`_@nd)aQ}mDu4(~JK_A2ff2{u+pu8J9 z0f7@{F_-~is)zA2x-mw6bENoV-TPqW2q2EYVVl6Akp8#l|7q_3w)^H^>;Ibm|9c1< z(LXKt{!>I9Of@h*4EdKk^RFC?fNAmHsQ!b%@PF_>-NV%X9}E0X)9gO}|5x{@aQ`7; zDZ+o?KUx1V!oM>AauLAjK5#lM|1+9S3p+O)2TTz%9bEqu@t?HS|DQ+v^AtF)>VK{Q za~wpN@BGhKVGhX033D`L-065uMxwyHx`1pAK$nx@X^ZwBW0C<1%|K<}C;pgY$7Zc`}78VfT6A>7y2V8{11eLMg9njz%C(CQDHGL5iv1QF$oDVIXQ805phXT2?-d8OUTGcNlHjb ziAzaINJ+~`DJaNDOUlSd$-+QRUXGJpPDVmbPDV~$O)e}_o}Nx#fJa_lM&ajA1$kKo z1zAN!IYlL9MO9Trbqys&IVB|pWnlpoWknS=bu~3L4M`CV4K)o-4Gm2#Eky|}ZB1=$ zZ5>@*o#+@{DoWj747xhndW;{JVhDL^lCMHHK97gJL#;lws99*UbTBat3 z=DLdJ78VvtgUTqZ2A;ETbb`It`4i5HCPL9ql&My9b?v@(v zZf+ieyk1`JUS3{4Hky8Zz5xOLfl>lN_S!*#{=vaPVWGj1;h`~+;W5$CvC87{_WJSh z2?_BriAl*R$w{fHDXFPxY3Ugm>QWh*Sy@@x+1WX{xp}#H`T6RS`PTaRdHDqeg@uJh zg+(QWCBUMh;-d1(Vqir{Noi>*u(YD8tQ7+McU4qWR8>_~S69_mSJ&3m)YaD3fpla+ zwe=uSeSJN+zPy&NwZ>ro(BQ!E(BSa! 
z;K=aM=*ZCM=zms+WP9o_SW{%(b4tw)yvDv>+9=3&;NfeR4CldPIv!-Jh^`kHkHOe zwQfl;_6M8$pf zt@WVd@$BEO$X~_kVCS8PI8td2)s;@C$mEMjiq@CSMyi0!+M4Tu%9Yxhp`K(i-vi&8 zFqI(b<^?QcnsfBj)8Elz6470P7nZ?Q+Km3sU06l;`s2c*@1B?Umh40B&p6eUj-RN-m73eS#GpHqU&GIIJ57VE+wEN z{A%meA5H*m^U&mSp4b{pm$J@sJX`>2uWPks-JPv=`$LJO+gl&5_Wzb@6SeU?-JNZA z&yQ)gzC2y8k{f?(ZGgT$M_v(eAR?P=_@EFsF0)|pUyYcR2rVp{X33h21ST6crv4(l z-25Ay;7E~YL=eQT7fS9IG91QFb~B@(6RR-CV?1d{qUizq8Wx%Rc0XU6V*{xmO5l`2 zB`S=pNkt#bd_Wpb6-Pl9hA*X(81KBvbI>rPN95S z%0W?vQyOuCY%}UsiGE^QSZT?a%T^iN>y188s1GW(T`n9)pQ0Oh5<8)vc6)e`sBT0_ zQDcGy<74LmD-DpH%%FctbvUtWh#dDMx7rF4Ydgx>>f!N#JlX+A0< zFlEPM0x~p>gE=hw|9Vju5BMzQS7d$*UO5>eO)j^~r{@ZzOJ%V4sPX>6ENl!(q6wRii{58ZThMJ}vcc$(qMgaj54+e80x4zghbH7>!XyHq8Tg~Wq z?~L1F+J=+ML7yTTwqcTa%ez%Gadps$Njmi8ZiHE$yJie_k5xY&LX^cmDgD{g0isRW zRu_%%!DD`>GVl9Mtb((Zqqf;`S^cKQ2fF)%-qY2)rSkEU+oS64%)4WUt*y&#Ue0or| zTBneRe0LUNJuYW~E0#|Oe%3wfK<7nbwAX(8_!GZM3l3P?YsrrYZO0h$SdVQJ6ZUuX zx&cu2>C7FWM;FssrQ;jjc<_B~PAL?7pabdig>Uxqdqj@@>Yq~8^wO!n?mGQ%cP@P2 zaj+Gi(MVNa6FW%oFJhL&^>i}sdwLCs&E(mA{G^Rkvj3wkYXBj$V5&)EuYj7H$8Od!Zaq)nc0Xax>Ufn&n^RGkoDc1Lj?3u zE;%G)jrhZ0k2Y(qZfv6C&NKgq!8cRWkFrICEPwjc>>@dLR(RYEQb5((Qn zyKIK4gT)I1IS1LGtV7XNK3Jnz>Vbj)lcgo7)-@RKYq`QZ_c0r%>*{Pt3z0=ik;2i9M#{kQ>5ie5!8NMb z2#4<3V{G{`xh@7e_zK2#wTY=EaLa5t(Bfc8Z62PbbyJhpvJHLh4E{E|rNYeGn@fA+ zRi?3?021L9lf|D_pNs^tdj=o-)NR}^CeVA*w)wE4g#R&htd)ND_N$1l|v++iI ztkCWN4ii%%rR7`~ihe(1EH=XN>mJi;;0G&iQ0B-UZ}#1|t5i%jN@NCqdIRhsw>D8a zVw09DZ8+>?NQqXMw0N+wFXh#YXYsoqGrI3+=nr7r$WVx`9vh{T2mN6P;SOo>{H%Z_ zOAfijUrfNm99@Ksx@fr>S3g^CG$v~S$tqg8#AApefqlfFMjMq)S|49E0xn(n25DQ< zyV)I@GI`HEMHDrSsx?rBA&F^ zZGOhR?n#%heSg*b_uT6) zaYyn}cY4`}0Q!ch-Miq}^D5mh^VPXywN>AIoUkbEhN0~F?d6On;oRmaJ3qtgRm6wP zt%J{QLhfUL*y!hNvgn4vOlNBei2O(jh!bSEbDT1cH^aPV*qMoH7C`Qiv~tRVqK;>k zL;ZF0PjJghbe6-8iOZJNb7Pgjt3wIG;u-R`^>sq{O}Y8Yj=a2o#rEYvdbpb?^4%MK z8%Lc~-aHWVV%pNI;&zVv>xsne{mOxsbIbbI>sI;ewal^ol5~wj`RuTvzfIzvB(H9+ z2b@u)+DJ86MK&-H(w?=z&~uP|O!yD!A{!B?r*w$bg^EK!d+o0%FXTAr}?Z1)!s z)}BBqJkXOX%ctdKrn|6c@$*1{S-*T1hNiMWv}2aXj&y`k*(h|c3hL1u?e!>W==p4H zf~exQFfS?5?};|xta9sFx#z?2!(|IsXIt4z!`Z7jS+(sBK^MhUY{R!H5BbM{k;8=k zeBm!{V`H8`hcRVOxO+b+ijP&lA3_R%46L~{?}m-)MNt0{huv2XY^|@vf`{rYR_CRI z;&H*|${LJB(&&42_x2{)DObqzBb)!@o*zSFVDnG6$>6|ZWq*_sXZ8|_ff;9Rc5ALi z9Ds{lqLNFnuuhu4`wtUbk;VY)IWNft_q0#0ejRwlx2k7gA>BrARg(}2B!7*@kZF`4 zX6Sv$4*$=^1|N=)Ak%vv^MN2mHkaQers{0c9SJV`(Uk@*e;MP(zBz*VYJA@*trs#X<=ZFM@(BymPfd!mC zLl%mgppkt=5kKEK5oCtMNLSQI_MqUi`LN#Y$h&@nd1fCnQXiWhD=n9(KWyR6E++IK z`Sj%20H%nZg~%%vZ@=JZLCd=>efIxPIg}i_z2vac9Qz=y%k8c`@js;qRav zQQ*e-9SS`qkyy+x2@?*nCnBL|13vvBp-iO-)y~>vhH)}qg5{b_TLqD(KAS;J@Mu#b z2^$lC4I+6{#ETyQGJ@j|@YFd)qjBnwmg#fCDo26=6;s z#nYi@iGio|HW1Q`4Ynj`3@c+tU13i$XyOZ`U^IG2;$BQLe;ezj6wBX~+T@gKP!ijQ z9DVaMn()D=A32yzRG-g8|EeePf-P+BGphDM8UkB-#Fw}H1$r38A!)8jpOjNO4pQrp zl40RWz}VmZZJJ|fvV`dGLQ3)L#!&u&WG_>mti!vi=FiG1ai8VFJ?k?4E-->e~!}KNts*f|&XhFMYw|kel%#p&*=Aoo&Z$5|2 zkec31sUr6?e6NW=wI>UwEGy-0lp@C)$FzYq5=W*n-rtr61hAIRkol!WV9r)&1k za@jI$3+U9iMC0Oi6mQg|M5UNKl6!rXLpQkL>esvK!jJelaBnG~Ehx(&Amd(~&(q52 z1eNqh@o%Xsu?a%k%8GWw>JFBQ@Z9e2ols4kz*9uye$ORuX1>EgrgSpNv{k68O_0l>J2Vj4Pxna?!#`9?xE7?^^#$6 z+N$*uBKAc~nGC756KEycPgTreJ^+Hc2z+MbbX4kxXbSPNMWe#m9dLg?C_cTxNxfNE zxVhv>%R{`z;h`ZJzmfE9nZJ5%Y;nqLNWJtxqZ(Hk^+A2KdIkJ-t-d&TgR?aeh-UWG zRQsg@Zm5{#sA+<7h?waB(` zjPO5>L55Oyd%M#+8|R$uj9&12frd zG<@HtyDkM&9khGIKp^N%UsgJKM>=s@IQTsa7Eoady ztI^-LiRR2%Yo-CYod2a5pUe;Iq2y}wB4md~jI~)xxx0q{abpFsN)q3u& zs7pqfPL@>R& z(OcZaSw7UEZXB;M))6x7NrTM(jC8g)9FWnW{4~tWG9F}M)}t{}wK5{3(Nt|Q@`pP4 z(PE6mqI4UWd`eY3)7o|D?z`xk^HSLO%LC%7HssP#RoaS%M?1D9G4`_35N?46Y7MT} z=xI{-8$%xt#29&~o3d4xCo8eM)98h#oeqO?Prn{34}6(OI3D2;m`IM0+HG;9#L)M1 
zAI0zMeCX1)JMyGjBrjPMQ0l8FfEz6y3-H*jtENF%pC5Dh_(l62D zOUi9zRu^PxXZ=-X#n92TN5@9p7L$0U7J}xkTftssNb(y2(x}aEgQuA#4ecsM9wg>V zHR~Klmz+)-Cb%s;cd5Niwxu=b>QeaQs)DB8s-!{H*X8PW<$x2}ddub7@Cp0- z`MK2c!bkT2%f_X}VKvnlzuznPquuyjZ_^mp+7G{H+Oz}^#8Zu`&WyG^SJbDOJE7AB zZl|qmyj1MJi(rTxImrBbxLS8|vX=HCjF!7)MzyM1b+z3y(j&JP>^>D~{yxfbCFbjf zl-$Np>Ntv4BDdCh{Yn#hd(*_b!VGoD293mrs2^ptYvNvi<0@izH2;2k-6ndC{c*}c z{Eal!bvcPl>3S4Ta@3137k=P#5v(sF$k>%I~ zT{U@hM`0wy3|}5k@zWjAIqn@iM_iPzmx}F)l}l&Z?%!vev9;~Y6!gCPu5AW! zamBD5$vAD^6skV^r*vX zpN;6s5^$wpSyR_@1@+tE{Y6iCuJ~Jj> ztf1nQpD<-a?B#ll)Do)sXd1n>l>EpFc`OE;TA}rwJ|2teJ*AD2B=^zBV_vJi!q=Ne z0^lwdF$O+Xbo5|h*CLMLi^R-zsx{zj$_>!tR%x~SNEWb<r?GTxI+OaNDr#hj zg=JyVU_2^w=@xpDmS!Kn`?a}YKN5!sGgw8#MR0pCluKhRqe;MX@=SI>r_;^xSVq&O z*Y@jy6E1#7_ny356jbs%{Xkoc#CCfFsZTz~h_XAwS5KYA?f*A%*j_M4@xou;uIM3h}qyOCN z5N^s`j)+5NCWO}Q>n5(iXy|~PYKkuEHz4W-zgSl)Y0yS&Cs&b6Ug26rXK$Y0Z&M$zxg5SxWJ&=Sq7| z;*&})0EwkhNQ5NPdiu{N&6N3oS$AgLvbx02deY_zM`eguh9uZOztAZ1$BCbQ(7}+m zy2(Sjn{D{~F*?Hmpc&2|6Or~M@cn|GJF@t4gUwABZ*@@&aX)-RBvAM}sNO~WQtvQ zYogJo~K?ytKl79O>4ooC>}PYoEz>TeD(`wZC6Oy(a&8Mw<+z$ zXX`g1ONI?!my#k!?BG3?B{_A0O3cO|Qy0+HIp3gjy1x67#DP?Io7`ByP3`~oaG@Ht z7s`b7Ez)U5PgBDsu{q0iDB#y&=V^6sC$8O*V8`%8YDh85-cF3`@K)}8wKLDxH5m&- z5BTPOtPl@-<;trZUW3+<=&RRHPP=ixnnI zIR23csh76PIJ`;CfQZ0w_>l+0I~bUvG~n_FHI|0d)M1W(FCSpcE1x#*rZ!!hn9Kr2 zGvs1NpQAVa!avIpsTy>x&GE@K2kR08D=Of}*U!oMVES-p{xou)bmmYLJG*%HnPT$# zJ5iZO*k(TkYZULy#QchIa#GC=1!5O-RJ`PqAhogw8z>`92aMtE30QKhf3m`n<*mcr zQg91v$4bku^eb~RzU-I)$9J&O*wYSbioQr@$i>Hn7_0~iQ{@YXU8zN;e?_zZ*<5B& zjp8dvS)5UyPr52^@^_{5$Lhco`-MF4<>Nj-!eRIEI|m?&kcsMtCB-VL$y`Z7Rlyo~ z5$wlE-7mc+>mCzqkI!;E1clV1xM1b1TeP53>_u{k>prBh3-Zu9Q++J5`LtbTXze`$ z{#le>r^DeP^&FJ})Xh=q^G#fm65rw5lc^3)@KsuJfxclynA+GFHc;;qvGS+K+2y6A z@&cL?RmX58MMj0j5HNP+L5H?WEB4V-D^r<2}rdrZij@_oK4jgs$mx|zQ68N zJ13Zj5A-F;SVkmC>3j$KEOvazkXBh`#H_9Q3rmIqrZ!vOkmqrbz)J6Z2TS;PYl)y_ z$3lE%$f&!PeRdPq*3R0DUJ6SES737&AjoE5-0>n0*Hrp-DLJw@@sPOMRPyI~FQqrVDhLh?QZe5bfHj+jp7VhGC?%r8HYf0}?m3HDX>?9sTuc`Zr*3I9BSy z>cpL}CKX+U;3#d}3-b-g5JM6lJ~o(TI;N*uV8pSQzvSeBon%TIycbD2@_=?e8*tT z*eNzXKtr|7NHb4MZ#xd>;@TSDZLE`D)_+f&XyQ}eD0;=7G}~*RG3TY}*=K2m9e#x&W1VxRzMnaK*XHl#z?fYg0qt}2_`;(XexsZ#_ zjp-0?-erEBQ%kG?Pw#E?+%z9XhZC}HKrAv#wYJr^M0~ur_%O$4spe_fW3p<3h{!nb z*}eef+qsuBOWT9xeZf5bhM@(gP}Ay(7Rt2e)C>#5fy8FWU_;8puFa>Ws4S4X=hn!l zvj{@z+grxNV|`nLH6N&$B}94o_{P0;!_gPzi(BNxbzV!?}6 z_Z!2bL~gR<-8SzeI$t~&YYsf~i_DLTr}|G!wI%1jp#ew0w;=)?9&nr)0*8$PC<6jq z3Eg=M0)QTYrxs2kMB1n7eDg^^)@z?{E53K)f;tzinCNkZzk6^O>YDDW%vv(VXWE{o z_>#KomLAx%*lF7RL&#}Z$TPYq6oixt_$e$pXf%N68qKuKpsgWHT!+TFcs_=JF#PVq zrX|R@T#Z;yEF5X)XZ23DcsL_OL7wt%<7WZWkzUj7?voVW(e953&l$oRJ%vN{BY9u2 zvX;{4RLz z_nHy*<`(lx5Gwg7LYEOS7#hMDK+EVYJC+zv$NI}Y<_wa$URaS zu1k1gh*1 ztjo@AdvO<@_bXeYVXMwzJYA9c;5ambIQ~&n_Hui~Pon5LLKZJ^^2Q->LSo{caFnfe z+!vJ2=pf4RT;4n*9;T2{fHScqF$MQkW`#!P17eRMp&VZA#N#yY`}1tQktvZMRK0_O z^I!UPft^Apw}b) z9O3)r4fxy4B>BA``?8G1!yhV_xNIFo!Vd+@pwE(sX&!O%nuXJz<4W6Z4@~EQJxiQ7|?SBRGzehSjcq;QS%NUQ2 zi+z-DuahZIEpx8twewum<(xZ0rF>Pckf5yQ%!d!%F-XI%ND}FU9a&$yh(il985?Res6<)@^E^WGBQ-vr zC>D>(7qw}W@JLsTs+FH;jPIoR-for`O!9TxzG&`R9{L| zZ?u?d8~xHiD%DK$Tbkw<)M|9~#GHwZbUDdUl#)!%@ne0%fDCk6g)XJ6+MFc!IJ7@g zzThi$Ved-uti;*qO35e9i?7<3o|Hhc)vqtSGqfC97<@#I3TZ=-2|ukekmgf`03H53 zZ^7tI;%tg}QI*P=i>GRw1H@lId3MEDQ-dqSePIeVu&`fmMFvK)t1_exUtBj&ev_Ululn4xm2c=npJAK)z%W$3V1?rG(|#&e2h zu0Bb^HGC>vT2&_%8cDqtl$T^?{iwsoym33)iQUFQf>D8Ghu|16wtGB??4A zssD}CH>a8fTD|W@GeSk0#8%>RJgSYP5}09{DKr^xkhibpG+K9R2oRUs-Ve=Q%eC3B zk^cOZLO{N2$tJ4uoq~Cb%~b#W=Q$QS=^!#)_M|PYHAAf;{VAS7+}oucF~CiWFF~bB z$Je#maOH&xv^yDtKS8^xo(Zv$2JtuGo6tESk7B*gQX$leb^cV&v%Kv!a^LIcN`3?g 
zZcuBg4z`q&lkcj6uim={>@baGsg3OgVQJFu|BI1u+F*|QS4i7{-W#yRw#}+Z9GPfS??QN>h*bMWaSi3_iv7VWmLx0F z5GiB+oHklywV&n^u8cTOdi*+)yvJXgbN`sB83izy80OMhuxT9@dmR@i9WE=F)bz>- zfM+O*m1qk!+vaVR%~6sr2HPsdoPv0#u6>#y z<&dt_tbWaBJp>Bd4{X+xe-7@oZ0qCK;U2V^Y0KuE+Tl z0d>%KkLc-o+X-yQsu~Jd&xKHRq+XJCwT{)Djx~zT5BltAt8D0m^Q9YY1lmr|y=+71 ze5b&=&^JkxXWxwRpemzwVa68Q@V{+8K`5r2Vy~SPx^HMwz5<%9M6uFTpVn;GksC

    >4Su(u(t32jV$TZ#(B-?C{9jm0JW! zRdI|FxgNj&WglD1+wXa%$#$f5W-rsd=xROJ3*0tSol>eHfA)(9zVH-qa9??GuSG~e za~4N(b!YHUZ}(D%cX_XsoY(mSqIW{AckE`7DgwmtuyMMz)M(EC2Ge;}PD!-BLC^Y+XV98Mn3TR7S=D%=XmmXS@O+mzrWQFLU?3*HCJ>twd=;D{q6ns+y@9@ z0*4VCXz*Y$Ua)})2LE2O?9g8>OBxBN?PdP zRqNNTD>P)_AlB?x7hB1)J*$@MS+rqGl&l*?##^{mv`#3SW36AcWjRoR3-&_b7Ibm{ zfKgm>uaA-lCNoH2nKI@Am=O=|s^D{G(0wa+jzF4n+_?`#vmWihv{)f|anSwH*>MaW zTEw7fV+RkPKZ1t)-g}!Qm@UeG+!)E(b!zCxVH@U*rLbQ;zkw?!v;2E<=DDe`*~5Jd z7w}))pl|OU_;5+`7bT)kX%eJMt`HvU?;ja}0sc3jfXWbD#n@tnuR9+=9s5} z`I=aw6^Z6*Myl3YTcmAC+ic^d=bd)K4YDVn#F^*aSkDPMD4~5FNhn{@S*JyFE%c@x zo+3t?oO`k{=UaVz7Dwr(BTibLA(=7~$$X>MxS@in8q;8c3o_`cs;ydxDyuj-W&1QAbBU~P% zftE&rbcjmqnZI(GbVPc;nuo}+pLY7*Zj^>9YR4Hf>}hul$G38+ngq4~YOFEK`YNn4 z7j$c`8s?1a&bs2#A<(q+YLmj32$u=5ldQBX)6F)GEYvVQ3vIPkQ+xHb*;1vowph{Y zaJNmm8E(I1mu;K5PqO<~di8#dmT~L0J1|&NzTE+sWa^vmz7z|5_Smoi?00M>9*nS_ z#Cmb(+9Fsi0mP6;K6%(CRX!c%a9e8f$fI&Ra>AvaJUXY6n#}md!veeK=_q3g`kwj8 zmlUfr+kQLEHs9W}&pP-1JMTdc&5|Hska~2|OGmxD^2|H!tn*V-Z8g@^XU+B2*=xV; z*y_^#efLZvnKqqlqaL!`j+TF@lwbck8X6CQ|mEi`T+^F3?Fa%DC z?{TiNoW-c5xgHQ93hT)e?E3UM;g~LUq0&yt)Ds*6Zi)t%y3XrJ_nrunEI+lY847Wi z!WFWRXS;eK41X3p;L&h+F&P9JJhup>u^~m#`ymj6*gWY~?|Mc&BKEYW#7i)ddxRU` zNzzxR^@Xp1Mv|hX#=;m7K9GZj!J^~H);RvfPkv@RV`v}-Ht2wcPW0Q%V%DP`+Gt>l zWFe0gMr6gyEy|7cR>y3BOBcF`8Abqd@llgbEN zSDyqWU<3QthceW68BL`|H+opbt|$hMXboD?>qgFP={UAS1r3a!2D zbuWCGMYMG~=Ol?Rsb%qdS^7YWD;hDSVi)8bEo+F;~* z1)NPq<7%8bKkD@BG-1=&V?6;V_f0eFc2|fwJT(B zBdk#|*shQ~PZv3cpO$Uyu(}hhGmxC#@>2A@@{RA4Rm6w%Eo#5{oh+8E%-{b0x61+E zYiGG<7bp@Ka0afTX({Qk9#1&I5teXqnVMS+M~=3=eIjkXDX08}a|L&eB(XM#$sco- zx%Lxn9Tn_NMDr0PC?XdX{MQJ|p@eYVkHTic20XFCJ`z2mxKEU1&7Bij0@X*pUKaUBJ(=rHjTiGq5` z<}J0UpN4ArQXA?%A(V!gJ_Hn3Pq!ZKjnQmavrVhpCw22NtJ@4ktn2gtt9F#_o2? zs^h#;Fs>zLt6as4Pid9%-po+4y_t;I#O_<)R=@fpf?HkvJg@5o|N6lZp762vlHp)> zxWvu(zO6bZwc1!gX)~x+}(L1w_0@n4ZhQ$NM{|&l0M|wRkZXojJnffHuZc{R^U&ba#~yO z^;d(v_9zWI*%xm1hT~e|>tcHf{-S7OP7$ZO)&bav&#t=9{Q-opxyykrY~BpN+hG*^ zWN>R|zLorMg+IKvecKo8p7}Y(z6(zoIq3CUp2Ce5I=UX&R%_#!<^OEMb>{zhOpTuF zpKdxqC=IOd;h4+jP2gY2E!C~8FV4AIkU5G^SdDwHAU^jsPADTs(?BU>105z!?jPcg<$%%0cVjpcqv}GgUC1T3e9btT!ZgHEE!4-<;i5z4j4&Ep zO3B{S^x_Kqfs91t7lPp~T8}XvW2q#gGBRTv=AZ`_UkBDymWzH9vq&jLH3QETi zm?Izxj1T-xoK+Yi22%}!T=%78Ob(M0;9Ni+oro0W34$1b0%-%Hj8%Buc`aGipg`HDZ-uBi>Xac3j3brd7bmq;!oRI`*C6 z{UrFoU}t*&#>YLPPk~$LDHogJMxpFWQ(k8ELDyeMB|2%HJuS|Oom){#nrW^ZyDec| zu^;BeM_zu&SzZKsd;(jl*DXraa8hJnP9#UV#3GDDAPih{>g8T`5)(d24DT*ycp=qxA`PMV)A7(xDqgPsQ}l2DO3Pp@QMB-Fwts2eN-;Bte=TBUxM#5!uo@g1$C3k`(!-1B023vn3X5zhFX{tq=#TF4xNg!UPOU`FpNG1-} z=VxmFm}j0Nfih)s^`l+J&0Q@e4UUU?LMA2R(K`Ys%kY?Owj2u{ln=6p1a-^>9i{qZ z%FBdkd~v9UIuInVXhSOJLvECI(jH{}A}qA1i)JTZ661{0C?#wQQ`#gQo+luY=Z=QY zN`@n#q~8a<;bUqh^BHLb2nK2_pCZN}Y0~FUBFbnc;m^39>>sXDvlM)!4o^_DiF%5I+-onDW1Y8v{V*GNWz}x z7k2t-p8jQb3K&-W9k&W)#ks1*-Kg9})9^8B0Ld1OVXCHjqo>N*o#|(%re|n9BK*Ps zm{LNFbA4Qa$>VXMPgqu-n0jT9k(+iIPNqP{X%d?Jy_=>;XnrIez@u|j0$SD8Y-W4W^FAg-IZrb%B=ON=lNyjxe}pH z-Y2KZV7xlwyLK3&@{e7SfMud!^KB`)7E+;UP9&Ykxh#+;8lsstEQMJTa9*PNIadYg z+=Tv0hJGv+T&#E$Yt_AFn=WguoGi3%=y;f@pHkh62C83vq)UNT{`BOP@@Uj{RZIFP z-*z8LaAHoG>kHUxqMa37>=v^5ti0Z*A}SDlUZc3v_jne?%dL?%IY4= z{w!!Bm1}GtC^DM2#B5k4-*Q>%H^Jv0Ug|fV-(U)2-nQ%ZE^lalZ@2Uobww8Sm8&zU zshG~(YI+#adWTDr!SChWTeTnXF{Z;J0#f$oAzi4uiQbAbOGmj(@S@tD0ENe@tznt$ z#tO|N#2?&7~I+H0V6Q(DsUvcE%3hX7Q2E3dn9)nstbGT1;Z-`hazc70l4Df=2ogFR&RRV z04R=NRW|Q7>FovsWlcicI*2N{ zT=@X3B8=+b<;g-ZpjMU%9iSEGt^!{~7JmRncCs*zaunBX5{a?)F{ugPWEp?&p_bom z{HXo1G0i6Okg}&QJ3wYy4k$V<48x$4cIzLCFjWp_Y;N%5(Hw8AQCgLg-}Yb-k}0G{ zau9D?tZv?q#f~es8ntrr1DEqh3>g*oN+@&eD2pr~q;nVR?kWra?{gXp|KSy>cera<3*ka-C^iPZ2!L7 
z_tyw+-V>W&LO>o=ETG?{-`6+LXLel^e~%oK|8YsYoqPee30=`sDUvW&sbWO~3Jl`~6+chh54NucD z219h?jo9Wiy zJF7}1RAJj-V~=qFV{WYX6J7s_c;%EOTQO!7dO=%cVH*? zUw`o?l-+alvvf~)Ki{M*JGN-s@zlOH`l+!}hU<5mABkJG8N+f>Cn+E4mIn`>H0365 zqUPeT@bzx^XN$L%PP6$kW0`KVvt=|Etjt^aH`PY}tP?+RN<%m>Dz{!&@o>A>Bxv!2 z)3brIZORVs-QM$0XA^a2cywQN<^&Go8ulFPY>NYBQ|~x(FfonG zQ$Ch(Eh97HMTR2wEMD|@%(WmO*KZ~LW12qbuoYC=5^xku_?1U^ls`GzPPr*hO{5QS zrYoR~?h;R9_!wJ2dE42Se<~bxc&7R^lD4Lp&$)i48xI;Ud6&4it#F<@PH9JJG*gnA z?>V0XAA;KV&1!S~UM}To5z^^UBl9Ew^|lul_@@6ggG2eGgMc~x8gwNrbwt4pnjqffCVlk+aKGxoZ9*E_Et z5TIx7niadC8+(rfY+6Slpmk2bZSs<5JGSQ=wNHC{^_vHf;lyjYwsU%?&zg`B0B(&t z$+x?wg8O+}smdGPIaZEry!ra(XS=d^iO)KZqc(O=OvZ)y!DluX+4z8-v7^QL+{B+f z>334P_IR%K&o6xFVhpE|5OGYQrsI%7T6xFsrITAci(>p;O1y+iaIJBBkYGj01NFE& zmB^z!XP0;Z#e0EP>a3f5R>QsBTlIVU_gB664T7+0133=cu=s8(?!>MHUmC)&v(A7+2{-;M_+Sy z9nQk~DoD54k@u33;gBp1*)S$D43m-?QR2ft z^fmd158$PO$;gE}2=O9GltQ6ebqccM$C4>W7IZ{YWLK#wPxkz|vu4qwL2rH~%XI2l zs9CRW?V1*B*{@^QzI7XSZr!W#7&1kB(pNOUPpdW_nzSh8%ab#A-n{X1>ClOfvVJ{! zr$jx#?oiwbe0cHW$!{{xDN}m&obCDikU|L^@<=48w%QO44r5x$DHa^`V5bKU1Zo9@l$c3Fu8eZA z#SmAV$}S>kAaOw)3YtLw!5vjF3Pq$|6tcs(eqbrEwhlu}4KiZ%tI5FrGE5CYlr-c! z>ZrRcGtQ9v^0?Bp95YPXW-G4DGizCs%{Fth@)6@Gso|{Z(gbt5JnhsIJ3jr~v(G*k zaVpDC3_TRlM9V{Oz4Y9Bk0Sjd#gD&AEu9q800SIQ!H`TeQ6L{rys$zGAvEw*hEkm{ zBNCqU5LOUn-Rg&+dgO7`o(QagM!aBr^;eQ+6j8+mb1i|TAU`Cs*JhI>>@aAXR0>Hb zp}n$DK>z#$Gi}L4^G+|b6O-I<%{2F1HDj9-F+jN!BT#g`9rw>~>%DinN3?1(5=IpT z<==oCRqs(sCC&8zQiCNu7~zKh<1{B!Nv-gph)qltV^by07{QG*3O3dvXC0X-l4Tt6 zQ(a9!K`3W|1)13zcd)fY5SIGM0-{QGa#)&N&g+J$dV5PTC5_~{WH^cTm)@r7lPQ=EYxboG7VIf|&7UJEsCL zxpsaQ%V-U=axT+TD!uKZ3(YoeK9rb5ZhNs#H*2eV=l#q})aZe9u)R=w-m;ZXo_XZ+ zwHEH&rO&MYZiVr-{`yTjhS>0l3vabV!4uDQWBR<;*wf7&hkSjcVeJa#%H{9QW%Xlz zQfK!^-+%QeKeX~FP~r==7y${QsV*@FG*1E-g(y520e8~F6YwZlLErsuYLq~X*Fqzl zLy^sSAza=F*XE))nQeM0tRBO#=dc#OFk{?1U-uA%BN`%XhJ?Ez;wH7jhKPV#?PH%1 zhv+{2@oIANV-<>IFf;yX&VD7^-;iL}1_2IDFJ6<975DNI*b(q8TKtY3*hYp+WCo3J zd*EUkNJ0#%Yj%{;<`&GV%j0m)2f8p;h|A!9G2S55XP5{53! 
zCf?3RvjYw8dynMUI1@)n9g1@!9}re3JIT&Zg7Tg5L<{D^LOGvUu4eRmmyFgqr2CsXJi~Gc;PmXljc28A;3_15!YbMU~mi zlKK&)BxD3l3P{a?z0^_IbfkP{nzwX9lAJfyh~Bi>!%Lb|Q$XDalQs!YcOF%qNrj64 zH^Ya?$`MjUUF0V~Rmm*>y{eY9%Y;JpKv6|aG^JrR=|;&KOgI8>tHwOVM?)$elAd&| zY3rj_A}CfwYOr5tOk{f)*~q^BH9nOjl?;1|&Y*7dusJQPB!Q?&q&`-uk-e%>M~73V zI(2fkT8k*3a!{%gbtPZaDs%9lSFwUMu5UGKSy>y$j;4mKQ&6jFD{9x&-m5QOy{o?# z^Bu;RRud~+*l@2$TuYr3d^kbSV#$fvYzh`a%=M3}`dL)Tw)47>rR@7MTTgpl7P{!e zr)HxX6{&vqB&d6Y0-N@=}QfCo*H0BN@{7+?SDiGwa(PXTBDM=+UM z%D4yytW7IvS#V0%xzSxLM<_Bwq29E*oZGH;H;moveucat?hYxN3SPnD(^#B>U;TpE zm(;P*IqscTAcPP#+WzsPW-`xwhbcV#nnu5E{pB+M3s>X?Sfy%QuaaIWT$(0W!2*DC zU#FWy311jh5S9p*f68UEWLU!;4s&+NJXsIRvsfY?rJC7H;_sSxiMvWs=1kZ+_WGDl zP&4w4hfL%g*Vi-m_2_=nP5#+ep~5QBH2*lo#np#8u27SAOsf8LVla z8rIX|r0$qGT;@`n*_~@vb(=xVYF8_yySG9y4WDY0@`hHo*3q+~_x$JoKJ&O-c!n*Z zdu`|E@J`ax91t*sk}C2^fpe*T(L#jDg&OJTg?@ygv4}lI>{T;uSD?1@5yWyy<8= z+*sJYw!|sUZ7rV)z19Wy#vhCBb)Q?^E%W%uN&fK>dlyA-7J+XTMQ`10)n{)Dc)n*2 zOhM<@Ry!7Wv3cH1;Z4)v2QMR>_6jLXN!zd7G)svg3?KA0-QnBz&)p`{SdD*M+~MXp z`$`USlhd>7U%ye<$?htdO9#GZ(}NL?l!%_w+!Sg2^=e<+?|;Alec;E2d9EAHTctJo z5)6m-;3>kliyt2G*p_;NQknH}KmPGzFZ<*vU-_@!+fHeRD~)Ax_L|TA(4a@_&F$U; z(~73qdB=OAGr=!i_Wh+V2E5=YU3*K<8sZgixY9jBptqc zU;O*S3u*}fAL~~q{{H#@PU9b6`2r9COHScXY7cI}#?%J-u7DACuKFI3f;8d!;?Cc+ zPc*iJ3r1^gT0EH?TG02j4Guv9S-{MSXi2mZ=*135b{_35 zxC_=|&6IE=2W!RKi0}V|@DN)q5lPPT2BiwzZlIXYjWp;e8juMc5EB)R-ylPf?(R7f zZV8tx{JIcd46ZOzkNgJg54O<2`05PN5EhZ{&GG~OJo{r zwgh24xNsZG!zB0);FJu~)`Rxk@ZlIqLR7E~S5Ovh@%9)%7h8-Mg>gytMHKgN4`m4^ zNN%cz#MYP*sgMyGiLi;-4I(WMOMc6Mlm-S0&>F2t1a(j&NAdfB%`v{Q3fs&3?u8Z6 zQNT`u9XHbN_J9@7s9?%)VQ5kwpR5HvCDY>Y9Uf}FpvVrg_9X}H^KW}R4=rv-i&{RV%f9=t@a-`%cG*i(O=|CJg zGxjdiBK3+A;xQI)a@tT%AEC}Kd6F+*z*nlpFb@+k0ih0*GcKwk3=$Jb5}_$QiC1<} z>m2VPvr{{_a{#dr5$X{7w$c>cuKZ3CEQ5_a&}QEZr8R=1EMbdwh-?s8lN;BPBTM4m z-V*Tm&^A4DF?4ete=_BM6FBE@FZb~yh%*N5P>dLJIhnILi7|kTrvFe4I~5T_xsx&} z6PAc_5We69#FPBEk38p11QSptkxl3S9>X+ALxZBnm$;8U(UUD<6R%c;$(D>DZI^9Y5Ks^f zk~B|`4g>JiJ__|b1#BQEK{~mBkf2jiE3voeA|cNUKzCCf*U;&flt7s@v3hVY!P81h zwU&In1gnDYx`0@RMNGr= zE9B!0mz7!15D5@b~#%`HTIZEzY0?s! 
zwPj(p47(s^W408|5DK)FWpfq^0XlFKeBO-UJ^=rTO5c1&3`apIoz;l%rUVlMpvtUK01zA_j z$yk=Sige1{a6s#?KnWIx;MU?WXG2m)SoIJf?Y2tWR~Q}DAUTOuBlbE?W@7g@a4YjF zN1_I+QwAtNSdJB{P_ErX2ci^KN6i#}t%Zvqzzjk+crCXSrgmsyH+13k41VEqU-uUj z;Q)SN8?3hp!uDnVF&K6+IA=9j$~^9H+LQ34#3p_pumH>^>SfYX*;zEQuhtIc!E85 zY@;tNZY>A97ksf5VBgRld$MgW;z(U0FiUsjso<7ELvc)@jOn>UxwIDm`OYtlC9^3uVClYE;rRO>Hrz^8oBnN*UX zRvN_L>M@V>IB)y7SK-%>%gDoo27>u^kRi_@`S+0hm!OGrpyiA#PHsRkt)6$#V`*Ym zI}#=dh?R~g8Ouuqes-J9U>l^?bEnspb%7L`){Jjgmd_x1p*U7}&S$V4ofSQ9BdY>#@Sve12T6L3iSCHU8wxpZ~6r<+^3e^_>%8p3CIwL7|+BLz>Jn_Y1@inm!_a~6pI zJGY~Q8JM-X3#!$amAa|RRX!Hshmcrvsd|XRI+NprnzcBr3y)^Q*v5T($h|pT>y-jz zYFf41rv8bZ96Msfi7HeA2kn}C(06ZeMN>+wYox`zPG@2DT36khqDi$5*Ef*md$0|< zzQOeV=*njtxndx?My#refIzYd*D<{U28@+NBv^HM_l7M2#KRhjp}-O>;S7Y?5fuG% z#kjU(*<5XRiM4fzi5iKIx2wSwXd%FbbvUTkU}ljx(q9$;pm%kJIu1lvUA1~?70hT^ zmuqp7nrW)lZ@q@adJOE8sC&J3n;^THJYi;FN~MHBsOt$bXCQJ2i0+(c)_LOp#Ou5x zf>SY!eFaD=r_&&hb194RO1NFk`*zLSynoHy*^dGS&O+V3EH3?gkttT4S^@|2ypl7v zDZ|`HlYMeF8*@3=-~XM40UqFY8PsLnXNk594-3W-t7e=0cx|x~8odpWx)dB82@+jf z30C7`^!!8_$`XyxylR<$DLvYn7UDXT16kmFn2r;^;+ zT*UCOAemyZ6=%tSCf&F9kvR*Zsr2U=xG>o}RoQOD_+42Ap6UVK)T@4T?bX-uvft&} z>tVT#AM8+_v|%h)*GC)!F2cmSxDgbI*4~ZU;>;mz=AsExB0rXqE5_vi2b|^exaIr) z?*TtjUmmn(UIuqmZ^Iqub3SB3&VV2PurKzW-@8MMUIM&e=&L;BJXTh9kOq3j=9#|0 z5uc(5sRC_$?fM`KB7jnsy8sGe4Y;`v3gSMXw*#_Z_PY(Lx%J5jHM!KC(`kQ|genA||K>4rV`lCY-N`d;V-}|v2{J)+r$+Mt6S7wzu17BqTf`h<;1TPdkh|q&VHi;G`vg0Nq zmWg1#RJ@2WBOf_i#ifFkxGbb61HAVXD zDYAtMp)88hh(R=|(ibz9I(-TiX;h|Er&i^HNTt@TbbwAgu>yt-9%Ch7JxQmj2qj*M zd=PO^t<UzYW*_WvK{KdQHB1G0M=aT*MXOJL>SKC^aM2JnE_k z;6DES6pHN0z1MDb{7?d1LR5iZTi^*n!2=HF*MI^MiV%_iZZz%GkwO`$CIN>Yeh8w5 z8;%%ai6jXY(-aDh#8ZndwpbEMfT2}kS31$u14TFP=+#d?Ze&%Ea#clSkwy+_Targc zrG;rTKGIi@BAKO?8Y4DYlL`%X_yLBo;iOYTd0p60S9sy{nwV;q$wg`fB5@dVid?o? zW*g+N*k#T9ve{#u{i4PQf;z^8Xr$?8VH8^$frgEZ0tj1_lGY|xr4>QyrcoR1nU!SqCdhWGZr7bOav%tS8RKDaA9_&y!d?^Z-DBQ-!44~IvHT&h zUwuko3TbR%3AJFG3oiQLgDv2}EJt;f$ZfYGehaSufdM1ev4$fK0_0x4v_Oa42QzW_h0)LHO48q&O0UO)$ps*QzXZ?AQfX2pVCJV(NT zt;sK8aBfqIZqi7mnVlo(VHsx9+}Y=!eQr?5Xn?$%;e=k+NOQZBs;eTI7D@#sDcdOW z$#l-WiZpeMM3yvlthx#{tlm{rUK5sB;zR*XSW-zJNEm?iuMz48;jvtvP3@M|s=erc zZa2%|xI82HY`Dv2ThYLD?L8LXegCb4-+&8#Te&sPjqbkeO8KsgE1}5a-#62nxW1NW zD6rm|BboW;Ql3T_~e89ktR-lMkJB z=yPY;9Y^d?9+=h~%x&4nU0DKm<1avbZ zNU(MkWJ?7vh`|hM5PDhYpa#A0AIPmPU5eA0iyG&+cq!~ggJ?m285 zXu}&SNk~qb(av^eI=(DQbzlmE5d(9g+uhD*+mixLt^h(Mng%Ghu}sKf7CguV4-doR z)1ZL20ndEVF=>$1%(9X(E#0RJ*)tRW957Ia_{njObc8?`2q3=z=uwY{r~n@G2uNbN zpnQWkANtf+zNr17eQJ~=vK|S^3;YU)4I~f;H)u&GM!|waoZuzh1(O20VS!;m+*kI; zKo3F$f?hJ^2unB^BeKzhv#cd2H5tN(9L|-nl%WYJwlTn%YlS(1)-UsQ%o*Mwhs-pA zGoRT^Xi{=4J#^d;T{D{n0;mcEYo#n1GO>mku9^@Op&Q9LMV`&Uih>YL7CE!UE>huT zk--&tB8eZwE}lZ@)wEp6IK0v3>zH%;IN zqsYn_zOt7jw53sFxyhvFayGuan-2t9>YaF0cwm|#8o8(y3fSM zu{;KyBR~)PSWi?!p#_zLWaH4-y;8P30X+ajG4Kn+HUW|wJ%D;@HQ5T3f&d+jS|UC= z(n0|$wj?!aWDo(JR1n8gorpv}UOKp$(lnD*t?B!cnp13sa;Ns{sZYNdS2(y1hC{_F zFQFS<>E6}2xD?advg*tK@Zu{*p$nb$toJ^kkWRd?Sx z3jED)fB%~xJcc&0iM513_nBII9+JUaNquHk;ZDf7qalEyW>_;8_k2xCs}X&uRytS`v@evk3r!M@z9j*#?5PQj-+} zns8g}lq=o z1~)p^tyV1*ET9=Mm~0D}p*4kHUk>KDMCKtSJgfea-j(KC(2#E z2dcZ>!5wb6c`VDCgzhH48)xf6yv|}(*K+55)u-B9K@C^oH8EuP^{UmXtNB$ zTT~I8zh37(uX&APe)FTx{Pi?{`bv~u^cF3J>#Z1>*lQQ^fR4DtZ!hQ(#;lxenaIQnH@(xSGkDc!01Y$n(n(s-|#&i0IS8Gv5TYu_Pf5W1czWtFGJ;s%N z1M{hl@jj+MvqfKzv7(OJRfphJO9L{I4I!E)j9of~G&|a7OGs>IZ3stPZx?0X=3Ic+ zWwv+!Za)=N>NQpSc1~87A~@A|a<_PCrg-Vnc;+O5k{5V`w^ajJDTD_DMu%7prh-1V zg0~`cliT7%?TUq?uyfdSVscK!Eq|7QnkM{RpYfF&3}j>v5WSVR=a zN=bot6Nhe9MqL#6foP^uKL}@^c!7~9iuIO(@fJ+^Hi2%ogKcDrtw?4IcW_8Ri?t~K zi%Ma1D~4#hxLF$}hoYtn0<}=U)@!pyX>CYr?xcf7wFC@!dw~X8tX6!wz=lc)QHM2r 
z9A=HxD0K|BY9;4cW&m@OF(+4;g?)f?(1&X1VT{3;W85bIrUs4%Mo``8g%|(;P+$pS zxO4~A2d&qOd-Pd>gh!2)N38`l{kJ}4hXM0fh?b#G6E+5l2wXX$a0O@+0MvlsrFZ3a ziI_NwK*b0B285-U11E`+DXEex$&x17k|ueI$y5UcNnkZ;lQ&6Vv1fxj$&)>~CqDU; zd-7rcgJVzli|iPVtHmoyseDK|9@vM5JC|9AHHLn)dv9CD~aeTUsPlfnQ0X19^Hmiez%dAsF9udm8WxMCE%5P$5Yy* zi6QZlr^%YD*_y8DlBF4wEs_PbatAm`lW|p(vSusYSdbN!f|>W4?ilaBuUqDKvMe3tR3Y$opq^mibFH)Q7(U&V2rBiBR z=sA1?2~f>&rN)E6b=axw9HyDRG46mki3Nf4PH`I; zmkgDli)xq?YFd%%sT<}{#uYG_fdqK4f3#%>h7?;vpnVfC0d9c*9acg%6OtQDkS8yP zWLjDUiu4C7Dx(Q8tOQ{%#d@sCdZTP*CZid5vAAt)^=>c~TrX;^)QYVK_*0WdR8a@0 zc$ivt+BF>41oCN7G6sI9rG`g$mh?GMy4Iaz8VB-J&vOojn(q*m(d| zxnlIWga8YwddgvEn5Luwopp(Dw z0gs^oCV*115~d`V1>nN6UeiI55m)qzbyugRX&^KM2W6!H$3%I3gx`JD}j^KJmRdf6` zSdQ@@9V;l9s;NO40(gm)qam-(Ga-Zr6o?A!iq& z(dz(N(32x;y(3$_L8-ky*#sx`v}~{)fg%+HxXgARM0losP`|_}aFS?2$;%xkSs1)~t65wC zHg~-f=EYwOGG8plV?4&SD3r9=EELcL!#B0%JF#yZm`Mkyly#niHfgJM24(9td(6ju zEVh3P$bl@#YD>tBHX1*wBOe(pZkspo_rLHuVjA4PfN8+ZX?}YOs*$Xg@k+xGtgjP{ zxGNAJ6idpD`lxW+iz|!5m$8FJdZVBF!lY~e!<`JlvP^PVgpNAw!y~ZEFoVOFD#W$> z#jvN7WNgga`@}t|gBF>Q&D;g)nFpPR8aAu6*|@$7HY<+MdENM7OuP+De6!)KuVbaX zulITrW(I9cjEH4f>PNYXK!jnCbO%*YG=i zM%7kNeY`kf)JdJpRgIk8+lcWsBvY;b)OVoHk#;Nj$#lNwj?^iiTS}(@d~5SMj5F4_ ze0Tt+6J~Tp*9J{Xew@>Jozgn3(oXQu%8IOk9m+MzS=KCcoh6K+O3&T=29@iJAMB49 z*VvdlgSEKVwCsp1jYI!9HWEugi1F{Dw{Zc*?NuF4BgW;N^386 z2COV7M4Lq#YRK-xymvyxIQz|SL5m>}1Gc>Z!SC-9*$FaE6r#s87&DZlixX<#}fGybh zjjS#$(~Wv-{Y}U6ZNp-v*~)tV#$n;w^sV3tUI;a<+N+&u3I5&_PP(zpqM|#$VLKWY z&K4xMC$>Ve#Y0BM3!`k~+ym{*Dej^w&f-vg-4G4YFD~P+T&-GdqutGf78i}@&3--C zS!`|IehYP=x@vU`bR7iW0UIz~T;4R78W3JHYvF04k+?YncmPNn3I@~Z|+?5m;e5dExisA^knKA{V0{w{5 zO6cBZ7Xv}!tUgdH!M!%Oe+KJNnVF#^%$G8)^vZt$;-@ColVn1R}* z`QU4i=JQSLXS&8js1U{e*VpdtG==0`4aJ_+0mO>k%)Ie8B*g(o^4hNQD?jK<(%rm> zI$#)OONoj-J`z#Klz&A zKTq++ZuN{v7fqZ0?JW*v(mt&n&$1L`0nfWO6JmLTK8j8j<1D}RUO(*su0T=0Dc|-b zKRDn%^8kLz01nPVZ}WSv_bBzFqt5ezkE1@{_l0lxeDAw%Rp3O5^as!MkI(c@@9?gv z=xhbNRgd}nF1(uW@5jF9cL(|{+VXv)Hu&^BXCL<>FEDYB?V->5iO%-!#^{D^zBqZ! zD)>jXdH6#gyqvG)e}Bk=FZ@4B_y%9|yMO!u|D%hK$ld|=Ne}ta|Jt@K_X7j*R*n5Y znakGh`k!y@5iMU>Z2E&eBqx7AWB(!KANy|4{&4U9?+^df&h3oO7?}_B_mBVium7zL z<^ZuK&YOV+2_7^k|0kisg$x}cZ20gX6p9cfMzokE#YR%woNNRcQshXIB^zxslQE4- zfh{j$eCaagOqzETYTPKT8P1Yfe(iLUr;kmcLyaCqnpEjhrcH-#t8@ZHi4Ik#vRFa& z#a0_Nx&{FoR;-gsKbxIJn^x^wwr$yZ>hig#!xFA9DG)f+NhhbPZapcdr1HAGc<5-2@wN=Fy!)Q4@=N%} zBy&HT_L@&T``S~kOf%c;ucGY!8123JJ}TPfk4rbkI9D9o0}pu|l;-;*wO7Nibi9HNh}vt%+8I z`0_*=|2eqibtErqO%GTzhs{r}G;_!kS&)9hj@Ud}6%#rz=}h!cYemIVQy#O`wp4Gw z6>&sy$3+##6enGgv`Qyvv0Y3kW0j3}O^M&Es0lDT_BdpbN2V5EY)`fq<&;-07hXqC%``6) zXZ{L67H@X(-BasTv1fUCF0@;H^ELHemlfp~$$?!BxM`-XGZ^8BrCe>{h8uo(W34IX zdh3hp+bLR!p92|*kw<3WQqXXAECxABR#|A4=QdgHxQ`~cZPg<6470U!2Ha-F2EQ9r z|37^O`sca}Uwr6r6HA&^XrFc(R*|E|d}_@j{>y3?uEw~_;c&>co zIp6wJNHJG2U>)s5AdP;c&=MJ-YgiTyj7-ZVHy4ubH5=(!x!LO4P;iZ2T$JfY~& zCnomQDo$lnPjiknKp6C)5}p9$9|Ji^LKf1HheTu|4@n^+7O{%FYb4>6*dr97!HK6) z7xVr$MJYDWT%Wum7e!f^2bS`Sbem)b$EeCOp3#+QRO1>&*v9j~k9=|boE+)6LN>Lq zOw3stqKq9rW(c&smnx>foyA@DTlX3 za)z>;UhJYYr}>vv#)XW+S(O=IsU29((w-}XBQ8~!LO9h?D9XX3>&`Y%|ALmyeUKDq z&=i_bKB6<7jwE0zC7B^h(m@XvttgE$$;oe0(V-JG=R?mqQdE`{fC5rwJKNbxvdMFv zvb-lRT^K(t>GORd;b%W(T9C5U^JxZ1Km&w2)U*+Gld@aE8V(9dk*<@Pk0faX!x>SC z`p87tWCIxvxF@7;l%pPnYDmW!)xMFnsU}rt7zdY9w$@>cUSa7=zvok!wr;1V%co6o zI=WcGR5=R>s!t0FyLAOmP;ZMNQ=eK@gie&PL;Gl2r~0F-W|W(GJL*@#y3SFK)w7)i ztym+=P{}%~t?q1R&EhIo*3$B>{xk|rLw8eL=2Tb#gF;Ysn_0qk|FxkiTdG8bTdl`( zcC^DqF19MGO-uR=ir(}pZ^0SX&vquX)fMe^NxNMzqE@AD)gXmd>(bWR^|i32m~3gA z%k{Q*uXCKMt3pOpqi(jlOjT-PlUqHi{&v4;rPN}niG|}?)3}FcudB_b$@^OcVVU=QQ!>)|!he7Nt5s%o)`$Wn8?uP*_ z<8PNzeu#Y>JK0IjSis$tv41J5;~jTX!Ed(jXpd}UCeqo?|4L4Bg)_Wo;$r2=b={m! 
z!93`$Rk>Fq)=G(exM)T<+R=~BVJV6k<1v?6&6W0_rPF*gNHI67m(zvz)T4&>%2b`|I&YiX-!3bK#hvH0z8bt4loz~YJ!@JE`quBZYhJH* z>D$$tnfJ!GGxKe4d^6kMn%?w>`czdw{}}>3ZE!|KH{lAWH^nCo?uYYy z=O_pIr&5V=SPOXL>~5XMKE4!?1r%6jEP2k4sPl^BndMhMIm~sl+=3$y&Iq@8>_{zV zpZ~nVmPh{am4ErkXP)!V*L=fQ3HLF6 z{%#FCf%bwKz^*)iU1V?1!zD||6fl~*axrVRj)nnwGR5+>%Py|;ySen zTp-5}U--n28|Y&`DoDQNbG`#?J^?g7Je$7n0tD-8wwDn-j-$XP5CMooj_?yd4$H0c z8vyj%tv6e^-TNGQ6TJ94LHaYi!z(=e%fD>0E&{qaoD)GuQa}U5L7LIQ0vx~`Oh5!& zz;PqNzHqy@gTT1!rU|4#t@*3Mpa?Eoy$uARkO7VQg1ms^AKAOT6y(CvaxpVoLHoOq z1Nt;PSUkp>v&!8nA&ApF72W1I#&tp^ORPYOe-Qo<#y6_4q$4va$e zOELCqzY!e5D$>HhgTF28!ojOQ8@#51|8oQ83a~Y#K_3$%d%3|K1jRR$!#S+OQv5^+ zsy^xiuOplhFk}^Jn>iFw z6irk^P81$)ImJ+vMp2wbQZ&VAOft^U##B^Ax6;D~`Nf(8J6L2b3&a_~GCvR;su@&7 zF#NkWx+U9tM|qsYVr)Ujk~aOTw&44}O>9Rx9L0dF#(@+_g0#kKguXSa#BCzDIncvy zWJRgNzM2y~4iu(lY)34Nz>Q3eT%*SptH(#2M_>m}MS!G6gLKJ)Y{`R^ z$y1b@ZPcn{)WdIVtB91uMZ~cZ|5!JRJVYw&zieB{kaWdJ3Pu!6$_i^r&bmp(vjdFs zI+RRFW;{wav?iF0$(QWPnMBC1Bqv3Au&6Ai(j^h(79%dkXDvAmH0O0~o*gvw0IF|y3m z(o3HtkUx7%xQxrt97Lp&6~5!Q(?reGRLy;3r2N{Ih2l$&9LY%XOUj(ez(laY6g$Gq zN^FEo#stpbbj-&@$crPgRlH2K+(SsbO9D!oW*~*=lulik&g!(z>%>k)($4JU&hGTi z?*z~A>Ri-egJPq|e0s&Bd(G;S4uUVY}gR$jnqr zv_vT+!M@J6GJvw%7PZeT-BK=P(dHX4RT9vYdcYYKjCuUY z5ak!$tV<@9ige7yC6&|rqtiOIQ>>d)iQLOcIUc}_%KbYOXnRlHv{Loti;`?J&q~xU z)zU_F)Gl?*{UTEt|IJLD+)O0AQF!_)K@CwiML1iG)2vKS3~kO~%v1BkjfM16+#J*? z#mWUsj3Q%C17lQ3CBR3uRWHR=6b(~vq*OCqrb~S}a#_<&?bKrpIM4`yWlbB$E70;p zRe&qiJJr)~$;VYap0VImHhtALtsqcb)LPwDT_smuCD%Lj)n5hHQ>#%MEsbLB)>Z|; z5QWou4G?d2S5h6-YL!-MCDm)SL{^njZjIMX(@JqI*IXS}g(XggO;}G%*CLGA%A`?y z+*bjD*Mi+vd#$;8MOIPO*J1V4k?mK1J*HdzOKf$nfz{Z-OfxJ^QG|8ag?-qW1ycsY zOkkbZwIsZd|MjnoZCQE!R8Za6kB!!3MO>LQPBB#koh{R`1u5DS*}Os9zja%dg(8nV)wq@0%k|vN zJ=uoT+tKYhKvmiSvfQfu*ZDkLtnJ#ZP2AUQs+@IP%WPbp{YYqK+Dr|Op#9ud)m*qm zTF#x?-zDBuRaKR(%HNsA;Z5D%+E50JS=WW#>b2e<4BJVyT~>@-Q}tOltK6bBUhxgy z&IR4g|HVj+v^-B!UdI~Wk8Q?6y;H!0S?Y~lt-W5^mArJVUC5>1vNhZA72g5gShht! 
zb~IlCPT=B=UZ-te{(IjKY2IT^TN|}U)-_!H#or9(;0^ZDW8z`@P=|&R-42uEl*^ijC0`hE1n5 zVOK<96^7ymU}5NeUIm8X)3qvw#8#S9q9}e}X9U@3n%*%cVj(u;A%0jR-rjE9UL^M6 z?iJQ1Hc(A|NXa=`$jb&M$ zp<-UdR}%i=Y$oGohGuDQT3dv@hl zW=`#Ogpa0WeGX|`j!jHWvu-x9s13FFq*8^B=xnpwEzVZl@GWv~X_uzxJtb+u|D9)M zwr7pzXy*}WNmgPww%A(+XstkLoJ>adWNBbd-$OOa-z(^GNn322=$zhWYTjv9_GzyM zY9KV3tFm2w_Gq*2XMc|4OKzy8o>i*8NxpdLJa$zM@eJa5Yt!B3xn5{@KHh)s!mkGF z!47Ky{LjV(>3uHhv|el7om{_lYo;z{fsI$XhAzwyUEK7WPp<4_{_F4MOTv!0p3dmf zj@{N6YKc8!w7$WTPSPcfY!7wlyjJPHR$am%!`Ws?2(Ia?&g#oPW?bG^CeCM!7VYD% z=fw5K#5QaGWp3^@>Qt8FD~@f+o@}_5Ys&^>+&*Vwc51%X?r#ob$9C;P|3>cfF6`l4 z?&nr*#Aa`7R_l%i?BK3$+P-e=-s}oiNc`?9!W_{;HRks`U(l9sVw7jWc4pH?@Z_Ge z^?vWOo@MCn>Jnz_txiILwr=~@@M-f429A+|*5andq?WBx`K3I?Lxe?$gh^m=L%1l) ztMKW*)MZxX1xIfiZ%pS-?Dl?d+$CEImvMeoY7Ect`>mg6qZRTY@7pXyBKbSH(C{w~$BaA_g~eupakahwuk4cY#N0bO&~+RrhtjBxB#IbY^E||81M*V?KZOeAoAo zFLXn{_UN8<tg1 z_yNaRydHXcH+p;rd3(2ZgKT=N2lxP#dX^9OB({19XGxmBd7Bq*jP?3AG=ZzQ>9R+B zvN!vmXZ8VSC%t_HYH$0=kM_vN+me5Jx;OBE7n+v$d^mJ)hSz79cY4kba=`yt3$-hVa-$Lmj(2(FO zs^<@9=#Txg*XqD;F;c(%QwRCnw|swZc;oN=3oU)%|9@TDwFJ@Ept45)FFk6lw{W@* zy-dz{*iUHss{iTt>lX%k>z8AIAOeI(5P?dtcu}**&>~5bFhSHLaU#Wvn=WFcxJZzm zHVLOZF(UHdz>y`3oJ6Uz2g`ykSGN3N@MTPyG;7+(se_=-o;ZE(1R8Y5O`ArGnmmeh zDM^?!lQxwSCF)d=QV~*xdKIhHs$I8o{VLWg*d!^z)+waO5!<$F=fDXgcP^l}Zri4v zn{_YKr+$^5{W-J-;le@WA+XHP5wr^+nfO|J@-M&ST{{SvLIPv1cI~zy-IW+Ul&YvTm5nb`& z8`S{|)`EtPpoR}EM*JRryv9h65OMUVy{y*0f8S%z9)ECT_tWj;&(D{vQ~3MY0~jEG z0}2=xfo5HiAYE%&(x6*;=@pk+cRd(kfBs<@n11xt$Kid{DaRRTB7#Vwi6J&OVhN;` z)|zQAx@N&_GyYbijmhD7oQ^x@*y9O5q6l4((tRjHhu0P9p?`Dmfu49zk{2a-52451 zdLgxU;fC^Y*`=3XZkc6<|7kg2foB?cAev;Nl@^6~S?K1QZ^dOHn`PoDgq~q4`Q>td zsu<{rf({89iw(dSTZ}Z;mLiXi^>(9=|Bq5SDWHXxnCX#8a@wbS33cS8c~U}2>Ue|v zVda%rifN{+e7+iMo@35RE3C4vndVq(vMJ}B5H2|3ufQUBgt5HdimRtc5~=B+mqJUZ zw1OV0D5H%wx1JgxO)IITm3|wpkj)zD?6aLR%O|MajcTg9Q!+${7^@cBsgbB3S<7f)>&DASl^rSA zt+*$nJg&(Y!@zRHJS_|Id8N|)ZbTdH!N$A;`>QX+55L^A!#?yYbiPA7ETLTot5rvw zb^Y43odP56^TeByd#={ea=kT>|COH!?KRfc zwH!0ddaKJFLu%JNWk!QjiK@Ij|7tZ~>sg5-DF4Xw_)>yWTbvQQe+4({7>*Dd2}UJ%uJp=v*0!p4$*mM@4Pt< zlKb8~+c^gJ5jgK zjeGs2^S=Ay`Ns-8;DLkZ6N3YV051YNL&&dt&RZblyvM-k^#xG-Qd;KfMZpSAEpia- z-RJb@!Sj*sZunCXi~a_`{}ZAxg{(s!7fw<;`@Il`BOG7&K4`E zOJ|7NAoz&bz#}?vZy5`W^{&S~%xy1&(^DW0Ie0@FVsVBtY)%Q?_rEWyP=yJMp%%*+ z!YlI6ip%R?sDwAX0{#$Q=}DduA6UmE-VtcEI|tc+qz035oyUo?k|&?{F2Ow7e_g=P>0kjBq@=IM^k1fZ%{;77c9u6s134| zc2puJ(RfKS(vOjkWFIfV2+3apbA{?r3K_R}%SzfZjmpHK{~Y4UHx{pLAJHHxWqHbL z&hmRq)Sl(^;uARZ{}F>!EFvw{n9O4$6NEC;n+Wmg(fRdrS^V@TN8Q7PX7A6*Q zvz1wB=|Zz9Q<^^YrBXFc3Ek$Sso-IKDQ#rCx*EIWO?6r^W$H|!YSw&}tx4V~sFdKC z5uP?|ltO(dU4!aWq#D(g;f!KHC8*KAo|UW}Rp&=faJsl1$5?Jq>sITzQpX}Rs)SW# zS!)_tfNr!^|2T!F;M_{UgBp&GaZRXQOWV|^R5Y*k+ACjWWwE;gwz8LXX)#}T*hs}q zvB8t4N_#ucdrEe&!nG`BGrOhDzU#A|Rci@_(!)*Z*IS^SoXS?x9^2(SH>G*56`!|4o=!aoW$7^M~TCu zm8*1xTHuS~)w&99a7!@-UfF&)pE^d)gehz(J+&df8Xoe7A1g_)f_Spx4KYb^3|ld! 
z6>a-P|FQAl3*GOMHmDJ8tzPGpU>XNE$$fS3j&uCr9{V`GB`q1<{QyA;i7Rx^;_*;qIi4wz6n z;Nj|J=Q=le%*6%sHMi47ukLxLbL?r0HyC3$d9zs9h4qX{y+Fz~TEtF%bgow>Y1@Hd z($3i-hTn{7RMU*p+wruco2~0G-`Li8j`G^{%-`m&`pSWZ98m+QXn}f}*0l!oe0c3_ zP(vD@zXtaGZpzpqADa^Kev-(J% z|0g=`Xn#82X00v0Ke}#VpG+le&M>l*J?}V+vf?6-c)zzD6*dSt)z4Hh!UO$c?8O>E zv$p8N7kz7UOT4f+pVY-Me$~{SI)xt}ddD$s<$dQo>7y>W&6{Fs9L!ezeCLrLDa~Ntc@}$)-f)rR-zghzte4av6J2P%wfFk45IePQ(p49FZ!yHK6jEcz3JKPyfXtn|G*UP?`v=27-Eo0%0_1z3EYfmYToMJo_Oo z{3RjR_}6}GD0L|BlTXUP{DRU z-tOt%$E6C~tsk^49qYZ{AoW370;=H`+7<$$&OxY78UA1c*5BpLV1kfe1p=JB*^GEN zVSN1`@DXAmh9XzQ-~5T70ig#LNDsuIpt#kX`%NE{C1MPAA0o~m_GL`i$i(9f6h}Z} zB%V;88N@7V;Z8hWytNM;Dq|dul<&pjGeYAi_RYIl+ZzP}8uT6hx#As)q7l{^G1gWW za-lfhkGhdY=V=ugLY@~*VljeaX5d$~VWK9w3){`ny@4Vu4%ih+qqYPcJtbcYt>Tyf z3)H~eEAnF~%Az~sVvnGg3^rsjhLkT-WJS7<9MZ%)YUB;FVcTuu|1z!=8V+JKwqQb* zWSOjsNQH`!jf@GJBsab!76xFW(PGCyG2&M~HCV%B3wLM2ny zrDH`rfbq= zG{PWgcECe2%A#myr1<3+a7#FbCSgL@dg)hcDkgD~N@PuD|7FHzWHKjgF6Sr8q*dM} zUJ77t-X?A`N*66w6}SKj45rTkXL#zIa2BU=uAD>g;pjQ1Gb$&!wdZ;^=W|Y{B7!41 zUZ+iV42vAgZ-VD{ilwzXI9$gb#9+^_9uj5 zpGz>HdFml~bRy{~XoE7Sh?3}tUS)*-XF1xZO;%`T@@I-7QapOmc@k*lvDiN1=z_Lp ziJB;nDyV}}Cx+T*xM*gJUMP%?<)4TlOX?tpqUJ{;XL9OjkNRkcdSjKI=#CvJlETf5 z7^#cCXqfty?fv7WE#`oxZT^DTq5eH!l`8@ zshrNKm}bDEf+?ehh!2{f`F$1Cz2T|2*^@FRtA(nR9_pLss;)-osLrR7VdaD>3aX}R zZAL0&uxhJXs*O70C8FuFGNY~H>W=QJbb>0cLg&OCtFHP9B3s9-7( zBIQqR%929sJoZqnf+)4RC$@H~G+HZEZEL

    WbcmxEAZ2Y8<61>#T~O=uzUj2HC*6 zt70P1B*kmBLM*-3E3O`_wvy__LaM)xE2R+||C1^#!Sd2)dQgC#E3GappYh;aPVBWx z?6j_|#r~?kmMUj5;+_8Mg<{DgdL}TI-)|Wx5e~+l?$*hgtQ$Vyf#zt$vS-V(Y|{cM z#;U4>_J+o8toKpdSAJ|TnoSx4>(Wl@&=zg1E@R4aYSO-})4J@f(w(=;Y}D@SZ&2;H zQjX3t7S>)G*@~*(bR^LV-IJ=Rys9nQDy`S%?bAZ-;ch95v;r(J?kv!PEIe-HO77%R zZY@-<xRQP$nNaYZaLVl zIp8krvMw?R!{>VL?*eb<2CwiAZ}IwWgfAd(@+z-0ScC4S!}CIK^q#}?-frzyZ|+iW z_Db*eN-y&kL+=Iy_#SWglJEGIulb&D`l7G;uJ7`;ulvHU@y0Lv((nA%@B4~x{^IZb z+He2z@BRX?|H|(G6L0}@F7YC80xR$WA8-TT!sb431T!!MJMaWcFa=jI24AiQORhcy z1OOrV1O+kx_y8;b0001g0%-w<4g&)a6%-X48yO!U9tH&1 zEG;lGE-x-GGcqv@2r)7;GB`RiJ2o>kGc`6eH99&s77;f%Ha9+UI5{^tIygEzIXXN% zJ3Tr)IyXE$Iy^l*JU%@=K07`>Jw87^K0rV}Ks`T1KtMr1K|wx2LuNrlL_#bpLP9`7 zLP0`9Kte=9LqtVGN=QUQLPSJCMMXnJPGv?#L`FtKMn*+ONJmFVMMz0UNl8XYN=Qje zOG-*dN=isdN=ZvgNK8ygOiWBoJvdEGN=;2mO-)ZtR9a3=OHNNsPftrvPfbryPES%( zP*6@$D=Se@PEk@(Qcg%xP)$-&P*PJ;Q&dt^R8&+~SXEV3S6EkBEG}7BQ(0SET3T6K zTwPsOOVpMaLmUMJZfQ^oWT2O>?T!@K_jBi|wjE;?tk&uv*l9QE_SWc9cmz9>7mzbKE zv$dI-nVOoLoNiy7oSdDVot>SZowBr^o}ZswQlFonpP-_kpP`|kp`xLoqN1atqN1at zqokvxq@<;#q@|^$rlzH)rlzN-r>Ll>sHmu_si>)`sj8`|tg5N0s;RB3s;jH2tgEc9 ztgEc7tgNkMRjsVAt*x)Eu(GaiVXm#OuCB4Kud%SNuduPSv9PkTu(GnTv9qzXv$D0c zva_|bwY9Xiwzapmwzs#ox3{*qx3{^tx4F5uxw*Kyxxd1?xx2c#yt`>wySlu)y1l%+ zy}Y}>yt}}?yuQ7?zrMY{zP`b}z{0<9V!yt?z`nr1zQMr1!ok48!okDC!NbJD#Kmu5 z#lywM#mC8PTgk`D%E-&i%FNBn&d$uw*w@+Y?(O>e`u_g@00000000000000000000 z00000000002>$@@32a4B6{~{?6BbGBrMn9G>&$xp7B_PF*_Z%8#={V|KXv z_QKq~i~mo2ojmsQ-^Z(8k3M~S_vYijhrcYmePsCS>(5Utn18_l253`Ne4S$8fu`s+ z%7UUah*w?@LReNkhP1WTTNuhU*M@aPh@ew{1t#E#{O!jcW!$8gjfyC?xFU-%mgizQ zG|K3ijko1!-Hzz==$bh?{#ad*LK11@kjxE7T#`&SiR6>dO~<2@P=W>|jx)|jA9yEb zsoj@e$|q)*T8;@OnP*~&rimr?_urc&h6v)EV<}Xif*q^49(Y2}ffdJ1Z&Q7UQbl1`#p-KkY>N@}a3 zrvE7`q+HfYtC^MNdh4vX?&@oqY&z=Zu#Fe+D{eh8iCF>?(CK zTCA{?cKf2GnNAumrsS4;>A9|s8mqdma>{PHP_nA3yrr&6Z>sIWx-Pi9-e>E-zxK;- zueuT(aKT}6tMIl19_w&lb}q}TvwM;ft+W?XTP>oArjo6;!-Ray$ixCX@VMWKYwpS{ zxBRKQ``(Ch%rL(j@4fTloAb{4<_mAlCkGwu!3GPAu*pRqO|;Pq!)ffp5JxSsqIh16 z@w6Iqe65@eldR0pDSNB3&|%N)GP*#g{ja@(uLdp_xQ7mUw*+N-;O(hyic4b@WE$oiR2v{U-{QyN25RO z%12+l_y1d-^aAL=o@wuTsykr!^hQ9;oiBgj`yToD7eNVz?@_ReVEQ)5!Tag%gHLPU z)FuYM_{oocdFmPj9oRnsvaNss93b;t*uZbda7PNvp$-kmx*J;1d=I4H1pk3ZMD=yh zgCR_!`>Ix!3N_-1P|TR{hDSX6sgH#-Y#|MO2)!C&@rN)Z-~ti2!!nMsi(U-k1y`uX zHV#pPZd{`rjflj<*>Qa$q*@70c)}`r%!+gr;ug6W!!&+zkYv*v85e2BMl!O8hK%DJ ziwMRy!jY1bWMdsSiO1cI&_H|SV;{8!NaQ6?kXj65BvXk>8@lp^udLx2A4y9`){TLr z+uIf~DM?GV@|T$m5rPPc9J^p=>4P+p=_nh;fIJKY(vc*=90lVhhw7m81Pdem$T{iiAkN>XnY)SCzG zXhTEVPlrAsCA zsZzD7y}K$_QTVaDBJr(XWu-WQI@Yy2#QzcqVyxK{xQgkLg zcWO_-7WT7V1*>So8raf0FR6)DtWwowTF5>&v~GQEU1jT6%l|qAvzeXZW;xqg*n$?e zh;6G`Co5dlWA~e27W?#bB@Qz7&g@ zkUTyyTwR-FE!RxQBqlJESgT|WBiY3%Ca#4m>*SO`IseL17NK_WN9Px_(Zd}s?v~Na z%~^sO%%elGk*iGTGA|gFcJAax-VEnBmom|iMp&M`%;zGD%5;AgGk*o$~MMmM^C6rFRV)%)WD?-{yEzI2#pt?6`edeDWou%ShL=2iQ|)S9hvsz=S$Vox{2 zyH2*D_nT>Z)w;Wsel?;$t!!RT8`4e&_NjgMXks_nueWY>kE`8HTATaXu8y`Kb=~LX zg4){OZnL(7?dWc+I^M>XBiZb-OoLR~(h?Noxd)E!K&yM)t1c6s$^Gv8lA3FOa^f;4 z&dj||c-V^0_`SE$KmTL`Eq_=;7`Q-kk?$c875_3&;nEFn1IxPE`6{@P0Pb#S_j+~< z?=-?&?rRwlS!`Byapa)&(xuti#F;3;WdoVjoFT}~?V1|m2QApKH&L)%Lf;edt7I+?0*ZK2GZ8i6@tO z-><%fFRVWBUl_dXRlj#U)SmB6Px-JG&v=1lIk@7S^^a&ydETFW?3r&YzNg%I&wqZ~ z!p>>Y>2CKp>HY8`Cp`A89($|L-u0>vKmYc}kMgu^z4E;mT-brX{()n@_{Kl}?`w{J zqHW&i0vUwco&S93XS8_JM}4HnS(6}sP}h1{kbtlU22(%;-$!-d$9fD1dxjTsPKS7f zV16(We=8+$^Y?-5Hze&w4kQSFJa>8RhkT`VWNTLx^X7m52Y@mtdZQSxhhn@(E&MrnmS7CosEVvOj-eAbuvlP( zcyKz_X^iNFiCQ6Sc&Y|3or4G@o10rD0{~!l0W#2(x`VTxspqG zivCEA0U3n_DT~|)c>$)7-Tz2t9LIqR36bGQXcH+E6={(dNf#NpksK*!iuMQ)NRl~F 
zk|ybtn&@{}H;q;~jSJX%Pxq1+_=h)#jmQ>9xA={Vn2@g6lVd469{7+E8HhnSCqqe; zzE~GeXok|4luDU#O!<^jxdVO~m4F$FPRNqE$AsNCl5-e%w#SwHs0n5VmXN8I2H9Fv zSd(aZXlr?yZ7ErA8J9)LF@Z9Pcxi8=wwE|?ny3i_d>NQTNPAbwkF~dhP|1nTSd}Z8 zcm$c7k@=Xsl9rN|lVmiRnyHN=$d;SAWG(2KpBW(;gOmZ-j-AAVFQA&#S)Ebono3xh zsh5>~DTLvtgRf_sD*w3&ycw3}d7hReoTlZCWoev?_-^WXoHy4=%DJ2na+G&DnrZlL zjv$?>X`S*227Z{GO30mn`Inpcci?G?+Nq8*DUQAgp}<+0!#P^BMV3A(pXkY%hC?~^ zIhR$0pUw%Ly4W!N`JZrTas*nSfJcX{d6=81m9@#Bu-TXkGmy!rn*=kV6zZNjnWMD` zq|L=<9}1!#QkQo5oYhw>Z6TN|dV$~Bol**ZFKPx(r+qX^n7W6YI!c^C%A=auqY*kE zW2&LFGhKF3q@*O8P{gMCd2c3KD2(}}Ejp4Y8UqLVqA~h(SUQS$_y@3Qhx&IAU%8uH zmV98Es1NC*4FCD1LK;zWqNZzlEs!t>(U+HS>J{^I7Fv3H3kaZhil?hNsbLFlK7$*NHrb^Z9Nf?$4zN~<{9UbuRh zx{9H`YOC>Trm^K$!%D0XVytfZrYfaA0$psgydv{?qOs-0}%mg-uo1=+DB*sdbWsJ$AbyZWmV2_W~1ugfZ;lo&N5vLXNK ztcD4P)BmXf1}gzNyR!$|vk8jzaNbJF;O) zvhwP&CX2Gdw6gOB(i*g93$#D$vu?|_LaVl^Nwm#~v^5H_ zOS_bk@=~V=K8Dx zO9Gs`wh$|~aND*D`v9Y>uyGr)+{(1CiL~N*rH7}td+Ucu*a(5UiZpkPxLdnkYPGpq zxZ`%RT6$wO!j|986rOUHEd%DzVwBc&E zN&hRG@;IPaXS}hy1`S$gv%0=zH?_J;xW3D~hl{w1Yr73LAuc&_FIl|c`mm-dz5k24 zbUS-b_e7v67vqU1vF8Fe%e{QriPsy!*Q&kKE40ZdjWo-;$BUTUDUE`9hbSqi`-_`{ zySN)#o*^4h^1H$%E1|$^zpFMOQ;>(RSF`|J!8eSSqGBdJFp9@ z1#(-PYWudLE5X`|gy74f23n=4Dg{NzdVso}S$V$sTbCvLpf1cWz01N@i%u_`qu3_H zI*h$e%*IB%#&EpDJ)8t|9HVWxw|R@iI6HXNd%R8j$J8qXR~*HjI>nzGy*`Yf;{Q6Z zVbFvXYq176!jj+@SSz^di^3^9h|m_l>_@*>3&R2#%9S%AZ4Ah!T*`7B$4M-uL`#+9 zDs>}p#AC3-r`)-9D8f<<%d4u#p?bP#fXE-bxiT<#k8FoQ>~y!O7oH5VP^)m7JjR@C zxXVnhFz1f11I|s|12vs_(3}5ctL>&AHt=&u+-K{hN=X*u{{bcwqd`>;EaxHa*a1 zd~FCV48QPe*6h$g{m>9C(cE0r_ju78T+Z1$vFRLs?3|i=+`!wK%ARYTN^PCl*95K% z!bf|z#_ZBxeXdW0$(DS{CmhC^Otm+S)1REvUGkQ~ambbZ%BEz~DX)JAR8 z75&#aJjkY4rwIthO}xt0Y0ih;fN$*1K8VYX+|t;Yxm$hADM_CEoV%dhu?c6;YQ5G0 zUC?d4ia|NoczxP=t=Edp*L_XVHhk1odCxhl*H#_T+YHR4_@r7~*)Oe~*csM^yw7AU zwZrYtq0QFF?baxx*@Y@BAY|HhjoQ+k+C#nCd)?Zu4ZD3v*di&x6#rb^Xv^9uiov?= ztU#>Y`COPD4BVzD+|2F1@J-Wb{n^Rg!tn{O^-DqC#tP6K-P29os!iM2Ex_n)$brnq z-VNTNdf=*C-uFzEcnitw-O;)-OpuR++qdV$Bo}2KHBlO-~9d4|DECi zuHq|x+lYkneU@k$ z>#DSv%qfZQ=yc-wJT$EDqHEz2#hf09fATV264D>m)_`HZpvalssV1pZTrst%eFn6fK5%~hs=yHUf|%m=UWVfX`97< zde3`4#9FuHfX-@)NZ*iT=uw{N_4kM;E)W86?3s@2$-e2!-t5lK?9l$@3DD4*&gnNU z-zrki2p9-n`tZnU+~b5^9G;rJg@Lv z?(k;LvkhzV4k+%$_?I>Q@C<&o=6>!zuDl(b-si2AO%0n*+sy6DxGnGUgdX!=fA2G2 zN!IA^QFHS;zw>Cn>C&zCD(>@JKD`Z{*bI)(>TG*~$FLcX?X)d;a9i|2i_}0)t`77fv;#WD>l!cJ&6$?j`MYkTq8^1;t^EHa^ka!$$kmbxoi_3 zk_*SVjCu2D$7El~pfP5#9mJL$H8RvF75_77xO0c}xcg%7-@%3Z?l@eTZe+=M9~XJd zd9jkvrH>r;ocZ-P8#e5{wA6N?gk?9#Nx=F(MmtY_J_x`(UacT{t4O zBFlo!49}`mt;x7vqY_Fk2cu(<(rQ~|%-T*#&^zS1Q}aLsvrF^MGn=DsI%1+z4m&sV zL=(<6`{Zvw`T`Zy!1)Rc(9l8&{r{6qKp92!(M2P5bec97wA3V8GJSB6$gyKSLZ?PG5bM!OwKWqMc-JrkKhg?RAnfS|DUMC2+ZJ zNLt#1ZL(K945AHsj0zs`s2BgI4{nfoZ_{AbOnAZ)W>AI7YhLuE_d*cLNkKE5)ASx@ zz3bgVhZp3Vgupkr!G#D`8seW2K^8ubx$1vQ44RK7BQ(ymZ-1I2pI*3vmm)qOG?5D+ z05_*Opm`~RTza7Gs>Q&jeI* zHymX3>{Q4cVlR=L;@%H|s4*-Kab!<1TqLuoxD%8tNSC7>idJ(;kG+h20o0=8^zy~^ zHBM=nIb$awh!!*6PnK0$ zNy9}R5}MCc-o7 z#2=0@IV~~b@|Nl3q-$P-&orWjmMO9yWymr?a*d*y$6O{uof%AI3R0LOEM`O<+E8L* zw1x}aC?P#6OB4A4-KnGOKMHHzSXU9g-~5laf(hX0uL%>Xr(cbRWt_NlTIR~bXw%49vTv}AGx9@m;t zjj~IuvxO*H+e+Kmmesbn-R)V`I#Q6%m9D}C?r^oYH%U~6uYP5&`Nm1k^QEeaH_hKW z?HK_PaiyO0d#w9vIk~m;Q=hDBmdOCaP>Wi*AmrND>bABW zysdBh3&-E;x3~Re>wW`V;E)nG!3w^naU1+#i5Y@2%MEOWn`>bVN7uQLOj&jthZU;C z6Iy@~>dLMPBy`G`o!CXC1v>j)gQnOtv7`eVC)gRM2Gm$i)oN)i@ndZ+IKR0avVY?h z+yNgs6LkNO1V{yJU<4mI%1J&jgB=Xv)@FGYTgGyiO&YOBIF-T|CiBIzVwb*psKgw; zZX_A|W*}YFoRtGAViy~R7+BXuR!j{uD+89@{^0)r;kP& z$pq%_q^YCVKuoO4R--hOJ1yyHR+_h)4s|U|eQGbK8k?)O>v3Iu<|>Q`seQ>>14y^*`>Fi&Z@-n+F{1S`{UW_ZIL zRxJODi?vy)T#_{;PUR%+49_nd+lb7TaA^j5&^5A`sz#IX0~4Lz_1bO`=;HRd=_q7% 
zr+m_dAZ;$%O=>B}T;(XI`MPI5WlEzO-uAw$ynW8|d#`%dw5AEa`%PGZ101?ZAD>Q( zn&NQL*fU-*PY&Y?fK6g|MwQhCAo<8f$a`TR$lx=U3H`$gY`z%>0^0xDsOkDkY@8$ol zoxm5K@G1{8!*PU%UHbjyb`QSrb#8d)7Z>N!Pdt}_&iLC%Ui*;8KKJR3X~yi^=pDo)3I*pF>MAE-QyXDJ3>TAB|>%a*7K}L?c?M zHnz*Ljk~zfK)~I*gy(C(xSKl<Ai}1Uw@dAQ(9glsTI#Ll5*q5In=_Bf1r%KBSZpXC4S1M@lrzBn?Fs$ zING}`CPb3Ru_`K@vC%5PwNs2MbUT>{MNu5ZoU27x6FxMEEY>iDTQoy7Ttj!1MSL8= zdz?j6@WoxsMSvX0ymG->GrB!Y#y%`3Bnm`hM7=`vt`e(}_z5&xVx0fCaIZ7+oI&)u zf(ruFs6g$qHWiz?@Vds2TP?0K6H&9rb$rSF+DDmuKv`Ua9$2`WoX2_;K{g~lE&EA- z#KoP|N9H@lfc!^=LP}mF$W8zSr)h_P!C~W>Wt^haYZajCGirIlj$0~CbV3v9ktJlWB3ZOX<4gaZ)IwKON5G^>bV*L2 z>_14bOI9RG#N@}sRLs4pPREqa#iY*3q|D2FO3XCKqU+3JOh(Yu3Q}4>ml`E#tRiU? zw2dUfppvB56ixGtiyS$cuRJ*YT%&2!P1q5;96P`(JkH_l%L3iYepF7N6oouk8cRS7 zlzIbtyw1m*P7BRW?Zi+D)lh+4O77H4g3QeDyfwc$iiGTm^L))`F~6TsPlG!o)i8Q0Xx6m2p5^MGKXMl*WErLsh1 z2~;C}FDNR~PE06KaMZ(O)J9F#$BWcKm`p6KR85UmX$95m^i*oSR`0A#?;KUmltHVg z$`l2s_i;!;gi|L%r+J#qu0%xo%)!$9NYZOUyws<-tUd55RLmJILKW8DER*0|(nUp9 zV^!9jgVaM~yH6chPMuat#a4%1SS__y58ciWbxQwk?N&Dh*Hhg(1<%OeOC4weDM4E+!C7%(v}5Jjo`qKAMA~|sH#(rR5*S)-O<15!TE$&l z#_c<%joOHX+{e|$i#0j`z}TwHS_B}r8iXWt{ah*<+0A`80Rr7$n^7MG)SRI@38vr}Qo!rI^T%~>5=2c#( zectGuUg|YL@T)Pa#oSW;R&RAk&edG>StWMu${%z_|G9x|T&FdCRjf;bB#(khi);;mlil}rYPUJ8cb z3bx?tt=y~4+#0RF3X7@mwas?2K+`cqL^};>vC;UPqWWB2cSV_H3|k*9RKakidoA08 zJ5=UW&;bVC;C_E@(^*wG z8n{*DZA92rT{O~HWr5uqwdBX!)0Rwa#WM5uhU|!@y zR%ApbW+@(KVZPw&je}4*Su3`%NG8qi6<2QpfzVss$QmlrW#O&eTp^s~&-K{27SQ;o&CQ=!o`ThIR&eqCQNO%L;6He+29+eSyU#{M15&ZsQjoF(MlDPSc# z*C7mXbmb%s;Nc^~dY)x`uIYRR)i9)i3H@c8M(CR^XeR!IffnkaK3su5YM?ggr&Q>g zP+P+7i*Al)7X8DC2II9H-)~+vUZdIeJX^1x)4BC(Hh#i&2GWAkJ@`6dmd?c7C6`B- zXP5>l@blp(4Q8J1>7@3#yjz)6ze`q}?clfb*4?BFiwAo*+K&TFDhZo)2X=5FqyhHU6Y>`{2`=tk~1 zWa{w5Q$=i&7!=O~*xpqszw?u5RTb@u-ii_?osX5e%mggVa1GyZ z4ySGgm+l33><{;A%I=nC9_@0CYR*QwtTdve`(zyjThJ5X^R@3wW`e>!Z~B%vjvcD{ zmhW?p*Od+f^}-)ge&@9wn!3c>S2k!5q`)iyQ9IByI~LA+c5o37aV+0*D7FIGG57Xd&fb1|=03;*ymk99RK^*2{;TW|A2mh+!j4LY~;6|dwE&U1lNbXFDCY3uVG zpXT;6?;UmB5i#@;UJ@CH-zj8te9B+D^-sBM=YHjF-A-C(drs4#gJ?6X7U3sn%W5gn*aRV~F zs1_e0gq8nhH*~9m_G1%3+PqhAWOyMxi-T?N)O64F!t^GGyl`KMEck#r0On9X_ouyZ zdcXI5_c2l~g&@P7a7&}>uB)ZS-qNTiU8rms(@ z{+#(1=V*WUJWWJJ_b^1?v(`4t9KI4?HRWx8HrX|H9HVq3L|E~V*pXYg>kNI>T`oceaqNmKG|969*r|zj@J(qT1Px|%7>^?JN z(*6J>f9C;0F!&p&KL4pss7iq%)o@d%Pnsq|ykzkr#!MPBRt)7)6vL3KMv^Sq>Les2Fy0`X<+9+w zmoNp+Y-vzt&73xM?tCTdr>&qudzxBUROG{?5j%3Ues8Tmlt*UesQjl6De(bt+ zsa2+7DT3wt)$CP=UaKzJ6KBnsGWX`zD}dmx3IziM?6pgRLW8?}2NUjVxUgNphXem0 zUVI?IFe+GGe?wo;aDp`LjyM4L0jt5aY86z@;w-hM-yk?A8ogZ|>~*%IzLR zxWGlz`wbo7Z-xH`lVuBUZ8G?G}1> zJbwZW8U#tLLZI?b-c+~;hM_Mx$e(73lW2WB(KMhi1R7Hef(aIA;D7X;l2ut`H8oa* z5C-UAD~8}e!WJ4eC}1uiUbtY0BAWOiibkmjABz=EI2DX9iq#@oHLhhNg)-_W;f*wA zWQcmy(N!016NskSYmPCNn39o+)|g#T9wr(Lt67GbldN$Fno734Mw(`!&8Gi=1!YbK z1DgAh_E!*EDCg#y8kqUQe$E-UU2;n3AVUj%-e%+nO+W!Bq59oso?FMU$)S3JI9kRM z-Vx_!czEg;SE7}^Ra+M874qq$K|c4WA&*EB6^5yngvSo=E%GX?CIL9>i9IREpnyH< zI-`pg()!9GL~yuOiZStWEV8wdsBE&uI-5{@K1!QojMeUlBerMtO6|4Wrezh7-3a|gz#nfivCre<*bSzMM5H=LuOwio{@d!|A=v#Ox5 zI!P)yulS(OHPifSO)@WgE3U?l=+0;*fsa>-`b^|UdB}(nWdTg z?oBsj_>LfP5?ZW%_nTj+OetJ>um>rn9S>^75+a0S{Wb?4%b_7~f@;&lkWd}XiDx~QVot|Qwm|f_PJyM$ol$IOyb)?^b~XVHi=+p= z=S6L4F3ie&sF%FuMbCM|Tj33BXhR*+E?U*YUfHx4#P4~ph%O2r3C1^?^7W5y>w4nc zqG-4ODNc%0B;B3x*RQrIj*GKV(`jV0I4Y8nVHiAI1l)8kbJ2-B3EUv+1SqgO9V}p` z8yu$yH$hBo3{*nHN(n<4g!jDghk=Be3D46N72>dwSu^49d;&C}kPL=A+~I*(cu7p= zu#-L986bri%2A4Ph@~_lDx(sJ@cc!Ie~DlD)TKTE(h~oce3R3=r06g%7Ho2TQ=1pV z7{>XvFE+On<7)o&%DxqlOn};34zkF{1v(Hn)$Ecr*{B7#yv~9M92v+q_%gRR0*Jn% zLl4SHLQ^6#T6_3l3_a;cd46(|(WAuE9yz^C&U2ss)Tbc>ddgIijiBJ-9d0J$%DmNb 
zeeJpuL(Rt;_MuU9O{D1jCYJ$?9@Bhml+6Bq2~lKLv7^c?=>+IxC_d#eiz}NT1?yBf zInFVg2>XrZ#5o?}RB)Vs6eT*|PP`gPMh6w4w0xAHC9AR%n*AmX`n7B}Xy3wnc#yZxL{fS}U5ky~1^- z^_vu;U@BNooeVt}lPMknr`VNBhdK)64maHcR&529dql0?%YGWwss2+$PNQLG8EI9~ za<;Tr^{PO*I?&Y`^hK~EYc69eTaofLuWp5F6|acaXW~_YK9G%J7{=RV?x&+D1=NPX zh|T1_5vO|uCv@}I(&!47bDv}2VpFF@=|nR)J{@NtKc(5ERaUi@^=izH8VgBo*0lD# z=K~kH)cJ;XwC*MCuePdRt#S>utS#td16-}N8B?}vW!!KBb=->-ihFbrD?whDtDuEDlz{}mvg2wHOiae1kPcnn=-#1Dm{@ks!vb( z2K<<}#~x_}kU`j9sSa6H`J^vXi)`N|6FI;B?eCK>`&9u`8C9-=tu7gi;NG(FU>v|@ zfyb1X*wB);eoNVx7bjtY9f8C&2BU(JD_x6@l(#JH<>BZAO%z}AO^*9m2hps$njY|i z*ku`x)w|9(@HonjUJ*P&nPep2x4x2~SQf|vP#N~H(nRxgllS}RQJ;6pt+k#KAM8ZF z?2@EfuJehz{LuuzI?pMV)L-L>>kWk&ad)+?yW*S|y^`5+%grvbyL^Q^0v2@I&Goal zBUnWnvb>~D?RY1xZ6+((CR+e4d$<41Y5ank+xErwe|>D~QwKQJW$jC>*-V+**7}!h zQ#OjdOz*~Ral3C`E|`0bK49;bV814yUHNTnLKkk~ATAG#AUwqH4RUI&oo;kX znbP$Q^+~>c1x=5;oh5H_xh>7{jdyk3?6xw_VlMMjx!a+B;j^&~j#q*=_+D+~ZqD%? zbUl-F#l@}nZfotuw+<6Y{6V&e6?SU@vWYj{ba8Z}9Wu>jyO3M4gAIf#>T9Rg>n8U& zxr_YSk^2ymZWlM&Q?7FUx?J5XfBC8MK69CmFUvPS_RxpE?+XVTxm5@J!1PyQdTVo; z1F!QKflay86n)qk?ji;UjoAM=58=9qXAYtt3$qbR`^GJAkqF0Lce@L@+i7pm+MfdV zv(Mi3uirGMRe$%><9+YG*E{g}P8XZAkIjicKJq^wCw;q`=eKOU@~^2}F&UTe)h8yu zC2Zz+&p-&!E)H{(4=@i$pMxWN`i)Ju^*FeP+9OOoly_hD`NzHfJi(_1pn$5A*S`Op zH1{M#8vjWe-Ra&s0a%s6oB{^l?=hg>4aqDuiJ6FB^i7@Oaaf;d-$WsumOx(xN}sH0 z;0F2{t;t#VB_94j)=PyR)=AUnKv2WE8On$ZbahvYT^)F(mjdnz{K=mW(jV@f7E+nS z73AOkyknN}+L%9fn?_2_Sie9q*A{AsV6- zdK{2Q%M@%>%q8L!QXvvT;{CA-YzP7(MxrLJ90E3=CvG7qI^eii(G|U+!>OSKf}qYt zp7k+Ph^b<-fuPo{j=urjDy~>-%+#KANlwKS=_Oz3^&z1BAz?X+%y^5&9ilQWV-?a~ z5VnMPnGmKu;v_cXCSK!^b;5lF3MNV-Hd0~%rrRfmB00K>IiBMvt{Rl!oEWB}JH8{) zEg0mX;o+U4EcX8y=lw;29o_cf;%q!yn|aP0wpbm&1uq&J`7uyoS;zW)P?a&7`*orf zej@8RqeVJdi|qvdloW|S)_-pu>R^9^ytZE~OuKj&c~}Hua)0j-vurC1o;Wsa>U5R-;%w zm62gbBW(YsNs=Wv{>Rw}qFR>bXtw2BUS{ycB~024UDl;v%HTb&Az&J1ZrY*@>IqschPG=})Vpd`&6K;xU`cSZxiDa#2 z$C2h*0?28mWonM)M`mX4wWNE#2W+NKW4Oj_(&k@!9)9BHZtf?4X5eu7rYn}qUFzqD z6(~~vn;5=o`Qy?6lxA2 znYJlw%BYrRDLQ(g%{`PU<|v-p=8@(npZchu{;3oI>I=F6o?YKXJ)cY(sg4S2HJL{p z;z47ApKiWb9~ER&azXo*lcoaM8hk*8#wkU*sYhm~hdM+Yh+Rlv8i~3n0-EWG`pg($ zKp9*_HP$Ly`XHRTCa6~EOUCB9Obm|Z>3|X_u@)<%{%CTU7%LjsmR%mOCZ@0=>HT0% z$w(^0Om6d?($Dyx=ixT2`4Xh%UEF)(K-|!i<$#P-asW$mi!@@ok}A8ZW|)G3x?G5gWM9@Jz!v9IO00F3D;HKL#=fk^k|)Q80@Hr%n5yj5lI-o6?1`#U zj%;jy$&Sm8EzI8QgwE`e0D!(O34QXWVKOYv9xbx+tj-4P=>!um`Qy$Rs?Z{=X>`~t zE~*A08peUor&?Ju)@!R~t<)k#)wfxs_*R9E`V~M9`@9$NGGp=-N{;S)Ou;-e%<|gMDkWI<=P%fU@EI}Dm8X({K3lQ zvR>J4>-k^r+zs9ZHk}uLW>=~Va>xQuTjxY(A@cGIv?Y=M3_L2=W zn(x_fDk<=rf~&~($nhS<|JHDurf0bIun&d70uvzb#OuhaQuLB-13UjPr$X@hu@a4r zt^j;528-|ba?a3x=I7jLpAgE2|pYA_W$GBQi@c5!kiZ*e9!Gc!Z8Glw9fXs9TIs;;m~ z6Xz41)@;A3rE7Qm;FRrMbsn#~a^LpbD&+zohDL8ws63_p0FF)6uZ9p)ut@j>h zBOkLeC-XlK%rrlBJ*WqsDPKL@m1x9&ls?@t3YLf7?NpS8UGfi)j>j7s!2Gc_EOYDbf4RBuHN zKLn)c^;ezhYQ=F@C&X5#BqV%_Qzx)4^YV1cFiUSXh=TtpAI`J`iNF(&F2BBXF$ZK@ zS8-nEC`;XRjKwui-?eNHHC+#NLkC}{!ZJ(x^|I;eRR{JD6*gB)^+u1u5tA1m%QAm) zfw@xQWqWmmaP-rPwb^pEbtBP13XDFcwT-SdYQN+6#UVn5c2El`ZYe9!x%KGS^-j0< zY_s=!*EWwaAW2d+byw!axH0|$_f`ovM3ggFoaJ#JQX6A66FxSCupO(Gbg4##bl*2- zUw3C?_rLKEch{_G2ab5J;9xQF9ZDbO#Uqz#pwXIjPGd2AyLWuActL|HXRCx_Pd6m& z7z)?`8hGn|yK`_0VoL1GZCipLE%rz&x9TZ4J3s%p;y!nTOE{B1FkJg5WL$VDlA&`} zYIU)oFW-?Wn%FMhKuX~?I|gn*uQ!>qIBcJJY#a0!Wa?ydG+4?w%brewhjoqvnA>H7 zfA_cq#5hGLxe(2{fd_D~3^~$FW|G6RgUj<+mr9c(0!x2(VA6|}FLLfSR;5rY6FY2{ zZ+avzbgV@mi3_AM%Qu>rI;oraB} z%#6b`g5PnG6M7(3td7Inti$=DYxaaIF!r(%^&RX1aPKh7jTHYGF*=u_&B2vpo%R+h zm#>+Qp7mRwIv1z8shhh{qxva3*Q4w??{@#Tb;tS|I09JLI=&Nopx2Ho>-s3Y!GN2P zBmlZp*lMurUZ&|7gLjP}TML^*xUx69#5=o_W0QGDMod>brJrKNMV-f|0Tr-7!fboT 
zzZj3o$=j;7Yb#8;pF6v&JIu%YcQ7XxJOJUsdWA+-!w29w-g~~A^Jwz<%ndHNjW>CtJb0hDdcS1C+bo1^J{qHxgq&yU5<2cpXyzTt;Uh=NKF z1Q6msXW}Eivy)`JOTHl(yqhxp9<~2KFq%isyL7a7=e{LNp0_jJVLPlxd zPyFjg=d)Y9Jig1MFNrV#BgseV$2;iABLT>(v0_5zmW%pZ%l(tjuHCzQ%hUaAJ0}B7 zQ|v$bte(EapFjGizxuEL`hR}~ynpw@zx%7d z`^&!r+<*PsfBDaU{sTk>fd&W`9B9zNKwpvStl^^N%a|-knm~C7F`^GAW8&cP=x|RV zLn8&16nPRQ$w4Vqk~A}sqJ;-D8PKem;6Tm-1@85%7XWBbp+ki>$Z@oXMj}cnhLK{$ z3m7yU-vpT=bxzbYQmS1aOKXe3zM#0yn6Yz z{qPs?i4lVXi!clVG2z4^N-%az0`lU>f+trld|0t%%91m0_RHCG0K2{B}D+p{6bM&qdWBuT@C7dK7``N)Qih+A@Q3Fivw2>?{Lew{#e z?Ay6__x>IH_X=GAIMAaDj`e~B5;3Y^{d^Sl2BYW^7zIP9aRLuID9rz-pNOhXfFFU9;H@I>0?8q&rb>;gCRd{)E)}87VhtJh`VcO+ zj65>0zOcZuOE1L~qBG4vE7Qz0)kG7s%h+5KPSMabZACir$O8-pmYB+<+3wU0&pM~1 zQqUG#l+8Di5M2~FM*Ylc4ndeZ&Oz`Jc@Ml5xN|Q&0HTo3QwX(FFA@9TyR^E5Mjfxb zPt(J0!UEZYqXPXJJgC0$eEGD%0er0$)9h3oHpmMF+_ei#D)7};Pr9f;5BuPtRn=96 zrBKvpqbRkY0^&amR4q&Uz3(=2_b8(wROEzt5ufRYpX3<*l9!UBHE>q zm38U?^=Qw5R^g)+)>ditB|C6;Y7fF(*)SE86@qN`zDTg%)@`+;Ep-L`#1@rVXU(G( zj!5))wmh=gFkpp+2&py0T?tntT93Ul_^22;6;PBagGn!@ue{4RZXGgMGxIAU$Y}Aoj}|K?3JB2N+Nn@-qOp2q&#G z1PO1xI>X-#NFQkt&2IE_gX)rHAqNuBAV_QAfmTofun7QI%(|Mc>?cD8UV{%$LLb72 zXflcXZb{BFqF6+iw9(babUh);>U6>^9BqkRxjWIH7WN|C*-m9&G}P`SRz`OLZ+K|L zSQ@WHvNLgvdCQZd@sk5`kN$ zIHYsK4c5?`jH_mTCgcWYdg6zQpk%{(hC6nOQHz;GqCD$GId11iX;(f9 zj9q9kpAb0 z#e8wEOX2%#X9=qpnj$g=M=9#I7FaC=QY~rn!(}ODwaQ?sQdzoOs(jv8LS9zafo8?! zrw$lD_OSA-1c;BU+<;cJ^=-U|P~WrAlPkZ$SDFGyD<31 zJ7Bmxbz%tG4}qZC?$Q9)=LxLNJ_B%OL5ff;O$}P9YP%;6eQCeS0-8T(2o@9@U4Tx1 z6PjnZC%%dR;Y{~OsD$tc`9$J{Qh#bXv4(L;nGEAY8)1}UO|he z7(K?|M8+{0=#90A;~d9y$2_j+kArMlAzS;RNzUk!InY{e$LT}4&53abDTS134-GY9 zft*YP1P`}N2ZUW&!bD4M5`kvEDb5ar1^u@|kMMs>)wg-Z3m*aIO1(zYZs7WTKz-|S zh261gr@CX=6(riSE(VW_qv_&|cZ)k7AM2z)E^^fg)eVn~6r^f6@X`4P=I((xkc47t zrOpS?nm#eARou&Eb#T~|HTeQAhiqR_fZ9zNi-tKISZF7--Vco^kg;8LjcpsrS;xA! zW4ra5Hu7E=_4Hv{r5AdI2>(Buu&-iu}FMulav+;{}$2CO#Y6zR!R6?t}=KZ{L!0oEdIRBJWg_<3~ zi52L?rfg)WNJz{oz3GG9@IjqUJ*m?eletGN)o1E;-+x<2XjjZ(3y0dex#1FHm z#Qf=BfBWB`{+94XC+hD9HIM-S)u0Xh&;JU+7epce|1SXrPyu(U4`SdB;*S6)kOC{v z0tb)*$-w{2iSi=L@>;G|va5yUz^Ky31XZb*IE0myCB^?TFS};0tpI?5POMT`Y_ekP z#VB9_bRy|ohv}HD+2AhPu5Rims@inV>cB+nT(9ev&&ZG``Aj3j zqEC?6P1)j2`l@ey!pL(j0;X6k4!FYg3I`Jgp$1YQ1@hwpE?@)Na1P;640HjV2m}WF z$)D&@0rI180O1Y`u@4Lf#PFqvZl}jmE430S2sI)x!eCOwVXyEH6EiXY!~quO?*cJ! 
z0GDZH7!VXQumLA85h73%HPQb(u>whv4>rWoO08Tl54x}=aZF`G2moo2hLor!#WJXZ z!0)`CN|mN)NbF~yZer+sg?D_=?V>RwI!g#YYl;7|t+b9%8;8shyAdXsunEHv3ZGDp z0EMr#%wDe0%hv4x(s2sl?&}`ukd&+o!47A}@OfAxEaXMU{7UN=U)u^BkOu`ZwVG&kQ zB`YERUf~fqkpfAv6x+Z6OF||WPyzG665RnMS8^q15*B6A18cG607pRFCRQfs1>wwa zs0t{8Q7N?}C~Rng$ch++A|V)H86l4=Q45Q{GPAZ!_K1tL8u7L^A{)0+Et3!<$Wa{K zQro@_`_8e;WuQw<|Pd-m!M-tZu8 zU?OowAsNykA2K4>Arc;P4lPoW%0MRCadr-~BW;gNZs$(OhHS{`BzXfUW%4yCuqI)X zHU&@=ZxSc(004LL5qwf5WAha4XCxPCC-iL(X(jZkYbp5*m7Y@5P)I7D(wER9fX3|} z6-p6$q@QpQ*FGR3L<=kfPwB=J_SEm7qVe|75;E9wE!U{sfbYoQQa+I&G1tjNl24@S z(xnone0;zUzXUbd(Jl+cJQdRptZz`XP-i#?33M=}PVyzpATlTO5Gu1FIr9QCb2B*; z5H#~ML$d=$6C?LU-bmC=ifUL=sayZ5C=bjp003caq{Bc8=~Hy*o(QZEa5FdmfH&&Nu>F8YLSmc0Pa8ySlkXCiGNBd7mYx741A*!BC2Og;awyTzwN)QL=&6?Eo z62#}GDS`y-raBJfvTLClwMhTnz#dM=OJ@yCsZ~tJR6GNZ8mo~Ct!_Q%sR(%wO?_-m z;WJJ<>e)!MUKUXaJk1RI^G^Y_L+DL>o-X?$i8Ox|U#Cwo&CMMlwFV^A6DGA%7s4_z zG*cZC0qD@2E^=i8RnkJ#L}4SpNYz0U;n8?%s1EU(j+1QIqr5n!Zm{O9qDJG&Nf&hm zHUdy)ags+T4;Scx9#}F+b#(w)vS+CwCQXr7DNi23uJAZ-R4NV`GYuhhGB0WEj;Evvm24xJ9x@G_=7{3 z)DGZkFro$~!=eAIEV8syggY2#q%ACCcucic2-gyM*YwJk_f5ZKY}fU91w(olRGu&Z zJtxB0#8$|Lvs#C8iD^*`p4d^HxLkO*iJ{nvpLnB+fIyxY4gq&zb;=6bSH&dAf-XeU z_(pyO2vf*RR`M5WM&*r<5rOzue@saPv#Ejv#8|RQL)u3CyoqI*GG-f?RsoO>c#{Bq z6IX$fW;0lap~5Wj6^Y#wZngLT8E{AY zZ;1r`jeq~1`GV%E%+l)`l9T)1mvYlfyDo@t{7rt`x2?9t7{O(a1BaD{Y0~OSznF5I zJwi7LIRFnCf_XMq8M%?k<&hR*HIV5Ax9N3hP8*2d*a+anUNYAU{|CbNBLVv ztCUUolx5g1&UKZO(1+#Im0LNBKPHG7ReL8oE$B6ub(wRKR%n){rP*s$2@O54lyc95 z;Kbk!2@-vE`ZAMwro{oL4cD1p>1a#|1;GY$e<>S)!L<*=VnAxwc$W*)J-}vHv+PUKy65H>5r0U1zz7b@^pd z`a?tszfur|;=_NlnyjzJJywt~o3BUA6Bu}}RTu=4pvZw3 zz5mD^l~-Tw`n^XvzC)6uH9B)1dZU+@vM)QoySzwuq}0l{A2oZDuflEPx|UHig*dg3 ziB(wi@lW92MKTbai!}8#oSdyt{im)zwzjeH_TUC&(ST zaERPb1#~BeA(}}-YpQ8T(%tA*MEI@r#$Z3lxB$icRKz(SAE>A&4>G7 zr5U$Wd;aI$W0-$lMG0*3Hst7&Ug=RWQBzvnJ$sV|%(t!job}kvP0y&3`&!(XtO$$? 
zjyVt}^1|^Q(DCqaf9^+uMQ99<28kv+(w^;6tfzWvppAZzExv&f-V^@-f$#fX)&9Qk zALB2vRtJ;&LhF=cR zNmmife<$1D{gR4h=dPQCiHWtOd0sdn80mTOqB7%!SV%fW-%v>`ya z{Qx&E+(Zt$w}Kuyj2!6EyP|J}zk29Ka!?nt965UR>Vh13u;s;P zIdFg9Q0nr^l#LohV-vXltdlY?KCSSH6uqw{G2!cjJZ}Aoy*09Ew`ALWnBmsTBo3i zL7HjxAvGs!G93WhY_r)W900z(>RVy3vMOAy>xJQ5B+5NkT?#AeM_qN#Y1iuopSh#q zWaK5-sXLLhmtC~dPCM+diFj!vozbCY28v~jD8v5=6+u)$x#pfbK)U6c03y2UZWu*G z<<%Jyd1s!vo-SyBC{Y6{I79-u?jAre!7+HN0)UY!dJv8=(wMEoU1AI5j}vd{k}eGW zS1^JJo*TuJevXV~$y#2FvJ@(X$ui4pLabWEZYHIgcw*cvGtTPmymPiakH=uVLJw`H zo_y*FC}xgEK_6t|R9cy%7_P^(qnG*uwX1u1a@W&>4yGul%6bY~Af=s_8ks?&s;a7; zfy`Ola?ee7-F7n_uYv`)S3|D5>MAUB!1gU{w0dyY90bQ+xB}ds<=dDil21-~<(0Dt zV~$X4o}a>m@zV+ghFaGJQ zs$V{E3s(z&{M5=nfBp7b>pIEWW0dq}&{c2^r)6zYjmnhiqF^aXZS6OU!CGY+#Tl}B z>JMk*)@s(qwu^1cXOHWk2PFu?4}PW(0Fyv$znECZwcx}Yza=3Nyb9a~W74Z?;e}~q z+tAeJ6Eyia%XmEOVe*V}nrNM7i1m|>5s!$(Bw|O2(mKQJmIp;2(Qk=NtfCdE6&{>H zj3mu#(GR-_M(4StUY;96!PJEy0jRNj*elupM6$=dEQN6<26zu}&_}*LYzmKr@=Lzb zC%ioh$`^;R9ucW{zeFlhkx0zjTlANf{@uln02Ckr5E!)ux-WrOD-`M^W57&WFNYv( zh^RhSlv<%@am9>x)X2l8{(w7RC1GrqhFaS zVn1Aonpk*6L`HL(O5~asDaIPUP;;Bpj0BDPu*GZ6ah%^1;}gS$CeL{iGfexWWZDSH zILdLHoVp_e*@sUCwiAMw6iy`}2+%vtW{tnDEQ9 z^ni&=^^<^GtLP>D!qG-y!hnqMBqm1x8a0&ug_IKHPax11vv@wzl`?rDL2HRqgX(l_ z7D`?UFK`{e^)iNnn+`Eu=&Ya(hK9&0=sw?*EO8#eRLm^ri$Ld0X3ElDT8OzMSDFS1mS@Jf{aY|Vf=L+8c?P|BI)+Mib#~a?vxs?y#q^?})de<=`B)+_L>Om{O z*S_jEp4mm7Oau#=rGc`50VZjt92gj12=XvoyOh?jMqmXWRxg~bu!SGz**t;vjWPvk zt5Q2F)Pi_8oHZLvV=7zkBFDbB#jT5V8#{tbQCjzDOj3>btQYc(^yU?VZA`?4fAx=g^4IMM1iR@&nJ~_)^ z{;ry!N9$SZy2|D4HLsfM^IDAbom_v1$W z-Wlxdpa`KQH8Ce7CgJJJwQjf#9v)vg6)nbEF1D{Pj`56Z{MT3Ja+iZ0@Q^#4+Gb96 zS&Zf`X9q_B(B`nqU(RXo847HN?Quvgr5VC7$PEO5Ft|1KEX%Ak=zHmm&()3e4`z4* z?T+WW&qD90^V^pHqtWy(Q=GDWPyM!&M)z??E?UPhfBn+6m;v_%z>{j#Y zwSXPuRNi>qIZk)B$JOKQwfNls?)H+eU3*MwcCA+4a+trIm5gt^;*)g1e437dT3aWh zg2NzlPyX`eBk>)R4tmclz3I~K>vQraX{m2r^`BbKYw3h_4QhSsUH2gBgNL5=q^9k% z7O&pL{=8f(^8*4KnBbK?_=eFM?s2EM-|udB=u1EEzwYv|-79iq2R`^=r#M6pba=#f zFUSb`q-fo-^V zb0C5r#{^xNeJ8km$wz^B_v zn0iULbxUZ8HuisF@q`W+h4H3P23UJtl~xPrhFqvQQuT#;_z`D>E(j<)P{d?qmVxHS zd%(v3hNq}9a|lF*XABPBy(O8IzNR3Lz zVfFWTr}q{|NQsx|jo+9d?U9MA6nn9iTzbV#={SzWXo|m=itlJNjb>M>u@g-Z5y*&! zF{g&M$cw$$kHAQ5bvTS^NRTOrf^Ol2$4F$#*o==DgV^zj(kPMDNRig4U)UIJkvNG{ z2aX--k(fwdGv$4G*nLHiSE+`Q(xj3rNks_9j>I@|@%VjZXeRwKi}{F$boh_{_3jfqB{zwaZ)t2hsf)MBlOV@;T7*qO`H}}ok|QaY z%?D9Qsg!>x9Z#v4P|281v?pC@GTOM2tG1PuX_=F;TxS?j#mJd|>6Zg(lxx|R!~>UT zCxo;Zn4gK4{3wh*_hQRm8ADql4)s|36B1T zTfDi7@kpD%*O~j+dukb-zKE7ZNrs}SCl*69_PCrwh?-?6kao$M5bF4S_@c`?74yK377+lpw(Ibi_-au zD%hUd`7LBoFEU7>aN~y@otXjsU>m{k1Dy?HWlhqofp9-p?+9{-J zpWUcaHd^mil{mfi^ck=_P47XI;s1*tX$Nk zS_BBe7_CP-tzJm1*6OJ$3aTust!>J!phvEhh7jReG6QI;9TYDWFoyYKrt`|H?3%Aj zDzXHtt0Wt*hnj3`l2sPro%gDN2^yYP+OIM@Gc&8K{rZz6IIxCVut#~YC(E+fnx+l= zu<91E-Dr{kXO0$oXs_C*9V?_IE3e`LvLRcWw>qd-i?vy{GH)>d6i{)fEo-lSHHX;_vQ`VUKO0d(%douZutj^ZM_Xw$}rthx8k~B22&Ngt_<&yvYl%%ge&{8o)|Pz|t$jl;*T| z8;um(t9@(1AiTBjd%-IF!$X>jL!rSEDx|!7lWUp6u$R9i?8L;2mNF8s!#2f08NWLW z#3$OqNc_U&XL`^(!(rTSa^sthv;wfhyt2E(YuvxKi@sSL#P+Mc1i~j8h{R$GsK_m$zqqmx?6G{?7qhuvrqh_fxN^!S;YWb$eN4)yor3io$A60yRZU0z+f!Nwgrly z=Sfd{fUV;?9bUC4%%AMZ%nXYWTCb*j$9$5We4L@G{KtUI%Ht`;!HB}G49bR! 
zozJt#jf~5=?2wRLz@%fYHavx_6RQ$v%)UEX0@}t4?9BNb&iG7}OZF|K98vBorA`{T z){M<+d(d*+rEk^E0-JC9yv$Ks%I)hhj_k!mJH2&kd6nFoLUTUzOwYDSahfdA{M@;2 zEY16dQ~qLoLlpe8o+?(>JZnPpr@lO~pQa z$g~{PTFuB{-Mr_F&X0W57YmJb)2DkatLkdiJxK% zP#tk;i_?>>(`H@Sm+gTbs$MzE*82<5>p9g&EZ1ON)S`LU^7qU8^w9|*&&K-K`7GF5 zE!&En&$FF?g*|4Y?9~JjcaJO38J61xUDlR8+?c)DJ{=>3T*W1r$8LStqCMKzO4oHQ zz)X3-Ma#}<*4M60+kh>avHjY(-QDB8#-RGXza80<{o!QY;kguhOU1eXg^!tkQ3-$S+R+=#IN2hu(MveqTzx*ENpaFRJUmT)jXZ%8zd8E}iLK zfpYE5>8DBLOWv1zTa!uNB!1$l{?p|d=#}2VGR3^Kt{^Pwx~(hj;~wthjztm< z?8DB|x@hFl?6PC7v$^d9Z@e(y?A>~N^u0O{wf zZspp(>DB7Io!+szZu1h1^Et2cJD>BA9_;EK<-6Uh?+)_r-QhNU@}wT>N}u#3znbc+ z-T@0CC(h^B?(+T~@cz61>E0glWRC7z{`G3E#up#7)Q<}IV@8nM3 z^m0E}D34Vtzw&F{@^;R%o9p##&*}M9^fAxvf3NX|fB0i>_K3f=9naf<-}WkA%}hS` zB~SOlo7SXa3Q71N zFYj>g?3K^-yl?rxk7U7JDqZXO)V}vu5Bg~i>!nYa*n|3y&Em12_|h-?|2_Rj@Ax7y z(~&>sZ$F!t5B%ZZ`*r>m`~J%7b^Iuv?GWwaVDG5T&y4L4`0g+4)F1uzKl;@V6Dx7| z05N9E5*;l*Okg1YLE(dh4ID0P$WUTLgcBiBw2;9=363ac+}Ht9B#?q6fAAQhBnT0f zDUGOv*+gbcDKux^#A&7GPM%PF_WTJ{XHcO;jUFYKRB2J9Ihj5k8WpNjryoPGpi$Cg z6|QOe03sB&j~KFE#e(%pMeWP2ZQX{{8W%2(ta7c|#afr{-MxKZjMU3lM&J`2Nf;b) z&@cwZ8Lv8qtT=LG%99t~!(8v-Mb4HrYLNW7a%j+^O_MIYQT1ciB3y%L9UC==4XH=l zew~~4V%V}*t^I@hX&yEVRTD z@DKtOY!Xcd)r2s@Hs7Q$P91s75X?DMEb>DmtIJHY6aSQPMnDJ6(MCZFr4!K=7lrZA z#BLlmH+Rx`;IHOo&y zT@z0KH|6BjLRbF+R=+!k#W7Ec`pi^MM;l$XS!bVRG+JqQG&D!JB%Rc%WVgi>Q{r^B zHPRtfZSPc8xipvDbJHbtJzv8#>&$p(jTK!dZ_U+HaNpgR-(UaDE73X$EfU!nl|`7~ zK&fSRVTP;4Q($5tp0;At;^Jb`AEm1o;Fq)nymul-F%{<#Le%7_fQk zl@-9U?7er_2X!vl-z8%Xc4DCwE;vM@8+I7!q$z$_(V>}cI_e%X22weWJ;plgkVT%3 z#$HVpH(Vsf&e>&_%~ttrwCnSEtpRDRIjyx)<+<*!f5yA#wx3RVvxE&c_GrPO9UN-^ zz73apu>_&hc)Zdm(mL|3w>}MMurGf(ZMD~Kd2`J<-#NmWX&&9?x&15K?$Gbnd+&f< z_wH~PDYpplz!NT9>ArV&TH?fim(3)`ookNqx*&sGa*%1KTuIeW_Wbi&JD1*xDXs@S z`gPlW8}9Duy?J`*SAU*$^I`Y-QLojn(eG_-$DKOD=hxk0_I;=S_XCF~zUsk>T%Pin zmo;XgPhVoIUiK77J?$~jfmG62>1qbP(;;wtm}^}I&u2jVHEMtifnWSYm^q@2*Cz@g@Ro%qP!~q*ux8! 
z&vG1uV$eF+A@>yxGh7?t?MRry6HbwS`AG_MT4*)yaE*p%tRVqMvqK)Xv58C!VjSft z#37z5Io~54Sio1HC63FFZe*hqg%w2{4or%#lO6IV7eXvswduqEffkgGo?E3i zzZpJ)jx&}yBquG?dCrNd)1B$s}b!Fsi@hII>BkEmEBp0%fG^(k8`D%7@m(1viOYi3ExS3ueIuFFXZ zZZ;cLXm)n8U~MU34|~vUwidB~oSB)%%2>Feb*mVZXecEswB1(!HdC18EMG}G*VN!A zDvevFZss~%H5Tx+x`pgxQ@dK~zSgX66zey0s@gwJH?q$iWOykH-l4(>Q@{=GXf;~b zl2p>EB2+JPO*`J)N|(FU#czIZD>~YS50AA)-G1#`-`1 zPA^jP;-k;{XpqgsF@%Fj-WcN+#?}=ul3ANB*7diQ|7CHKU+moj%h<~F&E@ZmWxESM zIL9a2%P|9*S6u?R%Q$uxmOEVKHB))SONO&+b%;_YBeBl^Qf~2)+sx**BACdDtlyrv z9ON(ObH`$?XQHupAu|)Yv0nqIQ~bf*D~=U!LZ z*H@mgpHFSk^ES9+Ue;`6K04+fHyYQhesny2$Ll>~+T6nq_NLYIY_;6^$;TEovB@26 zHBTFHd%di+9UWZO5Jk~5p|!Wc&1y(b(bSaIGrJYuY;^lr-rK8NfBE?CP)GT&(8hSZ z?M-9&Mr73kZ?$o-=iWu*3ETYKC$6D&@Qf!sxx#k;HnSg|X`DG+&fHTrfd3nBnKL}P zUog2@!y(D+K3Jz99FH1duRkyk@hc4N3FC6TlJABw_ zUX^a9xZ=q+JBo!U^sT>L+E|Z!=Ce$(qx&i8lrbh#ZiMfX`!(f(XD{Z}>UG1nKHY^^ z_sn4%TNLk{=d$Oz+XsL7jT_2Rb-%X!#1(6i6k4AH9{}J{pL&g}p7N}RJ?<6HDt^U; zwrH6W+8@7D=|3Ng^yYo@dmpuqGvMPR8wt1n57bwIgM1>&XCC6f@BG|*-~Q2$ef}?a zeQ?rV^7y|$-4np@(>%E&J>at{yqh(Xb3eaxz%DB|-l9M7g1)bFy~G1H3`Do*alGxb zv&j3uH=DOolO0pb9lF!ElL|0pYKgszp$9y((_6l)L#G0qzY%1+00h7c)W7bNCI34+ z5DdYX+d&+hK=CucL-89%GKj7#IX(HjTMM_2vJnHjK^?rn3berK>p8JAr4H*r4-CU0 z?7|{6!U2So8XUjbu`CosI!GBgGGa6-gg?@%Lr?jb*jEbDc znao82kV%RBC5-e$Zu~`s{797lTs)8z%B&(u$Wz5tG($#&MxSKKhU}Y%tizj(N}8ld zoV>?Q#K&0N$gSMRj{M2jD@Lv=%IrhMWs{?oWJ8F9?1tjGY6Ovtpvqhd_V)W^(3OwK&a ze>BYB{L8YmxPer=zC_K?)5zW2M|cXR*Nn^Aq|DmPw%gPt-R#NZ{7m4?&eCKlL*%(+ zL`v@DP3*i*>rBFhX-nw;RL{8#zW8EJ=Q2;tOw7FW%lb6Vp(M^)0Z-Dr&+(*B<&4ks z9Hw|e7TGk=*ksQr6uyg`pYjAy|J2T~EG0Vgq5tx`w^&&-uj7wS39+j85rn z&qB*XS)8v(%gqPXOAZA~`;5?_^tB%m&f%<4f)vlf!^<5F(7Nm>=e)*{Q>uJqz7=KB z-t15q{mQd^H4w|sEHO#`?5ZSn&?CJ|BfZS}0a69!JG*1hEFCHjCC?3&Q9*>4&=k$I zQqgOq0&jU z)a@L{c>zHiRYgIC)m8=7{Zm^7*JJ(E%-p-`R8_Y0Rrl1^6K&U3 z71rHD)^qJskxJ7{Ek|e_M9=J0G4)jWORz6h7z8ZQQ#CY$<6;ggn)pd2)c@5dR^jLsZ*03vAv&>j|kg6eT=d#aH~Y*iB`snJrG=Y}uWq)z1p{}*HM*Q#HAKAblt{n(!5$- zrNz)$Rb9LFTg(;RcAQ$n#W&k6T-+7h>5bdam0iYl+0zZB)NNX^)ljG9)0Mqm-qqTX zt=ilFMb+j7UESSXp0r+;#9r;)Ub3YXPf6OcMc(j@+Wnr%3;x^ymfrF$QUP9Gqy1oVWZ*t3;mTDPc=UBdGVUA>C)@E4d=41wD9W*!& zJVm^1V<>gtU5;OL2I6#9XKQZFgB@l`URVM)=6Y^og1+Zn(H}6xWpe&!fDY(sj%bK( zXBIwYF3RR&=HrIm<_*^5YSzO}spu;~WR1;VYW`+#c4$cUJb9kyk~V2TKIx7B#xe~n zi5#ZlTE*y>?qyKKZTUy zr;h4cCYC22XhYJ}wVk&Z_C5wKg0(JZ9@gls zR!V!G>#0`irQRjGR%m*5=fmb`q&DTZ_K0!j+b4zN!Cqv(j%=@1Y?S>F#=dLE*6TXf zX}Ct)tIiwC{_9QV$25jOEWj)lZq}Y(elDwJ2D1&zX!4%z`>yT5f$sHwg9OI|J8Tmihi@(thmn#&1RrZsEokLteImo{V)C@834;0&nO8S8xPh@D%@p6=!kxHlEnI z?#3SL0l)D5{W;b4m2wufj`qG{-RmExaoP6h6f$uJfA1DYauv@5700W%Xdel;?VJ|! zsYdM_;_hG3Wo0CA5#RDVCUKg64isPVBp-7nH}e*M@D~@^7_aXU2XW;-Wy|iP^kZwX z*lZi8ym9-!8i#W(9_!Ff?-MWcK_~MbwBrHH~2$$A;4JmclUP-m-pkma+|XEdJ%8~?)H6uc#Z$_U&;)77>9EZ zd2}H8bSU|gNBMS8`ITq+mUsD=hk2NfhGj4YVX*m|&xM?5$9Z1Z`JN93VDR~$5Bi}e z`kFWTqeuFrSNf%A`lfgKr-%BePX?No`l@gGk(YULzy@ZZ`ljfOG`*gOi4>kNlQ>sO-)KoO-oHqPfkuuPEk@%PfbuzO;J%#QZ6b|PDfHu zOHxx&QgwDxi;YuMQdC!0R#sJ3SXowFTv$*@SX56~Tw+;QQ(0P9S(BDpE-zYKTw7jW zTw7ONKRI1sU|pA*US3*XVPIffRbZQ)Vq#%pWMyM4D`RA0WoT$-Zg6Izq-SMfXfrfu zXk=(;YiOOHX=`h2Y-wz6aBgsLZ*6LDJ2r4}Z*q>2b8>KWbaZugb#`rMcXxJpX;yi8 zcY0w`dV6|&Y*&4Le130Ue}I92Z&`tXe}aRAgMxsBgoTENhKGlUhOp1z$ zii?bljg5|vkB^X%k&%;Va zo}i$hprN6np`oOrb91Ajqokvwq@$#zq@<;#r>A39r>3T-r>Li=s;H-@si>)`sj8}} zs;a81tEsB1s;jH2tgEZ6tgEc7tgWrBt*))EuCKAKu(GeNuCTALu&=SPu&=SOva+$V zva++Zv$C|bwY6thwQh2^x45~uxw^T!y1Ki&y1Tr)yuG}=zP-M`zP`V}!N0-7z;b86 zzrew5T*1J>!oa}7!o$PD!^OkI#&BT9#KyOV$;imc$;!;i%gxKo&&|%z&d<`& z(bLk>)zj40)z#tQ;q~?P{r&v_000000000000000000000000000008{{Zg^yhX5J zEnEi^B3#I@p~Hs|cS)Sct0KjV7Bl+V$g!ixU>`$@6gHA%Ns}mvsa(mjWlEPXVaA+! 
zl4i}DAZ_a039RQZo-%_1Ek;!6&!fhSA}yL!X)>lzn@V-MG%D4rShEh*s<5j;uU~0_ z6-zc&*|W3Is+GmomD{#y-@al+H`S_BsOsL$iC@Cwqh76=b?evGs>z-mJDcro+HmXc##@}X z-oJyRGe>TC@#DgM1LwxPx%1uEYD=e0eLC#x*t2V|&K-UZT%Idg!Eu zR%+>`k47qKrWYi$i{!FP&OwK?RzgX^7Ghw#0R|7OnM4<5XaU8SEPz1f znh2!pfCp%LD`t~%uK3}+z21puo_YeNFQ0zK^`}vUJ|l3zm{Pi_r?JAC@T~_I?C`@6 zGmNXk6E95h#jU2w>c$)^m@&q(RxGit7K_{*hm_q*ZzsUQ*s>)l21~4r&LZn0w93}3 zbF@sd!37sFK#}dxT^_Nmy4dzS0+;BjTYkYzfH2*BhNi*$*+Fwv4QdQ|1FQ-e0MDP-fQDctKoFlO`d1? zX3g@6^1dvy%`@vvx#c@6i8GTUI2rB}UUq?&#wFoWi+mN-a9nu6O>e6rjss z^~#Rtp6}LPGw$#2p-mLJHWTc=O(bzjc1y6%vizO`ANXJ^-= zK?V|#$TVgjhgnEw_V9O&^H6rp6BXRG;qlQv)s;Q$BMm5;wsLMV^U) zol#Qf=lVz13$zoR2Rv!IVEfXy|Kc^3dYvaL`-)G%`c|-mRculLO5Eb=Qn)}>>TnmU z(B?LEp^sf=WTi{p%HndUn<*n>T(niLQX)uR?Wz(dIZ~i)>q$Zw+GwX_*HFf`z2-#c zZE?F--SXC+JpC+dB8o)oBNmFJgqKKKjUu16<(6 z;<(2K-X4M%Oyuew*ug>O%~hefGVVT(nwxc`vM{{i@=lAiA!aREL!8$362MLHeQ(nO zFkctv7N@(7ac^l{V;kR?|Hnlw@*xFG=Q_hV$wt0jo)s+TBQqG!nW`s~vr1v_QhB^p z7U`BttL4vmfCypAQi(6Qz$cv!T`EpYi_QFEzNXo)_|0!%_uJ+-|9HsKJ?^RR%<6;r zdC#csbAtbjOfm0K5Zh= z2|%D$^**5j&o*x8MC1x?@MfBC1KLiVFbBA%=6^{43lbY{D2fVF`Dfqz4 z9&WL&__?Wj_#*Qe@rplBS>(y$$2UIfi-0$<3C|LR#^6>PsA_OXxn?1_4N+qWO$9B;bse^3A31HbSj6f^~kANO}FfAT1Y zc`TPCmNo;()@A>50+zRE*f(=D=XPOt2qMCBbmw!{)_T2=6RvlDdY66^Pvmdf$AC1~fIY)~5lABJ z27cmafftB@8n}TRXnr0@f*|;QBS?j{S2rt&f`OBQDyW6IS4hJrcOd6~6}Ef;Xj;#g zgZ)#08qh#D7(% z772*uc#avVbg+hqQ@D=m$UI)Ch4MI$CV7(YD2a@PkH&a}`nV#K(2tbEj3%dHNh560 z|JZL132EY#ht{Z&3u%W9xmHu+0!C?+W_5knXCh}tk>Uu82EIFeuJ8^tIXC*qRGn2a*%k3W)%Ke>}~d2F8dlR!CoLV1)#xtD8| zeQ+R=73h@X2$fMeiyL{E9C?CQiIt0~m9ocsV>xSLIhm9hnf8b)Ea?|a`Eg{Je3?jt zUT1mGGLT85Ep$kle92Zo8J7gakV=r6MHzvt=tF`@n6@dEhKZYqxs{V>g~8XD59XD^ z*_D-9lE!J7mpLJssa=@(crwVA@FRK4*FRea9M0`h-`hinx#mPt_hn2|5=?q z0#?w}ioWxdx4E9W*`B#Mm5*tNS^1muxm?p#oW$vf$GM-#DSsho5zN_~&RJnFIWx*f zdEV%g(a4%!^mg7kRz(?}dRdedie-8@o9Y>K?3qvQ$)UT6j`(ME)&-pRiJ$q&pD3E5 z{dt+oxt8m5b!{1=;uTtPh?6$Ti4fV1^;SDWS)obU1Xfge5;~z2TA}i@nM#3(I2U5=@Kka?n9s-G(Qr7PNzC!;T`D26SDEX}EjHF+~NDkLLy zqckFo5!i|1!iIKw0${WOQ!=FDS)^lyGqqWfgW04{8l{D5sP4&|!x(rW|H_zgwxuSD zoM1YsVd|fDvueE zx|cX;EYTFGg1Vu#N~_>1r66i?Alayn+F-pJsgkO$!rHD-XHfw9eVDb6oVu)EB|pxp zWeIAR2Puk0(0LGOs(m`J<2h0&WSsi?SWuICz@zv`~TX|Wi~u3}mq z9ATys#+J(JssAWa`D&p7DXRS{hp0(}0!gsgS+HUCjR>oxvFfeg|N0aSE3Vr1ks@k> zJu65V>!m>(mP6~S=yO>wMkg>@ulAZmjE1k|wV*t6vZPvyEn6+G(*e|4qXl^bKKinx z`36VYlr?L!PI|Kro2U@0t86Q=Rq0ektFc5ow{aV{=q52FA#Iy#pj-!5p$ex|i?U(I zb|Sznh8vy$$y!wNnkB`!j;npnC$s38aD#f0XN$I&tFuezvz%L{e&-T1@qfHy9LsGI*8@T)`u+&IO55T8CT6wuUo;@h1gz&1c+N}zExtW``T0x8T z*|zECxypNjqx-z28@(4BD)Aa&X12PY$x&UWBRVRwuS=bP|LeL{i=!>;Qn#Cz)(W{T z$Gbo1yTF^U!%Mt5JHMLio6UQ_ZtJ`cCA!kfzy905rn?+unpd2Oz0x_I{qw!B3%+kE zzJl9YAAq$c^<@*Fs(krqi5sM;!?^1OkQeqC@N2fkOTQnQyb;^EmMV2}3%&o_!T=nZ zR>zhh<)%eqdSyz$K?Jg_JEPoN22hJL3Ouq=+rR_(!1)7Cqhl@$KqeMklnLlLD)+b> zyt~ahCn0RP3p>JE{IKo#v&{R&_}fP-{K911rLE9ty$iXED+Rynf@+zpS7S{g=b1d5 zt=@aDK^(ph9Hh2ueG=TnJ9sVw1hyPJ#b4)9+gi3*{~U{3Ou|$-u_&CJPB+G7?824& zF_KWHOOC zd&fiU%Ko}+U8cLWte2hq1nUc=XuQjj%Omgm%Zwb%!koE%=fz=sxA**&%FN70dw4Lj zhE>u79X8NCD$Uk>&5z;B2AsW>U~!n&nP|5H(SpqT+I2a&%PSd{G5WQ|9~T3yT+&~%S>?4sw+(gORt)s%?GNl z%o3;S%gU_`&WvURW0zK^8L+hLvay-Uiu*2VOwt>yIi5MQ@EpI5Y`l+b&n|t;3bW74 zJk$Q{NNzaNY~?`M$nzncjoO@ql#1PbjP2N8ozn80wvs*BDO}lj)1_#g+4&g_Hq+Uk zjo6{cZpYHoO;*~%@>%Ummv^1oMvbj?|DDvXZOevTnr1hoi{{-T{ewH|wOIYv4!sE{ zoyEcZ+s7N$V%?}@Syg7O+?I@-r9j7fK;8o0-j;_uG~%K_4V_aaWwj03b-CIB*xh#A z+Gu#Pr~2U8h`xpUXgE;F*?rZzyWJR`It>_Bm=NFc4czqIQ}zwX#BJQegxty9-!xw1 z^vK*l9pIx%;GrnWc#_FlfTRdM+K_jzd96e&mCF%H!-zfM-(Atzn8BY{-WtA>9B$Q0 zl6g1%wa;qN4UFQCz2f!V;x0bc`EA+z&EGbzf}^n2-!xA`@2%`?3aLT4|n*roy_Qh9qEw%#93{ypJnMPp6O#w=J$=vo?g~zuIk*L>fT=J+zwUM 
z9a6Dg?(cHYGXf_4cI$Y~w2#zP+2X)HJhFuTou#AQu{-QmE1``$%g&zO%|3Px%G)z3 z?bLqo*52vGz1(OH>g)>c5Z~?J9`Rt>D#;L&ME>SGZthgl#sR)>=2QV$V1#qc&?yqc zrcLh@E$Gm_xG~p*>`mU||C{Jk9@tJil>KhbSDt-aUYdFty9ICXkd5inzUk6)?F#Sk z=_=zC-}FvD@lYT2Qcsx-A_}Ek2}ZKyF;p7J?E@J5c?xedyA zD)4H5+QdGDcGyxH{kYcJ;61P2ba1d#lL?vI(-f*AjP)*1={n5V8`g+gW zCf)Z45BOl6@PkkI0nGFg-}{XJ`}HRwZ7y|0vQPjn3=c@&ta6)M>?*QMtBX`{`f5oSyWCzwL+b`@lc{iT^#y z!0sUu`NnVVc?u8;1P(+{pdc4apFnxi)M+6@P7xa>q)16p#Y&beHu5uvP@+c{Lvkc} zXwgZNcshbC6zJniHZkFp(ISP2kee_{sKA-C<{OWBWO8|81jUG*7bH+1p@inoG*Qhw zg<6zo7p!IOZMDZ_iY8H*;ZIoUDP%bKlb?acKv z*s-I{rdHeb|83mXbZ6szEjPDu;DiS+SKPQc^K{YINmp0h`gH8S zo{+Lh!aZ`-Ysb9OVhRMG`s$I!tFGdztEWO(N+}@mT1zdooQm=&u!4$;E(k2h= zJw(-G{|`Jy6-;zdMH6&XQu->j^wI+})zm;vJ4NtQP``_&k%=612to-fRCPuREpftu z2Rt0euTV;~6~&D-66p~>a%6GW6;X0&5EzN&E3dsgs?bUvnWbY6A!mxIs124h!mJCe z($+|>q^!w;AH&`DNpkC=K?W+Vh-+JHx5V+MkiZOc%rZx_*G)Lp+)Q75-^`caI0K%u zPJ;dPGnGDc!Dbt68XhiChzFIJ&_@rg06##$zqnCIF^+WONjt{$V~|7MG*ps3HM!(+ zN{QFh2P2$oRaRq;A_~8DSh8B2IrM-K5>LG~R}+EW$Rl8jZc*5mT$E#JRh@;lSuUN1 zTH0xE;QyckYug=n!y~=5`Yk1~zLx4K&6Seebkzmw%B;wCJ6c*_$)#R;@9h_0y!ZXr z-@Er7xM08ywvCW2Bq>-N;2w^6VnHpw`0Xiq>7=tAc}PR_3jn?!NQ(-tX`24xi4z%g@c1zhVl7PQ}|_ym5*hhkSneEx#P| z`!(0U^Zoh%{617f!gZAx#M)q%r3`%{4P&X+RXE@k6p+eiJ$s$5a+S1S&5m{$d>T!3 zC;zqG^$vt>`(3KoqPi(T09IVWVt z>mJ|6cSIy6(R_lNjVpL%Kuw&cHx5gj``~xQ_%W`2lcO9L?Uz6N_0Mzu3u76@_&)-L z?nI+Apy`GOK`DS}fz3k~3kos73Ic6d8RR3`9|!r(5oS_(hy;TJq30nDVC|7w@)nUI*+w0f^DdW+CkS&VO-~9jl(gU{DM$HF zy`fT|;QJdi47y4>!NGawYziSJqo*p)l9pPurTlbx(OqKHqF@}OFoXHg&Z!ZPIYOor z;J8c(HnR+UFzK<@hCOQH@q#MKrXO2UJ8ph(Sm5-XA;oFP(z%66OEE~ScCya1WCeL= zdnXL1XEx;VP>??SWL5PkN`MxTpj#cNYP?!dRTk_On4lJr7?{KCL3D}{MchOwip7d% z^ozO7IRTHHq0^)F6LqqJuxX0o#O5y@`oshw<3eQH@midK<~?9gYOl1cgI7lo!}mpy|*&8uP+wz3Uqfm3r} z1JhQ)31)CAaa+DeoNK;k4a7rZxmGF$m$<}fD>*zo+>OdrqanW7a_^d46!%hP)nsub z_+nC@XjjH3c%~=3D}-jIU=4rR0w2bk$7TxfypHwedMR7T%EC9O-~VFD3XS^YXWpP| zb==U8>RC3?60gcPs2-NIaGC)Ya4ETpU^FjS%?@sGo7o)J+)fy0TX4lHGpyka z<2ldA<#S$7{O1${^F5<#F^h@iq!`Z_hjXxT7Qmb1m2!c*BT_8w8Y|f%PxYQiPO_5S zrL8TaGo8x($&_*ENSYi0f)%Ku4QoUaVE2OLfzdCRzvE?g4k-K(iU;h%A8oabI<{$04gX zy_z;Mr`-+Irhr>n^4)iwJTUwz1)t)fKm-CSSd-pnt>? 
zu1$Ds84mG=^E%>VfAGX@QpZIjMB@v?aN9rLak$4_?r)`g$VU!slP`Cd)gGyfd5l09 z9JJ>QfobIF{WhpOcb zNo204zUyB9I-JAadW5zjnrzqskutvax96T=x_7(BLI3>wx;tCYc_)76sJ(KQBMtC^ zM?77;ZE>?Uk`0ST@1-HnSZo4a=&?5a+aM`psskLeOjk7X!Y%dfhBm5_4vO~Uwt9sx zT z)}uezv%mYpKfTzi=_4jdyS;+~K(;$VBqYEjRKndWz+`(ukh{C#TR;Y!D~%BbzA7}x zlfV>ov_=!QMuHV92%4RJ@~`H9^66v<3X^?zfDr0t)qk?v@IjtI3~O>NDM#&tVAg!xhmwS$l*I{^C8la zL%`dFJiq{1>8zD;9iDT%Oxvb43_;3kqgSMnIFu(m+(Q^t7yH`7&m))8QYu~KL&_7R z2_iFM3`9d5#6mp8V-%|&?5;)ZvHt@*Nu0(>TteWoMr>?CZL~y@6TT^=LcJ3{V#vbR zS&;+k!W+>9I3TK`YDWnW12Nk`4phZ7{6JS6!ONj#^2J;9N3Mvzo_rdlOFMxaH{F>) zV3eU5yrWKPg&J%~hkQt7jL3Gsrgvk86N=GEb0{g*5V5W%ZO8I-lj~vUe?8uOWLbE(e$jPhXqP}2x%jPRA z2Pp&2GKe*>2%4N_-y?rc9tqIB%47HxjFFG>6aOyr0ib|=hOzONr zW3)=l#Li*p%$53!b5SX;6g$%#Px3rX)C5V@Tus)r!jc4|?pVu}6tdWq&E|Up3*rJN zD+oL&L+j&9d;CkL!N;Ow!QwQ|<2=ZsNKUBZGVjwiE`zF{d^ejSkyD_|>by?Q#L6VP zOo4fYX`wyh)S>X)O#CZP6Ft%LRZm^E8LH+sa$ z-`u_tEGx(~(1A0|aZ%8xYsyM8NUPZz#{^E#T!}xl&s;JlK- z+#w=ur#VH?CqPm#&Bq#Y(pBq=)Z(PUs8A}cQp+ST?IckR<ly}HD&}0!DCv?r#5yC> zO-hZ{>6BJW&DLwp)cObN@_wYa$_jm4*wPKljZip^4N-Kq!!!=rmx ziv?M#t=dr)+3`GAkxWyrmBK=y1=-2VwtQEd+F7PV9hNgX4A4{Z>Q6t_SK)k4%sbAU z)!AX~**Z+t@Iy+XNV?Axwk}K9$egT`K-!2^TEz9%id|ey1qQ|Ko>2f%tg#l3RhY*O zS**oe&E4FqP0Ai8;j6u&i}pzQ*?pUWFb%{LLEk?0Iqaj^nib69jkmhh-GM_;fz&%tCPZr)ObYwo6 zlex5=75~|-NBZ5@$^*O^cBga9Ut}slMU|nMlcb;J;Rl7zl*kK)5Y5At1wuArDh8iJ zW@3B^gc1yc5w+YRc3eqj+A7ZAEZyKO?qW^;Fq3`N^Tkzjj8`+J0#dfmYSMzu;?ovh z;Wl<-IUd}XnGk!!01GTiIv&uAW0E_LK{#YiJ$9u(7$mi|$l9CW#Es-cPG$-QAF=|y z1pb9d-p> zeqr|tJXrQOCYu0!*4R;NsUBI(zk=ET2!LJwTi*>W@H1OWB4DwZwCGBhXU5PYR_0~? z7yn>83c-+QX252RzUFN9SZ%Jv4&LO68ix2}VKJ7`b4F*CUg`bBh~Nzab9QG#m}z5? zX?<;F03FiEOr(5E_S%y^=oZlTK+-z7ThX0)1|Sm?mev z-fOA!T@z|6-ctGAw>WIEfu7>2#hUUAO%Mu7xt>$PfHtVv^UNlwf(iG{oh7OX}LV8T+Pb_14)}t6G2>%U$ z>Adc1;qD>7cDKXcX&D|)QuJx0z-K1)#pPyh28flTX6%99W2mq!ogP}t7PogS-Vdl2 znDXq=u1Z5b@A9q%Ms((}Zg19}=Cgk9GfnHYW^37Q9B}?*ah?#6SOeWQ;}I4Mfw+<2 z9_|7!?jx(kINs@-aPH=|fuDxj#e}{HXKV|f?s|J{CD`Mp16oQd;Lw6E-@P$fcIeKA z*g+O+MP@zp4sFtYaTq6}Q8cJZ#h@%WDM)oyLqo^Sj9@h-O4HSTW{#y2sx(SZQ& z1Ltd&cGjED$%MWAPHw^4} zd+wJ;RK>>d#@_OF#NjA*)c^g-OFFBlGg8bBfxsY@uK>M67_$>c@iljE9Gvqv7oSpi zp;xE#Xs-1fH)|i)bJ&(BU3Z`SHtGFV36q%XE)13w$kRhF@I;qqGQK+Kq#g#}Zl>I< z#is6RukLGC08AI2e)U_P{XQ=r=BSWq6N-xwC!EF{VpU&viCuBoa`i$+W_ee*IDe&C zXZ2f`W**;nJSWxr_V?rf26vU9bOVY&&*ND&DPwotWLNfPjZS$so&U*pbc&DVfUT~? 
z?rCh_@@=Ousg7y@2J>Jf;MN5L@QUEVd)U@W^;KVBH-GtfpLgrEUK)pYo3C>n$9H|d z^L_{TU1!2;BY3#xO7#JZG}8`=Ev+7B#gzwDE4#{CxuB>^OjF?0vc=fq`%edF1zVPS<9 zr4T{Xno_lIfA?p5+;9EIoqfw>cl(!pE^UN#!T;O`h+P72>B2Seps$3yfFT_A>tVx) z1|?D~W|0`ij0!a}>{yYbvXCN2HhX3=8cJ#^SF&sgEv3t9GOf|XmQ5SZoHw`e?8(h1 zP@O@CmMcfB(xgR7DKUK%NfK12jR>Xbljv#_3^e9o-Qx9YmatZ&L?LUIi4!PW4n37x zVnKliYw*yuWA`rJHhS~2*~8~A*T8`Q`>|>`4;(pZ#{X(5qquI>#E9SYr7YQT<;<8P zo9W!xv04-)JV+RN--jS= zB+Q5&c}U`k$*8!Zh%dnu(~B{}I8#kK5p^RtKjAnIk45!Z)KOElm6TLOPDR&Meq==g zS5t`v){|$Il@?lS4XLC8r#;5kTwMM^*B6j&IsX`$g&j6WVqjtC7iWRBxu#~DaTZ*0 z$8k_vYy;>i8*6;}Nt7- z?qvwQVTkc1o0658Z==P**(kvO#+h84xT=6#TzM)yTb8Z$`6t7D+IHJ$iW>KvbBP)( zoyME-=2@C`VjLZsU04SOVw+_83Co@OjX) zn)I;7HS1kV9OS?r5U4l@D(BB3m9v1F!bsjfxWtOZ?6>LBnxT zfidun!K?$ogf)ze0xY9-G_U~-_>TYuOk-(IhPVZSk%4uzW9RHPK?{DSNT6t)1_4>Z z-igp=FC11yX2-ihZjg}%O2M5{Xh;}>uxLy}I69e%Bc+56$w zh-f4uDuoS^Oj3~$(?s*djsIOxWSkzYsLPOH27X$!-(_%2@AQx`*gCMG+U%2&$%-c z>TI3v#H1%TnY>HpQ=gMOWGJ1N!|744lm<6CJC4L+GYF9 zbrzfEFZFG#lznXhtxlcLblLu&K=(Y|K)nliy!Z7pW$xjCFyG zCp^)qPJ0$lkwBFv&>*P-Le$fr7kTPE`zclCH4mSf^q^I%8qiM)bfBc<;XzHwjD)gM zeJtq9QU9FV1~xZ?jTlUq1Oe6{cD#yh>`W0WsV**#V-BROWyz&4?O-qzob4=rKMPt+ z#EgYT&F@iV5J{}Ew!p4!?Q5@!D%#q%tGCT9gvFX8VGz+T?kWLr@e0o2O4Ny#@rpCg zAlHkoU^OOSZvVVMCd^zuH;Y@eZeMo80U?A@Hr(wlj{m0uDtT8cbX?E4kw`2t+9N>5j|*;(87zE-w1 zX=T@kS9|%(QEjSF(|H~<`x>@vj`OkGoNQ$;8_qW)3AAg-)j4O4pb?g^P;S9J3uCDa zY4xR5{Qum<@S+8#5ORK+r&DL8z0d=^U#Rj`t?CYR>DN zBA*%u*_1IHdn4RcJGos_mNl&-j^&9{yy9f$vR59_>tYA%zaCeQCy4EAts=YRCXcOX zQ_k^dPn+cpruJ8_trD|w8-20Fi;_ks?kLezf6VM3!eSGK5u^AGiy_owiw)c zUBZb~YK1R-An2P$Bi6sG#!ff$-or$?1S*$5RHp^h;T-bC3cg;VM{1gd(Cot@E^%Df zy6$$j_^knS#7u2@ zO+VP4Rf&zwj#|6*FrReL&$fvxJ2W*Q-Zr!h>rU6g7#m(j5v z_^cih)Rjjm2kmK&yU`euw9=$FQ>N)&!z>fQolj_VooGN)!kNzf;L5`RyRe4McA&6`1j0`k~(to*(0JAN#f6`w<}%KL42I z$;2w1jdg7gRA>~4!QL5&+Yt}}jlo{k9bgdP+ZX1E6F}Dkz(Aw1-s$NDqs7}dE!1?` zTmGOGq?v(t{gu9T7w@?g{_xx$lE8T_nu-md78qaJ9pWLD;0B13Q^;Tp&K(U#q7ClO zmE>R&3SlKF;X;gpfR(`bvC|PAp5iTG6N+LflA=@rntHQ*Q`*K#G@7f``<;M>r>4`3|azBFUVAR`vRVZ7j82JYUEfm#xn z#d}F$)aihwxmzF>U!u&|yD4HPe$Gru*5*)$9B~^AI^rWrA|&SBrkD+tUH@Va79mk} z;wOS4CK?+nw&I!zq!9+>$m!!NlA;sF;w%=1Q!rEyC{dIl1zT*GTgBKp2AW19qc4h{ z2>@THArKpWWNQS$FjB#xWuff7l{7vh3MAv(IUQh7V>Ox)Ic|(NQV`i?Bw0ig285$H za$q|y98aELs*zx&RRHrLh4#&(Q$8hBdR9~5no+IdKaR)=iQhqnB0&!1DSqWB0_0T| zBv>{fjl|*(HKY{69#d)p_QfOAN9zBT15gM$e|qYms`GNBDm6T zX@)WVo4@g8NN(K+l2I1WrAiK_mn6x+u;gJ#;N;MrhKUhm-X+(OT}(Mpr&teIwiTL0f^{wIKrWw7w$ne8TR zJ}Bkskb-h$m2N45b}59qgeLp}kHMIQip!&<-koq4(2ZVdI42r8SBahBb+)L687643 zr%leD<}oI7Pyik-jC4w7HtHgifmCIB7zD);kD8r~?$4R}p;C3`FA2w>VAkHnX-2(WqcNRt_^F>Hk#@Qvo5pC>&8V~1sG|%Fq1vblwv~2KfY=qBW~L*0R_l7^ zk))bQ3r5DJ!lRN-Vv{;)r>df;f~r;qA(XbOymCmEn*VCOqN*#B>TRa#sm21;UDqHO z=O!fU7WUt*I%ckB9eCBHYb54KHY|23*D}6dj>2lM>X?ZZW;N}qGCiyQ;M93atBoFN z&lRk}F#*D4Yh{gNk19%Z@L!O2PH0NXxL(Q&V(PhOD!Qucy5ggOEzWEDW4yxVg59gp z_N%=PEx+Pxsos`I%*w1Tl-tD*!@W#XuKf#@|_fC$RyoPaAgmaL#M9NzxT@}UjcJfD94E#L~S^X6L5 z7Vea$NF#((c}VW4W-mZhF83ZMSZeR&QZDyiDVEAY=2m2OT`a$SZitek0(34j4M8zd zY%Y%M*1B&Vj#nopk#b(z2KuClwHWQzE*lhS+q$j(UL#P-lU>Xy)SYgsRds!2B&fJZm{4+FS{D9goIoR!ZG+3C<>==NLYaZX@aWC za36cA)7FB@bkA>TroZJcc^xqi51{N4#&CfsrU5GZwi^%&uSlMbld%|zqAg`y^0acK zNOB<(OOqBWnQ=5QWbsm?Etz|XK$dhdtohLyqp|X;@fx!+2NSLb7aJ!4?Hkvz^y(`O z=R#8Qz^90B_|7mO4|57XpVW>iVD7IH>tP~K-5%(1NycuN*smymt?I6CC0FYdU!Vsx zK@@AU6jyNzzSNq2VJRojw?>lz5C2*@ZnEMuEG!?!IJM<0)3QG^FD~cu#kuhZ8^j(o zlN}T^FbA_8E3?$R0z{6mA2YK)OV&I#V(L4-k*eGt0i47^|ZL8u7VBQ}UH7Klk%-{2oqKGe-aM(Q*PIH*qvC4k9DnS5Ue)~HO% zWdKtbOh@r7sq`^5&O1X;qOv32^7IN~$6+B~9?0@gmnl&j^(`m0VlQN9J$reK=KKB@IkG2{GCzeREV?9iJ|8(k7vuoegThm)#&UJ0i z?kekO4=-$%>}r6Zjo4!54Laj@e#`WuiZ*E{ 
zH2)&)wPdU$I14Zb+NJW)D1;lgnF?v5R|E*UM|4}y7&@wc6oD!l&ty9&FIp^^`^){+ z7W?)I^ZWg*FV3TJPl+%lQxX7DIvU!!GVQLq&reAS_if|PPxX78^W41Mm&K`D#${ZV zk$QKj5WRWv2wpB>7CRobxtU10tb38t#rgm^7y+U%aXm8&4LL$_cT1mwUw_%H(ORuT zXI8)YhyNH>S8L+LnAbfdA#{|d#tFiB^Edy?pj+fP7l4%!Zha#g&&#f?|AjF#C0Nn! zTqOr-1+skxw0+xGfD?Xv28?{ji=3XfG|5#!pXZdgc#EI;*_=+6x09xPx(K4j2hzyK z*pa=rW9*;(2YVIN!xGHKC>l(hZf~*fW0AGQ=StB z!Q=nTDV*=8Uw(Y|O`XY?a1}W2g16PbQ`TcW){|>F$VS*r+*{sa?B}#3PT7=E^pf)pskLxkfza(+dCpB9Y^R$C|IshOx!v!-{ST%< z+?kljQ~St=;@s8q5c--_RS^Zm7^zMm!=>r*hu8L8D=_;#w`t1CujKL1eknoXih zoz+)1nwYLP=&|k&3qFe(qz3_4{PG*&q3>XkFJ7My?iNy6z1B%(zGC~@jb+p~Q@qr07f+X!zsYX-VKpe}5bDF94o0Cv;ja>4mqmlv zo5R+pFOz$7dAjLVFTemL2N<$tLIEiu)IU^_u40tJE0D+k&|5d05e6EDDeO&bAbg0L9I1|H0of!oA~|0*u)c;I5ejL#xY@X~R=&HL6i z4(!=t1}+#4sNiy-;px(&RVX~`S~Y;!6iLUNU6G4w+Oc6zM6jA-!vMsK8$WK`K$_mj znXfP!>}{^*S!2bFcBq^%I#+-`{Upl;(^JO$KBgWz^Xc2a?RSGI{(IrYJle`E@CZEViii$2$vSJ`W;lU$afBab2PdY+zoecstaFB2nMT1Zx z6q-_7h#qF(q()6rXdWl7y);Fa9f351NSbui(o8C`gc4;m)kID-WfVab4M$Y5L=Hy6 zY3BubLExF5R&`h8Q62Dw7hyFN23ew)_4!z%i}|@Do;~D=)Chf93PPrsI{I0kVzC$5 zqM>ryX=S&;Hkqhvan_llk%^Y1a|tOqtE|a!D57(;4oBP$G8jf!S(YY2M6p5efanH% zR=O+?LJ<3uvCeY!!KYnmRV239!TG07JJ}(I{}MgvXqPF8sL-rqLA=36R9b8oUK;Sa z8}G68uCM|aD+nl{e*EzZ@PLk}V;*p6fM@_~tr-v^Mk1b@FfFt-bYMdedf106vdoww zEUd5+WR9_fYzK-a&t=!QfXwA09Nqre#GsIw1~JXNapBsA8=6v@0p2`>p=mrz#OtZ@ zeTlS3VtQm!ONp%1lA3E8(I&c#DZ8v>Su-0JkzpV)C)h`F^{lnOx~es)YBwrYQ!pgO zSKIM9&^ zLypI}A(GIT$xl>xtwv6VJOKxcEc?jE|99Yt2m5t*!&SPrQhDdP=+Ck!mnq~7;=)x~ z6jj3zejJZR@?>nW{C=G%zkKw}PfrFRj0D{8e$fMny}%ppttG>`UP+6a443A`gSs)i z(fveqx|^51$n`l)^pQ@c zL*43RG)Q`xF(^4K<07m=JUmFkI<>=*xJvdNOO!7Wb_&!Ktak}Uh|+q<+fOY>iONx? zvOdQnWx)Kkz3y>u6a^X@aE4~3f=CEqQ=-xZBSy5K)#f5FgqJs!;SO&qfxO^A zsQ`AT2b4U64YE~*V4WaNf^rQxGGG~?b!aph($H)+a?atHO9ka)*e;m_ns`P=1Rewl z2t&B0r=jU2@0pr6QdlZZsmg_M5Sb-`JRs1nV&~csTtuLSk5%!AU8RQG#*N)&kU3+ zPEf2g1=6Q!o}f&5LG~!jtZXwOp})Lfb5bUYgR?M+hZ8VMri&cI24Ho71+7y;7&=P^ z$}`T4Y-CY5laRR}r~)sMffCNsZ9VOY#VdSdO(fi>OHNn-HjWgdsk&NSLcy`nnRZkz zG>W8>$Wf(ObcjF*Vh|hJLjSZG5Tl;S3=>tz!X*Z@h%rH1M-eB2O;un!D1|IdTUsWV ze)NxRLT0MxQri>U~ zjf!fyjH2{F6RqB~HUEasim`MQIkTs4SF?-~r=#rzR;M zf}5sb-!&Mtd+hbLQs4tKWKu|45gw5+tYMOcD_nUTvd8IBQjvY>?jjE-44AFu{hnYH%a%a{aG{Gd+Yr%Y z!zd4z8q}3B`2-MEf@gQwWF^R{OM*1KVGGbw0xp&Y%QUDA1BmM>a9p2QiX4uUpxzWx zQbK+j&7S~Wq1>#sLLB`sNP!+Sp*7VPm*b6zA7xgf3oc*8bW@0;owTQIj_8i+DZZH& zD5seVELH)#;QtvNb+!k#Qv9|Aj1Qfm)#cSXp7;)@s79fIZ{0LIreoI-?r>J0MPavN z#eS#Du)qxwZSsH^fW^+c9!(7RnPa=Gp6GU4zwK7~gc5n!lPi7d3cbjuJ9)HZ<{yvc zI9-jVIKPoMuX2FQXHK@s44m5%T=w@ZZ>0yAy*pU?G6rYs&mfdD+L&X4kwG&TK51pf zel~$t#*>?raE!%VQEuFR&N>=aTg#muz4Sg)zzT@`*5|CLbh(ucyXa*K@gzNg??WHb z+fDkVcW>V8kN><#C+Vj*Gxhjx?Iw=!9WAuJb#EA<>tJ!U%Epd%1V(EQ&{O!Oc5K&c zzScitMgLUBHdLllce2-6&^B{4cO4?82GKHi8#s3+Gk96x2IVG#qy%y{mP#Urf_KGc z&EQ)mCwa5fTOl_EZRT65z*n9y1cA_7?V^8VF)2dzEs}*ps53mW{aKLWGVP-J_3ATr5PuBpz7fQSEd*UW?j#qr#gM4*ng)Db;&BZBeP<`XF z2U*d6IJa|b2z7-fUUYawF2pt$l^~i1I9#Z7)x~~^v4)8zbuuM?&i8fphktYgMi#JY zQ9)|F27vfiU?x$3bChcY!zZs+fqX}S3K%;>aZGgwWmRT@X0&&r$b@c#a4&;9LGpoP z$p3gEM}yR(2jVt@y;x(f5QZiwjH1K~+GBE8kc(bIa$O<_&T}kh0s-1FeMLBWR7EEN zgpJpzjWASG#Wp{vkP6`_j^kJ(EfOQ=s0rZ27V792xo2kF$8#-KjLiUi<~CO$SBB8j zG-rZ_mZ64@5+5S9TxTIaq4jUl_g&-Vbd+`{q>_*@H%03;hjwUw>bH=G2x*D9eaq5_ zZSshkcwlvuQB<%8SXTy?m~8>Ld}n8X2cTg`FtE|`zevyUlvTq5<4f+&!bHdRW9dQ?DB6SatXqnYp+eS}CvA@wM0fsqx-P~g{R zqlrWpppnoQ0$h*|BY|}v`BDew5=S8hp0gh#0Z4XmQz~h8AgPiENCIxyfTTE-MYTvu zVU2W11lHz&*0zd7X-%&qi$&RMWF-jqNC$2<1WcJqP}yQ_1%~t(p5^Ice((u=89m8J zdBK%~&5#a=;hJKpE`*YnHvoG~#ROBKmTSp5UPV>a8KAaPT6M`hhPi^0Q2#yk!B)H| zm?r2S>4|PGD42=4Jd2rp0x1E@$8MoEkQ!=ilf{-0>4PuVLt(Q+T+@fI$D*HkexW%t zFZ!X=DaxZuv 
zPHCZJfT3$*XEEVL|5Sy0IHN)bEs}*zm{vC8H!1qr9dn^W-2`;2H~*GA$0rL3kfaHj z773c^$A+Gvk>i0CKkAV|N}Fv%BzI7WNp*>3mx&foKKmp>!uh07TA5C0oKYYsC`yP! zWf#D4IkYFPJr=Hx<6{yLkdZk6Ug@63!=`a6p8IhP@G6FuK&LZ^MPsO^$QYmKF`rCk z8Qju$(}YxudZ-6`tqJR;94MfG(x{q14tY5r5_$^eCaGvjsg?S8RaCK!cRZWgsU~zn zc{Zx)`k@4>jo3F(IDnHqDlJdpI&?F1C+c09nPqhtP?Dyrxhj$AAvm|1qkssb!AhMy zL9F*@tXz~VlcKC{B4y2rUztb%OX^Z*@siKPuyMGJN#TUH6aQ&4^Z``y9S_GqwKuK~ z;{|3baSmb{(XdPH!3D;YJ^|1PxmgJ#vZsLX5${T`y(h2mdTybBi}{L}?FygG^sgfj zuzF;$2L$u@f3FAB&fr>sye5KHhY;j3v;L`qcBR5yIMr=*BPW$v;%`hK>CP6 zN&q6cbYB;DP}@=kHWO4^CNH6Dw|BPR`*1xbuC}DMG($8JA!O*QFhT=Ns3ib4F(K`P zKmj0JSFki&z_)#iR=!7`;5n(8fR)n2o^={$VFI64lmEEs;i|Vwpo$8)VVYD8+qjfe zmmblmqia|H;khX&x|_Sf9SpK&!kFqx6Uz0DqUpNxv^lctVZ2LG+~uL@_p95bXu#{D zFj}e+37VQF!-xoKJi5HEnG-3Qw34WZZ_~8&W@01Zn-74!Q>%YD)rk-BGX-PC>+5Cb z+rAT5Si~_-1`;94VJi%tOmDz!MB4cR{(1OrT>YTo+6S37VCsdyCS8r;_(^ zfBZ@ji@GA5p(|$+RO2D-*TZ=9NccmUDvKU9r2kDiY>1_5va38#3R%3vOT0acqlH6W zL>r4oE1S@3FVyI2%q9j%s&!B7I~%%2+`D#NRx_1_wvKQbUT`oXLNj-Owj1Fg7L-eL zydkkgzF%BU5~C0hQ^n(CKDWd^bezg~q<6LA~HF`>@u+9i-0l@u!2E;y(BaaM0tZlB9y9~ZI3S9}re z(2*_Kll|5!^`8yw(HX7L9R0wU?P2z(KXoYxI5Hzu)Os9~(kC4tdkM&Y+^Y@11}Gz-21Il=ibE@U+wh3XLrv5^Thy6S+(NC?VieUbL&VS93^L8!&F!*9 za8oJZo6a1?YP2`q+s?b>)(K-l<77?;Qd{BOzUAD-P9ny@^iAwrG3V3Pu+lho&2Ssx zzTpH=sD;5*snY*pj{{PI!585FvH#e)3DH&5&Q!wG(7?H-Y zrQWXr*EmBOFC8@T_q0@3j{IeSi;x2;@92z zFaG`C1|CZ*c-VnWsnK7!FGjH~Gh-*=jz z(V4FB51)bBsLkn+2#v7Mraju@SifhA$B6yUpWLykKDv?T>Ww*4lE}RGLlj|^sMm-T z5Ukq~HoG+Iq1KgMAff4C+F$wc^uSv@LV}S9huY9pyMzO_2?g} zq}nPip{dSF?V$Ja5SS>qw*qg++U7?Q&gq4}>EnXw43E=>pV<~2@d1`Jxu`rIVT&U; zSFD}Elef~Ntm37tCd0YpI@!@#3tH|RsXl_o z*n-_(*n>&XanJ8cN%?>u`20!u5U=SCuL+~i`0Af6zT*Y;>k$Lo5dr^{5Q_PDwUq!t zt`ix8XwVQSK>y$lIU@)W4mbc&z(fTSSU4gV?GTYMz7sESwtyQ~U9=}<$eFV6KyShzLH1hk<}L4ozBLTnrF;0W zI#Wju4^H^Nh&N7yX?c_2&>=-r=`v&h5L)zT(xpw4Hf7-IY6PrZUlqN&%C?0C*I6ku1n3GGlKq{kRIv=3tGp1{gZND(Y=44?x%XeAv4Sm zCy)F*r~kW9u)NO_|E2sc@a@CIpI`rclbked^v@;!2Bc}f`U)iQK>DcoiMKaaI4`g( z&`XcP^xU8$!wU zaKW33a4)E)(3+sEA&+#5ss)T(lF74HJ4?wXi-Jl@E1e{gthLyRt4kCRqron`BIC=i zy#j+wF)+O>v9Y)y$xd6>ILIWK%w?P(Flu_I+ zO9VIsB~65}^3KCFQ%u=QWfSQ>6)#j#!y~m+Rk-^O)bKWh@VpO)Td%zZ6`b|H1Pz2K z!2d@Al=N0#aRt`apQ?bgrwIj##+wN*C1{8WnS~ZsXpwDZ*=Vn2kCRMEG|^71WD{aV zM{t}mMjLA^*TzD2T!dY7WwJpC_ZSqxrx7?P63MA9;8#d}uf%dnt*)x_U$nj|_(>EO zK6rpF+wxMb#O{LA0x6n+49&gN%&0CW6k`mC$NsWdpgA);ku!75p@lXtUWPelneUjn zQI{1>!<&|Y{Ft|!ZFZFApu?c8(nxtA0_opAuEJ<*HHG?$%SZ|JQ&mUBdQ?}G+s10? zFnrZjW{6YjmKUw`u*-C%*G{5If#Q%CVN)?Z)k z;fN&;lVUP0w!%!n1lzdFjuoq+O~>GZQ-XymL+CP;H-WWDRa&vbo}m(1)2@|&``F$UuYx7F z0n%!2|8ey+0AZEw(Ff3bpNL@y>Kis z9CMRlig&z|)u}6;c^>p)mOU=oY%U(V81}lz1)2Sb1YYo7PM8or_jnI*nYvhE)MuTn zfya*T1BP-ubu|s)?|#S0l>W@-KSJK8ga8s?{}fq311|DCouNe}CAl)Z`G-?~vt%YU zxk>*dt6Jt0+y)7UsSgqWbPekO6-fBF7(%X>L!^G*Zm_S744;AK`hQY3gZ}Hv4#*{=W42FpoizXQ1BD|4B<^&6wrt++)vMi3# zoZ0)LydH@_F4oKgVRC`TeAWc;^{9;qd|D`T7sn?|%{X)%Pj%`jQ2(r*su`!q+Ial< z#|U1kY`x-VLJ7NrZ8F0?5MaeC8N`m&-eEKoc@fG=bE5Gx65W-^a@%%pznnP1ALnBdeGGte_H z*Ywiwpg6oDRE&yx5ht0jg2kPk)2v?H8P9mbD7LoMt%-nZT;U3-D$LNXZj2fm_6ac+ z%rR{LWM4o7J6KQrt$q*$Bt!S+(EV|=kY}^xxDx5e$70m6ZImm`R;CGF+My%3Gn! z{0i)hq}=2ZaV^jU9qqt5)iu!Ms=RArRJYpAy-XLZbV^xOoLAPfmbXMt+~!Bs8`AZ* z*S+aD+GUTB#txn*rbKOuUyH-H;tbZmeH=m`FbFloCU&tJY^;1K%TGi~_CFcj=z=X< zS;RargO>tjX*JB!)3yqxT1Zk?6t_y8vMmxPwry>5+u2|4_O`TLF;Lk^FX67vZo@qa zb9Fo=C>2+kPlYO6(y24*T6cG80vUSK8$}?IOsd~q9^%FeJ@X2Uj8=|WI?`&d%y!w! 
zUzY1_*0{7WLy{)-mBJ3IQmKJF>sr_OuV4c_V8t$!zyAF$+@aH(S~2 z9P>$1GEbrA4h6r)whFZUYkzLY+i(UOxQi2_gN`jbb|x^M6CCKDs3EcrejB^@Ebn`g z&kAomw4wFA0{x8=LJ}B2YP&^T9YBG^H`*=2e|kuT-{`k0zVuEl9V7R2L{gspNC+IA zFN%PgDm&J3kB8J;0jTL81W^oCh?*nJjFMu_>k2UPltNcrT%nEDkm0K$9mSae$cIVJr*NHs9?j5ZpCKG z66WCm zcO72wj89jvUSyLiiF!)@0(s_7om7*5j@5&idv#roU2MWUF5HxP6N#KW@Y=~K~c=94rQ;vPHM56oC5r(Nxn zh5OsHKV$Cy?#6Sdd#k`;m>?iU^G@-~@&A@p{s8nHaZiG6NirgM7CxT0mA3!l2_I>Q zUrzt|2d*_ziAQ^!q_aF!Bc0c=Z9cV$``yM`+9Qsh2;;XV`v7}w{I#A%jPRJ!61i~QH zr5?mOf|9cWOSgvF6tvsEnYuobL|9oeeM@OPOQgh1)Wl7^Ip{LF75v0d9J*2TG#R>&WCO+D zNCgSbHNv6$W&w+$EuN0 z6h)!)r|Gl1LxYwK!!V*_gVI_ETa3Gq+&y;_AndzED*VNhWE!nf#kRP+Rtdvol*x%$ z#@84_GGrKNq=nW<024?dH~+-6Hsr>gI=nd4M*C|qlmJJjDZr#0swI)OPt!eqiMEz3 zxNI9HsX#vjBsqwP#~@I-ynvWVnGDjiN0vM}M$|`=;YZd3$g%Q_&oIH9`xz>$%ey2x z7*t5TtiitAORG#sCn!bFy2`>##mhj1i&V_TjGCt$61o$dFHy@I_)ESdNhm}lXKT03 zOv%m+#vB065zvB{WWSj-&6;$Ln*<`ANW&Fh3ZJQX zk5Y^${TMNlV58`QkDe&Lg2BTc`pU4hfP3)?)?$$BnyN$0J)nc9eQXbZj6}Eujh;Nm z&;(4R6DD*N&&u>m%l`~ahvZ8Y+{>u6HW^^SiA)2;WXyfT&(bOmJ~*Zhs(}h9k$kK+ z^wh=7oW_}W$OIKhlQPJ;mi5R?nW&1CYzOgoIEww zq=Q+u*$K^P%|x4 z@=VjJ0?si-(^V9X`^?Y#B#saG!vHnI0Ugg6d@$QEP~GE92W8MgRnS8fDT7Q@Ma@J; z#Y9XbsS#zW0sk;jh)@X`Jxba%N_UCPYqZV#>yO@wNSM&YbxE+{X{l(hwL%VqMZoJgG};z0|=IFx}I7iLPf= zATnjsH9b=`P19hy*7p=WnjqGlDAsM=OZ(Kw{gj{y1wRgZ$vT|@3}7y&=zx!uR&5|2+bFYH zq@q+U<4FZDI30{wjOEmeT~Tb5QQSnlPDNZDQKeQ5mRjWr!L+ztT$d(P3I_g*Hf8*T{T}<@CdELBDH z%(#WZeHCA~rQ%$iVo?O%($(VamEb)TTOlctEiIioY0@vIU%@?5adbTWeLNNQ)B^li zigM!HOt^{d5%AqUq#)7<2H!X~pvfzN!9>+*-Q7Z_Jmrkw3r^xy)!Bf3*+tz|690B# zFYa4DtO_uUk{LGSEY@Th=Hvq=Ua0+-3eI8roMO7duncN}AQqw_762pOmoGNtC9YOE zfMP(s<+Z)y^5x|!el_-F-{dW2q}^n#y?`*rp`CEV?ApV!sNWPC%xaWP!#&(L=1o5q zmZA*c1b&a;4BsI&3LxQ80mS3;Rk$W@K^zw5F3w#=uHbO(xw(|*5{z10mE?P_=hP(& zbtdF~W@mq%WE0+GRuyDnLSzXZW*v?@ROTD@YbsYBq9SIgRiS z?q&1M*7e-wVg6^K0_c4HHFXeU8>-GDzPx7!RmbCBAa7lFMQ@$$#HVSHZauYOV*(&q%U?Suwq_FY{^M(*TR z?nhp3_^Rc6z3YztYrgJlzxHV2#%_|HIr z((&hz7F1I{%%}si3&(H{&hX5dx(@en2`yZ;)obe}@e((2z7A|YEom4Y>yKTL<|ar} zm=cd*JjJbM^j2g11Hh$}6u{0XW;1F!?(bL~$K>@9%=YS3RoQp`WE!^O;{ER%3h>#c zasoH$@O(#RYHED(2Y;AyE2kkf;vC?vyxKEe37>Eu$JQWS>`Kntigt6nU2G<|y*dxN zv*l<>$@A*w^Xc~UKj&ftXYtjRwza#YW(W{5Rsp%RTMSk7BLAsT8Fgdy=1qm8v}r|g z_ttD^KIb8S+SB%P!X|-=9`F?(>dlODu@>-E|8g+*=dm!&SYB{eZ{p&W7wDx5RX=mG zPIHAeqEf%=R+nv8AMQY3-#~?K*3NW5U-o9d#T0LHU?*mACUPMpg=^*%p6=|CHGy8V4@h6n&;MD|Rkzl9r}KQL^;wVYy0MB_ z=XXgaKj9SZfG_ZYFZkkC^IzBPgx__kM`|^NH}8#Nim!O6=lZTU+IYwKXMG9JJ~35b zg^ovikI(jT{=K0;TZ~Kj8_)4^pLysC`~WakjP)o{<9;M zfqn<@Jw#fHO8mti+M}0cRX5{>9U@eU?FOH~e*Zi`)dBF$=lrLS`p+Nzg%^GH?{z;j zeYNfLUWfnJpGvA9^pmz1p#9X5~5grXA*&z?JhAj#=OC{Y+adbG)SvgMej zPM@kwNf71KJyzqaEMkHMR}L4vcF+KJEQJYXzlH@dqQur1H~Ycehe0jfx*zW1&8wG0 zh`xU9&aF@u?clN0(u0$bRFZ}eGSwkZbVYX)P9xG+qI4w21Qrw&dI*(w zEfsj7gaajoMP*`5Miymk@pzw2amnQYV1JD^B#}BIwpfl!Dj7kNPa>(`Q@5e@m;#?+ znWTUJ_1C3{*0C32d%M{u=9ug8kmhqau-V+2XPO!3bJ>A6V4d(a@m_s<>e(llZ<@%b zpnfvvBY#|y=H*(4rZruRhG3|mr2h<3y5UDlb;#m`8HpsyhBIb5p=N%`*=Blpig%Q% zDAFLLt2MfmnXE2eC8Mp6rnD7hPBJOlqDJusWMD=f*(0$=_6lW_$`wx~V3hWdg-2oZVLUEw|#1OYW(JZX3pTcXHBhC2DjRD4FP%dLq53%6spZj2fC~ zY5W3cY=CO%#-f7@A~>nSo>pohh7W@jai#+G%Gy^}qh28L+^1Ifogpl_I<_ z-W2n_H^YCI1gfSm-nwtS9S54?%tyEEvZybv?4slY6J2w)H}kl2vpg5w`L05r<&Qs} zZ<#03WG=ciwW~)BG_ZxnjWyR-Z(Y0Xvd^w{?zF=}c6jRgZuZ&3vrT;Qrovvj+={wg ztXQyJ#s^gp=IwX&5{KmQg&6|geZ{xRICxcur&<&8`i}KO9U8OWzJ!rWp0cL+EnEMR zA!xoi=AU2Nzx|-|uV)`4&9qqeG|ROPQT=)y)xMT4-R;h65S*X{D~L8aY3FynyOZ&b zXF(5k@O%nn9^5E*tp8V02UZxf-t`_5F~_;AecWSV->~;E$HmGr+PqI{&27Y1QiD z0hK7V<%zI?W4t38*T~B{7H?e{gwG$`^@y06j*TI-qc58|J23XqgkMYxCebuFiqKGS zpt4mWIik%&3jgtuk;@e@5m%JK+{cwfyc_t)Stie+@_wWQpeju%v>M>k3i`|w{X$vG 
ze`@KL(S#oEe(B7E_7b7YBqN(%#6e>sQ<=-0XgPHW&4Id(bi8Yv#1dq~L@Ls6jASQ= z#0N9NZHx(5(V&asw8LDwhL!8AkT+|(((;+*Cwy6DEQdDFcmma*K}}Zal7_{9%JP6v ztENETHc)Lbbe9QLC{?RE)#f;~Y~c~6RwZiHip~*)7%ia!{Yb@t6-;~}(OU}%+0BoB z&!mpDN>B(A(U%%7ag`hqCQ*v9gV|J$ENzJnYIaF@_B2PDt7l>xJJctVDw%2u%<7V= z)W)O_m;ag#AV zny_6UAPeeQ^M*F6qx~vXO}kapQk1n}ov&)uJJz|G6|s4QqgvZZJ>CNNt=+_wN+LVl z__8Xq{=F%*Y)ey(Y>AwDp{`=%B2?@?R*H!_oyf*g+3$Xrw)iC|2|H_1Y}gRv3LV`K37i94lG$PA|V03js`fSmMR=e(m16W=Y&US zf&X7iQ#exycR*crPz@+exw8vn*omjyusZo!B`$)OT1?e3d7+wD6O;JF>|HUG!TP!w zpBAF2^{S0q+~?Kacf_gF-)y6HHBJUOB1SGI4Ch+pMZ$H4-ZNwn$)ib@5{0VdEC77z8TkYo^yKtT<7)x%e^yh z*Ij?z>l@?vootq7e)apuw8k*E+8e2(zxikZFB!N%W?wilEmoXGm&#EAta5$J5h`T4 z)JUT6u2dc7#ul~ArYYd8>zztlE<1U;&h@TU>~CKK{DZ<)35_)!@Pqd`-+Wv)HUCAt zW@t+<;u&kJNE=;k3Q^j>Z~ZGtQu^ru>NUYKmG_X3JZ(gL!7FqTaKU-Ks8XX)Af-p}zPxof4zIF}Kdn0++Z$ zUG6xvWy{kY8mi%*B{Cb8-j~ID-kWG^zVrL<{cd`31K;>K51i2j|2^ZGZvU>x-@eBa z)^FTY802VYUeXP7m<9Tz6T| zKK}PsKICz)`}+$%|NrK_FZY8Tu>oM74WF?YAMu%5Y&{+F*&fjVO8Ifz$r%~8;g;6n zO3Bq!_GF&N#g~Ge6xR*TB<0ji&>tv$z{>&10J%ym-CfP;UGhC0{=uN{=^y_=8SEHc z01DtRfgAzSU;*lpz`zE>DPWhyoN_oAjrdgsMjO~AS+;eAbAeO_hTcT=l?P(q2aZ9T z@mLWCO#aYU3d-LK+T7AO9M8RA48oZV?%oU<;EOdBS49*YDjpB|;Q!(kowAwX5Kf+9 zY1R=w-x6}(>9G^GAyV`$+4vk78&Sq4kzcnV8poU;PDoy6Ef0{`2mH}qCgxmH?VJKG z+~fV6zLi)gt|1$mVh`?)BgmoQA>HrP;VT|ooAn7O>S3YzSo3`yM`4~tJWdBb;uUfV z)(M&P4cN*MHT`~!zGZwSYitR2n%YW{4rqt?cFDeV%vP3{Xeo$Ac zA^>*P4RX*(79UrF7Vf!V8A@Fkd7cBJa$crm$_{xkrA5kP zCr)GK$(26pBJ~mD-&o&iYUeJ#kV;7#MhKR1@x_;vnJ4kp^6?5-4kd2Fg>Dk2EbXRl z##4L}nsDCXMh@q38mDu%7ThrBU?H81v7>on0^fMwE=1meV@W+*S_3Q;173#rr*0;!?&OKiqk4?fBc`38P9P#8>4ZAkc}A&}mgfzn93T1& zyKNT>80!{NA~ja(Q3}faUYx#H=h`cIo)n*UsO=LEW>unsFe78`r+>40G9g27TK36L61 zBfy>p`}ro!Vr;dNRg7k9sOl`k0-9%m>%%f<(uwP+uGdt4s<|F4x{4(@dK%i*Bn7%_ z!z9I%a@!oj<~lZFt}^OVb+b&$cblSuXObE|C^# z@EQ*6dN1}-;FYedt^U~IF6zbvZUg(S?dGEy*z0=!aC@ci z`ZDWvy6^C6?B2%k%`!3GdhoU~?+5d5k8)LI4s8^NFs?1J4AUyRjaj!Q@VZ*vr)Z}R z`>G@AZoQi9e8?v6RPclzVE+;uu?EZW7H97hTPp}d@g93BbU-hv9B7Z4E(;TKA*(4K zbFtF4Tp3zZ7=K;GZD-mo@J|LY+(vM0rZLN~YbGl$sBkIqdLk?r_HnJm+@w)~yy{fS> zkFZ8!viHubCld*)HPtjzvmKAKH5)P?)2|zU?p=BF^u}{Ir?O|g=rwonkm9lzOQUBx zUoSs$6uK2@HYraY^#AcDs|0tl5_;MvG(>ZusraxjQ#4j!6UC|JTM9E>hFnzYX>cfWcDAudFEU9Zaq;r= zNvCvHpR(W0bT*UPKzqy|%km++AVaf<6B~{wBA-g@=Q&4n{W{n#S9Dv2L=$Q#p-ynF z30iY0Fyf*M6-{;elyprubU&{&SF?1l^(}407!?O{8YrDOV{;G^wpqXMR_ESMn`<45 zY5RF)D+%>bH*;FDbI0Lz0WxBXsMosDldBANHET9z7xw*j^^A&jOam=Kr&mKuHVl^Y zZ#$?t&!gDp6aVQ6*>NLx8AIX6Zk*QT^_?a2JU_Kx1Dm7*&|17UIkR;W=doc=c4FW3 zXLB=HzcNg(@ZGL*EYELjkFr>Tvd^lsaxXU_PU+%4cMWT_bbB->;S@AObaun`Y;(7F zBQ|YMka&|Nd6#$p*7ipKGkAk)fVa0w;&O}zb#fjtA*t--MRac#Vg7LhJS{A8lomd71xskjM8# z_iC@YxrlpjA~JbTT6bXMPgUderVluKS2>?UxHlrOf~S)=%Pq2bIhyZzp}+WxbNT>_ zj!C~a3CmoDhvuUb^IevYT>qgXE4idwx_kSSrpr2_d-|uZt)YkduBCS?zfp=bc9*O7 zW#{y=r+0XJaH99+oP)+H*W)CQAMT!SoEufU_PUxQIj~EM`q6o2MfrZ-Ii4eXvbVam zD|C9oH>q#=V@Ecq&!E8)dhK92N&>f{8~j6ateQ7&*X)&nndXRhI5LYnxJSB>>w4rt zIsdV5FuWi8yjwQC2Ydi~JC}!th?x2}pLe}m`JhA4dP{hZ_jCxBaCRQnzBqhIz3zxx z*F2)ifybHaLms- zOqHfkOArutC=N$_uJi77?6K0Qr9JD2D7wA;O( z`!9l1y|HgL%ya#jznQ{=8mc#ZQ&er)UvdRg`USCl+xz%i(}kUDw*kw$$FI85YxSy_ z@Chp?-@CHElWXYzeV_k~s<%AW|EAU-KDT4OeKop#G(Pgc7>7gtMXR zh30p@+#mMf-#ydYsJ;nH!^A*5 z>_XuX{LIAAMC;5m{`6DLG#B$@O~x7VyHCLzm(vZ$iE@+dCOEAb5eM~{e z-yDg~$wRo~5IgOltAYmV*a}a)EVpz|$RUaBkwf~tEYeIc!yF7v8~;lL3Pu(Ij1xsU zPbBa_2=FZGNFGya!@)HG^|O#jCLvTpvntFm(M2tE?54ROWl+*cBlFYJMiq@?q&DdU zP(`Wcd^1!eId$_z8c#K~MmAa9QA|GfOtU^Cf&9`uQj>%ajwi2+Ql~?xoYKlGzvHq? 
zV`W`4*;CNL%+XBpYRF^&-)gOQ0#5LP$Rb92!gO^NL+J!G_HpX0;1eRAQyOlWDDv4Ed z(_=M;mduYm=GfMe%VajzSxGkeVS)t?_`iWgb(u7ky*0yKoBz)RrISM8J@;Kj1DRLc zp^1)A5u=Yzx@e)9?xS8T@YVNa()!(3wSTRi_BO4#Hdx7(!B+UylNs)oW?H+3Sm7`% zesb8bc%|}*j6*)VZX@1BtaCAp(G51qTb=_; zcDo1yFnZ4e6aMt~I;mOjS8KtQ_D}`F{D97FLd#1 z+81IXv%=j^gBjf44}ti%{)x?b6b#^*0GB%=u}yfttJsu!m9gwiFo^>68x^f+w-Gvw zf3m9{wv^+PhHXzFS&Nn9PFO+|s&ITsbKwi&7(+SAkcOjEp%ij8J!|psi$3(@09!yr zwF%OQdlO*G($u>sMoV~)@YNWLh{Zx?QIZe5Vg)e?$V?*gkDcV8!Q`Y#K&sJ%K3gLj zS2#x)wy%yhG*`P8lt&$AjgPa$U=X|bwIS|ukpGi}Aj^`~L?@1=k;B~NC6O6PM?tTb z%}gdGMHx+8PBU$*>tQZAX|`i_uauWU7hw4C}vOFS$kqOl4MmYXQczY;C)u0 znMG4xpD9$kW>>qlO)YYd8{XHdmat*~SwN=05-HBuaM*E)g1znKnQ!3>T*f+}sY?30|8sMLvcC?{oFn71BrsR5&y!btwfH2%z@|J|Q z0K9D)y-6sYxtCm4jqkcD*9F6(*izU%Cbj?WTj8e)al-tiv4i_t$PaSQNJNe0mI6w# zixxA%I9~9BkxWS!Ll}u%)Fy?&3sDW<$xa!T@>OcHt^Q>ceBT<{h)G{v&)y|b+I4W$&6ENSOuQ;Dz6>qN^Pmn ze;#y*f7uh(E_w`|MsmCc+9pfic)6V(czt)>=_J%UyXP&WS<_nOF9$o>AwPD<)A_Ll zo4IRgu2g5Yo$Uvoom77wce(#(+jDilJLm9%biOn4Fn$lszwz7j9LM~!Qg`On51;s4 znH;pc{`%uB2z<#$e)3JeG1JxlS7a|g=bj(sNJT$-6L&yGpSKpB?xc>5pmOad8 zujRENUiEqAdVF|)@@Vao*MM%np^G2?j!_fsnYX{qo8Ni;Z@$CyoG%)sFR^YWXeiF% zt`9*jZtsYI`%3NaC`<4L&#d~7Z_012*lN$@t&!Bv*4|I}h)@1l&iF*|R`{*{VCMcZ zBmY#8^Z1VeT#)}HE%X>p#Q;##%#8X9&~xmCAf9c47_b2y@cSZgxRPR3cuZtG^9PEX9^ z2g&ULy>GwH&Gy!<-S9{;vQF!G!vZfb3ac=af{z2?4+N($p^nG$fH3(`komYU!&iZc<{2#ehZ=kue!}FgXrHoF7M179(&WN{X$FzmvS{UFb;6cO`ukr#cj9T&jao=+HuF%{G063s9f z@ll2>jv@l?859384&RI#1ycN24HYZV%k;2=l<*cQ59%7S93gV-{Adx^kr8*%9XHPl zr)j2U@c+zgLaMF$@-Y)7=PU^0YX-z0MR6cUu@vY~nQVxsK91oH(w9$ZYBIROj zC*5xdZxSncQV@&ECxbFAE0P^?ZYY6CBxsNl;}S&Xt|{#jX<95Xq!J*7uq9nGRF+8Z zbPsN35-fAl$aJzS!SeEeG9B6SF*C9q-_j^OGA`@#F7FW&K`m1D@*jb)FDL8^`HKRp zZbA^Otz7@{pVn_I!}1}`k~ISK37scOVG*+kEZbR-MsP7xbr`04-5lz5C;^%2nIY2 z)I}3?L1h#YbLlrb5<*4fJav>F8Kew}b3!#VIhXS8c}L`>i3LW6Wem9s;cQyQNq{l0SX z^vn;L)I?)(Md?)dz$Ug}luxS^GH2BJcxp!xHB}E4+yuo(-=|R>^-&*iyd;$mdj~a> zltl6LH96HzGu3v$DqBKzSOHZ*{ZyJhq)S&-Rddhks7~9+G-+TpR!6Z&YZdD*6_RZA zG{JFKH}zM+H9+a_MMafVi`7`s6(H%-H@S2!m-StXu~nmWTKSSgA=T!lN07Evr!fC; zU$=2Jd9_moR#U^3J13G#iFH)bbzw_&D;}y{3pHZlbz&<}LJR>&%@hl-Fikm@HfR;j z;51w5&0qPIKYjIE3AR^Nc4b|vKoizn9r0Wnc4iwkgKB0=CDvIfmRYk;UN2U1=#^R% ztv+LpWSwwsa|u`^)b)@hrTHmxz;q_kYCHb!YS0p1ZFwU#4swrjt35{(f! z!sas?Q3z1Md`7~dA{fD!n7)Awo_SOFMVG;=3{(aB;n zbAsiif`4{UF8Fm%O@l2eWVy9}lN5yeQ-n!)caIoTjhJAAwPiXtfffI_5ns@SR~T@i z%4gpRl?j+R_7sjMIf^kkn;Up^WmsZ8Ihes2 zl>3;NMbU=|nPiKsCLy<#+4&qF)|KITnh(h{`*d$9`I@!)k`Mntm+KgdC0LllS$@g+ z`>cjsj~SV@cSIApomsh!8BBSv{pIKPH;Z5vvqVfvY8`l%V3kff%hO`4)_T0uK8mn+$Eg&3$oM1G)`JrN}@pHm4sgCB_D z7}y#a+BzBB8m{H~t(l>&m%$nCIzGtulIVe3wy5VdKij9u@n2S z9orhV0UW|%vc-Y2CHt}i8?rU~8!+3lE4#C)VH!eP8b*g(v`M?PPaCzXLA6txwOxC) zUpuyCd$wu2wp$yvaXYtl+qQR`w|#rFe>=E^+pvk7up9fhk$bI^n;431v6q{%om;t? 
zyV(9kx~)69p*y>U!56l>yMf`mwL84U+qfS)vm^Vw(K{Q|`x>xez1!Qf%OSqydmQK+ zw7KEF-TS?PTeJHiAOHX%`2+iOG`*hN<&Ob zNK8&mOooR|O-oHtQcg@rPKb$5J~&TKOHWQrPft!zP)|@*S5S(KQBqM+S6Na@LQ_*v zQ;v{SR8v%0Tvk_8S1Tx2R8Ch{S65qJSCEoeSX5Y8R#{wLS(24nUSL~RPg`18TU%ON zU0hsKN?c)MT$Y$!UtV5ZSzcaQUt?upUs_;bUtpV^VPIQgVP0ZnWn!M8V=pdaadBj1 zU}a}#WoT+;p`&MIU}$G!XliR|JTz%(Wov6{Yi@CDr>bpiXl`z5Z*g*QHZO8>b8}-+ zb8u{RS4(wtaCLTfb+EH`ZD)6Oa(HM}czAevdv|-cxqN(ieQZ~KetUg?e|~IUfPsI2 zyS{?CyMu#(gM@^HPeX)-goUxPhK7ZRh=quWiHVw=i;IekkCBazkB^X$k%@(ok&%*D zOp=q7l$4g0m6(>6nU|NEn3s;a4~tE;Q4tgNi8t*orBuC1=FuCA}IuCTAKu&=SP zcWtq-va+$Wv$C_av$VCewY9ajwzjypwzs#pxVX5vxprr|Y+Smyxx2c%yt}==yuH7^ zzQJ>4!N9@7!NbGC#KpwM$8cT8#>mOY$;-*h%*)Kq%+Alw&(Y7&($Lb=(bCn^)7I40 z*w)wC*VxvjAY4^C{spLxsqi|moK5oj7hVmO_>oBfhzlwc2_TN9KXoHzOn|7?*w{7Ffo%^e< z-Me)2>h*=UFIl_-1-8PqOPABbfBnMsnsw;4$JruF-bT5bkW;J1R+_cx*W6;uo?W?hZQ7r6i#`{cx4Lw|gA4cld!2E1$CD#h&V0E$=kKD! zn;x$^_2<{K%d=ju`#gHzw~PPhJ|4XJ^Z4?qU(de%z4Y+k+nX;xKKk(Y^W!JazrX+T z|7jPXe*P_%m|?XLrV1>%(4wF#3_>W&D}$AyMh!7AaY!bqtWqI|r+8S(f*po3A}1(* zlA?(!QliOt)s zu6Em>-UO=KZiIRUo1d;Fis+%X0ct32kZM+FZ^R8(DW#V-SDdDtLMNSopMtvIr~r~m zAA$9ys;YOUuG;FW1B(ADYpkWx`l>g3(P_(s4))6HEfMnCh?YWBK_!kNHbP3WB{It* zh%(X`t&PyWNUgL#7ILhQ*>X!{AV?lLWt8JWY3{jdmP_WF>$baY4{WCC<&W%sY3~zp z4t8f@`{EhazfAS1D5HW7I*!4TIx6s@3lH2V!3irIF~XQuoax1#W{lm&q0Z_tthJ)* z>c}LYYO=^6+p2QNC$pTgb>sA_*sc`nk|f;dj>`E+mlWlDZ>s+ z{P3f4=NzG!h+JVbKBR479l+O1)HgkcQ(=*Jq`5QjI+ zp(4dsME&#-h&~jeB#Ai5Muz8zYNOu=dliZ&@GpR&iy{C=$$-&4;fgg15h}4bqegUb z3`ct97t1v@G1^jsWGo{a)p*7=?h;OJgySYlC<{H{AVZXhV;A`gtFBHjRQz0TmOg*L$kS??)Ap^P5j%xIp z+0-UC8EMHMiW8jXEGH#Z+CFsht3ye|92`FR&e6s5re3foEA@#qfBN*FwG8M$!AR7E zq7it-I_fXk)lgy1&l3@~1C`2TN5Uxc7#3}2MnCFIj(YT~&!izX5lL2&lGLmv^(IT# zn$os@^L;K=q7v`WznWUWrlAz&M0JYKd}dK;P-9XTzepq=Jus+3C1_C*%GkylRH;jC zs#7m92s+Hc4475uUay)}t#nL(j`p*Lkjei! 
z6lV;=pF5#w*9KStpQ;S57F+q(ra@`2$c^BY4$IucDpr?|bu2FzD%ttv!3HqMt_ilQ zKX|!`L&ig)^Sp}M(@OKRp)D_1KRVWsuJ*mIMX79OyVCiNs}{ z(fGPkrnz!nu1u~M3wyifI(Vqh#aeWsORNbmv`hJ`?yeT`gA{rX{EWiP79y&)N;n=zTM+<6Pc3(|OPJ#j~Dwoohau z3afMxbW8_bXhV~Z$d6d`qIsH#M?d+emIkVGt8C>>XHCn&%bzG?AX!z|!Na^g^|qB& z;xxND*0UxptzVpO70dYDxQ_RobF6E6&l^?hz@#Sk+pGQ(8PTLm_7#EK*Vr{l(g>e) zr7JD%X_s+ITGq6uxx8(7i8=@27PnqM(JNN(*fHmJG2hV5?sl8}knsP$b+37C?|Qdf zzUy!ZT7=E-+@{*U89FBCoS<7f5>Uw>TDGILRJ(*LoUjddI2j{;O9@B!(?Ui-6n~~+ zK(K(OWWK4?n^=cZl)Kd+5Ba)fE%LF?I<+ZZxx8P__O_QS=DQ&$^`HS16_ z*vUM5JQ}-%^ZfO)2$lB)5K4_#jR*XWU;ljAFm<><}b{&ecVTODP~9*2!7#rb|@!)X*GW32Xm@dg0Yo;KgND4Xg~0hBl**KIAR2P z=ORKtMHJ$9_@^}cXC=Vre*yS|14w{ZV}KGDHc=NkllOc^;D9cm0sk{(nJ0na<#82w zfuhHOSeS($2wx$HdLu}JzGhNkXo6pOUvQx>%Cv$vQv`~}32X8q9l}Ii_XstZe|D&U z*1~@V)`Ns+OF$@mLP%{zcrRndfSuM$(T8#WwumYa2*3Z5ecES%>ZXMqSc#Olg=;5* zURZ`;IEJ3saxp=i~bjcfw*`Qr+|_Nd7U$c5*d-$NN?QejifkPspthbP%Y)?er=GB0@im| z#9Zyze?JJ1xhRj#b!qe{U0G%(PGpF|D2&0RhExC0U0re`9u{~yuqHUCJjrNoCsvS` z$dC$|T1bg@nW&8riBkKZ4-sl5}_mBFTFr*&;mH zWGZPSYxyL9_+V7^k}zphSeJzONO?C2dsg6KFX$!w!z4t|c-WRUlv$ZnS(Q&|UpkSM;kA`Z;FZ#+YN}*)V#x$tAYiZ9due%+ZF!6D zSc@z^@IiBjcn&*k0t@)vE*;qwbM%q?prYWBtl$R<<2e|h@(r0FMU<5xB18!-Uhl!X* zxs;5_oC5lRJnE3nIa>=lq|-^Ac*ZB#8KDiromp@?_QxSS`CI0wdm3s*JJW`M<)Jph zq3H>tA?lKHiDhVFk0hXTCyJs>rJ^xqOy$Umzojh+_LEmaQT!QqILZ?x$34i|qqa7n z3E7+t8l(z3s6-m5V50Rr+wRwra#CVM>7KY}Tm z&T5utX{pjWttOeN)_Sd<8m4C&leX!t3RSA7I!E*8qE+LnQUYl0>aNTPnWbl|@`|ek zYM}O-uf6KB`ns!T2rTl0L^l5u2p=$FJqM{IU?nZ6uzhu8#O1Kn8a3+)h}qhUMG%xw zm$7KNvGwV(b%+Ka>m@K{qc?h|?+ULdo3bj)r%u_jFFUqmTedI@tdf)sit4W+!h(9B zTikkV|CLNP_^fD|d#%H4MGLWm=X-*OtzxRJ13Loh3O2ZRVKs@Dg0YpdFlxx82Cm9a zrE^@vG_pmRm^}KUUOTT{tE)15uQ6-3Q8~J#E4pVJx-IoViJD3D(+FZ_eZN(=XSq7% znzwq(x2_qnxihgzyOUXhA)a%%i2JQj`?xd8p@pkXSlgd_w6%Mxxtlv@0vfjaYNTpa zy4ah&rn|kTi?$sj4XXdUwsNOQxhz|~`YORje8fnc#DzK=Fp+|l`*Gr{ygqgX8xVmE@B-kaz9mV*I_PM&i@Pa# zc(Ms`rU`{fHE|e=qWtTVZ7_ZLDW@IFgF3v!JdC+Y>BB&r#|-SisH?<&{KtR{v*|zz zW3(Y}Yseym$Y}paf#X|0u4}#>jJ|d|BJq2>u*1bA?4@6t!b=CdgbR6wi-5R!s-&O< z#HfHbmscl11pg~oPfW*`o1^XlzM8jrv6U z$99azLM)Bq2hTK?%k(_Ky?oF3EFX;eo5kXMNm!o%t!l`e%mKMcrr1Q0oY1B-yQ(A2 z(~PcpSX^0}$tUco^s87a?6lo1%1sNdi_5r4K+Zd2&it5j+dP2+$;j;7!|wdf@@!Tq zyF-0k&-wpc(>ARt((sl1EX+CJ&y$x41N{_pw@Gl@%vcJ|kk+u4Jki#y$rlY_p8Uq? 
zveDm6$~l>=gFBN$W_tjsmFw)Evi!U*ZOdHv(vUOLp?lM6ozFL@(P`YzLjcs`%VW27 zGnx~H7T|LX9nrA3)J)yf*KE;GZ8aM#)l*H?RUOW2SeKBz%JVmh*_pMm+|Dgc%VzD6 zmwn4WJTgpPIkk;3JP0@Qhzk^LRCB0>c zt-ri|CdG@_#z>=({ne8l*0lP=WS!ZbyV+^&+0KoisB_q*J>8_O33L4yL+zQL6JQ$( zhqC`I(X(CKCJdVkcH2=sal8G(q!#mg(%%?lKiEoaEB+%tyXAj8~HNx{!; z&-Ty^r&Efw`KShd*w&rW1ij!f<%St|K)eSBN{vPD8{VSV6Tci=uw-C!^YEPfcn zn!zmSP9Zs-7kVS#&7H4p;TMkKVjNI~Ex@yx-Wsde3Ni^L8i5^Il+4%#2$cs>5(=`!T&m)#TMZ&74}vx~`0*1pxu*MR-7whg#Xf_W_b;UNG1 z&5A208M!NLaCIk+YH+}=@!jHMPUdAk)@SayP1(R}4&b2P*{ltNan1&&Uh1WN2`b&_ z!(F$jd;$U&;d2PBe?H-UPTTUE;T!&4g8Maw9pXwj;&x3(k&c9fo5fyD+?S5&m7U!6 z+MKE-wx90bp{^d-(5!1n>W7T#>7wcw{I9Lf*ox@X|M=> zCZm!Lp<#_UhRP_Y=lmmLJpyeT-Sjn?mCy^ zd;a9;uI_;z*zFFNRUYs1zI-`XvGtB&!mhypm**n;%Kh%*CEM)I-sx#xZ`A*O@Ccv8 z+JNmhtL>I`Sv)W22RGLb5Aovak0`I>LsQ`w-{;hfl2U$j9I2CmTj0_?yjlLy{0=N3 zUF?%SY?VHcE6wbguIU2bl*>)TGtcHVZ<$W)ZDHC7K(C}~_{atj$)FirJ~GKlkMWh9 z>k7B)i2m{3tmq|g@?_T4S0CnLzU(dU@-H86W$)=Gh4zwPnclGCsUU#d&c@Uo2`O#o z;eI)pAcGsnXcnLJdB616%%~zw%#S-vPhCL;Uz= zU&NFD-)qnII-gmuFOaYb_j3j5q2r3lQto?w^rvs}sIQ(K`hW3=SU>;G>w;L58Z>_* z-_gNH0<&*9d&yfen)|xH`v}SSW6!d{ANs8R8EQA6N z!m^bMm#&AkBGQuhis2^}7btAZQp#c}M3hdgBIz;J$xtXws$7}Uz;Q ziDK@2L`mn*oI!=|0Xh_@8lrUoDMgBdL=7frkivPQW$IO|WTa~SV#KS}D`3Tj9ZR;V ziXu_ZuKk+U?b{FYWtXFU&UXL0+qiY_=B>_m?{&e2 zvm;)7xbb)HlrK--oOwLx@}y6vUOoDC?C`Wdch0@L^?LN|g(pA0{5|yP&8x@9KHq!z z@%PQAU*G(#{Q3^di;<+{xWf$t&uBwIGuU9TK`$aiL@~$~Vl1!41iT2N z!7S+LBb`cmi6s(i8p*^Fd7?>0oLKa!D5GGM(WnB|NQ;H95KOQvuD0s2#~jV#QAo4U z3X&}(Z{%Rf5<;lUE)Dz2kd{k+_&_6#;J7QwDKD(>%gQnnQ!_F>Gcy{|(o~Z*Hd}M; z&DmkQo~GU z%reh(cTLhxlef*+>V+*%IqAI9PCWbdSI(;9W`^y4u`U6)jb%q>@AiZBGpiY9B=gVqsS!SzH~ZKZi;T~XY%*QI#% z8CYO}EcRHUIUSbSuBILGS+9C%Yg!>#sJ7(>2nhc`0ByNttV$G~vQ}KJy)^k;b=mDK zw0F^jcV2qAaT`v2^VN4>e(U}hV1x(Oo8Wm8KKO6G8E#nc!5M|vVn-=fd~w9xd$`h) zRi=nzqd|rctPCC7R%{GYMfsvVim;XhH+U_vB~MVFHCNSJ%vtAPe+D|K*h7bQ=&XuH z`sk#UW|~^3)jHDYYg<*oYUHyH+o8XvJ|U3EDdc>5>dPj`?6jrPK3>zfck^v~?~QwI zy6-k0@4N%|yKnW|U!QQt<3GId{1~_Ye*F8>e}4CRIcZ!>QBDt)^VFxfAfe1{u6pDG z(EzLE1TYY25)-@x>{7=%3*yXmuiKgIJox`X+6BuEI_O$j4%9KGG=+D&>)r2?l_cP` z1zb(4+SP1$HRX9udWm6#4T{GRk?7DDLtNn4h?uUCVXs};TU+S~47+@dWV68{)7o zTdL!QTA>FR$YmoNP{GSic0^dll8)9>A`_eF#3zOiZc&_K713u!E(Y^`fa@X{jp;CC z8V-$!Gh;LJx6F%WjEzzX$s0We29W>Bk!;#L87IaN11trhfQPU|In8-abfzhq$V**!cNBJc*qN-4RcsZ>8XNc8NgBH!FP+k);kt!6L*D_IHLBDz(r$DnY9CH&Up$~B|Q z)n!L}30=O9w4@_W=5?`KSQKx!yU$dHDxybOScKx5<~=WZ5olcXcJsxr*u&^ByV?12 z_MAYbZ++c)Um2wkpk4o2t$&>i-~jv1pR+A+ZV~KN1&fspE&u_RAUxp<7uUjLK5m9J ztX#TsxVg^#Fo;7;T{xGxx^g~oieaka6_@cHF6I(RW(+|hELog`W>pY)po1}>b(Y+O zu|?G2V+Oo{a`F{2kn`}_q`oAB_C>N&iRHB1HTlWU%I81(2}vqfV4zpF@`157O37Y` z!Np6&VTL=IFyorcyDl?^&AegnZFJ3SZnK-;9A{+D*~HqtbF=M?=R7O!8GKfBUs6Zj z$MSOsf#x=$oi@iLrnS*2JCRd5gp;Bk^`@3)1ZYKjTqOn*3*<=-nkm~Jh zJO#+Yy$T~wg@ji=E}4bALqLeR%xBK-Kid4}I47^p^{d*QkJ`0jF*TBizL9@_73sEJ zb+%x=URpG-;9y30=cOL*UjG`{R~L5HwXWB#QylChUiQXS?0VR9{G}iN90va!cih9a zWSuD@P09bQcA33A2k8F0IRQ_>4RV^agdd^d5&!(e_k3y7cZ##-3CS8pUh>wfa*8OJG#k}Jnz${Jn$`t;=BL^ zz41H0vsppWOFz=HE39Kb_nX1glfO8lKl^*V{qVhMVjV|2q1(Hhp!+{vTQml02ox!a z1N0-BQ$Ti`gEhFcz*D{l#4ko_A?VA(38cUQ8^WmCz>E-#59B`WL%I+&C8i^t=s7_) zEImKIU5sP3*?$TRiIP0ji;)4lG52s=P9EwKLorE+9dYFu_-uCQwlZSX_=bH+flCNC61YU@4r0>lk5K=(2MCbY&TyfptP zyhcmZMtJf9Q2YXW!ZvS=KrS4xF!UO9G{T)PDn#2~D@R2eQ|#EYc-2#Gv5YOF?#ltkpK!fGn5 zg*-g43>{A7IdJ?$P)sFNYe|)CwUkW9SECEsqn<%%FecHG9g;_zoJYJ|M!merdelkg zy2oMb$)5yD_zOzbBg&#goNX}3+Ji`?97M@vNQcy+m?6L?=#bx%Ig8vZJIKndteH*Y zNYlzdAdI(79Lutdma`Nnw;aQkY|C_f$+0>Fx@09XSi@I@#lEb`7W~T>98CX+Bh11y z%)=B)>uSBlT&X^EOaio-yvvlOL`KSF#>2SGh&+YNv`Ws>N^8W(&=gIs{1rUlXZ%?-%D*No5Dj8EEB2-}>R!hpxU^v(UuOW@4O--N;86i(tS&V5A8#QewQ 
zR7`vDhIbq{$W+3~bgX2o%x0Xa4WY^^n8-lN$g8x-ToKRlget5;Pt+7i_3SV9JT0hj zP5ETWwR}+nt51Pggz$3}{p?RA;!FPg&j1zB0WDA=J<#LqMLLtcJd2ot+_M3!Ea;3* z>6}msH3L$3x`T91s;o)|h_nvfql*O1&Kgm9atlg2(bP;)eY?Idg0SO{< z%T7OoJTC3hF9p#M-AFSf(=$C$P&v&lyg+5yHYM3W8GTb$ZB;mh%gK_S9%7O@B_%w~ zQ$5X99}QAo6;xjxR6;G(Lsd{6Bv$pXqf3E_EM6ref3wL{a0NK zSVJA!B_&t}eGh?P)QCNilW11SDEsTS&=1I z_moYOMOjx{B`1*`C$gp9NZg6;`4(*rGjJ_V@-Y zMF?drKv{9x@_N|2o64oVpe)c#BGXz(49~9BFB|*VMhaKN>#yibRdZcgv{hTvC0)U2 z+qZQ^xt-gnGeTW`L!GrnAg$egyChm9a0s-DLn&Ccw9@N z+TndtZ^Bd$86?Y<-lif^2ohTqMNhFvQEmH{lReiC{NB@DN%37<%{&3s4JX!hNV|>Q zdc|A#k>6=3>eyYsEctkqb| z-ByqFG|oL!&+WuVD%sK<-|{uv4VFgQN?#m>*Se+I$M}bT;L-QJTi~Q!7p`CX-P^zA z-Te(*9Q2&XfzqmVH^%)+u<5A;R$!CJ32JQ+>K!ug3{!2S3KI(33f5k-H?~VHOTf7(QeAO=JJuU1J%ZVJxCy>Ka`6`Cr<@P)ntW z0&ZB#p#&ht&K+Ld20r3)S_3ZIf^mvqMH*S`ed3Ws7UDTc_>{aVo@7c!$Mc2c4&h=Q z_~P|~T`?x(GX7EdMPpJ{V>ZrRH*TLItzl#B9{^t7KMuL$^}k8zCO-%Zu*djIQEv%3_pM>gz+*Z3F2}?q+Y^rj!2W zoW<9aZtIp->6PxuH{0ESp=%qSGdjl7TV7nJHfpRSGE~{;eh#hYCF;JWUMe^Oq^>Ot zaIlauW<{3jWlo`|{sVFSvnift1W4J6zY^pAO)gP6_{>{_E+q?1$v!EeYxr!B~LK)ImCf8Hk`r*fJ34 z0S9uhEoB18er&LnY*K}2B_rDwy=tt^YVj7|YQAcVb{;L>(#FoC#``DKPQ}zNVb)&l zP?l}`j_do5ZQ2eo+vZ0TlUSP`>;dwodd|v>gjC6S(&8TMoAH^+J!s{2MCPu5A226Y z^c3oj?x2t=?Vf7xwvh~O&8;5qtbW_C_H6Z@Aq~dnur}@P)#O?$Ymt`kv~KMhUu%2J z@7Y$f9LI0D&KtW9ayVufpPpx{{euww%41ye+#3~D*=rYbKqs)+2nVEsrf^+$Y_~0e z_U{LwP95GRY)Yhre@=1E42%j7q#01P-qLVF zdXNrx=tbU<%r@UMue{`LbM&6*fr4{dWvH_hT@tGG?iFiPeQ)^IbF=pIK95B{r3PUi z_CPOoV;}TXHuU~R^xO_`MkhW!R&uKSV#j1~4jO7q?@~?I_J9@xF?e7w?_BIPOK^hl zbWL>~UH8q9gjqbN<#&uecb9o`++`a}C#b_!8t%|M2W3+4n^ES6}sX zSMhge_jdp8cyI5fQS*A2cNp(?f6w!?7I@Ys_=7k29S?eQCiI0r^nl$M9bR-=848A; zctuWni*NAP!FG-R%wOhmxNDU|AUMDN(2*~i^JGnmPFs{;dG`M2SpOD}wq|yh^Le-R zZgcUQm+_njc>3mf9QS#k*ZaMPZNG10g)jPLpDv_lTBV1HkU)GubNZ(z#>1C#MwCX+ zyn%1u`YfPag{vlh+vH~2LBK>L)mU;EXEdy|v%E1~zehl{zV z`_9q%J;!@J<@^6X@BP02dB6{Rqlf9f;tPk5a^z2ZNB6C)jlP7D97P1LyU<3NxZQ?5Mui_#u3IK(wT;IjbG2NaO*Jlb^W)2LIkCe5No5F$fR z+-+bN_Rkf~gQ7!ix|$V%$mbuK4nK&8PqCKK@|(^AGF4-xz=a{wJV+1R`^wfdeAApfbrc z_#lK4GBe>b6+&YnLD6WqVTK%H=pl$9QZu4MC7O7mL=SO;(Kju2^rAT^sWhXEHPYCU zOf<2RBaSuMl+#V<73I~CVCjKWDU@V(R8&w^)#O!CYIP)$MXJITmS{bqiI!>|qEu=l z=#l>i44pJwDJVLrW|bx<-E7e{=bWXOURs@XwTP z>gX-F+xiHksaKL!Ws;guDb^Sy=nzPfPRa|Vl=a?G3c6f=b#9hrrS-3ue3=FYU_jJ{ z0h(*V>E@e&P3BmglqD?L98>P0m{yh61}LJddHgZRrEx&1yA()p@>@BSN!x3^0oVVj zrI@&Mkg5txtgn_E^sGV`Z7b5aD!uEkOf%Rtu)-cYwKG*Kdv&wL zYMr&S&w8EW*V0lu_O;rYEpE8oew#MhK)Mqjxr7!F6I>98Eb`aky~;p zIFkMvPPms_IzirP1l!5D7;`T?dBYD!OqUcBAMrxpE8F&GpdIfeO0#m87IE{=y?591qhg+CH1R|aeSGotCB6L8P&@s!^HEDZ{e{(c zy*1TbW3PSKVtcp-{|(+aRC0ydqd_bNSqST4tBZ$o#{|By3&a* zOe|XzZw7Tc6U?lIo0?swY9>S7<&HSK%h{-W_q!yRDP?E?TJnOZL*yCnXtZKpuA-+z z>M=2U+tUe8(%rfr$zIfFMVEAANp|e4*CqxeTGR&4ekZFO!Tdd_R7kX z)_6BIVvKeMoZ#RHXE{Rj(1iY@%fZZ$ra}IZVSBvb77|7>NO25=B|H=)LzpPn;XqNV z;|&W-)-s=Qa%8xhVV_<|!x}o_hC^fz4=+guAR4iit~{j>k=VQ?La+ap(rY5IoT$qv z^0Jq^1RE8DDNGd+^NLx-A~M?sMrD2x9=+I?xuPjeLgaBcYy65Ev#CEg0)>wEYRhn{ z!mz|yE`g-O%f#Z;7nBK-kPb7Xo-7lwEo4w&uJZzOBpJy^?sJlh?4jybD9}uHQj?#Y z;R<6o%Ho(Zho=PI>Q?zmdTP=vmjWzUKPcc%#|{8 znOz(gx%&0K3EB~w*v#Agw3(H|ps|CoYi9rf7E}XX>@geMOR@#w^s4C$ibA(;lTicpBtRj@C>WBGy1{YTTR*Ga?INe4bp>n=?|K0(@IoCky|6;cbS$SKTWU~mC$Ya78oSn$H&T+;tkGy$|LA>tES8f6biuVexh@~%%h~D*5*_G~nZ+vr^ zW&6^X%U&)iWWyZU9sxMOKL9#F#lI3G`-V|q&eAOgRS+#3D~D6kx{;=R$?gh341{mq zad0ILV8(b%130ch42C|mgBzSTsLoS&@arFw7#Cw1`!lT9UF{q5_u`b5>|o^aF!xrbUQU&E0{J%F zU1Kou(EkiE1pB4(?Z$Z?pd9Hj!RG{k(@h#zsR|6;S)%0@k?D=z{9F3X&RNl33*09KT(APb&X@l_o1tzGgc-|{h^-a#J@`kLJ# z7MOwD-tiX@ik#n>*)qtD_vOGLbW@+T7@k1?AdW2ozHt)-+0&r;RLo2sW8jvSsuO&p9`9o494ILUfC_x;PmC-^XcFYRw77IAK&#I_GO<< 
z6ycC8ArB3nfw`7a+2I||fTgh_AK^ed>6vh>T%u_eP*D(ZNKPB{To&3*oqz!>LPnqs zis(UG8p>QG@I<^dfEyy93e3>FcwQYY9v>!PbTOCfoYN%<87qDx{|zD`j@_vpV!|cd z6rhz3oM6s;g663~BSswYX_O--A0=A<;wDC4F>#!iZCMZg;3ryNC~DtjF~dwL$(|Wb zD#jTdJckoDm{#c_qUBj8y&?xqh1M-alYo=BF@gBik1vu~;RPcA#mQ3~mB{SYqm7f| z*hw=!om$mgB}GXcbsIH)p%hkQo>bC|!Jb)hQl)heIDVrX4jCcxjyYCII*w2cup{9a zhZ@!6Bfg--Rb}znn)F?wKAIF)YNA)>BM(OFXtkY_38n(Hj5Of|^V3;0#j!oL%7EaqAHqs4v2`R9o0IA$g!Wif*g}?>oJJR7z z>d6!g<3nl~>dl^` zqk}eISV}0G`XGf?s1S}^hK^nt4aH!Z-;SQ5b&aT=U8Egg4rpa$Os42|7SOpU&WkpQ zafah}_M{I{-Z6sP8*XN7)F)8oUyuqZHHKyq6dArrAU7uI=>^Z9YU6Unkh?r-4MpjG zPU*7AXZ?MoQx0gCURAc?4Ia+(=hYL^&ip$2Lhek#Cu8VBqtizZ>>*@dDyiX6(zicTsSQtLY|r;38% zA111&ZtGV~C#T;3hIWyrlcK7W8mmx%i663F|Fjyc&MJc5Vy8&g-e3sOc-e76}*3 z=NC8zTI}b*60E^;Y1i5+*rw^i+SlIYYA{7CnNn=UTBtb$gW<6y6xxB{d6Q%!?aX4* zHdbV$4lS!PU>Wvb)`p)mE)acktF{`hy&fG~imZ)U-Q_eP&;IOsX4utX#U`~CsP-Y^ zYHp`q;Jsr1pp)W;)K2YCRIR7o4I*YKY#!{_(rUq?ErW{f!%uPW#%}GBW&h%8 z+VW~yn&s}Q4>kA%7iMbQ3h(gJE$A)nyn!dY+8m!6T3*sCpi*uUnnjWvX^kT9`GP9D zN-F(z@44w*&w_6Xdn|NB;HcJXe5P+Cp_L!%Yz+6?Pud0iqNe;BcP+75l30lG(+s0wGvou|BYtT$KxN(gYhyZWJ!5#vbF| zj!n{yT1;<`ChrJaq`1BdBXk}LcklP+@lloL$Hwdk&u|IiEz^3VrtGkeX>SAca8iC4 zQwrblNuoU_F@PAJ{zmNW2Cx%T=ygPKCS!8$>TV_*uqY$&*=edB9^?c5x){F#7tf$!0DVilOBCY~-GB6E3Nd>f!p1PA<1{9D^((kFX+} zqod;0ISMf(6LAsuLQoCQG32ijdo%z3FIj5yyH&9{i}N`b@Y<>~+Qf)K4$v8i#qhlU zW!(NPww>=Ip{V87Cta4WEH1%4_bK=tP9W*>E;}!B76uzgB+=UO<3jEV>#^p_p7K&O zAVb&nK66F?aWo&|G=~7{R&!TeGd9QOaH(B4gL7?$GdhP{BWT?q6oV9(ttY25C|@y- z=vRn%ag*?E-i|RGyRzO&1sWqMHz^n(Wb{E_Gy`|gFB4^YKD74+voN=CN48Eg_oP1y z@*w}QS1WZtbuXLLMzuGFCVQ8iF60bB%x3ric!t2QNOm}j_$~6Jw^Z|2` zMvMwQr?TGqbT^W*1txQ*Snx5+r$l;iEwA!YBlOGUVqz>*L=Q7DS2Zx>)1`6$?gbvM z4lgq{a`i@!c58^Ws+xh-q9$6G^i+=ER>Yhn!}T}ore3QvHtgJ>Imlh(^%UDQZm+X% z?rt`qhs)s)MGm#y-ZNwul!@NuPvdON5m0d4*6al}Zg}UNQDe}>@XlH_L{qhRuWOD8 za(SC^Xn(b7uQxUp3TnU4Aj84F_TD46HWJSg##o*(xbpxvABXZFN0`|>J}QnYg9`$&{k%O zS85A+ICIXcSY>$UHaB~Ji~-*GdsA(E&o?xwZhgBpJ?i$9oR@zWxq=q|IQYEfgZMU+ z*YqdfbZ{^D*_4VB7VRsWuQHo4SWhP->t%d0chY7ahT)TEfH_kGFM@S!R503{en9AO z&UmZ1rN;TbmGPDTojGh4{$aE#w_FMnA zSMHaR-*!ngF@i_Af=f9^cn3U7Nl{mDSBv%v>~m7tVISjcT1_EQod(LDZ*yj9c;Asj z!vRkw3K)a8oIfO;UvJ-TVj)^X(~vJemuFQKQWRgIDlWDHh4N3X;7+vI09t<`KX_H1^$*9nC^1N z>mR@wb}|4S#_=%vC9SWyJN+B0-|?>VWv_dzc^;{ZyA0DR5VIp7u~&SuPrS1e#X>{7 zw7ZXw53zyP!oFua{zkgVOZvA{ggc(HlV^Ilr#sA#^12Ty7_ZFIin@B=7Q?Htb22cg zgH;FTsLGK?bT%)msw={$ic$%@uJ4J{i?JIk>(*<1)_ZocdwnUeLCT8# z*q?sc6R|7+MiFH*+xK_dH~45G0xr!xl*fGC-+d7F1X5V}&0{`%qA#bFaB|N(A_D^6 z7{zDSX$1u2@(TMl=v_L;XwuwFc4JOu!01L6BufU zc<|DtIb5vd03mTh1`8u6hWvQaB*{aeG$Q&Gs!c^eXD~Jz1cnEYHd#Eb$l=8e8;)iO z9h!L*kshUcn2vGEWQf5Matc+w)6{5Is#vdf4f?hJj#sfewSO93GZL86w5o zg$)P;$6XmE$R?r*dYbLcoi`4_qbmsQl%(YPPGbiX3ecq(Xq%_ z%PCi{M#Ca4+H`8wt69r(-I`YHTC;7{zI7W{Zr!_c@s>4+QSV*4i1QlW3;D0)%7-;K zhU_`?WXh&jr!F0PcIVN#2lM`YdwB8MuQM}$zFB(o>esV>77c!UYS-pt%T^7WEhmWD z%lBVjoBQk=kiG&96wsS*6ikpC1{Vb9!Eq#vu)zugB=A50HI#9!cD17mpoxavQqBAi;OW|&}vB* zg^EjCFvMI6EI7FIl1(u8C6iz+&%DF`iZl34ldGSiVsp-I>r5=ri3KxhF+D48s7r4D z%y_OrDS3(HLm5pGTne5rxlu_geHrGMEp-%B<2qf$x0*w>c~n?S{TXPVdyZA;WsJ5J zXj(YOI^xnrdl}jTY?T)=k1EKFdYd>>177_}sPIZu28T zCi7(Ocl|=cixsTQyGgGKLVF4{74CP;ZWY3^V5br%{NSP(M#@hbLF1OO#nx&J||0Dt#f6XTb6ldO>58j_S|>w&8SRu-W~X%Q4M-{ zSA{p8Xj7A3{%H+&O%{5tt&U#*`mD8{9{cO7pBC)yAB2vxL!^ZJ?Ro(Pi6r#hQr~U% z(?_3l;3gR|CIVtO5(FsZb>W90{GKREdlO5N*mWf(8IA)2Ys2COrUrwBrEyGo+))Jb zu*C2vPI9VCTQZnAzEsYGA0$ZRK2tH&sg7lgV#7o@XM|92Ms_fS4ef0ADcYgNhQ1ii zB5?M*;QjD;$txQ2f=EQ-$qI?gYo2_Z#;j#+4~kBtUiPA>Ju6<3d(k3}Dh_Bqi2<$% z2l19--u6ECeGZLh;YAQnX|jElH|Nljd@lLL6dBdl}61aN!b56K22J$c@aqeX!`XovooViYJ1~6d+j3+#a8UwY2(O%|+XcZpF&wes2 
zpE$~-LqB*Ywze>hO06I@JvdQOQnZwOLE%PKxk`_MbW@obnk{TnQp27Uu_|4uVwG1s znLgH~(D+3YJ2X?vUY4fCkd;m|i&JE7(T!9cidD(i0I!18sGDmaQtwhu(k@jGPW{A@ z>=!w6%CTeUN=sDLwbgn0>8oXX(G70EkbP<;k!PKf1O16yC!mdmZ(S%RJ;>0wzKO2Z z6%=IJMbV62Mi0@|L3dlnSC{z}usRd0VMjVkly1>??`7C4NOMz%iB!)#@zQ6J&l zH;R+luV)GW{G3R=1ShT)YEsE~KYV@xDobT8g9Wn`35Wp08P@O)I1CCMc~~PC=J1H6 zumvUP52){B(sJ|HTi+()3W5CCLlJ}8je^I&S@BT#ggQ+(OPo#+e?7LMu7sh1f-#)fMvO zL}$4FTr+vf;GHCS%S+|+9=oGh_F-YSY*Jcu<*;OCskD!1W;FM=vS-i)Wa}&+Om+L( z*LKZ%uvqSL9|OxUbVE>Q@Y=RPu&aF@C=*&l5*4gj&pYtIzWLp6fA3oy{}#Bu0Z{OQ zivSV>F!;h3E&vYzBg3YcMFJQ;fH2I$5DFgv!ZTiQgy`$Ws@8FFMITE(0%5`p53HNtn41Myyy(wWHxtllSeP?iSfMH$Od|{w+>RF4?UMf_f)hK%k*la+p+kC`tGAX^{it%BbxPk zg;dcB^>}UgZSI<*O3(b+QeIoom3fT0^aQ44UeNPC$>%(9afx?a;*9seEQn!>fRDfY zVc0nNJ2C!tVBGudZ+`+LL4Fae|N9B}yIuCbaPeE?0ZgF}*6#rtuf;+x@)#+(3MZ__ zXyrKXpkfXJHIFc8WaeBB*qYB|-soabPXnuL^+NFVU~jN~BeqDe_Ha)HSMc`#?8^lu z3s+`J&3y0oc!AD%Px-2-_Hl+AgBXv?FNuFXS+O3Rq%Lw8noC2ZcO$3<8b-?T+w7U<&V!{{S!m7LE<85DN=V3=c2? zArC8xpw$3sbQFVPG-mq{3iJTQ<+KUOpv0jv@B=G@oT6-G!VUyg4-)-qy=-R$QLtys z0(c}thnCLv8tVnEt?8gH3xEj*oh}rWK^B6L2cgC^bgu_bQ3HDlE$}c87Yt+A#?>+* zoV-M})QSO&@NE1dnhv1*y3pbrK^(%t(H`Ip1CAKbVHwfz3*pe>#BbvN0NwIAO))nAj^-3Ek}&VmFU9To zLQ;?T<(Yy?!es6pt8(S2a;_MWC*)2G`#=k&Q1RZN8ZV9w6R+?FU<*1^{m$>=iZTF^ zQsVRu{M66@G84l%O#c$EHOEi!<}e-WaMhkt0Y{E+0#Pz8=MURw5I@JqknGp+G1xRi zd>GLm%Q6zDGu>_m*$`_9=Tfsk;SGR6JHHb=!&5xRlRV4QJhz||$a65$BNi(1FAr1P zio+t`QzPLs&BEt5*(gn%#1=_XBMduzNI(?3#%rd>uGA)aU5n^l6JgoePVbO@;8KG24os>zZluE7i z8QVcix0Fk})Jwk<8pBjf$COOV)J)G59q@}Y_A*z-tuEBeINs^{Zy2^2Md@XZ$v_}cyM}t&IsdJT5$#*EB4XW(uj3Y_cK>q{| zSgp|ftngREP#g}=SdsP7&J@y`)mfhvTBB7N(Ns+Tm1yhcQzCBxTj6wDgQ56dEss#G zKbr(@8evdVa+5MgZYJZoOu`6n%|sd1QRmeREtOsw4&n^1pzKv&`xQkKh)hCjQ(4qS z5e7!Vf+Jq^FdFJAS?;ukGXimx*GyGZK_-;4FI_dvUeWa`h0|jnQv+)C3nCE(dlgvu z&Efiu3I%|}RyI!rkmCyP@C;7`zSF!|0cUfzQBZ+rd)8-v7HESuFloUn!3jLi0~(V? 
z7O0~y*;I<)%s%5(YG(x(D8mj0rzj37tWt%EEOViF33Qk8NkfKFN!AAfaH<#ka(_g*V?bit8d^;L8YuIx1J(rUmqaJ4L9 z?@G|GHFu!?MAPsj!4S|8!wA4a7Xtug7B?M^3gvSdc(x1DKoB-z6hy&!D?t&W7ka0c zdaKuZuNQlzHx9}GGnV7Al9pGx7a8bgYU6Y^xV2lcPB485KSgVGxt7F8GJZ+y(w+)q z8J27<;SJ`Aeb6%*C(Q`tHf{&F036`u0OQaw0U9CUYL~)-gv&4hO8RWTn+OO%*}{>M zVC3lVP6hUGS&b>{NF5*7ao^4XDbpVRE%&-&PIFLoZ7%lTiuF)OmqZ`chW*cU=k-Ku zcNUFoKtNp}2T|77w;}d9jyyvsjC_n2WnO zd#x8|?F)P%OEj3)_|li#+;n}NRxtl-jo%o-q6!$9r2gvIj_(+c>rc^Ufzr4$kL}p- zBs6!qFeb&pO2d!*{ zc&k7egblVg?~oQjPHROHsN9i~{xzah*saEvg@y&ltuyP4}4lZtk|xHeb@kYfmW7f(PFO5@cpn%HIWM2bBi z;D~XGz4&?C*`41Rp5u9o1%VH$*oy6$o)saC-&h&OSZOH3jM;c;)EJHV`Dh5*pr2!G z(1ct8rUFiu?}%0Lh}rH2@K7hZM2pdw%@0gF+M@&Ekqg*>{g8(icx&q^CyLWDJWVl6crgtrk*K7mSCwx|=aPcVVO^LZ2oaO9Poi|0mj`e&uTfbSPYi+C zHPdgS%P*=$Gfc6N3a@bspqd%`02IWJLz8krc^N0sP^l-5r$$zrYpI)&*qfD@3-N@U zd-qp$GG@_`0De}U_nNQ&_d0s-IeE(bxXWxB$#>dgf%AV?)%7a0BmyrgEzD*)Q>S?NjpJzO-wUiyB+q;2^_!(GBczL zIFeb84kSTK-{A91pcd!2AxgDV(+XktDUe#EftMmnrUZdj`71scB=hOyc-rh7nqq5N zY$eu4TUe+WBb3NGskOAx5HAZSPFR_^tWopuh_$QB8YY!FC@XXuGY)3MF^3I78Ig4i ztzl);dJ-*BRA!g_Xtuz^87UETWerapcGmzv;X3{% z8L$a^pUWd@X-P2uPss*3Vd!2VJ66F+djpOeo1b+fq3d#JJ&`+7hpg5Of(;^B*RPjd z)BDi*L!lV}h`bsf)Fxr}oc$o5pS+$yJP!Oo?KX1(;I<0TArA_e%e#D}>sC(6R!dtq zV8{-aD~1Zjf+bUc2qQQs%4WI+Xb;bBglY^bzs0)g+`IL>@@_4tJ(jz>yAMTzbGwpc z#M`Wm(I!Q^!3`OhF`CJzv z(UCme54^o6E_lZ<0g#~Af!&_#K)=I4fQ>-Qz1++9)h=jT3dX!L?G2hvHAS-pGQZ@v z`RvI2NA1`o;U7LB{6{PC{2}0%e6stNAJ7R?YnI0rhL82)b~o`bS|%I33;XRQnS9{T zJK`2}zHt-YH@$|t(I?Zr-i!A(ddlZa-8Z^f|3Id5K!L6&mB z*rT8cvR>=2UU}W+Ca8+H-)~{+blhw43eLWSgV3K2QrkUPNl%$x@Xd`Pp+{#GJS-S; ze+=9N4pEd;6jDhC?j>g$FQ{NJ3_B8@V7A>G}EdH`S2Qr)a^dml4J|^t?7yL z`@bLRr+)m&AL_Xt{nI}U`skg)UJ|f&VWvtCR^(tKNMPg1Q*oM;6SlYkqKc6s0_EH} z_=nITgMk1cdPs>=CQYF{O}Zp8(!^AyR%Jry1Pl`uC|ryHfpUWc55yh}csa|Fh(5Fb z9)t<=lBLU<1Nz|1>Cywu2c{gs6lyeqQW4@3JVDYzhSaH1t6G&h30;^1TniM@+BIxf z1Om#I?OH>exE&_iy3G(lg4_~6mP~=dg$4<_Z%3+(3;3;uy)pha_|Rbj1;j9jIJV-Z z4jF=lM-udMh9Tq>gmZ)}I7Eo&94S?nJh68%hY*5Ir*3dS0f7Ls2M1;l8@J-zW_A09 zjX|~W;k|+5_J_P5!aGW>d3Z@%DS+t%tc8i0QxbLnB4UOS@zT&NN#MnkhY5-(^eyGR zgHJzTX87!;vWFkv=XrPR2{QhHVo87)MfAu;1xiFpMIF^bOM?y;*hx6C*n&&{g}cx) zQ7#(ZvSEh|UI=0@9KJ$g8gbx(q97{r!D5Olx(H*8GR`Pt8Ccxd1dclHXrdole8Gkp zZ-fKhQbrz$WRkrl8B_p~=&{BYTtsF^m07@nnMez@ky)0OT?wWbYB2K99D6vVkU|tG zxDkR0CURy%BI!U9Ux)?A)OX+!^PMRz-IP;H5Adl|PffCf6H^{2>YS0Y>=3|HS4~RQ zRF;5+Ry%!&1wmL#0^mSbYO!@vV=J&$L2zKCNhTd$1~~>0rCvbmZHrM>+MUDc)kBuK z!nN89W;Rj?AOeLp8mwQ21}tZE*6>=er~!wnVZ7BE+-|wmdaZG`5+~dLYT9NEaP#)HO<7yz)XfFTM5Ni?4k6a>t}}2GViE8hH$|$VCSYcp$>Ckk}?E4-wOj zHLZ}S3x$oSBFl#wdl+KH75QOfjEcP2BFQDwh;qtZgq$OfE$=9Zk699_U%y7)q*Nc_ z2-@y-g!UJX#D}sgZ@WYnjkM9!Db4h!?b=(fDb`4W-gQV#*Il4I!?%MqJPYM@k>vqn z38VsC;X|cruMG;OT=k$Sr=4>3X{cO{I%~8lg{c{p2PH8>5KPQzg|n{>M4AQJ>e_9u zEd1sz5DB#!X61Js@tCy8idLDf*j5X98^t@bPG38QCoD%u+B%88bv|_u5aq zO2!Mf^3jhbo@jFV>YuDK`!%`@e-t{hDDxJL_HHxIR%h+#qJsWxGoqEn5cvU~*^EXW z;?br)3Gmr-mUbz%7yh&#D5@xMvXx4d2VV`nf$f}hymjk$WsVpl=Uhq5r}eRSQ+}( z=e|(3Pn0o=UkF8*wojzN74@?YOU46<1ZW3)d%_RSilUU3L}!1vlpQXC(z`{SZc&aX zApUB`A7P5EU&=Iu|7OCo=sa@(zue&Peli}O#LJCt+zB@^SfnL^5S1h>;R)yF6}QQ) zZlWS%X&7K95KsYC6-fDCc61jLB{Q7l$TCZ&&zl^|UzBrYkek_q$P zA{qHe7a($Zjbsx}C(=@eSOXoCEX9^KlgUg5RSQi3rBg{c%1^%UeDP~t@$@qkVA|w4 z0PItdqNx+6)aN_b!3i&`9tpk@5@nTTHeE8cS^UhI;@L?!mnJAT z5->Z?oR2f*lT}^5%O!&)ECA$45K}Pm3UiDKIm?OJb7qPSENo$K!UEE4K~#nTJLp{o zX^>+Nqe-YgZL2xZjTSIkWSvEUT3{pThohgCJ`!}f> zVu|Usqadfp$zDNqzx<8rR9SXP`OPMPIV<3F#3MViVwJDh{3}gj^2P?%giz4sUopc< zsHQygYy4RYOcLc+4}^oPYi00swiB<$-lVPs9Lg*8WRzcO*j;CJgf?^e9VCQ|6BZ2c zWibon6SiP(aGOJC*=V}xfVL^FW3EC;s~0`gv>|GsEk0+MxKoJ>a;Krpvb+pLftqTz z{e10{n>8(e9#?S?#iD8+T3q5vG>dX}E_Bx--Md)>offEWY_7XflWK}rJw(lIyBppb 
z8VIPy(qZLn`n)g&jt+_JX{0BIU-&Bj!4ZQN7ABQ?-vZr%GF0vFQ%#++k(u&kWl`qM zZkCjz*d#w+%g==e^+0Lj8rHiOYs7|1Kn7kI9cl?0QKmM{0Vo!ih8U`*M692{&icdd zS}f7{CxV&KT5Q|SrGtxE>zV{W$0h=Xggj}Z9|O5kmjVS6h};e%ca|)lZD+RO%w?18 z76%0i-}f4^TFACmRWk@Koej=VHg9W+z%4WFgxH#d%XZE9J~(o}uvsMlxyxW~ zPprCxK@NwzebA||Y--+1TCy-J4p1IM?_*FLZeU=*7|vPzwn;1u=n0xzEz1z%7uUE& z=UiOeQ`|Ng2V-)o2y&6{8QwECIXWRQ5JW(%=)YyT?7<6to5Sh6iEcVjePxnijkl4Q zwkfeBP4v1(S&!M{rm4`K<1 z9W`bOdqO}SCRlxyb_&@4OF_{~J|PaybU$x}S869q(t%<{kyvGCczuUgz!X@vc4Lpn zW1KK~>y}xT2Ng1aeCot0^kx>G7ep%udPG1|39|%EMmiUVdIG0#uvJ@VRuW0JgN)M` zv?F^d(Ob{5d#`sz3#VK^*o4JpXGE1}dPZ`^muHEve9f171P4i(hE$G3CYu8p5W!O2 z^EUw}FbiR62$OT7Mn~7PUMWRsXo!c|GZ9i(GMlh|e)xWY2!Ag!KZJ-Q2|zP6RzZ08 ze;jm82Ih$Sl2!`_Vm^_7vz91@2Y_3pYua{i!jm(6S6EkbVA27KSYwIzfgb|^ABX2n zYsEi)G9K~aHFb3V0BNv(#ULSk$cG`*K9PqbEocbGCl>6aXQUE)EEhRHb$!-C97o73 z`_xc}5k*e8I{9Q~1b1iy_l(nMjS{7M$pwW}_Vn5L0>L*~yv;%9EVT}kix+6^Tl56{7ZI{?V)h3c5X)h{CFTAsVEy*+{IWI98i6Myq ziXaL*$qeC;45P4wi4l5ZP)Gu)bBJMj8dnkm zL1)SZM{i;ORRpkHxRp2AsD!T9Mbaph-^d$I)_ZKpac_A5Rj7_~GjhcDc~>Z3UWjFi zHhq5yCro#bj+Te|*pEEdb3R8VizbJcBnAgrPn?vH2?0K(5C=ecJ}Y9B6Iqdhc#%5k zhwO(UNH7~`B?;`2S07nmScj01(vdg@3TA^wvnT+DW^iUeCNxnVAjVuo=$lHCdvYm8 zuNh}SWKH{GlQcOzISG`bpo_%-k)f%PECY3&83#xS3{>?4=#U3T27_>9Z@t-;UYP-R z&=F5^Ly-{zPRM#Lr*Psqo^-K$OK6t5kxyI{P`;siZU&!I_+}DCm*{wx>)4LO=l~?J zmweg(EPknme<_$YwL>M4dr$&45}Wk3g+<41n>By*p@3ZVp=(z%(Q`I+Zq zqC{yU19nVFa#$`pU^04bxmIwkhXF?yF*~{z0^_4c=V)&*SwAW!pOK#bh!Gg$Zxh0# z2~wF#+N6sDk}lyikvNk{qXB#nn$H=X8CiAMxiV?fHQmX1Gx|ct_M2%sQD|~ds0A(f zxll6{mr@9yFCb^5HD-omoCLQRSt*Xfxu40kT*C#Ruv02(VW886mr8bggpr`i_&E%^ znEfcB7@>V`NTJ~6m zB!g8vG=LyA=NC9I|;C7463M^_OJB0f+0CYH*OpBgkl<1j-7oq)&( ztPpuo#|shzk+6ELz<45EN|CTYR2Erk@Ye;v*fQHmrcT*;;R$C<^BWEUtk@?l5;uc9 z7*AUkg}9Y(&^TNEd3##KpYeI8`8ke-iX63tsEL}6S@?M&hZe>vUs8aCE5{m=s-cY* zCL7y*nX0K;rbD2Bsc8tRIOrv$`m7$BT1~~G0ETYV**-8^i0AsM=)*F=*_QZ2o~HA8 zz6l*A(2a^=P%SoJmvgKLMXdB`7ZqwIFm*7Ks!}%eQb3voy&5qbVQ#%3ovW1pts^4~ zyXd89IwuE)rFRwPktSgb-CQ;<4?oN;Db zskDHjTKg1mgw{l4#*Ka|sQii>1xqVN1D6QfW|AAA4m+R_Z~-MJvFk`G2}&wwCR6p8 zQ}igc9}BVuVJAwk15f+6y%MT5RghDlvL!pUYVZZ*Hk~qyt|^MMHtT-9OS3V13gF7N zA>;|pW&j}Lw&;eGmhgBcG-7~CjZyYxeB_w5OE{ore&73P;Y&TROTMMbqhv#^L1`k` z%B||B3S!%;a2pC|TajODh{6jaESk1#yPccou0(jI?$nia$|b_;w{x=prx?3$R_2d} z8={<}MA(?8aT>7sT6>lYxq%9f0;`|BXD+pIXAb+Yiwd5Zml89fDHfaoBlV!By9bX( zwK3(wki@utd$9pyl{u_PwcDX9n`sWAN}jZ$-nyAZY{W!-#3s77ZR@o?w#1(htu7!! zF)KfjM{cR46=`}Bq(hkD3&T~5NgsNos5Qo748CS83B_i4jF7%Vi3RM-3K4m{*@?Ds zyFT_Ct27d;`x`=Y>%Z%Cw@|63xc6y&n;BB5Td7)ep8*&F$y15Hbpl@@+oc4*=h%UMtwI)lcUm+6?7L^jIpP|8765ICg6AZlMp*QO}j z27#FctpRPcEH+$I%R#|2eqvx|k;VxW8u-6Jx#nJG*!Y(#lO4? 
z&c+656Q(W`2`WfJhw#j+HI?EE##4R9 zVA{@iT&{ggw_#zoZdT74Y=dS}ud(v4zmm9m+Lx))pFMlmfE$DbeQ|v~$a1a81I)>= z6B@mmsGai3VZ*4P>pG7*7g9ivVvN=HRHQ#jFm!F%6}rPWRE7s-5+^;Sr;XY}!_pkc zAF`nx&}2H+fipeOoIY*aw=JDQImAo+yF^{KGHcF8jm$|MA%xJ()awZEB$fxg$n_=F zS{-y%!YTp6&09^?bAq+uoCjYmnyz5K?@Pz-Y=VP$GT5n^tLjv6tF|^0&t+O~6rDKp z+_OyZb1!GW>iO56v(Fs2rwd)!gw38mn8Du(;IKjexrg1*J!>uvY=shie2`7gV=-`# zIuou&%Q4z7qiD-7N{eV^fIstfan((MwTf)DYxhG8t)okd;#S|`0J;t1F)rgXPUBt6 z+r7Oq;Jpguts=!7Z^w;Fe$Z@}w}O|F+)-I44qScL{oHl9CEE<$QU1*lLonQJt={d# zcp%j7qt3m}&S{Oae0(Ea^T#hJ-*XdM2riyyU=jG-+&wVR`VD0tuyCc_!I%5rmu$~- zTDb+TgB(m;3obZ}?cg#P;cw%Z?y|8delG(CR0T z^7p&#=PK*15bM#p>@hF%Ji6@LRoBj4>^XnzIS+I_54B=4<=109S-T;)em!YBRN0v} z?DOR@V!sy|ylBndZ9Nu&oaRGh(i?F92p@yHJEX!0eT}ju(4&i$0sg0X?&p3@uz;=s zuP5k^!S4q8*u^)onKST=&L>1tRuuMi2gqzTF=E|BbhKs@g3?$I|L|Wk@u^7it9=x= z=J=lO@w0$$T?gCUF<+{V@|&;QuO7Ux9`(`gyQMHxo#3&@&S}Nu6A75l!anTD{`$$j zXoEnc)a>)OfBP^`^SZzL%YI7bos^^`^(g}SIKE2VPW4qkU;&Jb?Kv0#{Q?(LmDBf9 z`U>``;#;agCtr{KaN)QrJg~tz?`wbOc%I`SZQi(Jg#(s!2f985j2}ntKmdUN%@;s`paUH`NOa^RUNYiEW4x3~ zeae*Cdm-!?%xg!HenkoY7cbnuh#*0JSQrf$*ssxJ{e1osXz0iQru*U}umS@QEO0>k z5LhX}1|NiwIRhpXutEzJyfDHAH-xDJ4L-b2!U+iUr$m18%WpFWpkuMcs8&2KDv$zr z&Nvi_z;Twyj0i%4Fh&w#CV?C=C=63Ba>NmhjvO+{gOmhdBO{T7Qlmavq9_cHLXC4$ z8>`R;b-+;yv(|z%(zdMdVzIm~wG_L&0^<~vIWPq^)J!WS6)s6O3*!bIbJ-0KAN$uC?RS z@c{W_kVqQYqr4x+1xu|VjzAY(3-}8nh5}KPkHZ1oW%Ims9mG)HeD~$IUw@nZx84i% zm9Ip9PGmG$ae9EUMd)5cPDbaBzTF@wpz>c)B`oyPX_c$}LCa5N-+4{BpBD=fPfh=Sw(Vf=}-+ zVANOVa9|2n7m;V#VL!lNgi~C&2!s}{l z)SF)as5;X%k#U+kPU#pOtdidPI#6}3o3+tEj1Y7#=jSmZn7NGx~`s92*g#snfr zpnNlfVl}UudZg*p$HLr=!i|Ro$)S+QF$=SoTsOi2@fYFM~Xi$VE)N2f_V`eF8 zM~GUfS{m(WXaV`rH+0n+A{`1P=XcW7wiKF186it^vq7O;GOJp>pI2IN(~Oz_%L!on z8c>BhTvozTZALvR+n}JM0s61G&UG#@{1DTaYUZnq#BO%6YhB$oK?0Ex>sYlo1M6(l ztZ(g{T-E!-w$3%K>t*j-t&_v+$OQ;Q+z(&(dRYGcwXgqOVm$*!#d$jCo>HvPV!w!@ z$3}Jm5)tVY6!u_?s2Jm=MWfYi zZyV#+qO@p6Q|(J>>slFaP)~}K0dLa?Tq1`pxWq-WV~ks^BqvvvXbEJB_ZXyh3}!r6 zeuOZ!++FS3)T@c&a+t;3UGC~OtYXFHgoMDYZNk^R>~%9a;r!+s(s|DRS)McHep^KK z=-1Ev73}N``#J(Uv0vl0;uZs-;LVWB2n~h~;~-4g8$C)11?ez{HGSbsce=MrkccTn zyak$w+A*F!@kVLl2sy;TSE-So{GNHj^*M94lEkqkNovT_VA8g@J!uDZE94>{*~rIk z@&OrWiJ~Uk$-*_uSuE@UUJA35pwx1<1EFm#XAsOa09-($za2BU#qDKKB#N5hHBOoz zH%hd|+0O3fW}e%7=l6Po6WEsTdAn6AKKFUQfiAG1vpDF$o{x(r8Y1rYC`N+vAkr7u z3`inuu}f38rV0k|1BScvYL3f zrq@1Z3b=6|Y%)g$Eye~qlY{PTvVF2TL_IEYmHzCW{+R}8*sWCTE7U!I{_G-I~D|i;XaRa z0wR7!GzyVX3dMBB6~A$jcf7aJ2Dynxj&VO$xhT(=&Nu8QYg^wsnI`0o%~zfedz{1c zqo{FiyGzwg3OmR?M|SLGzjUGh(0fKtI_;TG+^9S~s=o(*@TE(2s~6uZSoeVptBD*m zgn&T~Nb^dv)#f_C+pV6fe)i#g{kC79`)Rj+5bqoJKhN`Y1_tom{}r9sz4OswfbaU? 
z34d3_U!3xcZ~TcJuY_|&d-DIMyfhI&iSUHdpoNeU99g5T1!Tay5d@fnv61QPszQZ1Hlt4G7xLGY`ZpYb3tt*zHpO4EKo1l|qGODqLb}=`#6+CEbos2h2^1NG#7HzgNu{jU+P=!!p(O-ABNV$KBs)5EJ|677dQJaEz1LA|pcZ5fz%(`%Z z1aNx;>Y&FD*cSrO4zQD)QA|RtOvPrqoKpnFd>lxEBtm`!6(YhAsbt7`k;UckFSJxk zC~U}Cj1q|4o!?Ot`&bCz>BY+0$c{Y2jaob+k+O3U#=lIGB$29HD2CZ+$&~`V$4p6~ zF`C)xv8D*OKV-*8q{*7}N!d%Q+Y`Z_RK(HbOt>qa9ngYrvx=lt%GM+|r-aI7sYitb zm@1GVwCcWo)Jn44&A0l>P#h;DOfPJ^x46>BtmH~{q66Y-BIiSov_wl-gw92AP6Cn3 zDWXe>T$mHMkBZPsjO0sT49vm|%v2FT4ch_1|D1t-8^3b#t)P5Q(R}s z{KQW>X{pv5Gt7KMK-^55Bv9e%6woBW&QwsKjL$uTPfX(wBS3=Hbj=ENM;U6$ntDnb zl+EYVm)o>5;xxOpO2U8yOHTw&Iup_G5KFfErl7&68w&;!MUaWvC4EzNICJQ83?HLy%c)Pg#_f;+|2Jk?V>eS$iO!Jf;|rR2~{ zgj0kBQ4>8;Ia51(E6zq8(XkxPa?(vW|6|bo)&K>Q$9(AlS zs)-=gOFUWtI9<>DI8r3Nzb2K{DkQ&F#jhu=EQ+hrTs^2>Ef)fWEn!(nE#1;$Mah@c z!2cZ6Fe}qGbvC3+(@VgFW+P2DEl@W4EllG-yb1vl$jt8QfxNq-d?HsYd>sh|RB#gk zOQftQQi4PE$4QM>MRin0eN=g!SAKLmx`Nbid5a?~D&}05go8_})Kn{FMOr;r^B}A( zqRU4!)$}+pAzf8?35!=vQpNkPoO%x&@K^~;7~^5j_T<%)r9X546bkIT$fU-Wl}0cP z(=H28NNm4{O@sNzXSn6)niISpg3W-n0;>gG`UZ9hbZ~-ScQw4~+o2B!S5Cxw>7~ zi6ATL?7O}^%<(1PSS>@1|1(^~9bEh(S@mVynbNMtecbu=!#cFzIgP~1wcJ<9T+J0- z2L+qY4NadV;QuY2Pos|u@KY|+0#F^-)`j5KU4W&;gsw1xC}G#*P+ckdLiv~jd*xkF z!deiP)ZqQcAw*76L{wGO$NvCWMikB#{#P)7%d{;^xu`3!Nv9iTCoRld?aU9D@J@;# zFuJ7=v!Gk%BFQCY;wJtd1m0EkZQm(=U!}-j`IX#6`c^Tsz-8TH{k`0#>0bag-~b*+ z&@JNvHdE6DIRlw4MLF6#lH>82W6i2#F~CqjaF;U4;B-SDvbx9vm;n*?-9jc|AL0e@n}9tGmM9pdFEELO!UBz6yF zWaU5DaOC~tbyiZ>|^ml$Wk7!8bahno~z;2T5`5j;bmmadDqF=v#`Yr z7k20S*hKA{0~_X!8iRbE(*4cvxy=stqzh=$UM zhTbW*Xj|3~TvomK-Q~x`oBgG(`rRsDX26()igx%pGFIuEJ6;dyEKr=H|HV-u4rB~CuiYKzL_J|j{oSMf+O6cxyC_>2T^o767Vf0V?xSIJ?B^^1 zfgnJj7ihmGCDI#sn=;AE87bnpQ|KZES-n+jRQ96vt!1~yXp@a=xo+Po_B@w+tt?Kx zrQl`%4CBkSI$}nLlwRq=_TN!si*P*>T1{Ajh3mJbYv@K;>7H)9;xmoz=*Z1tmxU>j#$RC`>B|)C zJ*sHK|0ZKau8z{;6vl3B$Zqd8W#{*N=;l_~`kuQtxo>!in^f7558$29{^MQr-gkP0 z(?;zB&&N+Rn0z4q?!24l)~M;;(m#Vv36bm_rB zi_jd1QUS{KrVF#ktCNCf(r=axx?^Q(*eHra=CS(Lh>M<7&5Eg1Fmt?L~a|wrS z-~|kCnS$h%z4W-0+rHs9XFL5Eg0AFOO6j8dPH|c7+l-U46|c?~ckx8G@f(kE!fo{B z|Aut7U3B)X3vc7=9@i5d&-A|jX|pS3m44 zpJj@s^=+;7g|=Q5w?YiqZ~p>qE`J?@`Eo5X^htMjb%*!0#xGWK0s3^n zOUHE9)A4CU){+)e2CiFqNzJMQkbo9I>o#>p%=Vp3&^1WYhJSS^k9fm=Zb!dpDb96I zRmfgf$P4&&RK8IO`Dr{K-bFp?5mxpRF2XV=^FCj5Xy-n})?te-F3!pDZkE8bW?- zp#y3tq&Go{7M)_HiV-GGmr{YsbgI;w+GJ=c1@JHyJ0Uw4t*WT97y7>0|+s2~dv>JSiwJSfV@Egc-nm7K{~1jixX|CXj}QOc zmapCvDl!;oX(3_ruVDus!P{mxa2x)<_k-?tym<1C$4gIGd1Yeff(OITD?fho5ccQa zr(YjGf4u(xFE-Y8T!`gXc`j9uAag6+;1EO-i4>BA6H*vqg%?_gVMGE z)uWHg=Gg40&O)2rSt%On=a5BWTP+N}Iy4J0--7F{xZ|2quDR!y+m1LQ0D4AkUcyU? 
zyz|oArfX#Gd8cpht{JbJ_GS`hW;?1HR-VQ!TBpJbGwg+dkX4pJs*N2;9Z>oWQpc;c@;qzLKTFufh8N+g z>xg~H*Xy-L3VZBs#wKO3)XeHwwbjvLtt_(AQtNAg{{X_=-n5Bgob7X$N`j8MYqPDk z+ikR&u1%u54}Ur;kjeRw^G&GkIpG$f_YdAi<%9Ln9H$Q@KxA zht3@ef2r|k9iKd%!}rX43p(hU-l?7IrS!Jiu7Qcu+7qkw)!%0wzW3u(t*zH!ckOvx zUdTaOC!kmpfBeLNLX0Ww;-A0%9_Y{9WoW92O?=ba-vGxOyuB%bV=^GZ4k(+jHO^2S zYapH`=s?}j$Z_pc6#EvoIU=+!FQ5w@>_$fc|A?V5g%Bg5>OvT@Po3?0))_hSrF18(lnO&sG$Vot4$Xs55s;WdDv>ZIPFmWDviH8I4Xbv$>9iH2&h3_DU=|BAq!n-v$MU9 zFLgBC3teX<9@fBzusS1tCS@5X0$?S%+$Aq}sl-}E0YXGXNyKotA1Rj6S6RfO)41qG zXHG4e&-^0UXemQ8=1_e?Qpz3x;Z5AE|0Wi%7^gVPnTk@#aeAPn6bhX)$wd|NjbEam z;{r%HJ_4+e$T_6sf>51CGE$9WJlJm}3C2HCjt--dmd8v4LJ)THQ=+V+E_<2F0~oVn ztTSa(I_NOcX|$NK6In8K_$ydmL6$sw0!VGh&RikUq9u_jFBhOvS!uy&ggOuU++)30 zZm*fp94b+zNzG@P6q`!DCh4{*8gRlQs#5h=B+Sqkn!Qv+400p@`$bhhG7y}ClczrQ zMbAJo>XZx(C|ldg)qEM$qi-cnLjiiwh=OXSG_9yiG0IbtmGZC4v*cP-xk^?hsif9C zX*+KBlbgos3yhhWsbCtSy<*m)|B&U#bEK!GjauucO&zTj-v=Y^nbxjFT`g;;S=2O= zRzK$%i>cysp{j!7XGNMxJLQe6vk9xf@Y%!=oo zyfEUGWSOmLUx&!skOr0rskG%`5t~gaq8Fu`y-;MXnpIpDZMzn=Y0uVqAQ;|6v@twN zP*bbg|F-tJ5j-k^%O=|cmoSEEt<&M;b=wEq0>Zd`O>V*nNjRbAoTnh5as?SC;Vu`g ze;jLZO~Zu`GHyvh(eUGMTmuk!6~M)50N!alTac04R-7Yp0< zq0|QJosbdQ>ykjhH@mp?_|E1et8wMYVG;w3&#_4^L6H+^JE-`%ZB_IlSA5+{HcZCU zo%9He37H*Z*1jw(^Cd4OnDUC$x72$xdL5nQ4*W8*L$+*`sVrqz?-Bu5u0cER>tk$r zxpvDu?G?w&QZoZP*lTXIn@L^j(VlXvbhfQj@vLWNw*uP71(QAbTxSix@!IpE_E|3o zXn;W>k!W`FxD#D05l7mBW?f*Vzgr$m?{!cogj7wS`)l}sddPR&w2_aTRNFDB4+59K z4d__`{vkZ!3SamG{}}G@hCdwQ5*IU6@kuR&wtFpm%5GOcZwWvpp#+v*4u&eC%l&P4-}}f_d~$C79l$VG`Pd9R_!^n-YY4WvoB?$1MnJwy;k0wKd0u(TlM`Om zo+r9HKbt;lTY#KLI_bN8sHT^Dn=)rKL+8q;q+dPjl?7$jy)L@(o{H@HE;8B|pZ1-Q z3thq=z9}40|9s+?%lOAvKDvzW2<9gr`o~8;^@DHy>s!C?C1<8k32)Sx3;yunCnL>$ z&v?N!4iO5xJm%ZqepSFf{@d=ha=j{lq95J!>&7s*V*@PIr=IGu-TtU{I`} zqV*cDb)WdjV1fBa*qqm~_1@};%2=ph4@w^UxnB_8-w?*1(D_TY5n;XLUvz!jJ$+Tc zP(cV0AdA7-6w=P)9gg9A2IwfD?KNO48P9+eNX3AleNCX*RiL9;ppkjX*mYsPNgy10 z;q56N|Mrj{$)Uxrg;@-K-wgU8)-YEZ=AaJh#8U)a4+5bO`dktwA|p1UBW_s0Xd(R_ zhy6JrwOK*3G2JGb8{u`LKn>t02FK(WQsgAy*Z~&nB+JRTVH`e@V4<7 zI^p6-ouLNaqOILp7209c8V-hZ*Ai>_%?VoPV2Ph_*C#>R;bGtzqpv9=n=xZEs@Vndqcn0<0dmZ;DdHmjoYEQE z|K0#0BpP9w=wIoPV^1yGIX+`LDib@Vk?Y0d3xT1s;F~MLp+RaQH2PB@;$Zoa(oZ%X zA;#HC&Y~K+oE`!mA3kJ6GG)v0WG&8IG@23xo>@Kp;6}2aym(~&kytkpomYO9Np@Os z2#4k@SW2qo(?MNJx@1~{T^_{bO#a$a)TB+C;k@bNP#&b=`6FNcV^8iDm*u2j2Br-X zC4w0xOY&DSHsn%5WK%w-FgoR5>LpYHr9&Z))s==ea-^34ome3uSPEMInO=Fd6!*s;2Z{}R1P9V2BTk6rezxDUhXDuN@i;s9z!;!|6@kx zWJ)CBG3OmVr*29oa9U(6_M=#wrbcQbX?|uU(#v*+9=J^+LYd}t5!wS4)@)kgYOd!_ z3W+-Qr2ujvj?E?+Xu-Wj;4tB1?R{Z*5oT2WCUia}fD$NmR;2H_XM!eVbOK9qIw+?t zXLCYkaZaXz4rX-rB>R*vU5c3yc7NLNWBNsb`^D&{&C-qsW* zW#Sda@YAofA~EgfZT6T@4ySKkCvgVpfC6Tb5@}IV=z=O~k}f7=A_XSs)`UtZGal)5 z$|B2Lrduc|HJa7rZRbdaCTH#$cY>o>!XD^}-jB8?d&a17CKhXgj*ZeK|9b4GZBCC6 zan#oxsE`8bkOnB9_GX_lX<{bnoGg>CJ*h$|=W|NwqSm01VyJ~?sec|P6JBLDifN{5 zY7v4do9vUBKGjH$o_en7Yr-ZKD(H-^q@yU05Xm8rP84qXXo1pd@5QH|0&1=X>Xs7f zpaKh0CZ$p~>XIs?3_|LKB5Q^gq;;-bb~aGJm=jjMpJ--fnEH)omMOM^qbA`*jp?e3 z_G(5y=k=_L_H54GE#BsBwi<2fIt4wBo3}3Q zyXwT?7H-C#E2Yw*PKNBritOWNE!VcGSAZ6BiEYuV0N|M(dv@?!4Q9;6QDY%h)n zPdr*TEs*AYV|RX}>7r@S!jTewDyW8rQw4|ew&m->=kLjGl;vYAZf?5juB|#Q?>6rH z`tH{PubT>B@s6z)$?Wo?t>&(6q>`_HG^{7_aL} zZ26vVwWKfM%Ie3uZ|*Mc{K_xM*6+#M?+FJl6@@Ln9x9Y3W9Htk|7xuVx9{o*Bwu_k z&)(kzho!cnZZ%pW1asp9tHvFSjs*)I2p`!m=EMe{M1RsJ`wlP-xMa9~cf?b>oW18`6`@hT_5FC}8DYcRCrG$97%v`JUtjp1ZeUo=K*G&}1uM;rA~ zo5)8ON3S{LNM9~fbJ3ojG)i}_9|y_60<;g`hIm#RR%i9X7IfT}GHSdSnnE5@{Ea^*;@X zZd)}SH}-D7v?}lMaWR;8{|hUlF$(Wvq6PPGUk`DowPtfRQAfAe!u3ircXQ)4bf2wS zKB_WLcXc1~GdgP^2>3sTV{9vS=ziFDizIG@H*XizBsn!@t5|w(a(myhd%rhZ$G3bJ z>Lk}U+1~dt#_SYtEGN<`D`}cRXwuv6B-QM(- 
[GIT binary patch data (base85-encoded blob) omitted — not human-readable; apply the original patch file to obtain the binary content.]
zO)-luaM~hg88d{JW(;?ZpeJmkIi}bHZ}6w#h16Xj!J&{P>OcSj-Lqx|i#;Z(om2#Z znWdclDVnBdo`9zothx4@r`Hizo0UnHqUs~62?uJIUh4nG+^ow<=3{lTuF6V#tM)+I zB}E+Kj&x90MjS9=S)>~C=1WGE?k0kyjr{7E;*$F^X$(T7#Asw6zM+b3tt+6DW*BR?5Ccw_UR>`X6n}JsZi_s^wd8B3?HqoIDA^ex+?pKD&H}Q2C%^n zdu_4gT|Mivyx1oTv>am18?}O-o$Z_6a4YWKeE0wD_qX&2Gw9&ys=IExTgr=X#r?V{ z2ID;5>+j>1Sz-AUm~+i_lZy*2#=+Di3@^h_?-q2#1Jj$ih^%XA+Quxayt2k1bNb)O zD1V9cr@OcO@*OdssDaC#4{!XaI^*N>q?A@lXl6wl&AjQ%?^$N4HZaj!>Al%Y^{Mqb z-iO+2SGj`Nns)GvaM~jr{j#NO*;b{Lp<2Vm2W_5uG0S80|ndB{RdO83?6@~`A zCo0W!r;*;#wD&4kQLKBw@nWkO!G{-Ous!ALkqA_{o3C|8RlJ(T*l?u@=d_Pn)-o3T ze8H>vFv~uF1d!aCB_I*KDSo^F#yb{dz(zXqk>5gK0t;uj#`agEFu%bEGV#l4h7O*+m9tH@hM2YWFD3@UVw2*yZeQh|51^ z@rW&Iml7v+C?{I+b(XtS6*&bd*uYMUSL`AeRSCW@es60ZbOZUAGfP$iNk{7Q+T~O= zp4k8>e#hEl`Pli35oIe{^kbVJ5efecZ{&lKkPN6m2Pzd3mg{*NDj^vy3BgP%?vtQ& zVZcVoK_ghSgQ<)lkTMepPI8lAG&`RwNeI$d%Cdb})TS+qxzb>^RENI=ra6bXvZLYB zMVhJN5|M`iXV#1~Qrs9Hq?svdLU9W`^~}gzs?F_v6ieS!%|hP=s$IpgSF1ed8^KmT zIT8##S=*Yn*h4lx4$^-197bC6iB>=oOMb`*AV3L9*SbRT9;Y-}LKFHxhK{b3u8Zh7 zGkVU8Vsvw`bW(M4N?2No^p#HQXt9%>&oB{F5dMs&L3G&=hp5oZ==8Ea z6)mDt=mVhf!wf6RczR;1+^i#cB0E;K zT2(pk>1rV1S5N!N?M>PFgCOA=F?7uJyLJt)c=?8oyYY1umF%m(LZrICPOC{SZ!)*90-Xr0vBtlG9QrGm|Hi-vA)Wcj;~ZhUHm}z z$5+GOdG-m`_r2$i$S6>QpCOFA+2hIhi1I(DOy%)f`N|Ed2a^xF85TEiy@v~*iyxZ7 zhQ^n__kF5-0^CYnTqFN+8bzsbR7ghFDptU^VuNEf8)7>>c)>iZ=u06yvI2yL2srI1 zo%sBk)H1QdAGT>On+7yVDfQBAimEDgtLZVP2FPiouXZeP+*;Xq({==BkAY!b7z^30 ztZudboUy+`;!%DYl9Dk%M~#@sg)Q@d@;#(nW$IU7{$EA!Gh!Feg*YEkg5Q{}oD6^Vi$j{wUD08pRTa zQ_M3KW`Qv+#K!zE!GOk{phNotLXY<9nyPKSWm_uX$t(=V8ay-c^>W_i26=u_te3FCvrIf}|Y?eFTfaTz_ zvnOx!7T>&r6s_3KOPcK(|J)m*1Gi%_7c|qQF3;5FBwoWh4I*<+P@?sJ@x#Ff8X+VBr+K5pQ2o0C6U zXWVda#kv2YF@ZSpi+sCW*0o$8&;GxyBmI;wtw$|CX3S^ap%Yy*fbVR^ppUx>!2o*t z=YREPbp7W^3@2i%r*AmaZfoLY9trX0){RGzn0$Te{g2Ufr3cUNOKl~f)pIBPPOYLReA*CKvpRCwiAgy#2d z7jiJf)_zR5H}+s&=S74TGgU=`N4DeuVO}L5-@@v6n z50W%VGzWoG2tihOg;+>ATDXN==y^NFd0{w)Y-W1AC{ce1h@-W1w!}q4Ktm45dV|)6 zMsPw%XLGY0<@k$O0(}(bgOE6h*wQY!4B2{4ZOx|*T;`FcSZ_Re<#IX61jPz7ec{U zktLQfCw6A|1&3vKQ^ePd&d6vVIDv7Pb+%WH*O+@7=5HLRg4_6SemGkn>5`pBhz9>B zl2gNW;M7gqk&EecYF`D9ZPi9@r(D^GafyhJ)}o0SryfCAc??O4rHF*xrHTZZl}({# z$`CPKDS2@de}R=z5D7U0Xn($_iySF*Cb@1!!Qn_c}VMdS+*mNmYRr zm@_SDle|Y%X<(Dp6nsYHk|21K@RgU$Xl_mwnc2mN&bN11MK3vKLgbW=l(;L-Wr;so zTKeOZ9fwb#*pU7BkFf`k)F6;rDPF?Xcvj(Z40bLsCzc2YQC1U?sF!nUR*b({mlsKZ za@mVz2%JSHhus*Gv{#besGL^UGk#f(Vuw6|>4svZ3rAR!H-ngpxtM}zj*tInh?jQ= zd$5SXX_S@ejwmvHF;rFpV{v-~m2k6G^GKRXhHEdkp9(p0l(d?rsD!U+Wny6xK0y>k z;hIQ64-!$E06HjA`7WqQmS_o-Ae5fSSz`^?mQ6N;sWgmnNpMM5A`;kN=(e1Qsh7^# zJK1xRR7Z!__?NMfja~OV+_<7CdT-!4jyt)IuHskfB$R9cG-!sxoIFY*bzI>MZZFzkS9$cRsmhw(g_T~~ zNNgcx!zOIH8m3xt7u&K92K9GC_960DFOw;i%2$>cI;ZEktj+qYt;C2O%BP)ooT}5O z9!aS*X=2pKn#4r@Dz#x~i;-rLcOiLCB@FTB`*#6u6qJ5F4?(`eeMy7BsP@3B#ruTdWkN zo5~uTStzFoXss^TU(-shc}lW$YNC!ghuLIjE!#0L+H`9Ooi+a{uGN_+SL$KYNpB;I1S20~oX~5_(n8x^qQ(nV?#f_`|QE5Dc%nYRv&0Q3`k!XPV8Rs=nZ_ z=Axyt-kCmpr;rV@mId3&cJE3zZov`DhDPta`6 znzA8EdVmVE-l{T_8ne;)vMlPNE-RfYsiXMTsE|uN-&uC)T9TUfDvl_Qg?d<23%Ke@ zYW3qy?@5&N#8pgdwKxcx_(`f_XpeA3A9fR~v1hOdi?(}piuS;^0)?P(TfA|*tK)#N zR0M6#maj09p}h&Gz?pxP6L)NxvSxO@b9uN*SE9@qjVb@?xOsw-cqzFyn_(9Cd%w4{ z;7Et+N??ciz5o}OM(agXrD*p$P8p;%?dXVM^^TngiL1-6-LtCnd8%A%yW}Aa*YFIt zmb)i+3lQA9+hA(}S`KR~yaIF&a&f%I>%j&ouv8hL-t&;BH@$_6vXR-Tsi(c%TbI$= zO4+)&Gkml)g@I`JxYTL6>Xx&4IKNP`xjTA{J5_qVRlm1&fB~zzsN%n)7D5C3zXJSf zo0=PM!@FlgwNgsE3(UZ=_*Q4yYZ5%c2V21}XL!3-4-2coPPj-8`@wK5Y;*gcQ%S-U zddCyFz0f;|daSa?Nrs7=oHg8s+nQjH%Wmd7xhVfxx#(-1JF~-x+O4`1i0~`FX;L~J zhQu|B1$`x-ipaOo_o-24Ik&KXXC|Le8Cya z#%)ZzaqPjuR$j_m$NZSR(woNtgO1foL#9%gMJJ+uT!y|iSy1e~He9L16TYq|f=eZ| 
zFRH^h8>!fcxtU8D+&Q#<7Itg&l9KF`IcdYFmmI-$V=d?&MS7%}={^@1czt9xw%o5~ zMJoxbz-J4~+|?1ZY`|ixrRWu)YRt<7l&c;5%fL(&7EH&)+$+fHydI0kc51kSdxq2u zqAR?Suv46}B*=NGt%_U8lnbsDSWMyU!{q-=V%Jd_tm3 zHphC(?a9hqGoP{RY7#80rimc5dCLe5yA*u2IH<4?y}=wj(G=~$p!d6V>pFH!)a_NC z{%6+I>%!p_&3eezL2Isxc7Q}IMB=8+-rB=4jLpqSx#LQ+k;=Y4i_?@m*nADdKe`D% z{nKaNIzm0vQEadIjLNwXpRW5{ZZ$Tac$Ash%G~$LQ++lU;guD9)!fz44IMG-x7Ffe zv2XmU4qW{u5Vbf`RmE87LHVkjK2< zXZg{4OxM^OxP7d$0axMd2+cQ*qIJFEPb9-doX90E-M-VvC$8p%ZFa9RY1zE4Eq=O- zt#&TR-e=X%MV;g5QG=b?-aP*f-)Zxto*lNJy=<>K%U@}>RDI-5n&fWQnrqzTt%#uI z73EUStIc+aAHs1sm*A+*#CuDHx((sP{ZzcojbPH-e>~XM++nmXzC!%r%>9P!yxi%V zv)E0pb3VWRrbWuV-Nv)$v=O?F3(Z>Gy3#J$g#KJrqtthoF7KUp0(~v&h@Xy*3{w3+ z#DWp{ZPh_8+LoS346VzY4mcEhkX~KU$U6?xV4Kr1a&CI%ln1i7XqG3O%q;BNvkuon z*XG0h>KYF0zka?i?coj&&cc4E8wk?~?<(g^;@$1+4JmNrhd10`{48E zn_MpNB8{yfZSdSA&W%B$%s#S`{N`K4>v|nhWA5gS-1T+-*A^hyDXzYSyz9^YMTI!J zXuZU5_iCrMo+wr5Ctul|`PeMK_W+H98~5lp50$+Z!I0jnf`88$4ovRzRq+l2 zz7a!j(S?(?`JU?hKJr4^>L}c|1CNLss=}H}fejd-mv>_1}u;aj5GQ-zSi) zv&DYqtG_hE&Z1|h$!q6@lLPlP1|Lr=lAoI1Z{MSCmDc|dyqoU$P!ahlNAGf*R}0C{NRRoA>G`Yf^zI+#`)6l(aMvlU>=#g+ z_@Do*pZg6D5IlqoAvglWg9Z)|CfJ~&hKUdmBy>0!!y=0t884nNxR3$E3K=MVB#FS} z0)-S*j>HgP-%F4QV{S}R)8T~(IX&v+u+pXz85K2zlo_$3mMsFaAo+|(TkYQ6#dg+SSk3w- z3gvZ|S-`%=4s+I+vENpGA4|J7P1oeh%8Xs+yv*}w!J(aX+t#caXveo!fnv?tTDg2L zWzVKv+xGu$+_`n{=H1(OZF}_CQA0QGn!Di1l`pTyyg9Mev;Hz#O&BtvLXjt}yEX|(3slMd`cLrd_t8W=TpaW|LqPWk+=_^sCc>$O z{t$G~z8)TwuPLPxRIntXU~oaB067$>Bce_!!NQ|nuz@BMIr3@6l~5|+02eL5K*ok@ z+`uK6Fm$m(s&HUQqM{}c$tMbj1ahdJC~}I&D4I-4Ewu`A@~1@{vGPhcRN)IOE@44T zvbXMP%eyhd!!8so^h?al?X)DTl<5i+tV%ftYjd$&L`$tO!+v2bHOzPkW>7(I0R~V# z8zcYCG|>D!O}f@z5yqN&^s)3eOf%JVQ%-YBZn)wob#Bz=N-Y%`QUYt`%IdV!E;~NE zbMrel3M?-^^!97eKs3^X&_DWk)pgh+fEACyF=}1%z$=6$Q78(3weJo7n4QwYlx7-o zLu|VhQd=aqP4Pr;FPw2F7)5N+#*uKuN8Oz|lo!M!b*!;Sal=*eTC0|VGOvM{48qv= z+LQH=wY~&%lUbpB6EQQ@1PtOk?OfG5#;h}JO~u4QZBI7?Jv7ltkuljZluuKXG-()B z+0j8$jno)#V(au~oO9OsrR1o2%{Ws@6`CH)qN9Y>STFWNl%x*^4-cquov>J9ZO#8- z25ymU)>*2h1tLFrd)<0$vaN-P#So(P^;c?h6tz+ETtWaXD%)(XlgyBP@Y}Y(ak}H?3H`az zMY}|D=}kp8N|)dM2Drct?rb4ah}RHUH+`v3fBV8)wEl&^9*O4?e@KJpX4U^C9+`!9 zWeQzcMrRj@k%vrdO5F;%Q!&=P?qx9R*jN4}D_FP+OdX@)WO!GaAI1!Lv>*nhh=)8R zCXrJ;Glp>1QI6W5@@;UG(Pf8XBp>?hHz0YgTlHa$& z7zypgkudPfU*XQRqAis14x1@a zm%H4J^q42A;Y3l2rW!{TFNZWv#A=gS!mXmtD09kd6JL?&JB+r2;>>PDNl|3(SO<6r-BBDArZ*!Cxk=X z`g}xCdsZ-mXZW6nm=G{%Qmlogc#wu3$INAVGIQ)4Pe#3Trq*$AV{p_?NX?>3ln&#S zk|`7`VW~Ub@ve8SG$Jo;dehk`PaeNy$2`sf4q*Ki>CFCdIM-TD=NT^_)(wytmjJH z^&&k6bc`K>pdAf)(02ybM2-Y!vKngDERnP@Cd4FLBD7bzhz|d9pLo+mSr}2vf;5Jk zG^t$}HPg_*422@4tGKvsL0f=D|}af+HP54Qe@nVN|4c zPm7~nDmB%B)oCg#oN9IJHW9l5^f`At*%Tugqe@S6hS5-CWD0k< zoD82Akqg!9LRZIErQ<`gY98uhca1sbD*d*=wC#467T*8uR~;M3+_jz+B?nm>J_jge z_GY;u?iE0n-|N?M7WAzM6>?S=8p-~qS&fXf?0qK(;JY9U!8?4|XiZ0^$~L%^6Fs3X zmJt@0$&kVpzVMYc>{1=hV#2^925dz9Xq}PxXRi@Xi>X(S7YA3y-s@{TmzovdYCtAxw%_3|G*P+$An z1ldNJ(XG*l=E5r0nW*$!b8BMYWx{Y_c4i@1Xtf=c?%AZ&)>61x=`qz58pD-jw}vg66;ozhOXv-kGo@w)lf_C(;W`$*T1V|m!bX89fkJHJDsypebG7qrutaB)-n zyibaX3uV`p&*ABlcvjt-^bBFYG-FIRY~`PqRtTsAg3x)m1JyHp=tMy#GL2fL%o;7* ze1}~(_wWWio+B!6s2Ig82DmPOEA34e{Y(Z&1j4&G@r4t);-^OSV~rK^jAIu_PPJ~| z9bR}Rbk8jQV(1mmhd-H9P~;<@@3VY;^7RILMk_CN%OjucCXpF%&vnvd75ZFgGB^7L9=~+!AF%g-F3#&b&wDxjX|Dfc2{URO(g~mhB!a3-tjtR^F7!CyXA|)<@pq!v8}YjG+U6q>3g0f zi#m`(I?%C|(h?RhS~$J~zpXJfwIV;hYr^w`vSTwf#M_T>Nt3~%l2OM0`VKqsslzIfZZ-tnE2u^r@-K~3DZP_aR?s~#QvH(}yI zQW!Y6E4T%l!%yqJ_G7|QlS1dxKPR-fGHb;i6B5=#kfY*4UWAw^i#!zZxH1Glc)7e` zJj0UHybef1HS8N_Oc!Qbr0_CB!@H`e@Gq(|5TZ++GQh*01H_=~!`4w9Z^H|P*(f%n zHbeZaEMYad8|i73$3j~l~$0Yk69;1i>-fLJ{Ng{K!{yMXE6hi^Gpt3%N+xxb9geY7xeiR7sqet79}d$9zc}p-H}hubX_v 
zXp}|0g2RQ1wz~TeTy&_Q#Gwc58rri2RZNU=R7yh3vr^i?oSUP}b$7gV@-BFZz!-W6fQJ%3ByRrn8cL+%6LC77vErwjTxLYi3)V|^hpS@JD^2`jXM>^Xq?tx0B;B3t#hcp=v*x6?3@(B6&l;Vy1Y)W;!dl|Nd5fIW|2j%InRDs1btMp?yI2s zB0OD#JUik^_9Vk3eNTgk&(Dhy`h+i(yiD}O#y7(pCS1J6EU-4y71#U5(?lgcl!e$k zC2?a)rwpCd{00BnoD*O$u|=#(M|8U}ty8T8%XRt;3=PiK05K2!P@7WDY#`BWXrdD< z(NR3n6kSS7N;ppg&%lE$zVu7Au~9R_QFod`!<$m@BG2-)HRL)#z{{ZilRPDrNu6X; z0OXnbd!YFgz%|TC`&2W55hX3a@FDT*67E z)b6y)NR5cUoE%Q2!%0jurol^5Ez*;8PhEA%m_*ejMMftz!&i;cwwl#S#Z~;cRgF-t zkbF&V>(&2<{nB9lRbd^U*t3sRGDKdzpf2CB)w6q9WbnLDp@R~fEC75Em*K6SOgl|gS0y45f#TC4STBog7T8)dD)6N6F*N3~>f%@Mf)4%^Q zw9*KcUpJLL-(lt<6_2H->#dh1XmPU7N#qUe)f=zHR{Oou{omHVAy zI5@b_#m>?7NYtHJi^Sz#A>NMc%o}|}G8UrY)hK-CorLBm z{oUaGT0I8g!Q$gTZmdbrR1=!pZ>A5Dg(?@43)XZc)`S{J)*g^Ww|a|HTgqWfR!uoM zhGe1+meo_64J$t#WhH7>5iQ8fRb^6ir+SbXI(l}93)hN zZa!Al*G(F_x>Hw%X1IpN+J>guVBuP}w&nG^n~bJ7bCGM64rvH>4!zv=M8Gv^g!7&uskzr2W_+)&W0rWY~JI| z%C=q&Lx%L_=bXiABm&nIyCo7u%dYHI7bo>4-;er}LdV>D!GwDss~>%tiDV~_-7S$4B1W1#a*PM{Z_T<{{&2m~d{t?&1$mWA8IqYBn~l!ET7<$hk`IMn2GDrSYPc@68t6_s(RNx|mUzUhHs`Ag&Di4j%T+@8gNzCo)w3 z#zCNpZ2`x?M;>bfKk!^m@Z`EkOD)pun$ZYv+X-h|3@>v&I&=S!KBI}W77oV;4-auT zAM@x|=@K___Gu)Ve%%!hJQv5k($nDRhG=+>+9HSdq@Mmslj-^CA0}Yn;Qo)e9NFvlBjVb3^aU z)^S5`?;GxM8%H;xb{WNG+}tri6NDnj&Gb&`Dc=f}QXVQ{FlB-c^+_af>ys!eH}Gmp zb-Fuh)0U=>CJ*gGk_7^^MYv=KmYII)x@n;=D(?%&#R`(_U*(NvT z>NX{a6miccF z?LznAqgj|Dmvjj`hji!pPO(8s)2&ZDOHfb5?AzB-Z&VeXzygVSvc@@w*59VbyH^+U zFOSy?>1gRT)vka2t^fKrf>eAN`?9}%vmbW$s&4<8BkpZM-US6U`15nR?`f2m_CW7j zlb<9dgq*?%`N(Z z=KO_T(JC(WrT?!22lz1RkToA(Ek}KN0*EVGvJ4z^=3qf2Dne+0@W5e?9|8#`gaN_< zg^V(8aE!r&NfeMmMw0CK5eG_>Lr$(#=#pj1m@-eisOj-0&V*7khARh~nlaRjlOB zl?(K$&pwYlZ93%Fk|v=)>(&XXmo6g1d;9+q4pnfHuUxL=HXCLcu;jX>GA|wkdDmiD zb$?BMHtsa)SdIw&X|0L0Zpg3Q%ct)- z^yo{cQ?G9QI`-_^w{!1a9UZlG*T8#&Cy$sgc;%3ltLOebJ^1nG)1Qy-)+pAF8SM|H z+rJogM8ex)iA|!(fe9L@7*Kc+_+ViTvi3%VY=jY@U}Rts)J8*f=%Ga)R`e1>8C*aV zMR`?p;zMt}wBlDJssvL>GR_!MOF7!0V~sTCv}286{4^v_MH;!#0%66N)O1+kl@*jB zMmc3yG$AEcl38kb)|Dc-*cM}mm6HFM6HiE)L{FXZ^_MCs7C4t;n@uL4x*<`gWn?ju?CxOhSHW9e!;ZzUvHL?Qb-}K2se#1!CVt+ za@8OwPjmP$pgc?@cnn2e=NGBZKURbR~-!V82`nyHDCvDkfmr60Ut_}%1X%&@cI&AL3SC#0c!U8LaWm;USMKD*-%1LIt zA2O!qL9Ap*Se$;Q39X$b>k0qbW0Ptor)mEZM>i%c!ww->kOBZMVzeh89*h z?c8)zsI7p@(hcvpV8Sh;TSc5ZVqPw!TLQZB+Nk*AjPLvSjgXu4FXaH2xzSk$Xui3X z1RIQx2^9NKG3g9995KW|jE?Zcfm_}=#}_9syX+ggSf|Y)qqH)~iWU5g+xFWl*FcYC-~@7g3Xlv$2tyepo|0);@%B#nx(0nL~eL>E6DYE<951$vZ0 zi$)Etir2#;FOAqv!(f`JOocR1(6M~usfYXWsK>2!jU8tbPuOgQzd^=Ff5>vy zU;reH{jKd;l|x_?A_++|J&=J8mqnxiPqw>0$M*o;mjk5^EE1of&Z-~zsYNK8q ziD9c&?XjPGq*d2=QJzup3Rm|#sIVB>Hf(CMcO<-vCQtdxi272Pw*wppCZN%DspyDD z8UaX;!Z*i}R3xiZWl3M;N^Y$bNo0BHEMr-!Cd7 zitepZo=^X?CnYKY34CIruDXCmIr!<%y|OQUUaJQ1lLLwHgUwz8#nfvGHQs@WwS^K+?vX+|ds%+vL> zwwctRM?-6|nRZE{QIS?wk*ZWC?u0MVY-|$qBUKzh@r<0I8EC?(C@_u+tYRhW73Y-J zb;j{H!&s|s)Jh0Xfo~ge#MvD0Iu5<^wY}X@6|4OEHGS4AMIblEzL<*)|jDij>7{i_zuYX1aOZ;DDo?(>1bn+lk(q zJ)%;3LTh?6)s3Bfs=MCJ&8E!f4feXX%h&my)cRP+_0Z3c`8}+D*_KU3LeZN~oiR`k z%#{ic_@a(e@PIkQ;3{PlI1!exN(UHd3lo~TD=r~LW>gjtGn&L724aXC-Qk;-*u*Hl zbbvL?;ua@*w=YgGQ5_2B;?Bv&qR}*F+)U~nue!%W-tKpqmrmS7Z=6yiMy8Y}Yb$cs z%G)CpFM>hlWr%Oe#@?STtN~vyGy6L52;=#tahihSm9XjX6{^mhKgH_js!)CNs3HH_ z;7Yq#!R0phxve6>KkH7=DJ?XH4=rw0+H%sLMrNcXjapeo0H;sF^rb0QRLX_BvpEtf z21OP;Boai4R1vO+Hc@IF#&yR``$Oyp+ZciTe|5sCb-30 z%xT62vF}5SxV(vbM7w-s&8bczf5&e=ncHr6_z(3l^7x4l@Sx4GhYV>i}=Zv?y3 znIT0q@Vf@B5oJnp&|2TR)2q&PRq|Ia8NO1+$gX`Bv^MPpemdK0s&9M0ozf?+JL|#@ zTv6v8(4IbK@|&vu;lgQ&V=6omm5B8H7ab&ye>IUw9yG)P$c@p-0bu4GO(j&BrVIzm zc}D1s56qd~ZrBw&sNQqfg9f?>>*bL*=vIVW%^oe>QSRxUwiF%j2~ZC_ z)bCl8ztNdbRb6Z)obn|BTpSSD;a2q7pb$>qy;NV){f)-J*7hyo*?Irpjo_R3ji1|* 
zUjzId`qdrYv0wYK*qsfP4z^on4aMMr%*C-wnA{luDPE2Z1Q*GIW$?+6ft*)OUIFsP ztj*e>Ae%YC+W1%=&Ug`WnAZfVkuofsm)*m#n1=>(S*k2zthk;5l25P|)_Bm@&Rko3 ze4rq$;Ma6wnz0#r^+&!2TG-FJLWK`wdO->`Tienlc zT`u%Nj5Nm@S`I-+w9SY zSZYT-xY4GF%5j7lnVo}uflXX0BtynSLk`FsMWnK@p(KHe(v4f|{G>tPgj>WPM*fi3 zeIyVvCf6CEL;}n%@>{=+9ryVnO2Q=b_0%p#080X+OH%(~=-AdwHl0mMBTjDNx%rSL z`J@;E4lGRR3Q1F z1N+>=2KM83nAv=kWp&sCLrN!?eHrYLLJAfeUR9fYg;8Lspj|E`KJH~W9>~j(VgtEn zM^dDrDNZX|A!bI-`8B3T@@Ie6C;!dgPEw{2eP4cpW=US7;DA^ZB4cQ}Bu5S3+@TsS z<|Jy~=7sKEjocev$S1iRPe;k-{f!}x;ZAUsBg`z-QbtcxT7xY-<>l#`RLWeVbj#9Q znNr5uECk{NR>PE`O+49yKEBsNV&_t5r*%$e2Lk_AIV5SS@R2Ue+$yk#c$y#z-h$ZZ zN|B=3mg1#i<(TmR=3kbgE#evC1k7k6CPL)r4&K&(`X`*uVt_hQoDoG!R^dj)9en}` z?7*F!2_O97>1SrR4yKFyis-WVUE^gj|!q1trw9}Cn6@Iksj$;Dx$1rC;Z@Qt$OFqRVjI5 z+kFUD8=+@OtKpSQ zrV=cQ#^J$An>@J_sh(rTc}!Uao2p`7Wk5}pzEfL*Lw9cMd^pFg^6GUy2Ui{B;aW%H zW?Qd*2d+u!t#M`5yn{T+C9$4hmvaB9dO{?a`sEc6NVAHm&SDm{4(8(gEYR+!h7oPH z&Z$V6t(+Cbz)Uc(S$OP-#J9pD&f@@F9Pgq^O?v?rsPURqs57>g-R<;c%?*=HO`aVB6Oa-12xm5+{|Tq@13XspNP1CrvU z;!5s4T(09%Cv|A&C87hi2`S1(VsT8OAYPmNi~|@AYdTsd=-y>I&}^IGEVB;6e@qSbR?1pgB8ZBg%2~T>^(kAE)8k3Tg>z@Lu*$yvB8t)8K5;0xCole?JRBA3h zZ@u28F78>uZD^!6UfMcpr?&s6U6|^?K3*3gndpv{Il`?S%7%qJ+24-UTZyNuR-j<9 z7hsVk;?__6d~PO+lK{iVmw}m+YKL-ov2wWO#HMkva_;56gBkPV=Mqa41Ih)Xu1IDb zpQ&lkf zwSHX>H>}#ODG6LA*bT9b*sh_SXg9L$rqwbN>+KcIt#5kU-8RqpKFcrPD8!1DF>__b z(xW6&!^z&SnOSq=N^=>XF&clYULEWusxkdm?ly;>=gCjg5Jy3ZGgtjpIZH4f&(Zf@ zuR2Q;Q3Uc&Va{PTv_vOU)Y)gA&NE2WbDSDTBgW}L-$_;HrO7-ME77sQ@7s%G;2u&uF}Lc=?dTUv$FO;`YSV51`jG&04LvpKKk918=`WW}o|cJEr7VYp ztCy%WI2r5lTsHMHpYscvH5uMkVn^5`f3;dCT3lDAHAH8@+R{#Gq!r8T;E_qAd(kG`KE|s@lS2rzPNu`G94`%{YvaK4jb?XCJ2`1%&}u??ctFt~2l&uZI;t&s7bE9w8Tv4%UF zixQwdiH~l?G1Hu99V@k%LGz^hcIb{TQ-vvU-(7Rx>x{bubH5^28~J8-Gyhfv|q49Vn^>N8;IIT1>d-fc&mq^>sImD&>Bvy&*Hc%IO zhQI$i+so1IST!Htut6NO{x|a`<<|%HzGTk z2KG)h`@^v~EjQ}T>X?bX2Ai*&wwLs`M_EeeVWem_j*k1^a&bCf^SP(H0t-3>OZ|jD zFkZ_0Qk+LMYyEVrAFp zTnqjx8uqKdBAj~kz{}h5x%ILyIg%H7goZNsCw`U3b<=UOc(b~3FaBs|`^EqSf&&5> z5OiRH0)`V~-1KqS@L|MFmMB)7XwlQfPZ>FK+{5IM#W5@tzQ_{t(V3GS2fbnAk_Zni zA0s;Xks~G;om-Ohl!4^Nh!Z726#bdARH-NoQ))y6QC7=GNu9E?niQG0Y+JR8N>j!E z3YITeWtGi!7H!&zvtsRSHnoDCkFPv1Vpi}8We zdQEFO$>+SSqx=ptJtGQ zgGQ%XS5|q$;}ItwnO)}O%g+I8MM$s9P@2%ZrTWEYBA(Ed^lW)k>h_%AZI1T=1ZR)) zFlGQ<`rZY574uOD9m3>({tg`W_op9J2=9)X2_mMTqRu)5>q5gl0t?*f3Ilxt(4?Va zps>LW9w=y_3^bJKLLB@9@k0(F@L&b>rT~jY3`4Ym!h;qtD1sVoMDd9nb&N0nA%sj^ zECB{3txb0)XH4uefG#~v$(Rmyy8Z8XkWot3rBN;8eI)R0T= zR#X|A4H;OnBGs?BdgE#u&#n=6H{EzG4%*{bHIBJ-oLdl&>BgGw&z<_j1F84I6Odf< z(&K^;NGP$RToTXyFF+TcJQ7C?vD+_P0~JhA!5BA)P(thoB+p$7(}Xeq!yajr?_$dF6#T%OTg3a>p%Y3;@WCuUvS*hMj~mh$$dPjy&G%6w*j9oir~? 
zInA`CvCZNIuTMu^`jCx_+SD#pHfv_p zrIlyA39dEeqD|+TYDKdQZEPc%gegr31sL3NqlPbG5luhc=nMzmv4{!deX%`w?E~@N z536fR^8{;BIDrZSF3;kEg};;cd1WscW0qBHk&D<7e*VSewQRuu#Em;Hz5$RGBeF>9 zZ=8PmdYzB51|60kxn+Y%k69wzZJwIz_}PpCPonVr*{L|xD7p?JzSMsW0c^wm=9I71 z)n|4m3);Q{HK<&;4^qQ&Tvr^CuB}PRYj%O);Xai%vwe_TzsQBC*0v18(B^HvVol=k zHZ93vCNz$@+-O{rE7i!5hJl04;G~hZX9R4>mvIw@;>=-?Pnwrq)B|{ zzO3=jP6A}tD9g|VDss+Cu0tRK&A9bLv^_^`!n8{(f;ksrMFXoP z#LEP66*pQzvm5P9XEXISO&hY%n)U!3Hc?}Z!d1hU-Gr;L!YNL33gfQmT%vMzC01*_ z^MBfL;7++CSi&+yb^KH;8G9PX1U_~tPZ0{X2Bs#S$^>F1gQH^?+C~W&aX}Q-?Ouhd8cUvUfTQfhPRYDm|V`nC3jLN~YAi#NYE)w$v(RG}hOcFFmTb?7iL zE9|Rm{7S7D%D1dtRZKj%GOb>*b#Sq8Y6ngK=*|98Ca=&qmgN{sL{olo3cU_UoM zG+y?ko_(lu_lVHV){g}%DH%_EDg~|3G0saBH6Myl|Jas9F4+nz5+n@M0y& zGfql2Nowq)L+wO7MCOy$t!A<8Knu|S7u!*MfZX+sbG$}9uF_)~4YH7d?AlxF>9whK zSSl&0+ujONTpgW8xs79eV z&IG2*q^A*1Zsz-{!F0r);Tzv({#mT>xSODBCDvQ%H=BJ;G^z;4mqwe=Gr>9MI4G@P zVubV3mIIbpCH#^zgL& zNX9XWA<4%5ahV;9t{#jg+s7H>Xlt8&d&{4zh*O*6=toh!y7_ zUOYHiQ6|Ojdl`iPEztMPA>U?|R<#yZI4bC~D{WZIzyLTmRhW|M^~#(>`SDtt|P%uNRRe=ExFlN!hif z`u58n>{vV7eqHyvym{Mww0Gah-fjxcub#3j%bjR?Cw$!5PWR%5i=g*jR3Jcc%SF85 zoifN$fxqZMnJ5q+ z?tuh3aPzP!z$^}kW&?BoDlXGlko8)x@D>O5F39bq2%oA5u-dJ;cFzZU4+wpa!?F(R zbWUmPD8rC%=8%p0n6LS)PLa;Xd~)sgVhxXK4BDnIqoR)4xDN_%xuNco6I& z#${TH2hcD5)Ufz?&V1(14(g5mzHIj3EfM|??)1B^F+`T zD-iNlkQ}1n8%RS`P|xuu2-8AtoOFQ(`OwpbYsr|4akfJgIxfyOr;UCv370X|s<0Wa zPqv5;vr=p(gwPuQqwivHEcl4Uu4r>Z3!( za74;*W-KY}nhuNDFtXh64To;kYj6d!J_Vsa&ip-MhIYd%l2a&w5%*4v5tM-!<|F|>4mzcvF`+UlgHSu0(WSo7Drb!Mny?yy zZpGf{E2)t@z4Hsh(&yST9KleK*i!5ifIi{UJ=0PQozTLn&DuP|{Op9=+HfT@A{(C& z$#i5Q`R*|Fa5B}bC>3&&lB<*=vk{{!DL~UQuV}d3Expj9gAi|Q6|5X)^oEA>!AcYn#j9){&=`#YNEL7Kg!G22hNy&;6BU#o ztBf(_&?tc+Dez_zQFrzOn+0aMi zleN6?KBJ8;r7zX^vy#N5>?SEdc?Qa2EIs5BEiE)b7qmea)7?<1KrqCnE+I)3CGWTk zH0@3A$YUU@V<4mI6M`r(w1p7yNO%y!C1oOcb!2tw2V_HZsROvxS zEoD%Cl=Pm;h?cSvcQg?dkrN$t;3|^#luSa=MgM^4?jnH}s>{0SO_hl9O0!g2lkSlJ z!~$Qg6bixA##~A&9c$LO@&!z5P1STAhYbDPl?`K-*&IlUBr-8U%^>?=QuonDKhB8W3{wElY>;MP<87&Ka3+6g6GZf?fXd=PbyPEU zZ&uXMFaxUeMju`f8Ze_VPEccaQ>#F76;zL9S*R^=2(0jHRiyMNt8@OK10m_rSe?|U z9McZ-X*`v2DyKCY!Qxu8Q$G)eUO%iXq0vC&R3BZfT+LO=(6tNIlWN$sq--knl$5kVDHAc%mfjA86Xup~F~L{KPXbYHB{W4}+*T$329hZ| zb7IMha4A+e1gGFEgEV210}(GdW|MMIwsKHZCC@5l6C^hcL-cB+;c(QiY7`a_P80oA zW+YYrcGf9SlQN4|JGh-3Rr83VGpxrlnWM6 zNo^r@Z2E$5OC_8j&K?w3aM)pS0jzICK_gi;TIwNk4P%;2vvOHR7Q34@m;+%8HcvTP9#Ujv4d5~X}Q%IIha2G(zSPI4r=k$ zcjJ+G&sAKl)<$A@jLNMFnt@5 zbh#K&3Az8a_v^xBi-(zNiusE#H5pzytCATomf4J*`I*;vzhY%FsF~1Ib$|)DfaUBq zU3NN$6cqnBM}-4932|K(RNN}KX^zD9Q23pD?`#pu9T`%LFj=1Gxt?__pTU)$k#1|l zw1vslK7m)-LODpLu6qs|C(f3&&XJ18uZMq&l44+@_pze?Z3np;E0(%ttl^qI;I^qj znwKf-3Ma-2te9fR4}Qojgt%jUTiUV*FyLbPHkjGaIJ=plnQwGjGI%bnc}i} zBg<)7G_9K*@2J5!(-tss)@2pi7L?Y`o{|ia7jmEPCM0}mC7n?-%3s4Di~#dHi{JSiQvpI+{e- z#`?WA8o9eP%liOHsQi0_O4uM16VN%B4Se4iaZojgyD9~yVLGTsWx*{xALyJ^&)CCv z8dgnK(J)*oUpDeqaERhqwr88Zg2A>Od+#imJ^E*|mIp$y_z36uONU#zpSRL6ea7|s zLhLzV)-k?`4ay5Nx?@~wJt~F+8vKadpp6_*OF1sT`#Z^bt5^W zwY-u3G)ahYiLG!81pL@3SfU!c%+c-Z#OAvHDqAD-cEDGfVsk^LugWDUmt^6~&PkQf z<5)%On1=$6S08v;H=K`;(pB@ER?=O>N!-M7_B#NEi>7eY()sBinVs9&X?0x1y1K=` zTD!42$Hf%J19hR!db%|UeAHT?30jo#al4N^$-%p!9eS0&4cF0Dyn7CodmXO-mZiSj z)sdp)2SjfNRIrb2WBB`^Cvt=$mRWO|AkD47GxgeI8qSrO(0>}IUjxte9FFC6_(i zFXmyMQUNc|Z0T-C+)&l5vMFNcBNl#1CDQP#tL``GO>uCDp0tl%aFSl>fv7B+o*DvO zC#AlHsNOLU{a@kkbhAFm=7Z~@HPoM^#;aP3I{x=FefYKV)7^e|y?dXGSD~jntv~sr zSe@eIIxCU<`LVO>`qPJ}?bewOh}$eCT&(bcqRaE!tAqVqlvq%bG4EH_Ob=&4FqqA=uJ95OAFC<8j z`bwHSi85urdK}ZedkHgM%yHBI*s0uyGpA0L8Z*XvC2E(@UDIeOGfIsZvTVF=0lVhZ zTe+!JtB$*h_3EuvQo#wM6p2%wKx4}mN;GL;T0BNvd>Mq~#+EluRLpR*=S&tYWBTI# 
zTUQ2*yESHf(16%(k|>Oapa_w;!vzVH-Nt3YV{qgL5Ga6pRCmJAY$@miqhue~1?28K)e8nW4vA zX2}J}pMg0AzY(fQHNUmm8gj) z1Wn;y5kx3a;e?>9fyIrBgoR>Is1b(h+F_G*MxcVV z9R@@dR?M1XVIY#KoMzH}${2dlooCyn7&_-&bHZ-08+6Lrhuw7~JnJld=}9}Fc;(^L z-LI@p3u}Ac&IfM)vfd#$9be{-iY|caB2+DM8Zs0hZ?`2#9D@y>tLq9n(q-WY!@iXl zK`DZoT46nw_?N)0OzI<+FTyzE6F;)SMXi;PLMs_)rN!Zpdp7e6AvsPW6dq{l(iD`W zjACV#DK=EmLorL#Wk%hIiL=e|*i$A+Y0{a~nmD^O(@Qm_DGxmL@T_JvAbSbSP_yu9 zBvX0vBIwPC8hYr~P!8&i$}Nk$WTcW(%Hn1GR?2CoesxOlt@_2iYTT~Y-R#}3l2{|b z=)SgXW%+4KZQj&TN3P!V5}vrT!A3hSx7B{v?6vqvE^e~&Y1VJJ^DP^AasIwd`lwZ& zF5?*O7MME!yYf2jn7t3D5l80}f)SXpZz~+*h_Z&EvBeG_gs_XV!#gP^f8_216&DL{ zwk*=i95U?*IfS;!OaeNTLdkdhG4(Uoyp7gF>%6p?K>ytI&_utVj#ulPDKu5^q|;N8 z$~=?u$51)hKd^m?YhCl2D5k*?v60PDEsEY+GIXg1GR9Jyf*Y^oHaEKYjyTBbpxprH zfr7=#E}cPGsze8y!M%=g**R9_o})V#g069kgWL@LmAM;o#&Vd$oTz??w^iw|Rk3SZ z1yQ%I)h*~h2YTJz-sPZ*Z7u}2v)#X-*r6Ng%V1RmSi+|Fzy=PiVG#p^Bh)|`8Z_?` zJ8Fyn6maJvAyF?ZxKkL!rqsWONY8;Do7woD2Bw@X& z=VihZ67f|ml$H!vSVP4X4w#QSTyT_?E#P?SnDU~WwagQ_UQSVi(~M9wi5VPJ8c~}G zlbwTrlQ(l^fxb!$pR9C6D*x68y|(!voVZqnOU0zEtpDnevogfLRGGAWjJAii+O@e zl`TD^OKDQ>mzpytRfh;WbF#2RH|U^Lc{oI@uE2ArlB(#6cuH=1b6uWl;p4E_#AGRM ztrUz;39ZLe)0~KR>D(Y0W3&*!+|xvOHC`DqrUp8+L9r&igWKp?5Qff$uZY+qj2;@L zv1JLfTZ(AcL@Jf^8SFGSTS-}>R>Xxn{Y6y)Q%UNDAy0lzOb?4^9rmxbobkfue$|Devr;IpSj-}G@da76 z>a;F!g(Dp7;0MQIqOp_Jf!vsgwRU&}&?>PZKUmZXF5aAeVHdmnh4QKsj6$JqDE#RMCAoMNPkMrl_i< z?k;P&ROLd|soKRC2FA*jEl?#cM|&wbnlS0(1k z5Om!scDk@UpAM(M3i;b$fJ#_N?NlHh&fZ=N3OpJm_6`^GOWQC=4I*}@P37=P7g;nCa8%`rt{19#&0K-4l??6R8Ql;kzyB-(k7wb5f>WFZYIwqUYe8bF;CLQmfD4%aWVv>0(SUg#A%W+>d83C9 zJhL?PPEdSZfdGX@jmz-xXaTL6SoK!Ibi#bZ1NWG!WEKxYfLhdjC0PRgf$+Z2Dm z7ktBaZ9J$#S;l@;_k)wOLT=}L=B0IU)n3e!ZeDkN*0yHvwuJMgbY}N`v1B^yGJ~pv zeQ$?;Xtjmk5`=fBDUW7mJOXKwmSEYFWQoQkWYYvVQZWn!fErT?kOg8GXMosKPzoqq zkXL9M!hi_|Gcgl^5;Z`Z$9ZLF!z;H&r6%;Uth7D+dO(sje*MmTKe83m~8PfM%Nl1$r!ezo?gvhsjNQgJM z*h6pTgjX0u&ZK?8Xod2%Z4s1(T8KQA^H%vaAk86uaO6g1MRy#7_?RU4*CMHxW2e>?=c5!;k%5e8kV&(N2`LN<2|tv04weXGfH@8_fsuSTTpWpl zCbfbzMr;@JYs9t{|F}I?bB8h*hA-d*-iRU2){9UlR5U4*Qm0h&v6VXrUsd;$(`PKg zsEg&Klf&Y5NqJT|xRhD9UYGNPUD$L?c!g_MU;HJF)aWW#X-rcoN;>I91qY7Da(`pV zA7&|+kCl#E1YwOLj&rGaMB$dH7+G{lGL46xp+s_d36gqwC?R=?1?h-`2`I#{kbwki zErAcPRwlIe4N_Aw9P^2el8~;4KqiU*H39^l=Ceth841>4TcALpD~Wp_vU?diod|cD zR+N)b=1M*ob-I{lyi^%Hd4xIieZ)yG#8``+Lw!U9ElioBP03!ySysjvcHUQS?zc@; z`5*emMBs##%SmuNsg0k~Vb7*J4cBanMt|CwWTW|0mFA=i=OF^~GzKA#kaw4mIiQMm zkDBm?SYl#Ap<~oAdrlK4mih3U#5d2tnZ&lN`E!9qM$@1%#vWoA5P@`ek*( z=w36KMz{H#G2~W7xtqW#ZpwK7qw%Ji@8O%D0cLwASU`GyU6^OT_+LbqS1*~Q5m%i} zwm?xTr3c6`8#Z~1!AB$Zj{I1jHzjyzKv`j0j+QDrCB~&g!6j;{TN9;HFgFr%8mE}3 z6<(nX`PqSa3aAIUu8odel3LhSdi@7MPv5K-l8BNLLD^AITpjw5{ zmaC|GWyraI%egwi8hsNqeg~sY$BL{bGIY9EgNsI%yW_0fl(gdcme|Uk)*5oHC$(j| zt$^rhW)o1D^>L5{HOBD&j}z!51L<-bwWr{arYZKP9@vnCqNf@akrV0^TRM3{a-bAv z3r|7|Ahlzy;HW4yD0woe6SgpTv9Ro-p~WMM4=W`c#nCv$xEnURIUDzr3r;k1QBAyD-q#ywk(LYn|Fcq%C2TB zsc3trYul$A^_Ow#4Ip_n8A-kSD7di@7O?koNinrV*0=k^6oCN01)7p|R;6rFv_)q^ zs*9E~8b-~dkNfwoyxgEiDn$@qO=LC^>%%vd#kTxck#5U1)PlL_E>C(v*pLM zC?X)TyBD;ZI_q~TYDl#5SAT^Dlgs*WFEhiyi+?nyzvp>y)tZN|puD0cps;Ymx@(Wn z>lff!sCh|ISy4W;1*qFQNcB3WEyaP3B)%&VnDS62h{9u)C!jz=yx2;q@S7CACaI)A zzXSWdqo@mlim-=gSbVW)%H_W<43h(lldyTKB08#>ddHrsRUwOvVpp?D*tuV5loaf4 zo?$uiX1b|6RO+a~wi0DN*}6A7cOG23>qChELiG0Y5EIMu0!H)cNdCbA9i)bK>MC@djNkF7C+R0d&!aJyy z3B6Y|>_#k1&br&JtUQRhmYz9`!+lY_)%($$HN?X+Bo9QU(|ZsK_%TiU5IIK(!H_^Q zhl!)`4Cso1cU!*lkSMq$al%QI4Hx*xw|W0FWH;3f3eV)Y>O6*K4So%rxe$EauzRwe+rZQdnK-h6fjiie1hr&Dajs(%lgM zryp6qKFt#QD=A=P(v?tCH5N6)pwjHCw4t5cyS>!Py)mDuGW|*njQR~mt$|*z$)`t={h4T<{&=82pZHwX=kV zbgBX1xQp2M^gA03;P~CVjt31>ftgm}!xnkG7LLE*u!&g1r@e*;DWjzi-f+mUP#`_p 
z+zMQi=jb9W)fAZ(%3G)1pyDu2<1@|-EgtFwpa9I!37yd53UKPC&f?;*1EhZc<2jD7 z_B&*b(Bo}9QMT@58=lH%#Ags4j*bRY)~%^x{oPD{-MM6RQO@0itiWN7m9U`q*rH+Qty)MIuwX~wV?jfD1b{o~y%;7f^1r;XHn@w9_bQ`zua8upbgcFn^IzOwocs@u$~G#pa274@fV-s zuU_L8PXMFt@uMydDV_?R9smPS@+v;^DbDHyzyvqW3}yPg`$O5q-3_PBQs;{`R-J$h zUdU^Q=EFWZ^bOb%EzvZ{Iqsgg@7K`;F7y$0rFb6if7e9@3QFp3aj1;&hGz>DM+ORS zbBzcNf?M#BZl1k-w?`JCubi(EueVi0QP-gI6`u;?VEG}>@|OScq%QI*U*n^G@;0vO zCSURv{{`lt4l&;fDhYe9?`t|=Thj0cksY{o{@;hI*A{&{$PTntMeJ{M&`giV%C7s~ zzOiVnxsOX-O1|aPUhUQ1$OQfM)DOu}$+|XcU;G;{Gb`@4Y2RTe{LKUB8Lj*M{q~cV zh836R33%^9EQe@OE0Z;fREo6t-W0z6S6z^n0I}*%mm*v^y^7`k3f3q%T$)_cMNC*A zWz&%T+LcR{tc|Q@F)Zb=<3LadZIu*waUn03t7Zw47_%HWZ`Y*RL{?GQH=YCtFdIN1 zRVtsUb_5kiU+R10q0Scf7h`N+1P@@E-m{Z5>RxB}aE&@Yq4H_t%v1-2k z11wS6I%tSo!Q*gES-W`e7DNF;uoS;qfE?!QhYaDtA4X)D(0E9a89!{ixXeOxhsT+3 zd~UILFJsIpB$qxBqlIZ1E?nba-5R#xC|PLV#+_R?<L;O?!b%)*5QWMdpR9UR(5WOH$}2_1B1;XJ!h{B=w%)2K z4^kF$WS~1})T<4Gm@KRb7*vp~#a5q_O}X!0rEWX_@RrkcS6^$b3_Dn9y>K^HZ8O%n z@iqVu!~}{n09pxxJ55+?bDa)bVzbqj*IxtlZCUdy5D!4~)Wh$-bk}7!ME?5ok6C%4 zL$N@655adqPr5u&hheu^a>;2awD3YNzd(${64rB7VG~EB3`HF+dr?Lh!$`G9g*2?u zMvi{;7-ScT4H?2dOb#g*C|x<(xs5w!2w@DFL68}mhsq>poo(g@8=q|kI%rHWp@e8! z*37dWdDf9fX?ezq$j)odS$dqe_`Il`JSUa3s;!igs;Q)yIm%G8D;>0xL6gFH(H){t zw3AjcMaw0uKfT(DRTIOwV{#P$JaEAW7yRx2ql6^s8jx1VHu@mZGDC-v87 zU;VP%hd+L>)SU3ZEi8DTSO4}3biQ1EqSA!^%fx-88sz-Hhk3k z-Uh!Ny03!s(_avUct4{|4_Lu+5bJ*b*STZeW_AOM7UHBRuJWXAbyvh9=3uu)DvpbG z@6w&R^b^K~^^T0x3f*O%*TmtK?{rbi6jFn(dfAp9POs7MkZ9NOsFB{R86PG%B_pQPV5l17g`Rm5rpTp%^x zM3beN5>DM1pa6N|4M*0peOG}WR@$dapA-QssaOaK6T%CdWP^pF0@XA2NQE!VkT#1E zfKLD*&1qKCno0qoR)V6-2|lGLMuAE-X8AyJrehl`+t^0-U?q~;6le*NU!lS9hfNCkpl#FD|Pn1qmpwbdpk9**wt6GJn)@q`_vA*(v z#tcRTx2a3Ew$-g|#pPS&%2v3=^Mq;G6fhV_i>dhuE+&%cMe!gu5!!4A)67H%-)7XU z3@QLU?8;Suf(uDdwz8(cEM~o6hBpn|iZ`f#lRfK*h>Hh{_2 z!oN^eYEzT?UqKWY6;MS9Mz9ge78(N@(Zpdcb!klsSJ=WA#_%*vFsc>}G>3WFiM*qDX{Q zX>m0rCNCMF^wgY5nQKygR#dskg;(ZkyXBG&w9B~7ZJ2w*+vAqm%vb*Hnbk}%ex}Y? 
z8Lb3$iems@0(viNp=9IIB=*%E$Vjow-I^>GN^H3i@GgX9nMf?tlzq63{zUeN?Bi5 zDqJbVPL97r#9xDtA?#mHb!iZsxyWo3eg zj?D8BUBXj}@C(F?;m8rrf4D52NDm?rbH+GGts6D$!Zx227i{?!9X=|=IrhjK zvF?qgVtz*dZyh&LGoeAQ<~tLIrrp;~qdGRzKNTvD_uJo57rhpx2KBg4?cY1OTAZka zFgr=X((P==M%&^3Xu4+t zXhqF+R(|4hyz|$wyeyR1!2RO_E8X0OmjjK|?I3n1^xg3OoD->~L^0>Pw-I0SZZZE) z*x=jp0S{;zD?j>!N5;6PchT2tuJfFmaXBAtqcEA16Fko63}#38Y3cVJO1)9K zu5`}-4omgK_L`?@TbniChS;D~6DqiXs#l^Fh}7q36n9U z0DwlRF~`a_t5}?%D4W2!iU}M%!ZSR?JGTx@Ja^lH#*?iPY%+hr4tZm)*xDU0`aI9u zx7|`fEV8^dE2w*G7HBB|54*h4!+jH zV=+P?hhnn^s<8%+qXnC(r4B<1>N~@(Q>0mMVj|7W}Q(XvQbYl_Ctn*Tb!R6UA#}#x$ax z93(;D4QOO+rlj5LgXt7TC@ay zv^1%hFa0t=WG(y$$w`^R-&9lL-#Kwgj$Aa5{ zZIs5bM9U!rM{!I`?TWqGg9Pc4%XM_Zc6`QMfky@@BYK;U8W2X9y2po@1(eGYmE#g2 z*sp}Fls4oqu%W<5G!#fANc&SrvBU~&bjUeGI;G=2Rm((kNVc1>KEZQ}#-z-F1gb+M zNgHcEL!6Yy!ak@ng~an7MIi^Kv4?!vhpzG}cF4rNqp_jlDaFFTL@7!}BuGWPwFDGL z#Jon!WUHhcH>KP~B56u_s;9z0vUrh7f=Ncv14|lgsD1KE+Y-WQEYHjTNXzGh&gOhA zvz)(F(KmY)`imR02qk+KErJWJ^C)OZ()?V!Wpl>QDdwbjgzNAKs|bBc#v+ zb(NIzv%p}`U{Sf_Vqrj83T#-b zxVloV5Ss=BMkv5lq-;@KOjC7DHR@#34-AYq?VICa%E((6d9*hpl)O9L)2n=_^W;5S zA&%xevU??1YaGWx70N}`$U|LANBxbl6fTK<)B^p^cg%}PrPSN2!~yL$+1u0~NRm$l z)qFI;{S3^OJJm`EAALN-`KmrF^*Xk+zzTX44RF_6#nsIJ)YZ<^gYE+hSi49+ep{lW@@YXBkG-d5OqTtX)34%(ftjw&eTHRT8byuW}*R(wX zlwvn~CBcJ@D13~N+MuF-^;h~lBY}lR)wsN>{7ZofM~1yjhh12R4P3&#Pvf#!@uaf7 z#RHG>PwDbRj=O>yoQ#7pM#%U*ljT&A3)MR)6@&PQmnEJo*vA|rSIuy~_p=I58vq+H ztkt!F%hcI*<=LLq%vvzp9`mnm7}~y~2|&C?roAwPB;ISf%%4%-I>E>7~3$=&s`Xlm0xsBRRo2eD2X0@Oh{g0POPOh%cNDLWXaprOr$zGJY?56^oHMM zAaDSSURZ{6DwCNjh5mb=!DZkF4%WfVtnjrg%X(JIG6rp6FihHpb8-eNUc(ihwdxgI zkIaPsYwH1wJ8m8e|7G_%| zW+RniwmUH~ISVajPhUvlg+#^XT>{SHzAz@^GB#s)Rohcx!Wh*a%NP<=Z}nV^d8S&k*E6KIDfrm}+9Kv+$~tC0Ng-t(>5I;2z9Gz{W@;Od zr{>kHpT6d7&P8oLf~$_#JnV$9a3hbEi#nr|bOZ#2kz?Eejz$JgfyUp2Z9#daM(YeO zw+-Zc7R!AufI>#p;96k*jpqlRN|0L}mmpD&?O5BRWO*#9@zSW!WvDBGgzBMaRtcDq zlMy5-VNgi44Pl0OLWHz9Al&0Wkcvx;fa;i#2v9?Y<{f6ARcQfp?bnt~R9oeuy9OQ3 zxml?Rbd1OCG3NiWcw9uO~y;{NvjwYWz$#$%l58`E(}5%aMA53(9Nmi zW3SB~inY7exPvdGcJToNZlgO&W3cVpZlGdPrJ=QEF&5jY9_}5_ z#ZxIq&vLqc5*bvav`JEBzL9TPREwu1dL&^ zl5;T;z1L|u!LJO@ZhWXdjc2#6&!yIq#XzmOzHj!F&%*6z*E6&JrYP9@@4FQ6`yK2^ zPH?E18fiUjkqxO6i;;!@aRdt2*9+f55e@T(FqMw6$2i;UUH0_*iH2cr@fRm`7uRXt z4PzPyO&d?9R@dDZP+Kryz=ds4 z?P8vFF(&S*-fk?nh0pw9JOPyg+Srz$@RAkaLN9Qpgvxck+kv(V{SCrBHun%<*)ped z5bQ=H#&6QeXZ?oC_ZFSRO;k2VF2F|jh=wkM_G89v`41_R8#k z=XOQf62{r2Yi&EI9a<8n;%%UY87}pYFZCtgT~&XjYcKgzYM@t_NM|p)9=rA3)xhh1 zc~i)BT`zKO@YS6Ep4KL3S2pc#UCxFmZ{k=7YMzc^>z4T|r%q7#?zjW(WbzRw7zHrz za3KM&Gj-W^zn#yaN@?6nH}A@iqpizhcf!nDB79~-wetd|_xN6Oe7EDwqr8C)jXMK) zk5zCN8>V{oCo*s*8Zgs2=ljZ5!a{=Na8ANPTkL_Sns{*1W2>4gu7>{0c$OIY-#%%O z*Zcu1`gScal4tc*ictnq`OdHPRFj06hxyg7c_PO>D96A2LkC=_a?>An{2PY_8wXp4 z>X@(fqi1%chk4JMb_UA{Pq~JV^#Q~;^F1filmSLPe8E0(?DLri0M+6&D%P57VBZW$nIJ&XC2L{!{!jzDO4tFp-ibVrMs5x zUc!tib0*DVCKUg*2s>q|KaO-oovQ(+rflaBYr5I}TMebHmiioofa#RjY=p9xkkS zvEspK{Yrc$86q-9MJ^|VWRTIqG&;J_2?T01Y0Z47nE2p918XHoUcZRlVFQKPwISe6 zpnLZL0}Bw0|3IPIHgN{KiN~-ZWI2x;&M`PR-o#3k(4yC*cAb2?@9y3M2nY{;JbCd2 zdY}KEejoeZ7b+n4?%w!(;2b94UqN3z@c#RAKVZQLLqws4fz9C{!w(BOmqmd&Oo7m5 zm{3?DLuJ{LVMoF^lHG8ramEG~M36WI3?G&df(Y4_p@NIk6_SS?GZKQw8#m%8;Xo#i z=!a>SOxW2%1Q~-AQ5wOMQ90Q}#w0Qhg=Gs{CsDZ)OD@rrrIty$gHj`bVWo&LLQTWe zHA)zXn>nl;TiW9k^E z|CH%+OIiP-x(Y34zj8K^r;0{e+N$Oeu)ePne(x5tPA=}ah?~2e=jF27u7y5x42j+z(ijGVP5_J|y;ZcH zDyfS&Y;a}@58QCu5t}_rEEC6bamM##M33?4R6NawVl)3yT8w$Ab)F}Wtfk2ycWq}Z zMaO)!(SW9PHqe9uErt!cGTpmk*q8?raWEs2M1+MMKe2~SU^0`M zbOjz^VTezrB9x*Wr6x<6iBlHQlBmQVGtAgXSMGutX#pTDvqH-O8p8_)#2K8}(GGSj zkb!a`pD^9xi~OZAY%$`6kzl6GvY{=SrJ{irM;M>IQ4TpP3|8y>=FPta|L8x&8XOQ( z!y0XtP>49hA+OZa!*eRJ20>Vj5RDUqN8}e?}8zu 
z=CC|tITD@@hAc|R+aSg}W`aW*-z$nm!10dby)j7=S%;GlN*RcBvSZK79xwgaM=Es* zdn$wAoNmFUlj1a|WO<}Idg@c2{xqmU#othiYSg1jlX+&8rP7{e7pFqiE@;W+E_qp} ziWH+x#ndWSyBaA8s$(dxoXkW%3et)o&l=ApT}400(7EjkqSnMFxcFtlg}ShWFd7`h zS`@>))~a#qgcja-*q#W$Q(N7Ur#v%IS4;FFvXRAsVzUXKxaJCU{|CCl7drcfhY5^| zRb-?tvM9qX+Uub+cq?ODE4Wuw%t#kIQb^BgJmT5Iwy`~)M!|+hla8g6ve4sJqGzY3 z(X;2{OA78_n}=TTteskI@#O`Fe9Bhky?Rj$1{ zVdeU1&lX@7uoot$MeQ}$a86FJ>{McR-kDhW9FBw=_Jm}Oh{Ozo7_r;Btb)4e*)41q z4QU{R*yL4)9~pEs8Z0dbIgF~_0%DA{BUozTzzACi4x^^^FE2*iGoJzF=Vq7{!AKkQ zp9$TJWmF^3?~txX8ja{jOQs?k!!CAb8Q)7|I@5@vZ&hy=)i;A$&7yX*e?Ph;HI)?} z9d)Cr)8sQ_JmeLiVJ(DD%i0gmxUP2H^@Sr$uD)@TuhZlLH7BgO^9+`-B<_ug>G@a) z1@QuIfNYChj4rs+n1-%VZH_++vp{q($jNLgkgR(VFVm6BPBLSRip&XQXr#*Wo-&rT z+)O$yg(FWw_m}rqI)_xU6lRv*acf!JkP*^k|BGNE|D{n|wdyox0aHm2TJ(<+yxEsk)^X6Ou2f86h=X$!fOQ4)o$rdx zmes2kFvN>xaYagYUkz#zmxDZvr29%jS+F zsy~&|Fgu>ipCB%pD_`(YSE+hfV(tSk59d~QG%1ZocWk@t^V9R2jitx?=+gxB2FR%6tjB-#zbhuX5h^uNU^cRObzD|I1JxKl#W1#n&Jov$MJ0VuBy(>P2sq zGBtAKX;r%X^wV?+0|>))oBB2BE{3tEbL(7J_SShr0I)}W=@N}G3M1-kX!GdpI|cH&>9$8VKm^~hFNuDMQ9AZ>SA$1_g zXkN;w1n~h<*1(ygMIWjy9;LBh>sc4;wVpJ{;0*d-1-?v?{G8}zS1$Zs@A;k&`XI|K zf$;f)bNQZ|8K2D|VG=6eTQHv@MI46InVHO94T_#ayk2!_pMj~GHvkV($Xcs3(Y1+T zen49bkza1)&S>}m*+tZ@P}ushpA8un|Ft;QYRKRHsaUPlSc;utgl(JtF^t*tUk*y2 z1xX3d!4fji7D+K60ji!0GU9X@AmC9~7otfark*C+pwC5~9qkX81ssJ8oLq3=2U^L< zB%Dim1zgCS^f)1(Ov5>};Ke!LzlEF>aw6*8;2O1>3QA%%ghNOGVKM&TF$SSIV1mop zSIRM=5;l7NEVE-u1oU%YdQ3O(F&E)*~7qzU^X} z7-T16p9LxvEM*`mT3RukA}X@k{~<|5@y*--{YBqZ!!IyeC@SQbwH1S{oHRBcd(9v~(qJ`822KC zUhdfb6@&ybU<6vq_Z42juf!!2aP zel{jAPU5{$WP)yt_goZ1@Q#p3=VqqarG@5Qj36vlsA$>+E|q3Y(k5!M)oS_-O_E%_ zomxV{Qb&Pk$hm@TA|p9@rK^-pZqAo7f)x)2Cr=7zj%FxWQKK%ZQF1OM#zoo{9tlU$ z;(;blL`vjeUf(NdMs`XO_~D^Ha;L9&=XWw)9FFHXqGuSvj$E=({{7g%DA>bfqjfbB zF{B`gIv_^qXI1(R|4LDUKm@5&7N&8ssmny*kSgecTIOY9=7W}zW^N=$wpmZ0*;iHQ z67CF0e8cTYSAbsNMm6Q)v1GiQ-bc~flvUp+ej&iAW@@&Ti^|;jfhW{a7;l6?AON3z z5h0E0=&WX1kJhTKIs*mik5firr0JWVHVI&wiQnCuZ2fAJ24*E%V$s!FSJLG;)qsq_ zrCDaFT5jpqu_bwushP44{i$J@YDOCID!)C1#ZAoh(cYUL9;?Brx!&TD7-)g^sZtJU zpbjcVf?{SmsAw&Ogo;{6CTeqareO5wn0$>K@q(Z>&jOa{H^QDkLaeb;9K9(Jsivr^ zz2=856*1P&{~xTKfPgFsgzQ;j%Nno`%EcVil`36NZgU) z?8YqxLM32245Yd`Ym1v%0g(tb zEk10e|FHCcwsHY~o@*9`pI%;wle*wF`kYmQ+ql2P5- zj`?nenMEU|c&4MaqJ~6;?-2uOE`!_(ZZu_TE{$$Z$!>(#7eI(mL36HFhUBQvTBs>hO15?ui=!TsyYs{ z$YZLiUzfJ!;b`XqZW%icZNk8A2kTVWzV6q?DUvi~`0i@=eD9u8uO~(!+*WD<+poUz z>l*s!6Nm1OPAC=ok2LupEeORq)bIEPun>c>0ehU{W>grPQQjr27h|L(T`uNg!%)C6 z|2AL}=h879_moIPR&M6#Z9;*I@Mzg!vz;G1>4z7zA_IJt}Dwd z^BVCa!vo0m?Daxm+9<;^#R95kvW%Vver)n4kLT`42{owR0dnmX+JY500#hDw|C3-9 z-Vq?s_3$&eCI;&AFQciD(roIk0+7vf;LeseH`GcnUN`WiDjFS9bw>Yd0=`@AYL z?W;vGF+W$cP#g8QvF2fBbMI)M2V1if1DS*buKZLTs^Cv7q_g_4vrcohrUe4!#&b>? 
zwZ-1z4tMW3=O7Cb>!)5E!}_c}kBl2Rtg40HiJF$MrF9q*ORhC^0@Ps%Pjojb;v=te z<35CuG}D6h^GAyV$5A1ja&uF&8*R(BW=HjJclK2SAtYF_|7n-DDV%I; ze=u&7^vs4?o%Pr~2PP$s@}1)I*}fiK-=|z}H%QU7eC}2V!=Mdctq;}V-L-N>C#wOX zH|_2z^Kx(l`s~-H-pw^%XBf~o>s{`CVIrq@*RnM(=Qn0LHJJ7H65Huzm$;CK2!S8C|A6PEfw#GUC%9-W zIE=eEVXQcdOYO&&_k*7*jqQPJpPesoSB8t$h_896bhsGf_ihKZfYw7gq;aDc^`i4D z{$SGDr~!an0?qi~kLYnu1E(agc1RFWooA5hGB-b`H0x5~^g8ASlb(RhbxR=meLi55 zcejdit*;}Q08GnzhaPpWa_DVs#LhSOTA{v)`BIO0np3m*{@Y8x`Dg1`9Sp+3aC@A~ zd5+Nebiy={+hs`+d0cS+myfX{i+ zpB=cnXVM$JnvITY95+ z_#y?oMF9h`2H@GReZDJ1!NaJ&G=QsO8YZawj7^jnIQZW?ysg%S)RQ}^Yk1T94l!Sp z8LK3@a{MbF_Rev1CdMox*R_$$u53AHIcOf@^*Y6Q zH?ht?yKoWx81?)whxXDdJ=A7<)OtH;FPXRl_h_Ry|8DLz85-@nBR{r0__3&hiyZ>k z4@E8z5!u5%zN7tyYwV)yg1-xVgI@Y<6Dr*g^B?B@eOT7sC-l?)L2n+TSQ>uCOQ+j$ z_Ov&CCp!1c2J3qRra_%59TsVjs1oEzXa@-fyCn=6F`Bw~iKHc}RnJ*Gfvy@gH0aNnICI*>>GP=3 zr9mlGN`#2&8>?vWOfC9Ts#ZddB7yyy^~)11X|9@evbG2jw@KJ;kz4j`-7t64j?J@o zQD3iOAw?BDxNB5q>M{jdvY7E=A2eX37|~$^|78i69b{&(nezhA2|#}iExJJ&OmZ6c z7$YlLu1AMFB_p@ZHCT1xN?qDYN+|E$y_xzJ9t(Kz;l+t3---0*a^=T|pQE=<7;@;= zpJV5AT^wRYfeP_`y8?A%#|TDuRgXeuLNbvh zgjj4T!3z&eFgcqDGcK%9hzg29nQU~bMxS;ZuqwTHOfr#Go(gLUwAdgkFSz1Tg39r* ztkN#L^11^_zrZvy$HKxotdmFVD^HF*|47)u%{SqUQ_eXLaI;Q2Q8>W{Lzuya8A}qw zCY*43`NYsn{?qNZ>li(4I|L!s?l=jZi}X0@sFQTjrn1x2mF^A&^RCFqz;BB58bB33 z_E?<&vj;AqKvwxQ00O`K93!KStNiusluat($Q9>i(qWg!U(s*u}7qI(nXX%z#_6nz_e?! zI5LHmvPUDY(u=Nx!Qet+Egw_uhbte>l3^|d{xVEq0Uq$M+dwtP%yPF4^Rh zIZj#Sk!5)ll$T+)v{9F5t{FiC|6l2}Uv8E0wBMX{=FYoecRg6mGEyZ?>Gmv8wdtp! z&P>f)t3WuF|LpKG*zhX0xUb%h)wdO8nRS*yCd=kAuk{^Qj$QWIX|LUO+F6O&W|)|0G)o83|H8q@sLeM_&9&Ujt3kyLmlJNMY-VR(#eau4#~i z0h*GPP&Wh~-~eZF#<1p89y_GIShoZHxNd&xVJx+ij7HU8LQap7c5MsSXY;e4$i; z^OYf0Fi2}`Tm0s?APPZ!*+k%M>|}k~m^J$p zK5cH3o81IoHob|>Y*N8I{r$AQ3PZH06f z@v6g#GLG#T{4|Im0hzCCDPfVWQzfOwMmYzr5sq9*k~7bTm7+;$lN|iyE=>u_(}h$m z=tQL|W4KB(mh_b(Z7EA(YSOHsX;8ytg)@Yio0M8{cS@8d7MCMYW-6o`{(`1YfjTpt zBJnkQaHXMOK|}9lzSjM#C<_YgIu&(K3l=M_8ug@OISN^XxfG<8 zwdG~oQoc`i)RAXI>E_H@R?UvK45jVtue2f?+UPWxMf3zQk9x@~@)W7Go$aPL)zhMk zgQ&xc?N5J~F?QNTm!vAeaaE99WV{f@odZ_pAJzLH ze5Oqra*QwH;0r+aDXV92)eZyro7Nj=E|s;-ZGTVONmL&AqYd71E`v;2b#6Gr7KSbB z>P%rAdw7HB8?lII^Gk=Kn9wP$1L95ECjw>HoT^%piQ^t=L zl8|P-z*;Nmp+YJ!z+2ho$;O%E!gesdB^KH>tW(d+ZYOw54sDf=8r#J3#Wtip3|Gut zwXUT&t_z((oHrVyIq$Y$Xd&)Q?VRYhH8-)I-3Fy|ILG@oGQZz#7g|IJwfZ)x)1e-9 zQM3GFuZE+Q4K6Qa=@G7uTR5Z>+i(Z)cgC92(O@bZLyeQP;~{Ns-9JujV~@P#N%yD=_J|IZx~ zQ}1#rua5OfKOKY{W^CIlwpO56h1(AsbPWU$-Nj{R~( zfTm*_(unaK?*viq2SZQ#7^g8@IVY8%wlWT;?4q9=aVGk?o40}GjIdl&;vX01NUtN>5vJBu`?GS6w@q{Y;$_pCafzb@n8a1yH zfsoIZ5fd{}6E)EhxX}LY@Ciqe6p67EUC#Vek!FI5o)qibvT>Dg|E&PT&MMo<56(jl{Jq%f@~E$u7-tlW4-~N|FR=x5D*dX4>M9N?J_pG zv9?T%0ZDQ#g+W}5QZ_9z-6ql{0h8%Aan1Ju8=4Pkgc|H z_{=Q<7U>*`i#e@JA+NG3y>lTqaPB}eF_kR(@Q^Q0^Vw1pHj#=ofr%Pbvpso}E$z`g zg|aUFaXWMqX6kb#^)uyA6FB|y5(V=>Kd1E6Mkq9_Ag>#>eQ59OBxsT< zVG$SLT++-zxl0&5Gdx8!Ldg?Cjd3{rO(NNoK6{hIQ{NmE6gP@Q7oLfUCNms2x&|1wG$Q%Z|Vye23*zear+=K!f} zH_J~$C2GGs0w|$k>$81oz4=GVc))ZSerk zMF+A*f=)CJ-P8km(M_qeJex00AC*q+bUg+1Pwmr3^wg&KG|-}9`))H)g%wW+bw7F3 zfrRexh!jv2wJ8}@JsZ_glXK+g@ATwPF)I~C-&6_IDzEnO$u1PVPUFl(6^6(ZA08ta zJMJAzlte9KGT3w&oAW`dR95BGQEPQknU&DU|8ljGgc}s^R|#=g@AO4SO;d-p8^Wa~ z_jOr|bU-6AVx#r`@Q=O-Y)(TlW3yFT=QTnPQLzND6@_XMed=6y(g?p(UimL&p{r$E zl|Ep$Oh1ZRNIB z=r(TWc0|XvW9{``uQo}c6=J7WYRe5ibK__CRB(+IRgczS>v0E}tY}wh8sc&;3G!AG z7f}P3Y7KN^t?p~Zz+&g9g!&Yy_|2A)(^Dg8muO#m@$tfxx$7CG_^mx%1S+#E8 zHD+~pcWt*jXV!H0mOInbZ>tt?3s(FHxA25k6Y!H9A9ryXmr#S%!o2ZNGxvJA&UlO0 zYNZu1H5PQ|h`PMCbb)tG+3;%HFV;YcjvbZicWbwI@0V6^Sbu-_bmR1YE0%-*R)dSw zJ*z=Jb0^Sf^n#t&f_au$k2NSE{}<0>C^sJw#uyYY85o15xK@c5JwI_;>GesXf+hn4 
zIg_((%Xfd#ca8gREua(mE{BDWb4i+lZDp2aqpN=HH+S(jhj%!Q{Wp;L7GF0wh>uqs zi`Y+(*od)raa+_n8#iquc#FZ3iXYJ%u2?UXxREJYbG!JHzc?P{kr7@YQW?~YUlowq zSdGpSCwse*nm6z}NJo~ngg*cke zujc$UJ{g&&vp6LS`iTkpp~0k-PcotT`GQ##h$lLf!PuKeZ$s$8g5m2 zyG+q>U58XXqLxuQrEi&@Rhp&6+Me+_rgwIx?Q+Bf8mARmi*>r0ml>Ml`eK}xnuCFr zfV!x)nRAmm`G%LN6-GfL397YnqsO|ct(umND^@ZKUjXP)fG~#9m4094t8cfY1(}uc zxU5~8^zvDzk?l7z|F`gx`CkM2;(D5=cY2_cc1YtIBk;O%A$qU%d2=QDuPODVp^lUh zDJIJJjHkM@y?U`Vd%4yP7tM*XFMCy*8?4Eie{I;aTiUDxJGXC}ulbOe-`b{a>Zc2K zKwX=zYg?fh8c2QPn$=saHTnG_+OKsxuyvVyd%KhiLQ=nWxHJ2>_q(|JTW+6@ow&NP za~M{fo4+|bx_{TW75uXY`IlETyA`?*x%+V0TITe6wOKou$@`hh8!)6nT>Pk6*IT8mtDM$CNfJM8{WiyeB=-QaqaBrH)4V~GD+Sip*tA+j9EZOot9Lp)a(p7x7^L*YGfiQy< zWiK_(TRneEoz!6#)d}3ytDDY=n~%Lc)*am0y%xLEn$f#4&24(uMiNw>-r${nE|-*`u`6FJ#b z$$8V)rQA@f_!)>Iny-A>nO*1A-phF&kadOjsz}{R*XfPh=+oTpk3Q*_Ug;UW@9&)4 z0blCTn$N4=$ob{My|2Q1ed`Ol>*?LYCA;5OUPCiv#2wh?j}%EYALrA4)>&N0vkT#2 z|J)&69q%9B)FD9cVZG^9{o-34;{|`}slNIU|J~dDtr?%@gWca9ALSAK$t^d#o0Q5$ z_wvua<2PU6`?lHVd!?QKy6ylLe%tbc-tUS2^qpJv`M%Eo{`#Lj+*$wmHD2~3yyMGj zP;g)IeSvv-f96RW_#fZvbu{vQpToUDMBbswdmr=B9w5XB9B8Frm4gNmDr_=E$&-gp zh&VJtv0{*k7c&CU!Lg&q93N-Y7#Wg=$de^us6@Haq|1V!h-I4@j1(P>Elr5n>4AgK z3_vSL5K45YQ3@A=B7NZCXoIH`0_>Zrp{LcHGE|@d@gYRls$55e9ZOcJ*|TMT|CU&r zR;vrQE#hXKOP6I`kV)|3l(-S2Ux+E6(4!OYEuDGr>eorFYPJ1$cIVb9dB4q@_cV0y;>jzgYdYrgm*2~uUzsv} zIQAdkRI?v48N^)}Cg4H|HN?7JU)u^A_v z;nn%wcy`Vi9+gpA!;U z=xL`x28rP`*IY9wavu69<5xJgT9m6QPK6I!A=0{Pjylo^E3UlWYHN>heF|i#d=a_e zrA9*P*s_W->!`Dd7JBHI)UI}AmWY5!+O?>urfs!`hHIvrbm}>-n{Aeh?z(U0iD$d& zPAji*&8>3HI)S276T9=C8*Lj-LMyPcks@m_rko~B?1#d>C}OP-|3l1hU$X@R&4#pX z8u5rFzMA5$5hS(ZtQ>a?vacuiin7Wu0y{CN#VTy9fixE!;J{4=yy(B;(%YFY+SOAr@`SDp+{ic=E`wG9C#Gse2O{`1a_1BO;t~=Sh_r5sV zz#}far)*qeOWefF(wW00=Y99{9rG>t^i1hZIN*iH$o$%g|0{m%_cJ#xKEX@kZhPmP zy=IzeoBy2om!nTk`scNaUi$5<6B>V-@0xye?(~73?B21CZ*Zr6_;X#O76?Gc?ap@x z1K;mbwXp7Ok9!guUQ76+3uS01d8o1*^qhwl>0R%HD8!zwc*H^tTF`qEtl8KcNW=1( zPl4z|pElMPj%2hCOx)9b|+gjH6IY_%Z5P4+9&>BObH2LO!+)jb(&k z48b_VXOXX2Tf~DRiP)xI>~MWP{MICL5yZ!JBRbYGh1I*G9sB6V z!{M@)wxp#O4|&5yT9b&5+OV3P z1DZCunN3Izk#qF?BsoQjPIiKFohX&3I7u0cQxcGXAPi+aL&`;2GESd^eBm=i$WxaL zw5LE7XhA1%P-HIDsCsm$QX%ThyX+L26g`Yo|EGG-i_$b(>Vu>wBQq-eP;IPL` zs$vbh)W$aTsbZXjwDR+!RG zw4}7>T?>R)s7Cg-WP~hW1*^*z{&lc~l@&r6K-}Us<+zD$C}SNf)PL&MwiS);Y)P9% zS-dZtmh)&hLkrfks#dNfh3!e9Hq!EfwshIR+Mm3^q4Q#P4e+IIO=U~l+D48CYR7xp?#30p z>MbBGWIEpx+w`Uj#;<}A%-{b0x5fPp@PLP_+-5TM#w=cOPH{@r20K`Oo{jJ|H7n!` zd-%IQJ~D@sm`d^L&%8_iFqO{J+HtgJlP1oxUAK&4`|5SCPu(wY#Z2a*%2>e5o$(%P zT;?3>c*i`htYzm+XQFAf!bGOs|2rn896+ zDnGwWX}?sSHM1p6 zSyk8Bx{qx2Ay|EyQdGFk$5yhf!8;yx_gQs>Ms$1|U9ogj85*LXce=fuY&f5K(mHlC zv~B#*X;=H;p=R@_o9$eurdW`1mUEOKyxBesdBR${HLdmiUU&a`-B5RvbO8?L#`iq!XeW5t%?@gUKi=m* zj|kAekm`aJJ??gAOx?|02RiTje@dTcf&Siiz$3ZjB)5#c&QW++?D_OT?>gH}4R**! 
zp64h#=mHXe=Hj2-@@mI?=KbC6&eL`DvR(}IqmTR4JN@mf54k9r1AEkGS@!p;J#_TW zbuR1v-@H$M@}uqhG5`K5(nA38=Wl$uY+po!+NXe}Wq=GwYq5h4<$y2X z5L08Qc#sA`RcL`E7*Om7g?xB~&@*`;=!YVxeCWr2N|J>Ghz5JG2VaPNVd#ir$Zuz; z9BSx#lemVJ7<+;jY_!*dzyXAGSci5vieGnu?bnA%s4{>Uh^hE~BPfMbXo&gb3IAtt zRJDSYq=lOki89!Wz8HkMqlso{R+gB9-)9^d@`lQ&HQ^$GLMV!NsD64VbFX-cA;>GM z$coo!jn$Zb|3jEh;1+Ja@Cnpsap9<4w#bW*sE*FqMKD+>TZ4wh=zWmLK#Xt;F6D;a zpbR7Cf+q-#q)3FZNR7C}jeewo*{F@JD0WkrhbE|bn^$`0NOYg~P4Lr&^~jFEC~^Nd zjKo-p*j z+jIzXHjxxrl1BN99I1OGnTagNl;ZV{NErx(r;l;?8~fIgoA;1-7?V3`kgix`IBAm% z*^sd4jS%URKgohZiIEk#QR&!nQR$Cd2ZJu@lu(&~^tg@!^MELM4f)2B&zO|~S&Cfg zDPI|d|AM$GVY!WCd5Sz~g>Ha_0?37m2q1O$3zqPf8YyRWxplV_iZi&BNSBwFc!r{R zl!wNbRQZ=niIo6Zh|*Y)hsl^ZcRh=lmXn41Ql>hjWu^DDwxsW=^Pz!aN*5;kNS)Prjm1k)f!5Nta zf{VGx4VcNE&gYu+sd&;Emk2nT_Zgj-$e;V!j$SBiDY;(2P#b~=n&`=!rr4c>IG%_V zp2F2>Ojv*5>7eAvpk+yzXvrnws2XQbnbHS(@JX4ECYbd(fgjeAkYk$1d6!V;qQ3|U z|B7~Ry@8!3nxGCTo8KvxJi3I#N1@5}pgg*xDPW%am!2hIB&;@~#@UvY6iFaDpDH?~ z0D7XZ)SvmeQhNEK{)w6jbTL&XC*_Dl%q32nMMCn8F z**LWLo^ltZe9D)q?x6NYN)S@pTf|ID%W1T2&uT5qmv4! z7y70~g{heur(~(CcsK@5s)ct7qIpJP#`>GinyRXLs?dm^z-El6$*Qdim(&^z|0t;* z>|mWwVx`g8soOcMyy~mJ8lg_erbD`>YFe%k_o>n1CB^s^N zx}x*8F0k5^t`m>1x1|fjt;5D%oZ+Y1Nv=A2t`y6z?Fy{H+Mw@BtU@B0;%KM#TCbyO zk;&?>5Nons7qHm6vWT{T+1jsmkn8mK6HvQitX1xrrHsGoBPsNVJpGrMTSR!wCJn>jm^J8Q9M zOJf+Du|#XG@H#A`Dvl(2mQG8rkSVBCJGE6Sfz8Q8n+ct2sInU=vo!lB|0`KOotdj; zyQ^$Nqz&4$6&ks13ba8Bx$U~MM(c2t#)#@8w;?OC_-eOftGA?kvx#f9#)i7C%BrAA zKKNL;>F^9z`L8!hwvu|g75lhCE4f12yMD8_m)o|C%c+{%DV+PUTc`z}dj@sNtgrdJ zFx#Ss1VO9Ivem1)TU)w}LY?bn4Our`I2);MTeOvGxx8y_z#F`n`nuCit@`*@?WnkKtFyU#wtm>a|Gw+Ew^Cqxc+OcB8;M!0H$HukKlX2D{QtcEW;NI!!f+T zo2qycaCGXj#NVOvGI4i$}b~{VTXjK_JL zw#miES?s_OoM3K4i#V*bo>0L!alB8u!#$kALOjcOOUsH3t!li+*-OcuDWHYRj0B^? zi_6KJ?8ih}xey@AqfE*Qj5w^|NU03Vc>y8s3%~Z;lR<3D|JkX@kS3Q3o6AWIu;NTZ zGJ46eTM5u2%ym4)08`9Ed(3*QbIQ!j7;4JG1I^JKpMa-oS@rm#$`O0X$;OE z%*$IhMfy0P-N2f)Oi{z^&QuksVWz4E*zD*d)l&jNy?9=qz%n{gvgZ|UC*Y* z*7+((m%Rz{`_Gzf*V!Dz1s&R-9g1SBy_Mj^Tzv^NRM?x`$rRn(V_ip$ozW9byhP2s zlTF#Nj04r(yqNvfbX~fN8^uOK5`JCY!7bJ1JyRO2EWS*m&f?ug_}GaJ+dnO|(e2#R zO}wAy#ceIdl)~L}i_JC7+XQ~nC}Gv<4cd;Xx}9BBfdI$DraXZukjvf7tDTeb+}iXV z&(fXW-59FZ-PYaxJ-40P0$$R+UErr%K?suI|NDF13NFbo-pvf#u&HFy;>*GouG-HH z755#~e{93F{n6Du1bhMDtQ>RE)te0t)hSNUyKK@kj^1qym>WEU3LU1H?A2##)72Q; z8;;CR@#7r6*NM;k_22pnGD}v zesjW$jXye}7LCkg4&?18ELL*VNF4=ij^=tQ&2aA2%AM(RzUg>A<-o1ieEvHF!M)yU zohXgkscq)JHDib#<}${yS zppcItX6k}I=wYquVjk_r1?wFytg{~C|NX767ShpLIO4k=sZHHIq2AP;{@zzU>;`z} z3;5b z%?Ugx(=){D@s49)4wkNc@%PT?8m$Fsp3j*n1V#=ahH$~&F7Re)@|upT$vxgIo}Av? 
zy6vp+3*N|_elx~hcm*2qIsU-aZt)oZ^8%Lfv0kL+`RMuVLy|t}6-@H09Pm4@V!93A zbuQ(O+)4VB@Z}!NHQ&^fV99^3HIX*)o zAM($9^p#EbNc+mgdFc{=@&ixsQ6KnJFY|(r^*X=xhp*+#4$-JSg7{87lYjR0&gx89 z`91&k6u;jdpY%X7!P$=Y6`tdIPx{Bt@Tc#=roa4_0{HNbFm}-TjLHeIl=sXXdD1RN zw%`5UUtGEm?YxhLy>Q*1Pxl}$;_5&A=#TxOAN|PR@;4f=d3~TRulmh?>dF7Qt`EgC z)A-rHfn~1KYOnTc|NXV!0O60{E8`>N&)?+fd<*)1LkL{t?{&9Z0KDqbeP57vP zm$&Tv-@OoK2?uIr&Q=x~(a-ZWndsYJ`)aTDl&|(`AO5pHf?6`!<$uVLjqOkx@^N1L zu@C>gPWbJaMV~(IrGJMd{O4jyOAGqt!1evNU;DR@`)jZEYM=WWUgl+#y#tVC&GIig z(>-l>Pg~QrZQHi(X}hOw+qP}nwr$({_IJK>zW2ZHo)MeP-}vnn%x`CC~#Vnx(m z$^CY_NsuxHJzm}SiT!>^GmOV`CKNGw+4Y171#o7aeCD|S#?r4qUuxc&rz?`BF~#z z>%lVap*BJ6mpk?UQvWM{(CjZ9$Go_oNq@HgGSLmqN*Rv_*L3jbo@lBwa3ZY$u<;ND znC7{Bw($w8(hqxmczIkfw;I^`3c%*umx(V1@3B8y(DylY%S+`xOU}0(sxX!}?^iq4 zuezSB38FP8pB=OT{gp@0Q2=+`mAIZ;fbt}8>$I{1L!ea2&yNp_uE(vaxt*-vXBK~1 z7>_>VPhpMK`h3p>`J%G8^NCU?c?596LwdKD`95(8&d@pyVrUd8KWEPM*7WD?i-qZm zorjB4WcJ5AWeF1ygueHhf9=sp$ zQIvoPnyt``3I>mGOmlxbOJU&ph&gq4a$?qkL zQBlhVg+pJ74QH_N7d5|zaZ5K04e!l655K6n%&NQAS(?6jSKzta!+B_ea`5_69Knmr z-Fv>MTcdf2!To&RAV}Bv3*`8>f?qQC?fw_1I;FNaZiv4_Kvt)h zOr%cPA2%ZF11w}Y@rx9iE3V#_zx+jyN>y#FR&1F0U*LDX*iLp9Og)F&_;_5sPwTuY zbRA81J-rZtUT9}%Rb=CAKBBfQf9cX9ru>zLU%I%!f>RR9n-{#$T7eaR+@~fB{#@@- zZrEsIqO?_n$9*^vo?hVJh`9$X> zPtnbvpgrklf4DbPAT*mmcx%?lgVOI77gW#`)##O@dF0h#(yXY&&L;j?o!@~cbf zudv}K_{~^(d@q)_oc`=4ovs0$0@2kO=diBqBFc}-CbsG?{Z_$dV7#(8xxq*Ycq*#1PVy*J<8^# zQrdL~mjO-n>5FK<7ge0lx4=xdxMiPdG8^Vr%U{uM!hAee-A>bd{n_qlrs|J=H^7$) zz9jR*5Vt%G`xDmJXE62UOZlMdg{St)4%cmUBJL6E=qsZxr=A$Qp44$40w*9ZefPY5+m%%TX z1=;_-|2G`7zhGwnwd`x4Di5>p3+BI?{R`{=$JBpa|NmDw+b#TWl`lO12hP7Ge_sJ# zK>xk;f9U)z`9C$kKz|AUi~Jh;E2O`I|Bti(Ir!VfKMVd^_P3pXu3s3w#{UBFx8h$a z{{Z`!$1nL`Q~weEZS!x*|CE0r|FZLM7k@kQ=KPXa68_8SztsQd`hVYl%l`im_P3A! zxcQ5SUoijM?7u?&H)a2ccIqGQ{*nA`=bx|t!{C?uSzhw58b*J0KBRy0_RGs({r{E5 z|5*NK@E@E1W$}Ne{((2_{eRgM|IhR=Jp4DjmmeVX-#)%ke*gCEn-DjHfk1_nkZW|m)AtV|57Y;0_-tZeL@94t&6oE#inTpV0HoZP(J zTwL5dd^}%`SAds~pO;UNkB6I2K$!ol2?`7FatZSD2nq@b3X2Mfe06bQK>-nwuc3sf zn538>pO~~QIL{Qkd{=C zl~qtwQBYJ>Qjk|tR#8$`Q&Q1TmXlIeR#s7vQ&CY;)6h{1jZl}9R992c;AYiOmeEYlGjmIG3v2U&Vhep$3v&w#OKVFj z8!Hn-D|1sTYg?=0GHX3GYhxW78(W*-_BOT-Hf2@6O?B>OPjEDaqT>>V7O z9BLXIt&JV6%^a;Q9GzU9oL!wGj9wkMnOIH#;BS0N<`YKMzMgKVN?zFaLl*|A62i58GgGx8RWQP-Uqw zUytwruki5jh{4gw$e8FL@0bw3SRd=ynAq6(#Q4ON#Bl$_=8ojZz|_>VG%dOG82^mS ztjwgCtUs1nIR&}-MR{>v`MG)dt2+f1H6>*gW!Xs;+lQ5j9+j0O*h3p!oyi9uX|@1X2^hrPq9n#)zM8L;|;8g{LXUyxx~<9aF*2H`^W1?YIi=i}m{B z?}m2-mFN$evfKDUpw%1By2y?I=(rRs^s6O~j$m2sGbH+3LSx)K>Wj7dGt~uYDpxq2 z0G<;^J*zX!-$IhUW)g3G#twdz(2#NyFoaUJp(A_ z!KC{3($&vDkZahAg3QG0O|cw>`bCE#$5({4k(zSQ)T5m=4PZSPPmc|(B{z@rO=MBP z@*uKWOpSCx^hyeJp&ZcBtd&lgdJ}&=4=g96{|rn{0@vK$4Erhgtv4(~7cm$drJsyf zn?861HO+s3jW5+u80Eks@nvx(HwcmpH+S6(2rFv9EO|F>{AO_2zS>e<6t8c?c%C%z88uf+Y8M zv|m0T%%m6f^U4+%vJZ^*;ngUsB0EHArM(a#`QLMQTRIwTcY_lGSP4;-mca=M#Hf8r z62x1F!8+L5W2HU0{O&F4bKmXL%fzD_qH4oB9zd@>a&JW;X?3+&;0z<9 zVtO1y%w6{gV`F*dr-9Q7eJOT1L7>H&>oAaw{3A9LM>=Fx>Ce z&OjcRKuE));3dH72;upJBdKF|l2hfeZ zCqHrna(Ho(0x+q{G@NyT8G1f9Gnn(y>V8Man29-T)DYnp`uw z%LMBnmxpuINzSW8LkP0YymX%y$YWJ&8D#ZEjGPuT`j|xNdKoFrC>M{SyAZ+9Sf;ZQ z-Swl`WCLKL;_POH&l!I6EE3hh8pDZ~Eat~Xi-Odu{RuRn3rfPCCzDvqVmW+FDrP1g z89A+;s-SlSO(dYI&6wZ7m;s|O+B%i+OHv`+?A)zHfb=HZt=%veAwbPXVX_`*40;nO zgN%V(JT6u0tmn3!b^ns5v51ynC2SOLa@o^$D>Q6H-k-DxdNrQB+vByy*)5|C%nYboam+ET6bUU^Kc zl;fYBP99-V!Rs^%p)z+Mh*Z++6-BdOcQ z;hXYD_LmW%8Zfr%?^|9#uARU1vHo1SPYy3Scdeh%h%S`OjxmpiRGkk}@|ta520Q|~ zv6N}8P~W$gOB*L!BXTWBB=x#g(fZIAX%!s9=AESCTmc0W%rfPguoO+D)Q?M%#pHVq z!bu8Dfm4pQJZoA@@h?P6XUUh_}~%5_UP6RCCtB=H9n9EqJxhqaY` zl(Zbr82<0-lxa#yrmi2~)2jLdJjc$Bjmw(dp75=1?97jV_fi^+V>dG2IE#6J*{5g1 
z?dw?D$Nn@9mk<_O+v*h5!zz&V?`WpZK{KV42gE8SUg8$o_`x2zANmE|Z-b4oQlK*~b zjQT~#r4{qXMhfI!JJE%byF1#JzEEV(3N|JP5GMzah>P=B;i4P~=QgX?R{^WVo9`%$ z828!D15dE5j{8H__C*bSHI|}qiR=6Amk&Eb?RJAKn~9nLn^3b$<1`bpZih)HKPnn2 za0vui4{G+L_!Lx9e62fi6EYjmur&*O*z!Di=YZQgz{})RS%2F2(*4u%}pkSuUItkpGiKlxIoP{mY#4e?$(l3Qa=O;W*M-wAI6))jruP|rsz5Gzg{i#X9;6=j}R0FaH zT$`Q+X&ZobgT1qS+<1#z5t#$?OdO0z9Y*dIy7IlDn*)`Txe!?+h!;&0Y=e?(F}0h5 z9_+aglgR{yg9C=GgM&rdYlCN4s1ag41i)QB8A7f_aYOx`nTkVX7ZG2!DZlOFz8{Bh z-5}!Kpm>=$yB^@7_J#&4>5DzH4lKG<9s5gx>DCq7H>*mR1?&DSvF`#8M{SAGGY#hp z3a1dDV>AlH&hzH>!-!~9q5*qksS5foS?Z9_4yp7C>&iX>fc-# z4Yd+ia~$3K=x=PS)R1g{n-mj^8vd#B^)Q1O9b`2Y9LR<0UBTo%gcb{096p(!RLW%b z^EgbZ!6oK5M)-^rT}hDc53{iV_xRs&`}q-8{sF13Hr~zdlQ(9UjI1|3IQj~{TTF)6 zc`&(iw$n>c7)iE>xAFCAsfE0<5>5%KA)e|h37sbi-DE~@=4inWQ8pc#=bM3lTqGo(pu z^BEqldA-Lm$im)POL>QC$tpso$S$VgzSjQm=rLp|(gKmOz7Z)YaWCkpZ*0Gniz05~ ztiPM5?u+^k4=GcFqppARYr$uSUXA33NGM;4?~hGGQMb)#&UqWo=}Cc27UT&)=KUSO zXRMa{YLrW^oMT1$8`Luz-2Cbq6xt$)YmCLz6rt> zNj=Bv)bRbL@HcspHW?fOdEzfObKE55wnHX~7W=Trkbanu)O)qguF)N8cBBi|aGKyfSnV^xR$0F|j ztTpPt>tFp4fD)QRs~cWY&z8$9#G`s%4g1y02Ul}O$(csXBuCL@+)KRzmx}P+EAHfL z{a%e8MFJKQ?HJ7~(!`TjFe-P&Gl?OR{Qzbf2d}kZqm@bKmh#1DH{$N90#*0qA{D*v za<3^LgH_}$rAr7NrX)PpM%7TG)v?XhJKnhPkU1p9<>=0E53Dt)Lm}v^X($v$aJ8cJ zu0@z-MYt3dGvaoacDYkxO?M>4mL;@XuWl(w`P3AI?BZy@PLc$EqItJi1=!X_KqjZ1 zHpScpUZnn^L9gd0NO^4ac6%0PXG?KPjiw_m02t@5vl&ZQ18w?)ku*l!LMBlmVW&q+PP^`!{9Gi@ z{91vh*1p1JYZ2q9FQ!dt zu)~%-lTIP_ZGW>V9B!>LE2|7EgnZ4J;LT}(Qbi+&LoRC@I?eeJFV;C$emmAgIhxxi z(e5Z=$>Uy|dn;ex@8w>m3-YMuyi$>&Qn&sX@Ta7Mf2acp+}ga^VaHvw!XCnbBCBbs z-l1QAJFcpAC?!pzK!Srn>ox0S(BM0Jm$zVp;c4U$z0oIT_og@OOY!)|>jixxQadJgfxm~_AaZ!Eqhv2*ZCGh- z1?hM=J#;O$+Bip^)V@n>f<;R<`ylm_I|6He zvd3f=Tbbzsj$eJMZYobT#ppLIXC3i0eYJ#@rct>0kdfN%)-^=)z){@Q#s=tiL za4t6t`jx(0l-BkF@lODxviEV^Q`n@%5zms*$m#o}F)GikrL3fDq1kCQ&;;~J!BlL0 z6bv#fD&|5aX(2Ebb-mL2r=wNpQRx@5+`?v=5?SgSiM?kFU6Qq5Rk0S? 
z7QLZON?jL!q%4}lWEKiF+gz*-u`XeFu1z+_l6V#)qb-Xjl{~x-2WHSPVW;w(%!OCX ziN43DQf7m>ue1To;qTH{01~N)EO410E71InY;g;&09D zlm-jfqROqS)*NN6x>L&t4X|mg`RD!j?Pi)#Au5*L#~&={7)a+-3#Mn?mSN^4q+8AB zgQFF-vubPa88L5Ei@NgTyNi>KZqYw7huAOju`Uoz7 zH99&a$>%b~Fk|0$_*^#>j+R%fM)!szlP^3SxQ~F+cl{Wm7+Lqc;<^!Ya4)zPI$QT@ zB|9NkR__283+9a0t77{?Ui%TS<9*|c4UT`jf41?XMf8)dPn?g(v9H5eZ&RH#X*C@9 z84G3Yu~iIBkf4BUwhS|&s~rj|DWV#iR%R8J_Pd>yzMmYr|I8M%90+^nc7hEN@yvGl zY!$Og@bIF_ET+7PhG+Gv`5tldHn)cvdCXHOK6zH>WwjRgK8VA4nN7D=Ku7G763E|~ zv}QK*!-`C~`2^2zJA?Yl`CPJJYu#fclP76sdHmp8*2Eo~PpmFz0OE+DG>>r0MOt5r zcLhP+rU+D4TzBP4ep*+b^m%wQ^pN$g7&hF|#<^llw{S1)Ecb<}`{JF_A-9*E*2l%k z=W!t5Rcc4NkIoGo1?k-pE3)aU_=y?SWjhxM^A&2`CYF{c8eB)XE=<*$tq?=dp$6&F z-pV3G=Qw2aOTP2LoaH)U#10n}aEdto&rWN~Hxky7z8!@#g5^#z`Dt~qf*GG=c?spl ztX-3(Hw?g=Oxp;#*O66KLm!)`EzQ$RC9Ddh8c6h6261HRiE z9bZ&(iOL@gnb=S;dNL^}ST>@z;J3m6gFPW6)c%X9InI-QE#e!9b=y;&{(2g)33q_aWJg2$xEyG5r?JOY)BaVhX(dG z8DZ=vQ54?MNN-;){5TsUi;v5IihM+jwRHe|;Z%YGUz}e3x z0>9>@kO>b3YH%tKhz}_LNJ6kuXwOe zd4IU3PnbNU2px&PR5-;uRa%ty`FCtTF<{pyC9AZao;0`BY?O$qgfMW-;6Bzo!FYJ) zXuNh-(O!yq?H;nyHM}&a!eKM^jGFUGpG0!t-Irdp&xLYiXuy1ppMEk>0bnI}s!t0e zgo34{*eGO@#|SPahXo8P9l=qzB~Lr)?YkD5y%c->tqypPtwUFLWaRcKZagjQXCaI8x8YR!YN7ZR>qg8XJ1#X_s6Mt zpz}yskzxGp?2i0kxjAn5#=MI`BpbH{728P&36|^S_*`b(jW47g&vz(A**>fgQjSy@ zDFcN=_6SObH5VtQfmy@jlzDMyB_?)BK3R;p`N9XdW2+Eqr4t~z2Y|z(n$4+^U=WTu zlSX;m1M-TdBNIob@gET8i@Ffc?qwsG@`Q8aJQ(*i^NqpA3rmi3<^BpOIROC)Ony`m z6tEssd{N7G6|KnCi`N4IgL)Q1Vm)F?;;I07`4I_dSS^RyC&Z7PK){5VV+e^hTPgj_ zxr@`P#hPasR#r;B#%;f>p)Nt4Cd{#yi|ZuI4@C#LJK62tL&~ z;7)zVro)RUZm+vVJ!wAlW3f{9LwZy)ZKGgEu1Z^=oEb6hS^z<1j-V@s!}_88EL-aH z4+j|tVJNL-{@n{AM`-(zVAUhGg7!-^^O58WkLSJ!PfJ60@gc5ly`s&ROJ@f})vQpH z_Fmm01oWJg+x#ZZETL)FEHR9S(jqQeYCQL{)&$3qIiFi!7B)n%=fiYN__<|Tsj8R{4TRs=NwXpbq zLxdB6a4CYLzVOjb#~4Bw^!E%)nfeVO?Hte-Rb}i;5J|}EQ5lqkV&e!UNm=?~yNSi3 zmre#knrXp021j^&lS>(+gi3FgBe6WT4iQf`X7Xcc4+XSHipriNbQ&8AVGH#`Si1N= z2nvDNN>7EI&GU0SJxe>|OnBT%5Zit!JF$Rn1~M++?~-gtdy0qHP<}A>uiSTEBh8sj_KRnH**)y!*A3L57d%u z)-TY<8-8#NGN$opAu=3I1lwQ?YZ@ycv7(sOW2`@hzFM5D5j#M4Fe6|rtW>I+GX{Tf zBk86Xm#S{Gdj<(w=Sdf-pzvPuD6(otfqTuKkXiY&SoYQ*ukYWU; zlH)Zs*-Nkj!V!ODg`K1HgKQ9X#xQF2V14<<`mx$v>EfAcM20id(bof4U~!Cb=Alv5 z`stjSc}-!f*zK6}x|K*ziMfuso>K*C+oZ^Wb~Yjn8yY zqM(u|rz+owKGkbTOH;87V3dfuCM2rNu3+~!-_;aWXv%^rJoA353dR=d?A+e5wK^Aw z$qB$z%At0+s98T2Z))xBJIvA^9Ic7`0SgS!ptu`6F$AllYya(N;961M@iOsH`*?q; zg+wT8PSg@Zzfe873W@Obilyk_fn%Qs;q5@s<{Eo1$}~99GI9#}1SjKZ6ZH3_Nq+iX zL|A(wbqb9?4P$cBNziIXxWkn5I`JLD$4V2AHzl^$nFz^kp}2s z4(B8sddwdcrgZ>QHIUVpiT>C-?~``Rr(HJClYCh)Y1vB4G4lA*bxXgVGZXNr+jJB$ z=vdj5W1ca00`bjt3bC`D48*`$h&Bg6q0;(%m(!{^+sr}yG@UucL1Jf(g5$uNV$va^ ziXjoE8cRN=mO9HrYuTJ;@X+{`)}7n$gtZhDI?VH%)dhJI;yu?32~{%pm$C6dsf;;} zm61l6$&=2iP0|;geCM>ut;ZHGqGCJyytFB=$)@LJ(jC5`T|9q=`taPQIrl30x<>D$ zDDt^0LSQWIYL|60IK&&@qGoh(71k01euI#@{T=wHih*){+in*7R$?bdyPKJtC=uu3 zRYOjeGb!uAvh}6NcSqg4BBR?D$7Q-I$_L&$jNSz$4R#(lqdmyOR6lL*kO!JyF`sVB zNVhrNw~edGu;l5^aHlNMZq0B2$XTZ`%CO8IRHRCDv$2cEp^wTAodc&M&Yoizzb2{4 zeGr{jFPkGBby!If>1&J#1Frb-O0g&WPrHl$P*|SS$hnN$_LFFT;sH+c=roPJk{a^$ zGuu7A#`VKKDoUNU1`)NDKhs5nWd{Lwc!eFJHU=(->;$u|lI$0JZti((edZ|N+$`w% zSVhYk)g@Hu@@rA9X-G#}V4g2tmjVyzIizwN5>-f0xp`O_KDt^O(;0n+a!5%$Fd{Nz z8KFQKkV7|_L^bewd(KV^K=C9V7^@p1E>#?{iX#nK@HKOt6wc=s?mdN<1xgTly_>C! 
zYv`5@)iUPJm3BF~6*QDKhmU&(tiT*m%SjSW=rxIf>R&6=Nqx>9DI!ehs=9_4N$q~S+ytyM5bMee0bsSZ{$x5Ocx45 zE46KHeN+n2UDP21fZs8p7<&Iwm!^XA+Kfky}3^ieVe(|QnD1~7(zC`3#V*dhHf6?%|CR7IRP1;JfKe!}-a zDzYF!Q|Z*PxoGsHzOo-+Mpg=dOwk9+&1ch>1&rK?gnCnXW3mQt)>!|>%Wef#xn~Tq z98h`YCRZE&LszIKNgNs8tGpL@Xdad~E!3CB6Pr$~N7xcO$4B1Fli(mG7{Z)9&zYjg z%l0ak_8xyTES8!saM#O)*xg<|A}|%-&GU0KmvPiyQT$;Pw`iZYIDFJ2^_Tq}7mY68 zL~MWAN>}{qC}6v~q-?JLxSJ_`ms;ia7h)-Db(-jt#;}zAu<3M2o2HP)Y-%QaSAn-s z`24U7Z&!CYt9C!RSuW(dn&bfV;NW}j&`*iDIiAsa$ybNLvAx{sIZ5X5+RS73-dw{8XiKRd5h##9qF3yhf3_e4QlyIdY3^qje1EZTLm_O7t+#{ z!Lyh`B%+9omf6V@OfGLv9+v@Q;{C2An0+4)@E}-t;|0>{PIIUN4Iji5m8EB*U3#l> z@L>Mlp9N@+nFbo5cWIwS@TfvOpIDt9?GsDq1d-dj7Y{!s#7!@eC&WXO8*9wt{(66k zX}8=N;aIRCT{|==R)Z)d0`c|FDcpt{TmeoewDDU;dAvJ{Culh$t&unjd3VjhM1e9p zHr^KWlEG_{`BC@ca6}Pn8lFgTIcAP znJ)s&@6i}sEac6rMrxIVK6EdrHZoH!+WPa|kJ6a#Oo-ff(cecc|k(y8;+*R=a zDJB#WPV`5tgqbE0D)P_xNgA+VN-#tgpgJR~V0|vHdBHYcF7BVIVR^Q!^`NwTP70Z@ z%fc6d6*`$`|%)!Cj5Zaj~*o8J{B}B_YD9qNky~%|d9tdR=WnT=B$G)SF#p zYC-zxr;5q%C4G!Il+vJLr3Uw8^$oJP7%cR1?Dn2BP4&>+#Pq3#xkz6ajcwWn^Pt~o3H4va73dd>lyfx;wH3qHx{Gafh$ePhY`SrGQUj;8(h%`kx1uFfUb_tbs z3F8rox+>qWEUXHw7c_rcg;u!Y&5^4+NEMLIj1dxQbuls(OKFw#f-Qd37+j0mEoa2W zZ+H=jUWP6nrH!XojDH#kZ(eLPGOc;mPK+t`{Ulv`%-yUrTjsd?s$r;o#5AWpp^-r- zbwDJ=JfQvaaQjJiMdFsI&xkNWTyu}WzaB#;?v7m&TCvY%3F)I>BX}j{fHu=qN4P^x zUVeogc2M&x!vSwo&dl{)#c2DyO?4)Uv*@0miHf@FQW7t6g)q~b1MX}Zcqz@6lO<)ABxN(FC9qQd` zZ8Mn8XC;$~e$;v0izM;y){CmxM{J2aETYHl2AeHmOR4Pk&Xa%QnbG6pK&6zes^Gm1 ztRz(Z!NwzI+QanER`^gSirXgjTuTwqo^}}8ZP*Kg*-<;#16DeGj@cZC+GlD|y}qW= z$5!rk3@&4o!eGY2bXtV8`b8%xW<+J+bz(S*te+)Sk%??@3|-p(rYYk@MIxM>^~=bq zVN0z#`C9Ve^J0GGL(zRfpCsny^}4+!7RK}rU~Sb(|R*1^33$b_dD82tZ`98rRriHsc-3GVxl}q zStMvAFnmZVCo;s5D_^?y3XClla_c(gOur(`spE`cZluY$@TAQ~tUC&^1O;`8aUlgx zE)qvBg~Nu=q0r7~xdW^!sP%^vnRyY|`G^oal02`lN6l?o3-dSs0p2&@lM|e2Sxlr{P39&?+u8^O%4s zIn;7w@X%RL$8^__=yTnUTQtTGO;U6YxToSL)x@g z^?GH{3@iMiZV(@Q@}eygt^oy(*;s({QMU=jdf)2|3R+Ot80heG?}&Q;`0)4l*oiak zw5#bZ$iVeW5jySj0lYn2pGe2AgC?cipcvr(iZ*KBmKKrOgegvCCi{c%TF6I$ zrI%+&HxZ|WjR2Y0@!!>t@U4JouBrR0e?-aLP8zLsZd42J0RjT5HZJ=%r*&bR4fd36 zTez*+X^h({ua7pTzP)dwsF)$n7jJ7@%Ji0BF6?a1+iFEC4A33%8A;<6p<~uq)`!Qw zc1WKY+{;#0Z2-kNc|uragHd$Zt==el!W=)}-R>EfRdyPInwXI!DAzG7sXY}Y#{X^< z7Kj@}nQG3uc%0G?ykkwe+fBS=>&o6Gy5ShQ zhh>Z49>|s^wByI#BN$H#a4;csHYHs#dueDQ9wN>QxILo)b%*7dhjElULZkGTQzvMr z#yuWPXZ+!O;b;9|?X=xr=;$Eg!cgT1T&ue&eFg{C17M-S)UR@Zqj@x-VK(4fL%(#X zuAqlpy@S`$JAZUZ!gfeuPQ@-%8{p_nDJ@3@n(4n5osSeSS2Ir_6MY z3UEW=pG0Aj?Q%9Y-|~RjR){hJX5Q|`d#<1sE(MrxWn8ZrKhr}O!O9#$0VKB)oeWSO zcRHW-c;4Vu$QXG}-d3KGf~*bwTH=P-9qb<$&g{FfX{tWUAp}aSsMnKJgheL%KRD_w zK{|1hqyy61*nc5Z^*RhUt=y&=b`RS~`t4H2gc+{B7Yn;FwW!3-^tyIeG?G5_ySo)n zJ$mK=^O`xAI6pK18kV!Rqn@ZYBBQq_&-9=)0T%g6K;-WAP0!mTYd--O>kjU5Ho{e? 
zt_CLktulR|SielSOp$jIugPtC7p+115G>$qx<02pu->iV?8LS8lbz>HIXBR9?Yj%ag zF<98o3n1kPrTvAy^8H3pO5)p!*~DAO2*;D7c_$eS?k1#&6`XFN<^4q>F?bW;$=K^j zP)J0h=(Tp{K_b!RvQkZBa(^t8vFE9N&_0}kKTciufJH3mhJ)E}nS1Q1OB21V;QF%ongMf(c?~W<>?YrU{d->ZCc%3x_6f z1?HewrSO)A$acA27}}}jD<;R~OSVTF71L@dn^MixOG68 zz0gdQQp>*@?tG5K1Nugn_8TKjoAsEmz0jn{GLqdPTZsxqVY!)G->BPf21VuA`JDVF z1bAlcs{}A%)Ch%z6Zc2skNMDvdgYzKO2wcwR_06*Finn9Od%nG1_z81Y6cPQmA;pu zSg@du1Wk(BX1Sl1*p2G!iUCfu+?mE{i|jL2NsZZfAWL{{oZ1DE_>)sks)(nY7-uVq zJ8EI2oVzjSODb2$Czza*GGsgSBjSjg^^?vjFKj2y#)o@zt*W6Llgewy7stZuFty_# zo1k)GkU|e9Q1KbfoH>3qgZQHD5&&nqX9v&JlG5x&a9rYZ-_K$~3vpkf?RR~%|1lLq zuU5O?qZ3qF-AjZDQs*{w6~xFDe} zvIzcbFTHNE%niO|5(hYfW2P-=^}$uVi@X$dBdxV`zM;&^V;WHh8|p$mE;qGD_e_1U z;dBl!WknlGqTJp5jSCP?{@w!9)RN7_BCSZFF%LNs@{Eex`A~rXE;${mj96i0n&<0} z1ObZRjPJo!>4y`a#80XpWi3msgQqEaC_fy7lpzE6_C4bmkyyZwEXcW70=rP(i2h{7 zk`B#V>pXjwJ#GDxl}gqRjTnVwV7=Dc2}@sQ56%h*f4UoVOnB z`|kcV34h&ufiyA6g;f-ywI#t*dk`v3;FQVUSTA}V3g$WdjB;M`+>G%<=i`Ws=aBf= zN_hhAM~*+=N6yy~?C-p4lF`;qe(-A;f{2>aM*`E&n52CN1dy zY^A}32zFH=*TDeM#(AtG-kDxhda}TwRJ`|C#Q};E;Sg*K2_!%8{`Ka`NRL;&+xcD2 zit!%ORK66Xnv*sw;Tb3Sd8C(e(~M%OsIWOr2*ElF^hUF#^X=(!!dGDD!1t4ca;ZS=kjh_sSN+&&MX*ZyzSHzGqovM38vVx@Q&z z&_tiHe0<+vtTl1Dh!}QrMwM&lZ3+5kC>$NwMk>H@niO{qoo*n4TCVN8F|>O3z4@Y5 zWg^sH(Jf#J+LT?9n5^*go^zAwMkMD_t?lNoVoN3}_+!$f9_Ah}0!1^$8I0A|cfXC7 zw3l6;yXpZ1%h+?&J91a=GV4nyux?&D_7`eOjrC!1Tk2)~T4~t*xGXDSTM~ir_p&4W zrwL42a=zsb`=Bor8}Qs_h6Tp+Q{_w(2$}cTZj|L6^vX&otTq3}BCsJYRpGDZlS3MH zz+vgI>arVA(4K@(=FEvTSXPldy_qh)5JYDT=#xGGwF#F4nPaFrSvWCnDBEbKzJWxU z#CesKGy9;%ULsdHDnP0{Y=CCWv6!#hZ^(49vfTKMja?ZUJS489lwHf#XA zPY^rg54f=haDnE5if+h74GfNHx?aYHn_x9=qfp9D6YAQsjI?zdL|21FDf-Hm(pts1 zhNS_<+ob&P#g|U?R<*@jS3MYMnm;Wo240Uds*eNYU@6qJ=$AMq{HiQ>LT%a-q)lyr z$1n`1pqMBvWy0@_`10&$wF|8>0hAi1kU_E2XqWJv9P^h*4QDekk@w(!G1QXq=~#$r zpoK zmnMGV{GP{{NHTb-7|yi2{~=^-%%L&P4fy$U?Np{jb0*3gx^A`;r4hi)=ngrN)l7FH zI#R35OQwEZ{|(!8$~FhaUHYD}SNhmYEK%TP@F@mpxbhF`4_ug=-{udJCv&e?RwRnU zJqqS)K}U^O>Hc`V9uS!dXZ^cTc$|%%zU>h*g?q$7t~>(T#)paNV@-oDqYP7a0Hru4 z@=-04__j1OJ%-TEr-)nQ$O@r!1K}0dmQD3@4CNxY&uo311jiyOhozxXWjDcA=WVRz zzO|N_O9OcAD>m~4Y zz4$1vK8kH%)btGkJN}<{q5?PD?pq(+v=ftPC%rVGV;j?FoSQ>znVNJ8!y~Kv4|B@! 
z+bVjQhdEfxD`;~FGxa-?+2iR51DYplD<=nNIwhM%>**(S!cN~a&U@r8I#lEvkS`3rMd%^#J35~y!&^*ZfEGcJ zUbP6m?NeGDpwV9=2XWCHY*o$@E3RT;*pMxw#Q>O2|BT4(KHe> zkk1aVh&JV>T9&3dwrxm-u|6VQxDuccx6(FLcz1N;K6J+d(kLp`NNP}g9pf#8mJtu} z+eiGx&S6YI)Tunia$SQvXSBpWG`3o_UK=vH!u2AyfkR3*a_Mxa^nhDewl-UGKBd-s zzM+;D&FTa5^&EPr5JIuBWcJLx$Q_p0~zuTDjADzpE>SodG5N#ODdrgvZo^BDzB+p!Rde= zUghu`(4sYctfYrTe2~H;2rDmu;LuX*8A=~MY8eCT1(}dxJg3Bi+6tJJ>X@eA8d!o;TRR49LCY6Xqv9g@DTu zi>pczlILPj?wa_`2!CvYEsSW*o#Yzm$Mz>c5gN|Aq#OP;CN>%s3Bp-6zFgxBUN(Eg zMAh#1YYZ0DG!5h@jodf%^TlHlTt|paTr0<0AC{-uFt$24#40RS2^C?L z23n)T*3}&^j@Z@XtklFiF@5zWX{a#gkcx0iRVXyZ(pgwncOr>}P02*>s(vj{sU}bB&jxNZNn9k*{Sb%CEKPEYNS0Up6IhZGn2)Ebb3d@0YK}_~kb!Jx zoZ$=y1-w$%_J^4 z931kWiKf|K-XW$kMXJhZdL3s!-iZM&Y^XMDsp4o>J&cnza+o}*=4zrVYdQpm^66?)T^EWWGd={SWvy$r9Tn0Y3C$^N<;w(J zor0~{8?J3_se`2U-!~pC4)EHIZfelkEyIfHpF|#aab;KX=Ec_IJND?t?#8Pc?&$F& z4Q3n_RN|wQm#uanL-JfW^=gyiRYAh6C`Q|OT4Yg3|CgsmS38C$j@2MqG;6^QX0k{L zuOwD^oCLL2Yks2XgNe;XjU<~Q8iCT=fikVQOrvL(5ag(9yG*Fp`inyNh{9P!pr&hz zwHVY2gkqWk<=m?_IO_H02ni)7w7^uoJSy7)Y>7^4+}a^Tg5%(RY#-L;`Z{bEC2PbY z)2NbaJZ>HZ(l2)Ht(hS%wHYqRzSH!G?2&qBC{h#3vX}j;Y(gqz<|3qYIm|;nE9vgs zq3B-w0-*1q?m{VKn(6{<%r5NmC!&T<(4wigbt`0Q3)mUx_YEWPGG;{hoAEO5KE#%P z#FWFulDldx*m`KyMz8zP&4g*1hnmp7s)*g?|KERvZw|jLZ=L8;7?#1#({jFVdZNX{ zf~srat>|hc?fj76R%Dz7m%%1kb#7qf6|NfpFY1Kg0F$iDF6pWE+_TbC5EAQml4s>j zEO3&eUXszp-K>_LaKm`;eHt=-Y9_TlNMzQ>FG*CA#M@9Q2nsjWA$Y_mdBe6Shzrwc z1u>z~JyF_XpMX=|Iq*+Oo2wEf^>1n}+aTH4f$WCfxOcuM%Sp6T7Wx z$>IDgrEfkV-JY-9l8=>IqV&o2U49O)a(sLD2`_X`^pwj|LWk*vsKlJ1w(8eNJ9DECp+6$4;D)!aRWDK z$bo&1a2e<$Loy0`WJ7;e(Uu$1R-tTex-H z9q^Nd-ZwwBc!_0ItT8@A`BE;@E|gSQ3fIRzFHrc&sWX)1V0Ut$_Q*+p zbSjI`gx;_&+w>2c^zuHop?+OBcoMb@b}^)360_Ezel}=_HZaqn6u^pU_cT3QYBI;F ziz4+y7WE&(wii?L%vnulOLzz` zHx1B{lgWM}P}_4G6uDYI|D_-Q@lVh32Y0ux(lvtvh!n5&nYvp*tIc}zPsQoyWd{z^ zE^TB_b}5(bPx`k-bhdq)`G2?aNw=npu*0FEi1K=fPRaKfQsY7ZbAvlLgeSxZD+h(| zX5&inl;=T0AELxa<%TD&$Hpk{{VgGPUN@^8|5iGSGy0E~Y;LP>rU$oh(<ls;^EfM(il^i=9eHB#m9B*wxr|fyk>B;qN)K4;*Us*`Kzj@KBD6Rs^111C*YIwZ z)wbZ;j*BxV#~~{uUb>HJ`Wzvri%%lLe!6DV zIK3Y{!BgzYayP0!H;_-RlR`mAAn;eFHOB+6H?kw%UTLm_TWP;8OG_58V@VVU`(vWF z)bZ}xu=iuNH+<_tvopJ*^2ofVxlRA1_mc7wTf3Vd#7JXkO6#zlmr}l^t)rIVp7S|` zYbzbDdoh3I9VN9f7tZBvA{XluG#^LVq&tspajLw#d|A4{>vq9A{OEct+mE<7TQ_i$ zbFEURrnfr8KY3g$cMn7xm9gq`tEde!b;q+o$Iq>atN0afUM#Y%$^W|H+d(zK&q7zZ zeVkIU!#vB!|7j#6EzYl`&O zXp3~|zfy#Ayop0GyB|8Gr?}!3V(yWp*Pq!QM>Z~{Md0h#R?dkQE>Kr1oUCUqLWni5xR5c8Ja&_a6RIMj?=MO<1k}L z@P-+1975__*f5FIktb80&0s1aR!3oHadM&fWlf%;Nw48b0} zFc66bEqs7M2PYITvICjOV1f`q6s`$1xY)wP6wye7jTYx{@r)=+6QwWoh{!@k(n#E~ z|F#(Ug2fgH6LNz$hJeI~6koWysT-02;UkbI7t`oSgBC&(!rpWNDVdGD>{3gP025P| zi&SDsC9+U@DJz+ZVv?u1=z0sOqU@|nDXiQK2Tq`_a;i<4?zt%%kfai88k1HktE)PF znI#%>m?@@CxZ=9>rn-7kXBSX>)9Z{!Kn;vQ{vH~Q5XMyf4?yBfvkWuZVnuD%$4v93F|2u5S z6(h%nVf2GWz<4Wn;V%ZW%~lr11yV?c1ViPcyVRPrt3o;1E6Nv>wDQU;8`CSWVoc_R zB4oO>Ihjw9EU8nO6{-klML~8&<~3moG#<0gigQk(+%o4Vx0GTktE2j~3LiiN-Dl8i ziqVJ~M1_t@O{mqGDlN2wsr1seH`RuyO^b4pj=x47Ixtgv+j~_aU8NQ9z-Mc=R>BMa z*4f0f-GSKac9YRF3^SZHS&S=_oL_2FQ(SXtGsYHl@Iv3bTl!wpI6WuG4Z+;_=3BP_ z*xTK9KZ`4u7q|oM-TlCTy#=vfgMA;E*MspDe)RV0Xk&~O9hSr4q4SvD|M?pin^;F` zO(h6GosUdaR7WuBg#1KORs>2?zGwMMkNGCLnLkdpNN8RpzKCo_xm+kwk;0dSQF^MH z((06srMc~Cckk6j}IWlS$bHf*))e0_9>9Y(&s5 z?-SpTVwO7>`iNM4D^6(ycQ_kUjZRv1_NQ~U*m>2-El zhq8y&v3d=WUiwr9LqEpLaZ^+c5|no76~H7q~S87d)Yx-3r;l5`k)1f6N{bYIs9yPHS+~)F$bU*g9>&D>FaT5ORh% zLtSOjTG!;J5|fBUCt9G1qKl_E_r(Dds9=uxEP*`}=DKusv5P@JKzgDg(7{kZ`y|acS+ZU7vJ|f%pID5twWQf8mBb86|5D?IC#pR~X|t@QEpge5 zO!BfUu@vUnwtBT<%B?0hQBu@!;VHie?+W@85T5*20CcH6nd-R<*Tz1 zHJC)b=~#tP45M_^>>^dl$C`fBkJ;2Guv)8D)jr7-EDaUjw&aohh4O!9o!n0@Gi)5uWLhR4CrOszyV+R@rpQby6#u&(?#$y4FC 
zrso?;nLOEJv1u@aLGD>ucP!+wl7wbNOGQx2_fupFm!&6T2AfPN-RVy4PN&TB)L?l^ z?P6_$ZDNa6akdL58xUwsI_6eIm|pePB$s4$uc*)(-xk`~F_$e&%HFkyLqO?z{jH`K z2)$nh&s8$?+^Y}u%jgS}(}qYaY-yjBV)#t>S{BA|Wd|Es1`S}cj}~aOgqW-jZ?+>K z4rq1$6I$sQ4>HI@v88joyopiCwX>hxIO|B=%66T!B%O*5(pq3#&h!iHIk&)RJ3 znPSRq;p+Z|%j2LSDy_JUR0H>9Wu{`esannnJ}Ss%?0Q+M&c z>E<_wqpUdTl(VxP0{J(v!Lys%;z9Y~EHSxPcN& za;pQ(6D<1z)g9)oxSOaVv4%KuLXml+|G8vvvv;_)3a758m4*JsUU8%eib5Txykzzo z!~Fa34LhFWbd`A^DGt|8Lt3sHS8%1fHEYB*{L!*QEMxuIF40fet{Og`8d?9wmoGe7 zF(+*I3>uM1ADGjNt=4tyEcBrN{=J_V{90#hBdL2d>FA?xOMU-RnC=X-$6x7UzEoyp zXT4=yS8_0(3-;+YmCNCmOG?OHewdu!l~``O*4_^HPX4TD+=ois<+OJcVgs1JoOAH+ zEO>z58vjNk@vLg`+{*Ea5AqVs;SBBbK5lUkjx#o`u5d{6K%lWu&-28?^dO7$N-osq zX}>s()Wx0`K`; z=7s!^&Z3|a3`HBPFUgcl6spe|XzCHZ#<|SlXv8e+P>3hY4gIvnC-84e)&dKe3f;tQ zgyumW@Qwi43%ukj4GAS|xM68nNeuCAD$r~snvdD;E2LTkfMg-Y%q$IqVMPuQ0U0od z77*f&2yzgo(L!V6*r^dSP7-5q@*42=HgI1=&I2_~_fU|(Mi3MSCP<#DXM zyWBAl<8e~{O!@rH|MqP7U`q|4?G=n~9YNt1AFmKSF##bj_kO5c9uVe2j`MbfAO#EL zbnXOA>k={X>So9mWg2fOAD|}f!b~c)VyQZ% z6s9aHyKyR|a!>{ktEBK{*pdD$DDTWm@yPKXv#uVQArI?|@$~T$(qadV5bFev=h~_z zBk>^{PXYNd^D2?k|BOxtUnO}UQZNDR;&6_QE;1#dNFyI}6B%<`RFCFrFBA)=6e$BF z&xIsCG8H%SGjXmm3gmm1Lq+iL##SaKUCI|{sw~+n-;B#DgAf`vsufE!417Zugi@x| zgJ;UIAZiAbHbT5e36{XIDYsHN^`R<*YAVVO9P_8B&W@I_5$ujDER>NDymBl|NEhob z48s#Dh^B+?&<6GJd2Z`1QH7B7tRrD=FAb5v`e_jbDPur`SN5eizUe*>ZayPYA~Daf zLhl0^Gcr#PG$}LsNbnT<1@#^SVj{GRTVt3B@v0_Cb8{nAYRl0F@4 zkgVq~CD2^-YE0$xo&;&<2Gk-mF~SZMK?h7hSMDP}?DZtHKPa?9S&*XY6oxYJ<46+` zR8Cwv^rGnTL`BpXYjHzZGD~XB*itiw+*13L4H$SbRP|^`&}uiA5NM3@M*9a%cJxPq z6zyJhCTK}WMM(?UZ7kp|NS|semlRE!bVbU9QS8thq>b;a^h&)9xS*%cc5M%z)fTl! zAXAP^|Kl_AxJ6Oz)30Jf6%R4t^zslXvQ9NFKOi$tGx9Ml%RT+{G68jXut`w=rF3ww zP)l$UY3o8qEjJ^kFci`fMPQC>QYZg{RP$_KEp>ZR#Q^z(GQ|fNO$uUyQ7K1tqsmcL zSM?-xG&y;7R;g?`1tloLCM0yVIxFbz>H!?Lt5>7+S5q}rMk1^5?G%y=S*w&J%;N8O zaTZt1;LvknZ|7hA^{-Sg5yh2S5wRi1wZOFX0Xwf+NsV*bG-y+@Fq7y`G4MU~)M?fA zTcfUBO;STo?BpZ_UTLpDJS$Cupgtcg)=;W3r&a?j@HUx^OCc23So9U~R&V3A_db1kQ#LCpC`Vg1yuQnXnuN{9s%BFu&We>% zo%Jo>3phR_O#L%kyW=nc6lnpoco0!Q^Yv}jRBrwCp%PPS8B2&#S6e0$T1*a4)71mC zR>DlNPucZr7vx=M?$n@wP;Y>28#Ff(bXygYQAKkxWcOQs5mR$UqSHDs?v2@zJ@a{S{JU_Rr z{9^igHjke5L0$+xIOf$&Q7%n)uX@kv(Lxi4#x_F%G=qOPc(1n8|Il?$ z3loeQmO-Ug_CkXMZ>}>h(+rx{L+^5(%r;^q5l1@sC%MP?MznhiR;1W8J^B%e{}6mv z@?l?5Mo)EOCFw#Bz}kC%{W{+3gAqxb9xT zg9J9{8aR(QBUnD*T}G3jaJS=Dw|3L^f)y=xM-s4pWrF`W67?0p?(}F$*mtcKg}YU9 zRCt9Cl5!XpdA~L@PZ4+rxx^auKi4#S{h07dQ)pwPdK*cIMfo>Kbd`@-T!td#|JuxrAy?XD zcG+A@G3XdrXycB3wlY1KgLBt{4Xk*n6^IVi;tn~kRW&cPGR6b&qAaN1V7y|9F}m)lw#_r1z?L zr%<{Yw3s><94tomgu$sLrewc)AGXl<)uQhru!38~N`ACq_$evgxNt%0jIj4L% zetncyyHT-?xhatJvG;er#c^fN*vF7bHbIw-zmKUp+owOY*tQgWsDQLfH@iD^H2#L1 zNtt(Bo3;aUgqM~gXM4gg!?p`lo?CmJc)PdnSrvhswx5=V|D_i*msdhYa4{Y5lS2_s ze^|Den}!&WwQ5N^8eaGaAf0;?a z5B!9gu@@H~=m8z7tEoP=jP;v1cS)M#PJjWtfYcb9C1J^mZDSy|CKbF0h1|~Y5<#{*$l01bauP;@tCodx28#^IBsNT<7|NrZz0)MiULqX5 z{A2UMX}(GxuKY*;H~!LMMwd%0$%3dp*UZmclm>i({|_AL-u$lR+*79sy#JgOEj+3H z%e4(Xwy!ALDX-C*n!gCW&^^4x={3<6{f9`r5|6sm$2x}f7;M?S(pmg;P&w1pdEcSC zxrcl`D{jvK*K*>k!BzPl^Xmx`K^JkcFW3i^3>Fl`5-bhAVhh9xZ}e9=$m3)6zIpv) zkr{!g(2H-WWoO~~+${;2i&Eqe<{{V13=i6YdT-BmMlgdntbIWN$y;}q+utdNsFy!0 zZ*ALowJ}s#F!;7bFI;s`KcD`D&$_hWee&`;lTkd(dFokts+ zbYy4qMEt<4-XSrWpS2#e=#>QpdY~oLUG0hA!(EU79_hc7;i(uQPPfO0 z*fzgAiIo`oTNM6kGI7R5Ofrbm%X zbKZPlH0X(nQ>!X%`ZTK6tP7lGXexkTzOF>elI7Y#XPC8S*t&HS$4{ZTbQw{)YgdzA zH&79rwVDtw-kyH{0;VcAuV6ZPrgYqJCNj&GC@B||q6MxeSe#=%zRabH6CylF+hFo$ zkZN13QDH94Sa0W@Us7{+J!eh}6{le$jLgGPIgGYhvV`nNNQHd^Pmm(5ZV5A6>KT@#^8rr!W7${rtuH?+;*p@VGZ% zfd(4b&3*s#;{o|Ao77w@h~0%(vc!8HUphh~}h2Vu>c6NKT3)Y6D6z 
zAzpaNDKS3sn;xXZLfj}iZWak0K>XHFgc(btz-i}+S;aCyUWf9sdghVFBbR`-0OR>;(_+4WViA&g6yE^;DqvKCkuua_M#njF0NQ% zi1ERT;)J!$A$buC@VqK)ba z7}Ie)4RxJfAlh`MEKhc8ry9E@swjNHnGzZb!yz)Evg(NO+SW#)wymqG8Z59i0@56> zNjwbgamXeK9juTXXK^gk4ior|)<(ivw%b0w9)a#n9=W*qk!$X`0H*uxx&~g;u78}1 zNcz0yU9*fU_Hr1_|AyE!Lwdc~035K20^{;xDtmMy<0-u!{BYJ14?l&CFkReM@`iG} z^_Wh^z~t9GbCh!RZcf&`$IfenrI|DTjOX;<-z+{@-~)}bpG2D_=$mRMt+dpOGMeX0 zJ=}k_qT^eQ_55FOT^VKNZDOF3jo7Zon}%g9Zm+7%uCg{OTIHy1c1sv=&NiAW;An%` zLfqeCL%3h9h8W3rd$SIA;^wyRwgZ#O%GNn#a_=mZ}a)|}uG2W?0qo&za} zMnn}#V;sxO|3`KMH$0_L#-dt`_mUG_4V(S|4IJIhw+=(DOJvQk#j zSs}~x74_+feUJns{Q8$E2-1&yQlr|W_$R+L4n$W5si4;;5~{E*5RR7MgldrHwzxH| zHDs(Hj?$Q_Eg}pP2_ubo`lc-wju2wYYStGLXDofSQiaP>1`MAW!x_>rT=p1Z4t3}` zAi@KP8?xp;8YH58C9z*HVj>km)VhHk<8`$-rxmY=#e#uimlmX_t&;Uhgq%?i@rzkv z{^=IxNe_CaT8aJUn3B)@F=mTI;L%1FCPI=WdJ>%^CC$=M?kO^(OL62y+eax$epHZn z^rst7{~AjFeUFhb(Vx*kDZj-zv}^13SUItLDi3)p6X+m6)sZYTgnI}* z*+LmQ)9&$clx=)$MIqa=@u6v?T|t?hJc^U@(UG+1JFQ4r`%oHS)(5n$Eu=DOz{Tv3 zwg9z@CpUVMlqPSWG7!Q~DcV2Vc8PA9>M4&R=+oK^^_C7Ks0kl&)TAnPsf63k2ZveM z|6$0qZ>n%$RWXFZtVXk|lY3VW)jC!IWm8?|P^&%Q(^d}!ldsIQ#WKPnHjzf&ZN{V%a_S!{0);#f;2RI&!O6h}V{;z32!wx67BA|F|n^1U>* zR;!aCQH;K!!MG{1ot|><=Re&3_MkX+DR8ZHDXqn^kHw``b-5OoL>APP!-=kW84NbY zQTJ4&3~Yn7`%7Xa16i%K7BHCQRPkP`Z*%LfnUR^^^>(hSHVi8c-%Kw2(Bnj&gKsr` zC{NR+@XLNf-6`&M4cW2Nzz0U~UO90u5rWadg@te#OPQ;=C0R)oW(ylPW>Zl{|8}yf zC22|Z*x{{lWW*og~d1J=QV)?dQ5qanOY}bn8Ua3rnO^ae(0B zq8SabM;{l`a_MJ4v4`nQH&&semYK3Od*kepZRCfotf^gFNL7~@)~I$LOI^$rS+iEg zu?8tqNPT16W^!h3>ZGI4glk>v)-Q2i|VJT)XdE9}{s--t(9Nj- z>PoMG4_MH%Ahe<7xy5Vbjzd_VgTsNx@Zd>&;+RpaXF;nnoD-C?CSQ5TMf@cjW0=P{ z_cX~`Zflm?{N*se)0$+y;z-gw^)8q6C3QZ(9bd)gKenxoO;Yq$os!};=DZwB<8&Gw z*=?RcIjKS6*Sb2u2r?(;e{~F70=v+&16) z_H@M^?r{fnec7x{ZFY0c_NIsY<8egh=T_duZz@zVeAf)oGI)&F|9*!zB8zr#=BGrv zBUrg)Bat^j5vGAGVR@bcY9g>6?-VC zVzahvaI!PCMtfm$d$<=VKL>iecXC1}8^d*iAX8++w*p8QGz8Rif7M(W6m6tXI9^6e zU{_2PXLP=FevKzU*QZtCM?$vJePM7T*%ue#S3;-}98d%YgHwR&R~}o%I;3MFH3T6I z!b5nrV8w8MYvwuo=U#!BnEPLWHcWo|7%LNdaQ?wu7-+^GJa z0_Ta~=5^PIfz0(?cUiJXJs%frdb2$oCz3Ucu!h*j@scrye$ zBQk${SdZ#(j~a4s4LFDi0+;UPf8gRwVMmAGhc5+2FmP3Pn7Bk&)QOvT3<{T+d?$ra z$s?=8PYuyFmzO`Ih+!LfQYE+>AGwmVXi{pyipbTGF(_)fmvS_?bk3-YXR?|n88kvT zG&mWOwF#Oi#gjdGj5Nkrz?N%lL6kBDi;9MMH}x7y>3EO%lw0V0RYwSCc7aqiFr2VN z&GsDK)l6yGJLh$d=x7UT1DuXChuEi8M`T3m7A{~VXFzms`o~pvDToL{AbH82g&0KZ z;gx5(9Xs_L2uP08iCx@)U>*XG(&9Rc8JH(z|5XzSnQUW`t3rIHI7mzunwS}aDOsCu zWP4n4nwv*lDR`SFD1;aFdXVIsqq&MOsiH(Po3v>%tA}&exNC|5Y+9p?3lW@2@{9?? zQ<|Adn*pH#q$^H&TvZsA#nYg+p*kUCmeomJD3*WL4fkImXsrpZT5fcmUrg~JNMOocZm+w^nUYqi1NlA`Z=Cb26$b$OqO_$ghrr- zd7g<^PW;7J-uQeM$y{W&p|^L34he-NMFkLnFt>(E)0mW@XFaj_nXtHv=fftSx}rB& zgEmP?wAP|hQe-rlB^7!o#wlDV7d4V6|CGA;qelpQJT^*o1C5xrSgI7MzM-5yWu(oi zjg8=>S!Sh6N|uVlm3z7hmbhnPNv+qoH&L3WC^Ro^*?`lO4)C|0;^Ll)sef~7uG~~# z^k<0R6o3TQA@0~+O_Zz!CZGVfiSh)GoQQahX{ZePZ5il|P3Rns!B3Rds6i@%p`oxr z&>Nq|k)`o!dE|i;%bJ}TtPTr6i-dZxC91I5dNc!jD3+=&I$jKnyjf&rquX`?rq zKamNr7@?~^nyI(ie8@$O&Xzp0G?nIdtalo(Xy$Z4%X~pAuhFVS=(c@Z>X+8K8IB{g zh@+Nwh@P#(p7Qmr?OA{1>ZWcv|6f_#o(&?W`Ouz~Gg76DCzsM%3P`k& zv6Bjl86(=EtfgQH-gkuKg(@a7uts*URo07_=aC)yqQ!=>9GbDLVwsh-f~`iPry85P z*(MRYu`?(sBb&G$dpt3EnK`Obs>l>X=y^DrTevE-ChLU0`cJoEM%Fc5)>f=LYd~5` zm4zyHx*N4m8j1j!pI?ZrOY4qF>UB~ZreBJ!TqRdk3nFOsUI5q({l^ZjQ!w9JL{|H? 
z_Yk)8;Jtf^khqYB%vw%Tc4pFIknU=b+#n+axmUZ;tqwVtd)IGEL+kmsuWwXEI7cs`k|R=|Dk|Hz>>?cB6^}30v&2Q;9klj;pD< z2Baq%x~W@~tXs2XG#9Xovld9ZK09Spsk~2`wDrqpLrb(<7{lJhhG`b1Oq;x6lVuP# z3Ww9XV5*3bgSUt_P5W|RP}GMFq9G*$cZ0WP?e%{2$A9dQ5AVvpdJ4SKxh(#d+wdVncU^52!=Taw<;o(H_fu z%yMdX#cUpRE578Lt&P(};uI|lYOe!_uamfVYusO-{CNmuY`Z7Gb6mK0oX0U4Yz2H6 zBbbCGnYrMj&Yp|Gt7XWBe3PBK$SKIksU%!slf#;BBwx{-I%I)e`)G~HG&NY*Tpxs1ib6?FLq_8h`Oxa)t_1%%+Y{fsZ%_Z zji(n9uyEKxRe8%^Ns({}k#=Pvm~D^Ts>Nuv3_Xzmu`S!P4YIZUdbEw(xvkr?joRo$ zwHK6-ar@NV=taEMng(3gfE~$Ot<~ks+!@v-o_e@o-7}LX)*i^fJ*a}x?bV0u)|^Yp zz_=%JEo>=kjL5}3KnYC42E#J3VN^iI@Rz{qR?VOMa!+G1URQhMq zir)YZ|J78c2&}WdahGjS%&atBuz_HZaoEExZQ<5OzX)U6Kr1__Oo>R{4MFrGvrPa3 zVB#fy;wCN)B|Z))p5iV}04rYNU?2=&0OK?+4LKg$1VH0GVB(3_uD}t2W8e$kEQtcA zhWCq9o5{Ef-DHQo+^QOBY+zg<_<>h$<&H(VTOQV&DM}+~p$?pyg8auhXq3k-=C6qr z`)pYvETJe{j2y+N!l~CUD|rxmGScVdU3gfz6=4)OZJ>zXd|T1g7KUK?aCY!T+?g8{ z2$=_--x-cK>6>lm#H{VN3pO(07v8%_)!UxlzPhVKxbq&TOtoAE4Y4f@D8AzX0OKg$ z|KhI>fO31UZFS@+O`(8|+#4!Pe|s8KZsoiR z(pU4cRDtH#j>n9Y?Z-{+AAGS`4Z2J&<{^95Df-nRmKKt7=Go5X{LJ1O{DX7t$9HY8 zb#BQ-;+f4YeZl3!H%lFh8nF7!-#rU0+ZH%V3h;?AvNNUK9uy&r6h!zDpW`$AE~Y-n{;j0MhUdw>}QSpa}_Z13BIbz0Tq@p5r#Y z^1`sb}16GQZE?QovtMu-=EiM)4V=dIA~dtv9sVRYem3hTR{MO^Sz zMaCe1+VrX-#7-X@;vPOh>pws1s($lDALBOe`Y|5cv2Ob>P5`bC1})$Fv3np-%wrc_U+#0eUF=UU*31WN*0R*b3U1s{F_fHEC2xkLyRIpau{6V|Du2Zh7BD) zB#;2&#E29LjBEE!<3^4dJ$?ikQshXIB|Dzf2&++&6j4@2vEigmr()Hh=^6%1XRMvb zZv6rZijyKq!EhaGX0%zibmW{mjdn_wubNc3$%Ch^8?~*`mM+uQG%PiV!X%EX2nJ(2 zFa)GUM5}gf+qVqOUL+8}ZC-|Y6Vz3kS6JIOe~wKJ<`vtlYh6o)V;YVbmO!SaB-%r{ z&Ka0rwuoU))pg{cr|Mu+>EO>9gy?o{) zGT7g6KR%+s8)xVi?Zdc?wJq1q~{OB%Z1B4D=I{(|%@NMmzN zqKE(^%&=NgsfnwjAk!ulRJY>dvxCwo4KvT)KMN{o8)vg)iAA8xS9vUndiDx+Jq9J zhZY4E6$MqSF{LeLnj5E|h8pUxmX6vSMsE_O6)#X8dTSqqu_n28fUXXb@HByw7#2ZR z+ZmmFLe;CIlp-{qsF98=?^T<|2CK-_s|(p=Uo#e2|H9kC*;q@Z86cu5aZ627*s>6{ zPDD>N6qLM|X%*KoG?N2#&&|vgSWh**DA=+*MQeb@izU2u+LWE7_GoX9_L*(AF{bxt z*m)*+XqzP-+b&YMBq47R=Jy72m;0c(1+8m0#Okkyw_bY}w)epE;0?H6ftw#+de!2t}GnX;*sDSpopi z8Y_W80aye;7|jw!3IUYn@tC-wfif}4OdbmZB+A|K3K8Vs z1!2b*#90H0tvh5Zp7)mtGAw2!?ZA2JmM+hAj}Kjxt@oeZn*$D z3B#UrQW+k7X-G%%fu;7`6Uy+FXL{ztW%+0cKJ>ZLm#=(fj~+Hm56w@1m&hM7_qR-; zfbf2$2pNuod9RbbFM%%-QfC(EvLIc-b%Bf=U3^urMIGY~L~NEp{~=DKhzm)g8WpCA$3Yj=@#>3``sNguh*M&j zMUp?oohCOqylWNp6rzb9zeWkZfUVM9Pi>{V5E{#0Qiwj}qh>Cvsa34PtCs=xap z8-;Ca7b<|+x>f)r-31&O0~O7s{}ceA4>Gz^vURpDxiIAFa6mPjntY8MdH zbyBywMdhUNva5zt&S*wU1?9oK>r|(nPpZ$I>h|7q%YYG(tGeteQ}6Rt^`&os)9jDe zxb~y1ekcwjN^5|J^1uGo>4P;g!}oMV*0UnmGZkWKAHR^q#Md@)wpn2e z8m2H4lO$nvO~?#P5Er?qEvyD{Bdon>?W1ooBL}_g*+@}JwjBk4yUC@OM)I4(9knoL^Py1>1-b*_7aUtU+j>zepFt!9v>BMlpYjmw#*vdAmGB!G*8XD4IU{$g(7Z~4pz`qTNFaqvie)>eOJ)hx07SE3%xaElfE*x66(D*#k`2tky#o(=oEiORm^BKn27wyl^)n3$vc`qS+vv zhjK{&I)9Rx1xQn}shy+t#gIglRjBA4MSK2&m;b)i1w zLq6pLrt2#^QwtF3TQKPRC8bL*O|w2J!ak|WFQ>z}Sc|%2`aYq+x~sdo@w39?(~hw7 zIyoyf(TXi%sl7Xt!6mXg-@6tO^Bw%tKNKq={`lHfwF!pB6#b!$%8dTA|%)lDa&gI zN(l^i8-TD;h62FF8q32$>Y2942o)3#|N5iSm@Gepr~b1$AN;`|6vAkf#+P%n`hYa? 
z3O)!ct0r{9t%E{Y(!v5@k8A{xHgrRzlS1`6p9i_VFHDc^6GK?z3H0j_Gd#nrv$bgQ z#w#4P?LaU%G&&oM0QQ>$kMS{V$``W|Jp8aLLMR4T=#2ddA!)q3WeG$;6hshX1wwR2 z|62q;NC?8?tdR^Y+LJ#GGs%?H8EEhZZ?J}!^nrrMr z|Kt^6RF32*Mr^8rRal|?)5wlwN@xtiX?!_qtj3vx9v&peRfDP&u&U?#v~Qf3a1=*{ z$(LOd7g*D!bX3QEWJfP_$9FV8W9+Y-Fvh97M|^Z9Ttl#Z)ku;)`8!eA|t=Kv^ z(=<)i0f`SJ$giDpwCzaA>t|U9cIKkOHIaD?Fdg5)lt=0Qhi9Qo5@^7yCgzU z+_R*TT`XAj29E%Tm1Ws;SkaNaRb2(y*R(jmX^0#>08Ox{K@(ZK|G-&b?W-k4)@FHF zNMclHh1Ta(TBSXz3c*rqjkNO8)@{{GujJ10GOuqfwKXl*u)0okNmn)nL-GJkK(fDx z$9;hrYIV=|#K$y*1Vbf0#pI*JOuw^bKYyhkHwakfnal1Q&?=~m1YKAKZP>vr+K83V zNE#j!;s$E~iEP+dj^)_3(S?ueS~DffuZ<7Iil<1$28>wIj8%+Q6=A-l*YjKtcqBXaAyha>+wWUj@hjMbLC+c4BNK76O#h=>Z;`OP_0L|D znS-4J5t7}&WtK-J+`@&}ZHUmr4XSa|36({f$E8=L$cSkely2*tdAg?sW?%??-DOoq zZ}Z%zfEuj`RjPShu+WALzTl0`SYH`kH`ut*Z6YO2j!KYUXn|CT1>B?E!`p@77=F2? z)k@vnUc;QqA}k2|GFR)E!{Z&p_1^F8 z9|t2PP;*{*ouBiCi5>< zP3B~m>*Pi=*0IU136X3^T^Y9B=sd#j&`%y39lZj~=)JGQMBi==({Kzr^6Q@X znyQwFWkg-#A_NdOZOe1yzA1i%f^?}g>E2mG3@aYL->tba?6m{S11&9xm118?1F$om z2^C&rHm>70E?P;A<0w_*bs9u|LIylukv;BX4~~XEeufAZbWXj%F4~ z!;LNAOO;R*$|nc)$OWZXM_q(~z8ns2FHCYdmZ9fBDEv#-Ova?o6gc= zUQ5g6x@2}~TMkhDlvh$IYP|e}#ONESz!mT;!?k{jS|H};cqWjZ>5zUAEKXFr*6OY1 z>ibRL>fT@eZRc*Yg(f2t%tCALj^=}I>k_iI&i~|{$YNIM2E@81Dr&%Mz=rSmMre9& ziu#5OjcDk5rrqeyBow-a#4h0VW^evZWyxN&OrQp6crtLgEIg%4=;drCPG6O_S}{d! z+HNnH-sSHk$TbV&9L{O$o!WiP&)mMt-EM?h5NO_dkuA+8zdfdE6axG41z6zEW)|34 z*&oh`IWE8!x2|I!BhnIKT5z)XL~A~F>Hr^bCp~XMEM>ah zazSwKMeb|Rm2c3c3f~R~cCg^UUhGG*8{r{vPMU<>$<##_Y%aG{#|GR2H*lq`Y?^zH zJn$c*ATCoh=?)i-&JhUEUTNmFaPz=$sQ>10aUEBgHoG<}=?ZD$*nZ*-cX1P!@-=L! zT8jo&u}*Ei>6oK&8&?kE>Q^0SvmVdtAb;!@?&q$Cr^ixq1YIF5M{vfK22|M4DL?20 zxAMGGM&yj$ImSQsUS~WAXi6UTXithThjy?C@JiB(OM1#G)tzq$2ha7~%Y@)re{(&z z-J~GQ+s-XgwRA$~rbFig!Bi&Tdlzo4a4p1Mgu&d;<|<0h!8cR0Sw1yQKW*8@LIj~4 zPyh6zAVLUVVxJCUSE=n&H+8$+mZ^62SBdq)j#6cxtcP~keD1b%c41xz=yv#Z@D_Gx zFLq;(RES>k%Y^wtt7lD>_G(XPod3_@Yv-rmaq~nIa*F)wIbY~%=wF$id5muCA>a0- z)dNTa^p5>kBFQY|4z9OSt5yL_quix?*IE*vWetz%SqeZU?wsPXfb<#H5I6Yvnl#T) zcs0~x>ZHM+c9=qc`}cjaxGl`ZM3W`Zs{OcyDSKU@8~=oUaKhNAot<_GpB8meU=(Dd2E7{G@kA0hfKl zhSnN@`gLC>v;SRrS5&0ZTMmhEugCW<#dLHnwQ?kT-oN@Bo?e32=@BO%OB3p$F7wgf zA-YF(0i|Z8#`_3k=Dp|ps{aP4PO$6rc5C|$d2Kt{7kaE2+NaG=+{-_CYta3**7@LfXF0p(jqbj2^L&aaN$6N4jJN6*l-cEaqXrtTc$B%GmaiVf-EL-WH4X7OrFGr z^4&OQeIjxT^~_i%gH~?hyfe{ZnVdOis`J+FqQzx8UqXDCRM63gi$d*;Ds`&Ts!kR5 z%<5Byuyx^~&5d9ooz6dp-_0RC`c0^!1j z86Y^IcroL~J_7I1@nS>+2n#}hkU$~A;SQcpfDSGCvSTiD^5T_iCoktFBq)1`Er4G> z1GWp`ZtNJb;>nL8`~QZ$Vg?QufzLR4q&zuYOwOOzEh&BaF<)W^Tga|~1BUHXRJ$(T zmkDv~g1e*7977D8_Ic2jdnXJLq55>(-!~NLevU3Up|TR7fB+6yAT7BZh+u*XD%hYg z4?eh!gcE*Kp@kP>C?QQ!LAW7R1A%i1JDW)+h>Q423umc zsb@nDkMgnFY5yj)#+PWb)plEMeHL1%oxjn5LZUSgA)a{5l?k1yBmx->Yn!EqUKX>` zYU*&X?q!~=sV1^Xe68d+h<^4pvLAbrsJN4X%QD!kgU@zIVTRLU=pjQ0b{HU+Ac{C5 ztF4NJqLtSYSnf+SZVO{n{~;7jHBoNlu8#Ba7_X4umShc)R~?z;l}#a;d9_xY+9squ|a8?uZ4!GravzlbkMm57iw%r3-+jSBOlAFs%zqFU7WGXT12g-Vr?nW zCWp*JbN`$$_v^6xSpxRgdEL~avdlUQt=ri~C@rCC4)>8ww^6gYFG2qzjIa@D z4|L@!ha)~q;)pl7Mu|AuOK-jRf~&YnmtXETzb`WBljol|B;q01n1kpZOQqPv>OAuY`D&rK2s zIseK=o}*$y*+-GH^vNT2Ez_c^?(42b$|)?_|qc8gPupwB4Zv z4X5cCs>VmY%FTg#?(5+CzTihT-YSrod6X==kq&H>#2X>XQU9(%4t2zX9{8w7Bq0>Q z0TQr)3WSi{5ZFlyHHa!mxg=B^2tgIvi-M_3(&9e$KYw*0V>_D@a#B;Vv#sicL7E&Y zSNNmMk37E#mSl>kd4(ZewH8~}Z?&-1Z6ee+7iMTQq#pe! 
zNb@mC ztx!fb6p0RRfVUOml?HvSVc+}S7adlWM^NxP#kwKv*oJOnlCEbQ1?-6A=+VTI&31xsLN)RrVUW&fdaBq|qQJW{~; z7;cDJ#t)^nkoK$uE1ri-X9|H9$TdUO<=LHh4dSYqco-#O2E-ZP*3?B_f` z@RZbuWOnVrjuW3!B!D%vL4n!e&sj6ll7_H^Da@}5AB>qB-ZY0hZ4hdl<05~3b4cNA zY6bJpr@d^0L#xPQ;l)#p$D8atXRRK3X7(_Cs&UAqx4|4AYRB7&wPk1w$R9JcR;iU& zk&kR-6L-Xsf93DA6MV=>l zR;uossqJmD;#-m6*ta;&!Eb;6JKzEbIKe}NXQCJ!;REkC!~YvDjs-i?rdo!Fs?lm9 zstVVCLwWS1I}Tww1aJmo6SusK4_NUaXG+5@)ssPnC(NTHLct=^+{ zG_%jd)U(#M*41NmjeuF&ppK|%3)h6L$3gPAYAc?)5>tVeS(G)l;u~Kvu+d~`YrE#$ zjaN>^>SnQ1)y-#*q?QZ4<#q@-u#`miKtj20=Ds`c2$ZgxWvUdLEb_f-pEAVHnhazh zKCA9syy6+Z)yJ0=@>!Rc<0~)u$w$8Injie;#f@rlZ2W>l7(MAd4rz~v9Imy<;8iAl zatp6K_OegWGtv=i9G*4io~H-3tDWs=U`#E!t8&hN7yne<^}NUOd2M$5%iOx940#f;%q>UUg&E5)LlT&a5Hr(F4?80iL8n$hln~hb>l?eNd#G7zI zn`srCWZ$sOlVcgm?>t|wHDA*C;P(iR(+#1n6qOQP4Md$E{e?qEV42w+lykt`wTT~C zNuUklAPQli{5hHWWm||?Rbj1ya$(XY{hyi1&HpB8THg`i;20p^_>y!)*)qHrF$m%e z?pEbY$L$y*zVt{U#tZL_-35+e7^YnoZXgI+q6c202wIv}s0#|FAndgu3(A#TP0-63 zR@XgZBH^2qxyynn;70gf`hgwq#Y6u{4O}z_TSVbx0o~IjVXo;BYXJ|}cmNazQq~CH zZ6#wx;a28-Q~a^r{JmndLEJPFSuNO$Il&ernw>O`THtL}YB>cQirGrh;W?I+Io6H5 z2@y`{OQ7)~9}3>#1!7eFQXwK51mUA1F5*6R6QeO=JQl+{&cY=cWHlXR2Wlb-njnpk z94M+_D30PpLSzX!Q10oJ#ZdraZLE&!a} z<&|AjW@Wme8N=j4RRSQ~ED!@R4F6Z|jX;7+SfYb75QB3f-dK7iA5!N={-n93p<3Q! zBy~e|QdtyLVh1kO9-WC?)TLhDW$TsRC=HQc!e?M&8VpTCSIJ;76lMnAXGdZqS}NcT z%AK__=5~TYMCHNMrQ|NgSo0uZ70{6^C0}RKhJDbA^wiJAy`T>M$oZ8a{9#vsju==u zCN0VXX<}n9lnE|e<4Atq<%J*RP2dT!p5Of@$~lR15hp0w;U%4*RytT0bR~1PigY3a zkzOZwBIaAx;M$E8J{Z@N_MW#%P;Me=in+;mP1jUz%xr z&Zpzp=TfQ!NBSWznV(Lg;{O6>D3dmssmWq?3RXtSDcD>G5k{RE1>N#Z4PrIn`RECz zw2*6IU86N>on~lK#-$CyD3gtwxeeI1#hu(So1g`W*coO=ej()XR6D9m1wEKu0q2A? z*MjgU9>OY<1ZkBjT2&ohHHhVrE@_rM=BfFrY#t^xI_Z;=n*+iYIZzUt>gsNZ!gvl2 zs}Y+5;8XOKo_e0?wPI_Urs+jCrAF>*#oY@xj%rYtX12XxD>A8mx=U->Ol*xu6sAn0 z*vy0ST7)WUA`NN$QRACpC)j17Pwt-EVXA=M;3^^@{N<+u7R>4#2mpn`L$Z~PP6C3w z)H)Uja1w~A#%hWvC;vRss(!&&bmpqA>ZGM!YDk7;Z1yUNYTHH;>m=RWLbcHS9h#Dw zrN=mHEJdplI)JqD>?>hHQEZ}^YHPL{?U@c{x3Vc(xoEwNt7@XA7(#6slHE=gWh_o2 zHCTs=p`^+5Vw~L8p>~!!VTx0Jp=#--=AoU;`D%i>CS*=+14>Z1CM>v8$HFR%d-iD2 zo+>%+DBINNCvL1$SktT`DahXHJW{94_Ny93V+J~I+r2D4r~`LiC(VWv9RBtYq!Zt3a(Dm3ez@q*|bZMME%s+wGrB(0}nj<;5AO%*uE>sTTxzb6(rU@N9h92r&Hf1HTB*&7=bs7i0Q=b~ zyzbG)l)c^ow6-hxyso39A^b1un`Jvtf)yq zDq|8JelFScws6fJ;>|wr3B#z>Vy``hr~dqc_$EtmI&8$sUM8uV#&RtCO62PqZUYyh z;zEPs0#$C+FIn<$K;EC18CNaFFv%wCTLN%g{#SgI1AU3{7}pn392^C*D61FMi8)JH<6v$rfVDoX?`uQCV5 z=l_kqa!2!BM1W2n!cY<)DK5|N_Zl-VuVqTR2^6E4?Np9I|L2{&Tzrrq$?ob8Mqa~#x7n@uMx^o1Ij{kr8`>#$s-H0K`i>71#< zG7Be_8IC3wHRTmxMjA=uFiMzo$o6$h+q5KaL|khlqw4i!@1xzyEEhXdi7v8c-=j+> zl!Z36I4vDTfeBqA!TCTsL-qA|NY@gl;#lYD%wK!}C?U;s13O z*zgAEJ#R1vTO$XcH6^2+Vpj4DB9uwg5nq#ba+E0uMVG~HWf9A^tgjMd_a}Wv zww8POneV!;b2(Xta)5LBfPcDv7yEvzd8ku@n;WX0lQ1Tb0V{y?o#%PAyLJ~e8ze}B ze3-HlZ)mW}BLz3E%{gTz3-Xk@OC}HHHQ4z3ZR(>xW)--&z7`&K)25~id#-!CkQck< zM#Aw~S*ZsLMQOpd_kx9C3IAcsRlYj^V_W=5W4y6ze8~HH$CK=sPxfPuU&-%%nt%Cn zFgs9(46#`_c~bk#CnUsZX3iUVg(|e4`+JFte82KYsEX^kuCN|AZy&et1h%Q7zwOdj z+Y>}Giv6LFTQ4J@NHb!5+X1z#j*NaSx`Es_(p?5+q zJ(ct5*ReOxTCn7-?b)LGZd6+r6WdHt!M{D?m_y@PGk_Rf=u1 zZ4L<F@0`oJ$K^D+ZLqf5-2(ZE~sJpN=3oXQi0}afYtDJYFK_!y8PD~Ll z!0Ko*F7+O|>KLkMluDwmis?w38EGQPA&-1KrlXof8Zw=5fP85sn~DVKr<;7@38!@8 zc|s@^;i~Q^uNrv|N3FmN)2pcjDoQvmQN*i0HUGU{xswe0|gIccE1Io zbynJCLnr}48}54olMOMg?GO=`)k9pm1QTY?bM30nO}8w9v6%C2dBGwsnxE3=f_Nfp)Ew#jni6ryTA z70w%Yio>zZsamWARQ05V3J0P)t7{IQW8ob zAm%v27ekIJ#% zG&1GM2>ZXjGwoxQq^n{&YNvsVC1SO!<~nV&y+6C%)34@Uxn`aW|9f}Ern)`0(FF_y zJD~5PK?VB5TmStp(9hcXZN>!w{{Kx}f+Jw-{*b_lDG*C25?$%W_chZ=M04+o;6*N| zkja_EU|(xScxb{enT&8rCJ3EU5Gb|dsLo{TGF-~+bh6jUsdjjhpUa9xgq7{^hB&aD z-fWkEk6^-xtf9p-dGS}K 
z1fQ!YHb!ms%6Vr*Big>V##>?I6x#%2>d*;BIf9H2RKKgdrc4#txvXJZbBuDpB(7=8}S~C{`@!G)sC!t4n*=B+)pq zlXMOyjIEajuEZvbX{C|=Z0TQPcv2g>&4;BmElp3GTD#^Fhc*S?YjMicp6>3L#zbCX zZd0jtt`!Ttt&@R@V4HrXfD$7*M^nx8w5X}BnpbTjdPLe>^Ia2O--KgVzdBawx-pGd zi%*G!RSEHicRsB!FD1N?-Wz~{z3p``{@@#5`OcTV^>u)JH(Sm?oWQ+0=)r&gD;V|` zfxsp$@Lsdv+bt~kpa}igNCW#u%_-C}LsA zM_H+hxmD10Jb!=j{(qSVh2`9(E>`)-WPSL?|kSGqVOIuuN z1Y;V;F@ABUUsel1(s+nB&he>H9n@adp;{lfU#nm3tT1RI8}U(EERZ1%COdh%)BUD3 zpNv)YQTdvHCagTjGZW{AjLXW&4Vj@WXK7EH+9Rs=ngM+018M}zu@(8G9thV-V0vs2(8$>Y;aKJCu#hFG_quc9XKX;D4Znn9L zy=C+zI~cy&^37R3H7tr^=RGIydC#i__4>fS{Wbd0*=t{=$Jgli0|R`KUi3O2+>TTK zHec;s;;b`J5zR&PW5I@4^HB19iW~Hy!T#Kw%x^w(EJOJ7r3a(MtisMcxD)O`u>I{Lj{@EA9^<{&`0qUs{0t1g0>)Z)s!h`(0MiwTf+!|t{F0jvh&>=A3{B#igL`#QGum)8t>p1NR=l`z+ zt1wUM@BXw9|46M1_wTj*FEW6P09|mjZUGv^;pE8b%NS6}#w`Mw%k@(3<=_zXhz;1L zLj&Ih4_!_IQ|_!-X#}gz3UiMNSFj9iNfJy46vV<1q0k1YPXxt{>%gPtsw@b<53ImX zf?@;$l+eUx2XNxg5l8U~A<+~IF$GHz3;k{jU6BjD5Ef-Iw;luGAP*5|Z2=GPM2ss9 z#o-npq3dAJ1GUR}_^=q|Fy~;-594qdexf{9E(0qN1Od^Yst?buPWs?a25pG7+MpB) z>n`L^8&9zuuMYckuvA<~g{VrCGRO$u(P}op?eK2Jq>vO<5fxW)8~ITm&Ho|`UojwO zaUcnjd2A6E0ge%r!SakFMSAfUWA6{^@E9qwswVOdKd?QVkvyRBB8hR?gs~c}j}6kG z!LZTW-p(9TaU5q#EhcI9=uRbNQu?lM2Nx(E*AX6Ngxx4f{MOCU+|D0kj12Sf`HZq3 zg%Th0PB081Pz3TR3o|Q6GdA5)9Ysp0wpX5hFcuFW)FnHG}a)($Lm?&a8U#K-=;+ZBs#$F()@N*{&ljzmh?L(?H{r zI_FXni_hSr6`0?opBQX(QCJjs(X zbF@4)^E|mRMQJc8xvf2`;4?j>&hAqH2k@>aC@B0q5i*_B<4jXWdGrMwwMTtaJu}n&u#rfiGCnJFN$p8fZ;?}v5BWfq z2ShWeyirXmuS#JPON-;Xwsbnct;R8C9u8-;|ib}LWu)LHq|6dTM{OXUR-b3|wFMHZ8S4mDAC^jjk}Tp?9Gk+nTd zK%ZXmt`@afKy_2y^;6mPU4bh!F*PCSQWw3FHGT2&%>N}fsnuUeXEv)dO)Ky=b8cX* zf=bNPMGdD{Jr7~m6j%*&Sl<*Bk981DG%&>?a$+!J^>j}^Hc0z)T>fAZ12=&Lv~WZ zja{TFJ0s0SZO+^zKqy@HW%o8_XHaCW7;dy>`8IkP6r!{8lL}IDCO5WDch_w}mUxYKc#n5l1$KpojRExb zag{Vlr&n+*cwB21d)3uk*UJUG_5ki{>0J1QTR4Vgc!p&->4fiwQ&{K<0lhY1bCsdI zuxMYwq37n;e~ma`A9jH87Xwo_gOgZv)&Jqj*ntC;n1GM?Yq7X?+tf{Q_jVO{fiISK z5sWW)Br1pu->Nr_NBD0U7)DLjVQWM=9*ipxpo2YiU8OgKDVU9~P=p89Sp~z|5-^bA zEY76ikP$hN6?u^vxse_Dks%q81$mMw8O$cxk|()UuUK{s7>Q@IiE(q2FH{(7fgGfG z8LFv@Rat<46;Q`EfnPa3!MK5gcQE+?9RJ0c!dQ6qagF)dcEqhs=J=W(jDw-~gYh_z zNqCui)CTkF+Mqd_rFojESZF9t-9APeJ(t_>Np7q!Y(f>J6%UKo{z>e>@Z-1GW{n(e884~+T0TN)L8M>hz zx&Rvb>7K3tqOPJX`l2y9z<^E|1Optja)`}^qsQU6Ncxb0IHWz-idFZVJ^7PikYIsr zpWPSs!kHs?)tviTrN7yB)48YF8J63bmTS3f4=f0CS;Yt%pOxC62Rc8-c*NE;!JMNX z`r(*^wyF2Hpqcuh+b*>s@uz1wg5SBQarLNaSx9AetN->stxmHF_@xa1 zpsyOTtv8<^@sIP;WyQCwNB`Tbzxu4z+Ow|^W%%r^RU|9hH$cy&u5nthX?s>%`nG2` zr{#qj%Cu})S+8lEr?Lhc7ZR~qjwQbh`|>*kFNV#xaoVaVfwmP#hh1qly&pBU6-6~ zyS~E~f!7(RlUuokT8wWnfu8#e#C9Il?6zy~FzyL)g5>`@B_q ztFsq_U3;Zafqi3Jz*)S-@q4emv{y~nu4@~d{kxPRGQf3w$9=lMeY}mAJIK#AEgGDj z)i_8uJek=TxrLPn%m3uUJ(#b#Ptle8}gLoTDE!I#Z_4@ zUwo6_5m* zSEu;YM_sQ8JkJk&&w)Is2i-%bj>sX{!@v8`9o=|q@)5Kf!-M_Ue^kpcz030s*&%(| z`!Qp4mt;kby+57jHaA~ougqooez_gh^}Bw9JJ#LzLB}c8zdhaYoYrx@-8DAXb=^Mp zz$tu0m+QIG4gcNQA8)VpLjbm0-=7`ewR+MiJ;@DT)BW7XgYm=}DBGiBxIup1y;#rP9l;k{CYALg=4pZn9m(@O;DMcQ2jJghzUH?a z+LIczUH;{HKIfU62^Bs|e7Hb`ICIGy;zOR*U3B7;p5jd%w=+KKF`*cu_{y#!I_Uh( zKfbto+^|dD!FtEWBv6e6zl2xr9EEim$P-V%gIFkfVZCAYaQ$396Tbvy9^zc78l9Bv%l@BH=!yX#55 z&nq8e&A#|0*z8+`W9#GW@qO+?pISU0nVH6X;G zaA89W4;f0_FcG2z3zNpoh*n>cOa z;8A0S&z&uP>U2RgXbhuAdnR2fqp8!UHlj*}N<<0IB37F?nL=f&*REN@g54sPtQfOx z(*LSaJEu-tK7Zm8$}`s!ux#h@X0tUa(j&jf#0eHm%#+_hfa)rSi@334xsVe_eyqrm z<;s^SOWv&cZKBMdG5-ZEdbA}_wJ@pf^hy@%RHZI)S{=H!ZAr3e=f-`TG3U;{CkMCV z`;|F)^pdL!rdzo4=fi&kcT3$f+cenMqCLyKtamKnzltB5Rjc{(IjUY=&z|bk_wJdR zKq_A=r(n&fMPshEJwb!>!Q_xi1a@>%M;UDdQb-I|kkW$=LU`Z^5!LjcfEMPY;ZQ{h z)nR-e)<>U+B$jyIc;uzH9U9uPc%6051vc0;ut0JcEHch`<8;vVC|z$r{>US5rT-Oa 
z8j+_V3EPrN8hPX)PQFIkkVeuaWM`&ygPd~5BuOQ4(fN2~b!4&u;~3ew*y4)cg@+<~ zaK;&gdhpe$)SV`dsAqg@;ga8er?i#lN);Abpn(h~_y7bEEQn}?5<(j3fRtirX@(nm zxF?<<0;Q>^oTlfWomidtCRc5Cr|OHWS~(dsGdhFHj?T;|PGLKa8DyAW?y9SpO?nyZ zZc8#ptgpjL+1!)7LWyj%pJ5Zvw9_s#Td!pTtL>R)cB|&9;HIh~s-?>80;xny(7&Zu)7%r2nFOs<`2< z$!)fX7_!MNcc!s{T`|R`MlrFv>Dn7HyACJp?!x$P^@zOsx#s4*4C`qqzW`g{Z`cF>J88i( z9gMKrAyN(T!=OIh^twrtJ1){h2W_J+)5L0+v$k~X@yLYt95diZ(yX`4&${gR%7)LJ ztm20^Mh&&dX>+a6iw~W5t7#g2^xaD@Au-cVqb_&XQA_RmrjB53EQ@vms4v(Yjs5im zyl3G1qGh9qQN5A^!;>kazoTR)+{>H9zL|LH;i;M`RAwy7D=ZLR~vE0(bz zKyiV82OM7U3Zy$nnGJZq3!d;KxI01_4{ph0o$`{fyyhv*T>pdK{XECQ(9~*h*Xv96 zZnd#ob?WSwXDdukPraBVsNbOZ+1q?YBbwrSOshod07SvG_(0itu&dsl}hZ z_(|J&Qg{`#AO_XAK{lQelnCS`)Cfq&DPq!&POQr>vWEkj`&YV5YVwMm45u4m$%WXBvYO$ECGH@S$_AM6 zovjq(Iu)i)5NZ*ewY()RCkYwq=>;6W_*-LuxkqOjbePR7=H!HUOz$ldh(tuFB7>o& zX)dQUa>%Hdc1THYF;bhPv!yp_xk*`ybDV1{r!S(>#b)5*FHAFK8$Z&{RhBW0@ysbo zPddwLjN0@OhO`3m{DC^FOGvbYvnMT z9u1vP?O4YEe)FtZtfyK_Sx@ZX$BQnt47z06!J6Kbl{$STEO$D>p91x-K^-go`UbR1k+QYiq*z0G?0(|s$(bn7hHsc9XX3e;y zSk$kLZ>;1zzVM$*RqmS!tcM^|7Q;hkFp>2fBMa~O$pvOIwcs};Ko3{Sq*Zf`X^c-a zlX#vp1<=*pE2xEy`Nd@>o|YYbWks_Yn>hMsa$`7%N`(5y1~&4Y@%-l_uX?h%OvU9M z*_!2kR?@@oi3fOFUB`f-(q|H zBrKejo9aB^9f5+IeemlrMVc zJg0+z|GDF#9k%XEuQ-s2Zt)^#eE+kpmU@Ca@pLW`Y2j5DywGz^b%kR+>u%pV*Sk)6 zNaJ=uXQ>4S%I<uO-(KmVb zhky8&YytRrC|H0dn02OidjF_*9S-PAc86TO#BJ*qdlxo=xS|@q~sDfXIC@koC%~yp}2!q0>bJf=(qM%~{CU>nTYNkekXefain1&fh zSub;VZOD6gb_|!*fote{cvxkM7K6_gdQ|v_3~_~6n05sCg2KjVza@fEqzj(L|Xz$iRFY%_=E=-g^sp`S_p^}A&7(seSRo?Ip>9n z_$}8bP_7bjdVy`02!xtgiEOBb!1#;!Lng6PhoAUT(vf(KH;T|Bf2GKW^@ob8NQJD} zg3=g`Tu6V^2Y;xifd7s7Dmr+E?bnOs7>vSLf#^7ixF={y$Rl~!ZlqXV&}fL)IE`Ra zjjfo9^w@>-w~Y!Hh7I>{N;X3eD2eari|1&L9QT9Mb&0#!Ux7eTo=7e9!fGK1kFzL? z^SF=o=z@vDd5sd1$~TDm_=*PDkN)T;G-xrW#(*`*h-O%7HK>g0h>+!Igcp~NvBy#D zNKNn6Bchm*&$yA2w^JpVkNIa2_?U$rS&t`Kf{BPr#ibXxLWYY6lSvg|GZ~cQC~~|Q zgfyv>>IOL$*OiARcs;2O*~Kf&ID_$slKd!dP5F5XvXu7diX0i28W~)g;EfA7YHzTT z+J}8xS(s;8lmCN+m}cpa3Tb3qd69XuKoeOln`3BhiGV2ym%mn&c{!I!scczjm!IjB zCaITEiJ2?%h=3`W$W@RBd6>&6mNxm2Lr9y7DHqamZiH5PZHbZoNR*sulz1tC_9&X8 zd6d{_ns9kbEt!jqV3k+dntUOUE4hh=d6SFzmDs7B%_UuId7H6$KgNMw-2i3=)th>l zp4$kV$w{2<6BVDVu=gojA#E za50{kHE8KJNs#$_Z;6PTd7-D6g7m4O(top?i5M8^cS= zpo?VzkaR~QE6Se&N}#j3oid7(<7aCGii8v?8Z`NSz6qp3Dxw^jq-+XsND88*$#zV- zoN@A>s!5pJL8ael1_9cqJ<6g+)1_LPkb+vM*m*POc2S%Wo_|`QZ)cw`2%2qbq#_xT zZ%V18Ii%32UwXQLcsiv7Ifj0UrH$&MFIqDUYM`nbqii^#uDXn}_Mquk8;_}obPB1R zIjKx3sb+Jjx_W|Y+N2&<2!r{YeA++FxqyZ-s-;@1tIDO&il8_&sDJ7+%`lmisd&wb zn*X;dq`CU5U+1fnT8QKtthc(NhXqg$_XSgmmHR1_&dRK&O0CgqqgyITS<0wsay2=c zt*@DxX=<*TnyKI_uHd?)xQd^hx_VK1rFr_7f4QvNny>UKTowDQ_iCt%`DxdhsJkh3 zjvBJwTAw6Kn!5^!279mw3x#tEm3*44tJ$t}_pTH=v0n(AID4^R38RX|kkTrLm2|Ul z(TTb#3qk9nXqvR_Ij{tKux6C9a2lrxdw|4hqSNWH$C|F^uzkmRrZyX|^IESNYn$8Y zwQeDivg(t9VzU9ew7^=jPHUu4yP*O&we6x`TDz_)$q0TKlV&TlIxDtYN+qUhhF59-1 zW!tuG+g46}_ulvB&8+!jV#Ugg6I*9T#6f1nj-MB4Mtf>`p#o)%(|!92ON9*|BuHx` z!)Q~!X9qi=V_K4aV!eJ+vilaZvtk_XOqFC3qGmHZZ)3f4lBFx@j4bIq4RoRoY`9s) zvf`}0#{IVJ?%fS3qr(!jTk56piNgq2E4Tct_i_zZix{xngQ){(sKNA#x9kCeW+Xzx z^k0>gZ4Y#10PCU^(psUfnS_Gx~-Z1<8Ji5iaIv?M)*}^MvDLGcoTWsF@ z+A+HzxcXHmI*%++*|iAeazVK!KrYUzT37n{sRo!O2Sg`&B{Cx=KZb;@JY<%uzuuu9 z`zu#D>tHWB)g}NM9fN6SJvT>#f1o=Q_ymq``-A>=m}hp1a1FF;cuS0qOPk-$XHGMpPGAuWm;dA6+kyalGbQa@>}E zLPn~%=VB;Ls&t>KWhas~lg=`;BdL15@7a7X537F+b#mXMb%9GV9l9v-4_H;jh(Sfp zSa_omRZr8zczeb8OU5@+PCZQ4%uE8Yd4$lZT$~5Ybb9N=z0|a<$-cYN2_~ws9P?=iuF0n` zLgLSqP%Ki4$w?qp1(^2uDad7jH$s3wGUapJhCmomQ`%W9h}YA#ms~dnni~5UpJxyiMI-o=sEgt~SZtsHk-b zYqTFxq?lXV*f@J&(W?i0GXy$&!qGdoWGIbt@@=E@mz60-*!#PjyDT!>PeQ#a7(?)c zGcuKj<=BNkagKiR9QAkVb0we?%Ix)BtP9ZK_>0V@76q=yO~8Ha%}^gsQ*|lftcye~ 
zh)1EmR?=u~je&md8{lk3z3-x8IizReo(!Crcg=`wZ8_pWx3bT+zv^FE9}fi_Guf0e zX0PPn9Nu~H0U~$*`5;SodJycd1mhe;j;$*+90A`vGr3Q#;10+RRHUYM3_4G@&rSnc zjwed$_Cb1m;ZAs>=WMWRzdH5r{jRgmF1ya^>NYdNuj{5RTV-yfNbkhHQja0eMfz`? zh1~3Xu;$jGaTYJBZJMh`W=&wb+~IDSd2SQIk$j@h)(x+T0{7X!E}Xw^KeCTrX>OLI z?^d?1C1g(#`|o0eW+!_07`xYszHU5l&e*H(Lnq^qeQ(?yz(M$asgZ4gJF-H?tv`jXuOm7{eo&Qlttm%SSyYvTJcb+0XB9_D-tDQS6ymH4v}@4! z?==u!*RkCX*0QfMvUi=`9eB}?`@OxI8kk)_t_`m`c#rK6D_?vKH zzd7ck1^2a8_pVL%avS)rPW?J%sD3UzJ3jeJ!Zx|o`Jw*tfoFYqZh2d$`#Bu&8I601 z4Znpm{r(km9&z=)p>c!+xcdpeYU>@M*LLN)`yKi=VSJP>~dT{SPrj3Bq)i>yskA+uX>M#g5xN}ee7pT>nH^sO0-M~%R zuRhu@{Fw9I?H#2^u%nn$%T}|3n6YnW4tU@dkY5(#ePQ|*W3NB5y6Q>$iW*C6z4~s` z_Z9#8y({MY;QGmGC-;-`Shwo2aO-0-=j{XzI0Og!A1T`>yMkNTjA??k&&}@7^zEI} zPhbSxSL4+sA?YeXsDl|D^iI{rA^`hyOW=X{)|@#MNuU*&g!0v47@jI7}}ck1(C z`x8j}wHLE?Ao~TBafF26HA}0y(*SObtbUZ`yq#V>a$bLxul!cVeD}<|poFr;(0!_s zx!>Fdo^8*iUw}!h4CK8_k2&7~9x6XvrZ|6DsKeOTniXshSA||(<=o$Nx6A!_uLT|(!?U8o zJ*;I-qg+wzR(-8c0~4;_VR>G6a^5wL9a0>>?0r5cwugIt63e#Vrmi0+W3B>p5nUvJ zwbd}j@V}pB*K2j3LS7Exwmvs(Z}xDXTdKY&Xg^+@ApI`SExL4rupQSd06<^rM692Pfi}Q%^wbOmxm9w)> z`w>bOm(sYki+4;Tb@uJ;cbAlly^rO>jPU_I<)7$c`5HP=mvUi(_TdOYXna_ns`gGzl=;4WkGf^Cif=!^WsIj;bflg-L4M!K-mlr7 zy!)=Ez<;%D?JUuJ=%7V>qr&ah;D*Tymu-BFoPLdg0pB3r|Kq3M2v`952+sR=7r|1_ z9d?CGX+PO3>YEjkpg_IwioPoGE!4?*yMhnB@0R>_Ro(&! zCN%eL{aeYa*3}`+$5bZ=5cf`$=j)U9V@iAS$QEwaNqtxE12`h~7I6J_60Lbd>!%EW z`F8d5^B0)WNNx9j+UdJ<=yv{N-iGqS8v6?q=4&7K8KnDjZJR@l&uva2gf=F!&-Loc z_pKBjI2dyAZPGaAmlY$h*7wEjHuH{lZfbJ4kN0i&x`2@Oi;nv%gZ6{(6KC6|FC0@^ zU68Yo_XQpv$f4Z$ZP!@#rCsQ0X{P2|Y7KY@8 zxNAp`ZTprKIE#F*ICI`ep@B1`uC950z&ZuW??$5I^XmzG8r%BKga@S2xq730c)LD( zgXi3cSsS)~edkedqyB)kJ=^wCKgNY4nV2u!{`wx1CBQX|f427wwEvx7SpKJQ@Be>S zev{s26}EO@|0n0duI0l1H~(AS|Em1o)Sdq!{coNBk3v2!^`gT6qvn5^{y!_f&HbN; z|0(~MAT=}X{oH>@-wf16xZj80pZWj!T2%P&{7wF*{fGR`%J3iZfAW7(|D$y7f9n6= z+W)^!;@hOQQ6Gubf3n?wY5V^r^p#` z#l%F##3jT6gT*DJB!WUEMFb?pMIYvkoVbXbl!TnLl$^YhTx5*AqO!b~uY7obN_wH8mswyg~YMQF?iK+=nYHI3g>RRe53hEkK>KfYWnmX!< z$?8cd8j7--nmU@=dYZ|pnki{o+Pc~*vf4WO+PVhXDH+;nnL48UI^P_6hPoQ6y1F`g znu>b*M*4a>`UXb&#%B8ISq53ThFXe-MkaQ>fv*2db_Ci>Q86*ji^ zwty--dq)RL69-!x2M1?|>Uu{@Lq{7+M<*92XE$eSGiO&1=h`MWM;kX6CpUKwcW*!U zrZ$h3PEUPxPkT$x@@g+%f3NSQ3*J6H&Q?CI_P(8ce*gUcd3*h9Xz}-O^bZK~?->dR z4iEBn3ifpi9vlk`3lCS33?H3}2nmWv$&47Ei3;?NijI$-nvaQ3jtlXPi;haDYfPM9 zNecH$R+mmrNl%LjOG}7JOV7;6%*l*!%S=hkT-(XY&dbg%$jd7z&{rxLpDv7WE6GhO zsckAPt0<3m1C*8kDyk~0>#AxRYVsoLN;B(QI~s$W8cX7vo0{4!wc4BNIy!qgJ zTe^Gt`kW01`g(^4`$rsfMn*;_#>Xcor)OqnXXlp|mR48Rw$?XyH@5e;cMlH^jxUZ5 zF3!(yZf+hPo?c(y-rqkyK0iOdKEJ-c{xkUhSAqckcs^`*t<2!Jhd@Npa=XFo5B^gr zPNi0rKNxXDXR=T_tKT05#m^%}c4sJRy)#sH5L$IO=1gbUlsL+OJrcp>Xwv|BZ!D3+ zXStOr-hj>N{u`7+d#vGmx?HY6B1@vNd=a2lZ{a-_U80&V&<}=4`am zpyM(09|24mrqMVe$y~V%m8`bMvt>X%S=EG^^FE^~lX57I*L%7zJz| z_nZ3B>TcdQ;4L)MMBXDkZ9Ab~s0V)p+G>8q z2>aV*Mu;@spu-5NjHV@p$*UWK<}=gvf}sVj_NETSnI9z*6{h`yQ%#0UPsOn#=TC8B z&^!L=cGTLFr41*ylHRbH9F)nThEf92wRv*jLxOj9n2k*4l5RPlWI$e&U=l9&w{Db0 zn?Seldq;Cq2@Z;~D4thrdld=M4X4ax;4Y5q<|RFr>#lsWn#ZBLDF)4T)L52H>;$S# zr8QARn(st3TkgLZ9I>#5Or`Za>Jz@u-KGtf^UG#gbwwD;x>#s~utF|N-H55=WCJIj zu92&7maEi)$S0K6R1}p5XdjhmB5SgQ`pOrQ_wx({?YCle?yy8O4j(CXur0 z=0NmbjbtFzb*gp*E>Pz-MTaBp_OvM5rf@L+uLQ>^SxQCE1;zYT2_NGylqiO9k5pQo z(Oks!WD(=f$D;2Le2t16C~S)vK~0XJ+4A&Z`om6{FQ?)W&DTbkV(q9#i@mgAOKNfr zSQkIT1Zll?q9J(T``}k+Q)6Fl|Mb_#^ED9o119$exLD6Gj2k})soYdt)YG7>fXVJfhTb9<$aW<*Gn1O^u`cLiS+p2}ioXgn>+=w%OSU zv%>k*VU`e*?bld?#sH*Cp#O^B`eMHU>4*L~+#)^REk0uv=l3MODEVRMjAgn2$b%ax zw;;YX0<@)+J5wp3Qdv}QTTmBcc$xWLde8C{I=MDlnFlgk(*K@6<@0J-w3+@lIb25u z9=P}j0FT%PK0btQtqD&-?2q+bJ02KpbFYL8or4~PewP#)EmC%DP_7YLA_B6+)S#S1 
zH#&N%1seg*o?UR&K<24h^T0BseW=htk%GkF;MVM2NE);WEIFev1<7^z!f7^IN^|>h znt?|>Ry?vQ+0Y@|P~cGHS`1d)(36v>*HIZ^#Eu(JZKEu=BgkLxotHf4Hb;Le$yn;U z=N^Hz`|KB^G0wtX;Ixm(Oq`om%Jxx6zBm-T1kOSs7`z9aH-2Pb{JW75aS2J7$50~L zU_5o4TdwcsALlYh{c<1iajibM6jVBbAtk0#vb|Vc%t8dJ{RiR0+X^=IzBm?FigMGc z1*P%7=FDk=eFi2L=41HLAl>4kl413kzvK7Wd()tC>E+xt;lrb)nmGG$Fas9v&otNJZdUjyNovo7{$j^Cs)Tj7L3wme=q4txfffC zF`%lfCs5uUsFZg?#3^J*P~Av9W;F%{M@CR8SJU*jPE6L~J%^!Ww-DllHiX1p^B)|; z2@%XYV-hMyYK!dw+9Nbiw2v`#?ggsCZi`MHOJ7w#DMk(RDYuRdR=1?bEE5?HuEl}v zCP|m^z&em%lG|j6+4~xD6LmJ7kg^f33V7#bl5yZY8 zxdR*v_#WH(rmf=`()ASEl1Ke|J;I&`1^UxWrtFPrllpM=eyd2P zdMINN)E)Q!e+$01(gN#au`X`qkB~4oa0?3hV!(LHJSib3l?c0`=D>wLbs4hvZYyJo zIxIMqvP_?cxOQ4xd17&kecLVKf#N>kw^!}n-{Tc&l$>!$u{@|mLo91_zJFQL7F=CU z23|O8pTkt!S+1@OJk?DkNN?*O%_J2t&Q_*H5*bte?O8dpGHbHtO4(I6v4`4lG1e$| zNKr9a2b>mXvg_y9dPIeh))kX8LsRoP@4|oKJUibGw4@Jco;nSh^J9dViwT)Ix|#vP{W&g$?U9#TqJ}L2*E`j*zyVO% zue%i9ecov2J>=?P*F%nYvKZfk@$;7s1DxO=iOPt zmO(G$<8Po?g62o&PbcrtWlCGqfMr(xU!**P&VIQrupX#BLS|w?A!0`8{39P!BLwe|-!2*rM%qi9c0gk2!)_kFUX!U+Dp&syT z&QcHVPN@Qz{p8uso;1S5SR;zc$ND)+7?H@>{igmNO^~u81~svSW~`LvKv!itHgjty zD*s!Vq)_v>qk!3EI}w$@>{8j26fp_5aOsu6wVr^BX16@iAm!8`XHa)FGF1X6`5qVd zPuGb1Adep>?)v$T`NP3BX2Cy>)Ie@bs12PE)Izo&38YZDP$4Ld*+>+PG<;7?sf?`v z24)XUnAi&1qQDLt}hI z-4=qrAw%6B=EEDvbU1!RRFwunj5??^=`^Lf!v{s^jJUUl1_N4xJrn)@o^bTH=-HV? z9vpFX)Q8aagS{S;Og#NsH}slZAx>KSM=s_}ho<416cuP4VD>ixED7JCh3+W3zuPjS;yGByiORmO9v3Ay+^cRW~D}M-Ibrd1OT+_bY0S8y_OJM~klzHKjdHCNGPhJ^3l@ z>Z$bnukg?`IL(+iR4H3Hl)cQcqb6ZjO*9Px#s(=-lUepqA<-JuP&5sD-@*0H2=?wI z@gdYgL_SHSHE>hT+u|f?d@rfN9ODr+!k9e5R6N--Oq^8Q(Upw3XEpib*or;H(&)wd z-arrxg#iZ5$&=g^+$5!tNkQ+#Nt21hc}{&HG*u-p0O@H;9|%;680}e+eJgq*jE4hy#k><(@S;(mNpmDqJU6WJ249AS9Wj9!Xy}QRv=_< zp@A=(21O?2s^8C2j;EidHW*~K z7?EGb9?gjgIHOr!r+M*l4q?jK!yxQf6!5&a40LDz6rqKZ&WDnDbcGOSN9xgn z+c>$|m8^(W#U(~fT~<0QF)v-!s$>tgip8W71I^~TG8PIuvGl^d{jyw%akz$6v5=Al4D}@o^~u!~OJFz|ZdjI%JN*Vl0j+qI7I*{wsoX|t z0jqNv#!=PCbEu$%vbR}vet30WdBFQwbb~?p?_r}6Vha$=T%={>O}r$?_9BS%aGmsA zB+6Lyb#@d$ZP!^7!FaLmoiRE`FcWwkNd*1VeW?ux-qLH`BznE4#!pfXAuNnal$XdH z0z8^=;x@vn+5OVwH;vvUjfA#rzA;Mm0vEcYhM%N4@|ulxcGZifjU%}^#~?MP%r)AY zMY@!h2Aoam?%E%#tWaz62%b%=qkx|;O71kEV-if`J@4j;z(#wrcc#^c$ah2;BQ&SQ% zEj*RmDHf7mMDyA*8qc+`;55zT(4`o&oNnkFV>ImZysa3b$B@&lcCAQhA{SbspGUH* z1K^ugSFwm5H-^!xA4ykuoJ>qUoAZu|8lB z1Xo-NJN{!#ejV36PvmByYMgXzfr>sqrsBY{>2-V=#D#N44V>G`MiI z7h5U|m#U?YtsZf)$0vQLG-yy`zO${uCL%o5G@~qvsvsIY>?EQgl4@j4NGrxGZPm5h zX*fFL2=$062NbL5dS{f~B<5xVxj<{|1Uw_wNTwVnRDOAkE!?;oi^5EOy!B#mc0$0| zYG^>S!{nsL2?Cu>Y7}Z|oOV zJ48F2aYPe1S1r|NQq{#Y3fK(fC3^rx78XZl25Oy7aTauCpC1Rt%Y@1{l)@lZJ*IMG zi=ZUN+xWyO8foMp!Dh=H@#Mb`P~y*QGU^73EdI4fC4=KRT8GIUDP<}X#YpV_eXsJ% zhqB9-DX9E%MU91>y8-IBg@;UgpwZwT&jy_}kUwpMPJr1luH|WuwLyu0&JyEblWRki zGlcA(c#^$FQDiM;`MA(R{-B+x(wyELODmS^h-vlsoA?uEfH55Te$fHpXTW)QLEeeHLt(nGCLQft&3D!p+h0o zS}E69%V>hF7Qqxv)TkxPJL_sHzZYOg>O~4MJ}>G~FXLYHVomB;r==j2k@iQe*GHCm zt*-+HBLF^oLKSy$sO-08sb zf~guJUB~Q}Rf5Q`$IY7B$u*a=p+h5?{{uhJHGl%iax%B)uk@ zi2lY~Z&%w*5Kmyd-EDq3b^09)FSRYZ+nv+t{qb;r9g=$mP1pSwBD0zg4S|d97dRn^ z06!FX;WS?|(phdF5=R9T@n0^d(4PqC8jyX6gi`hc2;>gt`KZ)=rNjYTzfMJvL>PV* z$0}(2CBU&n7%ygfiM?XKy-1)PDd@r*uXw9ylPrgK%`B(xc=M=3qS^mN%X3}o7hc)X zp$5}VUNWvg+cmRC0njN;Flp|0!!T+P&M%Ov8+~cGpjH3zi8#eCh&YI1vd0gcWD&u1 zevlW_MTE;TCNnN(bxcN_Vsm4YmQi#`yrpT&Dz`jXIG?Go;rzZi-%H*aa0^VJS$tM= zKcggGtAKc3PWQnEJT8i0d3f8c+*&BzsQtvujO{l~8Ke{ZSBU&~z*2MJCO5dv`&QJt zRau_EfjPD|8zpE^zA(UTB3UCvkHBHe-JW=Hl}s=klHB7F7SKU|Tz_0 ziT0Tav}|bwym$-`L?~?N64?fcSG6>woJ%txMvw0{U*tDBmXG!+?2%)4&Fw|>&bc-7 z$qPHB#6DA{9r!DXuht8|3O8g46C(S#?FI3s&)M(cYh>)q=zpCQCV@Gh65cD_o0mR@ zr?)r$c(0lOYAx&?sM0AGPY7<~9{C 
z@Lx#yduZz$h^DVD!nrcSAkoXHpc{D=GqMl~k8qJhTCNbf4H6psjY8|#1`)+<>PghM zQ)D_3mJgr*t?Yw*&uJCgms)XDeoap0Ws1J}I)mEFh!X2{F}{JZT+;ONeLuup z&~0vc;_0XE5rTe*+x&q>03`o|{|ig6ZNast@9?iMsDAwK%4X1E>2bk0OQKYq5u7n| zIh&8L6>$1x;y*Os%eFb^lLgpnr`=;XC^rb01-_TqsB#IWU*iHk5X+2uK^VE56HTmp=WeT&A+1Gu#lyai*jX{clwV zFo2Fc>wR1PgOQMAlx>O!=On8~0he>vmFoU*UR1sPSglf7HhCh!#%{BO;3uS92be-` z8U6{*d3t`s8gguulM&9uenOIvk^>S5h=PtVu8ao_us|seLG*i@cz_E$YT9JB2xXHz zO8x>yu=;6c&VY3e4fn6b4GB03miBYiP{2r=0_trdb`sJ>CCg4oLAlx|e8 zrgh?h2-se=h$)hkiOodLT23lZ+eVkW%7)F}20DMgQ$aVsZrWBI3uo#qLi7E)S^S~t zQtcU4vH47smGgcTzhY3(?s&b|tpZUw^ovAh_DiuAXF$9Mqc#lDU)SwWPfk?$7o#F8(Av&*M z5j)~G4{&tmbp4`2Vz^xK76@odjHPM}Zmbl_o=G4*H6nkH%Mt6k!v!xhrfF7P)tO?h zM6}#~KSH%euvW;tCV)^!&!41Dgh9o(=8g^QO z89!TNggMk--|t61V@&-0SzY6+WK4;g(K&iW$q$g!Maq5c^2%+gL>}L!65^_Z)O2oc zZ@OnsF{kl;Jz$l7rRAtXez^u4OJ88bVEsTFQ0)W3D`NiW89Z$?ry3 zUm3i@tUK2jp+R*zAi}-x@L(Gwz)T}sxT0+uFR{*UUp-F1j&BW-dB75lnzm8pFq1yZ zOr(iv$ej|F$g_Eb!Nf?xHW7g{2G*YFl*5#S&$+Hd(USX?Ahm3=(bx|C9Bcr5&NIm* zlw;6yWPd~@c0Up5z8Ogos7%hZZ zxh`3`p~|GS=lAMj&t_jw@X|9*dK1tv?%S*ke>AIkdA8M(+ z-0!O1D`)uC9j}PsEYZU#(A@~d{wfG@>)0KY0HA=R1@@P`dkS=n@$&4Ud;u`;;~{=z zv=YhzqUk_D*0|`~nyf5Kf$F<(k!__gbzB_d@sJY~I0JlnlB`t;+=(2$YmTgcu!R2Z zbEuF-X+iM_S5)G9fFm36qj161J7#aaFp49E5Vi3)A|hi`l{L+Gl`$YFK?~fzcHQ4} zrPs%X74$jmarV}+lYRUNU5h4Tq*P<<=sy3^-7i2bMVmMeaQs;ajy6Q|(P#ZC(4!&9 zQ2*;ZuBn%!-s2xj+iwmV1HLf?ZpQ+=#oDZklLXnZ{vu0mdrxlOOm6+R`q@+%ND%(h zofAypD@NM#=G*%#U&QSL zYhVfyE!g;B-h23Aq?gvih|h{D#)J81JDnz20SyxFW_iAjI9qn$iDwf3KE|dvM(QHl zIY#P3G!YuO#(@n=l6#q$tdzV1Or))mlx*4VQ`2ue|ZBBog*QCXu@vpVv1 z!O7CZF?+YPrNDvZLZKbC5Pb@m$_8nyfk`QiKZZ=sUxfLCV$D(2f4Gh zK{#Sw4g-NRDf!L=$)tVCORbddi;Usl{d9(mGZ}c4XtH@^Jr?&YkP_1ho)fG@tWO_J z)A4*tP~%{RY*i;LD)CVHg>;iE)1O$(7w>34v0!mH zspz04<$%zK*~N40GV$##l(SJ4EfeFC;T7GDi2~f(vuKS|ys%1!<5SqMn7ykRR~7Q0 zWiwcyu_OobuI#e9^pL^!6Kq&?zb&y|gyx8-J4@w@pbi8D(&!+$u`%{!g$z@-lm;pc znf?OkeuFCROUi*(cDjc0|Lf;xlIG#*$Y&NFVU23BFkHZOqEU(%*>!3pMy!hnoZ?uU z@BMyuJt`uDDp|r331Fk4AuHLY%~BFc(V=zUG4TTy ziP(FO4jN2=YAxqG&P#O`yj_SxC@DHA(~%;Nc_CB79jLwzpcRNM`UXKO(MiMKb5|G( z;e{&)9I_s1$~luo9a{28C-OiV2SAt8bI$W{F-g`TY2ZKbYb?lNy{olaM+R5Sb2zIY zMlGEvDd`!i^Jl52Yy!}0g(c!wSz4k7tRGp(WuuO>!yTVQYWQL~^fE~PT# z4jOA+gKRV>jxq@e#>Osqm8CxuV@DV-PZ{%R^KXuiWW5z?RLg0QJ@y;(rRfZAFKi-| zfv&L8Qixo3C5peR_H}_*Z zw?GY0}U;`s!)c3Ne&v?32l zWE*uYn6SUB5S$Ybe{^vTp%tCG8!v)LmU4r*HE66Pg=ilc8}=p6$*lkn9J31ujrb~O_ug|kXFru1}~}2t*5s30=vb(Ol)ZuY7tB$&9)v7cO6_(2D3t) zO^#inmE1%OJf@JRtOX;9WAdNW)oNvBq>pIGwPC5G6{rwOq|IoT_I}$0{y|)+_*4kp zYTnp{-IxT8z%hxOGGpTjq>A6?Fwta3)Q5wWi3!r_U=+^vYd5#jx8#xOf{pD65+jW~ zO~J8B)wsRB9?Ea0zSGSFGPlW3=~nPNqk=}>g|LbVvpCd|M4nW>r27+m-}aD*5w>ju*%a%)1phUGieJ^M}>RP1TyTm<mzf zC^z8u2460l^iSLyCO1q~lgI6Pw40ky%{RL|Ht=6&zR_Div>5S}qa#Tb{lD1%n4D?naN#Lp$?{>rZ13Nc2q><=Qub^d_ zLkiR=MXts~9Emj@Jv{DH`nUdG!=vhSqv_al4&Pt+*aLG-m_s1=WJ@x@&MZishT*R7 z&rgM1lf$X)_h3Qb#Ia7_ro1XVy0Q8JtCq6vRxv|KtPgsWL+u;oqp){kq@C7`cM{;NH!BN7$J{dTu#Gw= z5<85)T%MEmf__#?@w{~Y_FYW+*S0NwCbh{}(XSw7lLU{_&wZrw#gp=CHnnKQ9Lb|AYe*&~8y?>5}=)cGHBx0$4 z51Qk*1{%j2QCGN-Tb`J{2;K#&Egg}yQ(>QQAjySW>+pRG@DnxosYhbLeEGdo_RH$o zvnNF!r~AU;aMdBliDStsbJ;zu@Gv0SAv5$@Rl!oMh5N^IsdrKQ{Njh+npegB$MkR+ z5|MYnwL?&rQ020ofz4-p=!>?mB^-Dtw_BYQ`#cep06vuI0ooqHNE|jSJ_8Z`FHwGf zT|X_5+n<6Vuz0kw2c_}@fe;jwVq&dxLs6*=lEDphbArPGtfm@Dz4xRdguH<$^yUv_ zGlde0bk-D%gctz|c`}n|^@skyHDHl;&X9kBC;xsr1c)Y)Wj0tW*MS~9RpJXd?9XLc zqF!w_2`Bh_&+O3dq!2h7NEw~p2%tg30#nk3iVQ@=o$ooYR#xZ*hKgY@xv1Rl54fvE zaECxAn5w69X)o5cU*30HxiPBnx-T<@d@HoG95u-qw+H4u}&D{n>*e zxlqcNrUVV1gXVoa{1N4!SEL{pIM}if<@p-A5W{r^ff$36?QEn7N!GfkD(Ns74`vu= zY!Kp6q))7qXPO_l@lt=zg<=7vzs{Ocp#!zPv$~RP2C7HFY>7S@f#^OkOe__O|KT 
z9;u4rRaV|c0k~c?oMO9QErH?89@D_9Y9(li=9s|q$P7SbP|K@9<0;h_9C1lD!=CUt zk(ZuT7~K;h^-0{1F=|Uhh}*Q`B#DH!UpI`C^>(+36)e}ZLBGTZBT7}HUq$Ye5+E;9 zCrfSro1FYOUX=V(m;Fz-ntlY5%pH`8UHTP;iir^b9i7w^jlqe^EXKUaG#hE086(Uz zF&V?|pAJVU)4R4IW=%j~Gf{L}K4P1Bc>%Y3cPmgtkmcnoj<+ zlSxd>6p6gf3$C+iB1S=TXJJj?gKmzu7_2_VP$Rnw(#U1Ad2ys{CUli;}ci%U8Ui^g9G9S_LZX@tsWGLC(W|A&lEOY&8~T%)f3$~VxB!FQPN*S5te`AZ?0bPh#!upD=H=bN$G*@= z&^QUS3kY0{c{E&-?;p=6nxfW)wLH!PanbE7K_5rMfH(I`g_ne>nitb1{3?F@DgTtv zn__AZghSi)JI#n5WR%%ix%jAt*_{UwK4^oR@Bf2|$e`7e#$d1PpA>#j@T>hCI|T$% zGVOvHzDi7jQUSGh!tpg>O!+|D$X&`DH@{n&e!*DEu-+5<4ty*sKT+&bz^G>B@)!BU1a9II8xG`k`ox26911R= zh{f2&h)e#|kkEkOO9+Y~sKh%Z(+KB5?HY`0?kN;UXOf*3GLvEvFzF?q?9%qTNb%D+ z8eUQiUTqDu6H(B+ZP^n9G`V+BoK3@MvVpwH1Z3G4=SNIwqNDV;$SB;-d){zS0z5z$ zJp>GRU4W6ni#@YB8npyIUJoT7S0Nb|dseZ(kc-wlpwy(37xI?<%??R8<|Q;1G~Qs|k*HS@OGQf06FH_Q^gt~c z@>xf2*_?D6p**^X2(K$7fGvI2$g^5NW=TX?c_u-_e)~&nh(;G$CH8o0l;vvh=J=H=Rh70|7 z&&na1xIJH#Tg(p9PEo@;|8A07+PnD0&s_C_{ROuwIdF5yyPRMmoW4?A?wP|KF`-Tqm ztuMIQ|D0ckKx}RgmGhi44d9B#*0BN3sDbs%=Z}Uo)PklUp)IX02_HJP1W>2Ac_&<{rJAqg{P7fx zn4wVzXL?`MVsW5&oa^d(*G+CGIPNiJ^Xw`fzfO;Ul$|PeDB0QA>2g!8M%P>{T|U^} zBh>Q<&}|d6-r>GF0RhO+bnEnP=I%PCDXwpJGj3e{Jv(I=ZFNMZz0ez2nDzk%_pK7Zsa@VV|C*K@ z+zv6^hQY+;4Mh4qOT<-%5Wd@i1yx)qUKMJI{mmL~nGLR;8jcMV&cs~hd6OEdoO5lS zk@<|S0gugPkFR0b(Cv!rl?Mkxod#|o9p#Y+x(e)R&(PGNt+F`F+;uQV=OqsUrjxGWoE`#QA)X!}_90me2qXf?Izo*Lc3>c`|3?Ty74=k? z9fHF?G23expe)G7jlEAiMqMOQVgSORXH1_cU4*!Cq9Re;y73aZaA7H~R@vcOCjAko zh2i4NTP$K<-vyqCK|m6EfcG(>_+glBJsRL)-}!w86e5%mYT@9SqNb6EL50W{Zrs>( z+%rw1|KJ5W_df)ji1qI>ZJZEn?KghBhSRS(c?G`k#skU)q3M#eLsK zTI8M;Aw?b`4N=rdmPCPZB>9D*#kI>a`k-Rs*({zUqIKp4tRzdu|5PsK;$+eV3OwA5 zz#l=}1w`89#c2#q*3A}dp(h3qW`Nr1bc`56;}~vCjU8pmuoJh9%u@Q8RC-e(t;TVd zBg`phRyNtQg;&hD6F%A_S+HOQj^*h=n_KOX>Vf56dgodKT`@QrDToI;HJ~`?6C4dB zK;5Ny^kwf!8(;b*RcyV5x}?!s23%-K8<6i4_Ey7!D&*CP)_0UjQg( zW@brF=-XxH_Nfv`fM!h=oM`fv6S`7dpag0fjB2tbPQs?vz2<#M1cajK)={Vd5o5-A zBxR6VtoaQ zWm>W)fJ7NvvJsLdSqV}i@c>kgn&s}vWqry=C0=5w1!hS3U16HeC`lAzMy4{h)JW>& zDC#6Zn4e)b9{6d<^`TOWdQ`i~7K|3^#Bt_@j){d035Ry5hrVPlB7zN!=;ur(6wZa4 z{-=s|DvKJU+(=xYE>JT8Q%io@7nWg=2^aAMXUqWB5f2$0Gg%~j<%J}34( z+aYd`G`J&JhUGo{hd(A{l?7+YphI^49M7ST$`B%Z5S4^W#wJe{77zMl5XGOWt|pxTkR$heb%?Mx|9K^W>oM6hJMM$hJ1t^`qtA$qR zodsA+z+wjF>tQ0H@>o~Q!8|C&=)6`v5NHxA~cyxbOLWzAaG%_>dN z@@zcl<&9-6)htnX!oxiVn>-L#xZdL$Rcj)eCoH5-)7=q&@Z&-99)0WJ>}r5xKVnK;JmkTM50 zT<{$xDR`9L1wJAqBIWHS$bks$u{Ns&=7a9aF7e{7&#lL=vd;tN+IVg5a=cORS}h#W zC-ai4*LE!{%AbD5P1a3s4RT$-?iaj~|DwFsYhf^`_o5qymfPJwlF{c5r z=H_t$vlHYxaLPh3ttv+#TM;{#|852AYB(M;Ff`yDc7t}VQ31z_BmZ-Ntmg+y@_HQ` zv0XB+uyE3r$I@PFB5DFwI^yP@;Iy4GDj(~uz>4pIK`R^7nmS}GQy+pIQYvQQ5+9I% z;fSDB;U$&iEgK;*7qj@Lt^6kQEuN~tC|pWXnh5k_{7Q4fk{|hD^NDCPH?Q#l$6xh{ zDooc_#IR||1~4F_Gy61xT_!s;@eJf}9SuaElYVTjnQo6;Lo(1>^hTFMN7vKyF4wrp@<>PM`t2m! zfn6w0WcpdQN&spFd|N02{|4efmp-_u)#40TO z(v&xUb5i5BOS@(;XY+nuE@O}ED68`tYjr!<4l0Zc&q(EuIi(y{B|byuoE*barN@iy7IEoIv; z}4d6>?Z?+ji*GuS_CvHzRd! 
zKNFg?v1XkjqSh9R&6sejuoDH#;vF}n=$3J6(UF~rAS*~AAM#rwWwv_v2}6xRUw3*{ z?d{dxJqYSq{(!GlVnXJIAf(`gRgd7@N`dGAzM_qMj~!B>+Ml5^=)r> zaTm82Ms|poi;Z%*{k_azkIzS|coX;#G&Q9T`AM#(9IU-g>b_jdX_YuG&+L5zT4ULd z_qYd3-6IP5dVhCtuBWsj+mS;a`CKh^_^Y@rj zk`ymRoWm$!ov$!|-GJxhqNX$zo9~-fQWEm5|JwPTAKKhbI2F3ZX^Js6FC0;Knr21> zhZ{S^+UTHv|LXk~iBt!7GU-_C4SO5ZH>FpeJhP4;Enssxus&Od6ooo+WNVJ6byL}} zb!#+&JbbjmdeHH>K^Hkb8iO(@S&@<8qstXXlfwC=AjBVbmM@~ib2+i&j@a`1eWM+j zBl~6VvL_bvOJlq95g{C7yR|F0W=`>fe@>l;AD+vd`uR)3-1NEQM8AmWy0d5+6S~yv zcI8feR-Z+pVvFTUh5$33qU^gjEB4n@fpIf1p1_V*(;>l67g7p51wY`-i884(Ja_x} zdfPpJP`o_4BX}qISEeIAc*+5GJgCo}BfBNZoBYYc&k5q54Ws>^mg{^66S_{eNQzF( z8^vh*|Eq}aZ5$uWB9(iAcRrjO%+D{qG0%Bu8O+dod(lhy%)`a&^ZB?R-s$r)ipD#N z#+Rh`e$*p++>+8_KckMZ+Su#HA*~_GE%T&P@U0aJj^9y~S62cu%57tc6j9Ql6b>|X>NMsTi#DVNW%?ii)~pM(c2)UB z|0z(T9%M!^ixa~{g)M?e(E?_R88&mnSaI@Wila=H^wL%H*KeD^cXzZcb7o57pFKB@ zNKtVOohFhcRpyI$a9p>1MCE}iI7*;Nph%lq_3AWg)wpntrYuJ^rO0()*RqX_817-Z zcQNy=Y}xN#!g3WS&U<)rW4W_YbC+*t^yuTRfs*jD^O(`(k=Z=dV=~t+YpansFcXb3#HO0;FuYn+3?jBLnvtp(7`xF%9CLh(|Hqkp z>xTVhb;1?AFI3y%d(PmfJ?Nh)QP7ZEgC9~pvXij ztTL~93M#V1TrrC*x0+MVlfX(dk)%?%vP&q9bSa}42(zoMLiI9@HWM?0ZBZd&^bkhJ z+?3QsH1@c(FA?R^tcuSFS!9q<57~s2R9Lx9xl{oZkX7Scl|{H#PjzM1<9Y+`R7yzQ zl~iJ?`PCl%_^a+Z2($aOJoVI>u)A?8{6?Jh@Ei7;2knCg+x+(1ZyZ}&1uz@tnzJUs z{v5Q%x9lifm)&lSE04nuT^nMy5d)OhFcx3j*U`|9P$CBx-)wT=f(f2d|H>c}R@fjd zqipL>8cB$VQa8QCQ6(x{GFZrrJ^t8ZoSOQRCNS&i5lEuYTp7Zd%D!m6==YvS{)BdPH8u+%VJlu^)>hAUt?=H<8J9{WAILqV1v)C)*zr8U>COXZcr zu&o{&MNduL+ z1Sh!Bv9NT7yH*PI)1AC<1Rff*r?BQt z&rk463HE%)JwI;GdoMHJ@(xCp^GPpee+*>LwiG3j0c{|(|GS_5@@G4(dZ z!cK}Qf)T&=HO~Qh?p3VwXHP2XPV)V8vCmT@(E8;_H{LXP)tk{rXU5UUS}AHs0^s~2 z$^nWF)}q~m!$~ySKJ8VdBOc{!nL;Yk(VCRB1$8C=-kHEexpaY{f$7y+iP4%;thb%A zO9^@UTgwR5sjHdbiC$zh7X8qNwvjF(LO6_649yH4JWf@wiq&DB*O=>)25}&G*TEg@ zuD@|FdWrQ``=Sj~?~?C)-%CvY+O?asQl~mo|M5D)9@u|sU~j+bww&Xb=(vylUt~G>YbMsVmSL}rlUHsc(Z^b5#uLMf zq^`u4lcMywymen;68uun%q~xea&BNqbI}Ozl#){I=}(R3npF0sxE1Ww4S;HpLM~ zI}i_d7JZWaT^%86B|S<&#kFlQAir1R|B=>+s52gAoEToO+-+d>2@it4cu?{k2%26x?6koV55?GFSY^1nFl+XRaSe< zhzY8Rv@61=uG`=u%(`yi4LNz!|64oY0d%0J`JQUBO&b1Y^Wb;rDr{yKinK}it(miB&yP-86UcPZnhI``T?d)hfe>ui zM>N=*dUhdDzscXXuG7*!8MnW^%y9=|fw)=sX_gfnXuY?4_D=X_xLHtJYO2@4H13|5yM6Ux$@AQCT zp-@b>R!{cO;=^9g)y&7`|2Sp@HRkthFW6>~WhP4ZT&wq3umwY~O!VYo^2PXw%zu(k z=^iWfo-WynF6z3im!i+uysYX9ixj3$$t>_S%x=KE58Tww`%r_3hN`<{iT%_p8`i=9 z_OJi`um9K~b$Vr)${`-w?7i+|&+5*%P1uury5@}Dmf=vYJBR*^P;AmrHkOo&V zqoCvld5`FHFrn(G2a)0ieoatr<4*Lyq_zxW8Lmd$DKf)u<#%ex}BmU&US;pfV zxn=A0FdH*S(o7`l9B<*g$*lTu^7M-siwd}6su>du%0%w-C=t~PDdl{k^kUETNG%E5 z$FgpV7xiezj&0UNQKLw4jx@3LObh24Yv*|F_h8W^Px41<(H3#B`B3bAjxZQC@@9aM z2qUI-5;3Q!ksyR(8bME;KrbQ7Zfuq+8OLw!&R`3*aU1E549#I2{f!OVAu6w`&)&gr zu%+JO3a-?F&%6T-tI}NPu^uPKHttRt6i^T!uMqu_iAVuE5|8bukP+j_E$(*@kq9=XC!Z|B*`TA6w~Lp z=LKYO_p-;LTrwAxkQWy+r0k^lPICy8jtRg|+)!k=6yv#A$)zMvrDXHsl%fK$km<+_ z?O-!R+z#%JLH))MH@s1YwDJt&k01C!4dLS|-7y{GVJn5B8{`ixr-jY>BRORW%oHvV z`A`4EwUm9vlkbJ*r>vK z6cbJH^Fs%d^;|JDpGOuE6i)>}Pa!k*nx}f;=Rq@*=O~mWBNQ|nvIbp?)jEk@n8fvX zr-ERvL`{>&G%_h%ls?x9l?)71Jt-HDKvX_;fSQK;fJ&EkQyO#fRILlDL?hj#6dPhy zT!8fb=+G*?(*C#wN%1eQmdHA(GaaiE0Hf4L$FsqVXhYZ%OAV2)(z4yW^nfgl10&G` z|G>2O%=AqAGEE&SBz=!RGtw_ZG{Pp+O;hkjI8s6LbWih?L4AZyd+z3-!=D7z#ol!X zsUim*6H!+N2ozN^kIz#_^ zJ(m>`!ReH6vr!IA!z4q;Dve+#6;A)N1Pil5`?IuW5nQ2dX+!MQJPTdZRbAB;c>EMz zEi+8s1VT47L-Ezg$`&R0RY*8Z7{}G7=(8pRGc9InwA5$qMhjwn63ZGD2;E|0{~fhr zX|x`Ir8d@KJr-m`_Jl^(NV(Hw*FheNbSGXm?_3shoz!ujlXE#2KJstw)Gsa} zkaUstJm-y^;0vApuqgeiE~T~U>T^De)@X5O!n74pE7kebG-{a*YCREmHF74`A~6Zo z1+!LblNV1H(p~M9GfU<&87o3t%#fZpNXXWCC(;*lP$O1Tco+3VV>eQTj&1=cVegh< zVX7xCE@Fofj5?GHOBFYKX>D|}`;Mkn^Y{Eth;d~VDz^o4{mxcVRw}g 
z2;#PpSfmPP*K$En2X*Idb*ts_IKy>cSK4Bic6sEA9FifkHF%qrch^*Y|D?8r!IyZ8 z7kC-Ud6jo-1*u(Y4i!}*P{%eUjW=cTm4`LdP;oH{S$KFS5%??#e9sqA%ojF6I5!8D zVn)R-09GL77aH2GeuWkbBS=-1qQEX?H9nSq7xzbRH4T52a#iS8v9bX(x2$0H|0Y-r zy(@$E(VHso9OKH(d^Q4m0fXC@1Z*F7bU=L8P#^&HYl)- zQsQ?kjt^5xVGlx&8A&QfP1UKc_<^!Ge}_4lTXcWNn2g!)e@Qm2{{(n)3HT20&pqIQ zWszxyvY9z!b}SJug63F3($bvvtAfv~kAFoG8AWIbQtE0qOebta%an*Ib&*N9k)QT3 z`I$9mIrT=^UQJO4Gg*_@wL#kyG(q|EN^+E6v4=gAQ0p|tR#7C_mKXbWXi!sqn2$vQ zR+9jhb~9FbZ+S#U8cj4hn7s|UOqDnMr4e{V;%=!+A1I|rqnZ2HZVE?Q5*W`aH-KX{ z&)B#vqd6Qq*K>tw9xKNS@9Br%Mh8S zMI6&On|;-_g_?oM+3lR#^3Y8@srtW2<*F0t@`e*ty_&0o`;e!WYFmwwFWIbL5V`-k zl{*TOGYgaBIzgwKT{(FsJlS}27_WumtSR~!C3>PIb$c)BqOH4oIfWMEc8GKNMIWV? ztFWdbJ9kV~Ga`)QGO0tZEU3mgvjrv-uz27wb}1A0RmIqu{r7+qd>+z}wfij{4j6Np zCH@-RUE)pw!_b^5&yIa#IHvl)+^lo96rFxMCx4re|A#xeY{`8yN$4Su+7^El%{T1>>!knmfN>dsEq7RTyLKc z3acoIV;Y`GsiiUdsCFw8z-!UKrf{XPz)O4m5?sOeZnarkuf~HJ*nz_BcyO5tT++rY zYeDPK^20&=j|EZ7K!s>nl&e3uv5^*$i`a!*_ynuugf(Vu|CPlRITPR2Y3by~sXNCf zQb19;NVI#qMIdWk61@LZye(R=TZE$*=utbmTb&NFb$QC&+rY#Z%Li7_DYhVpdA|v6 zexJs=F6cj8rOIx4u0}hj^>AF&TpS3bIW`zD1eH zBRa1UKH*gv0}c|3#oTHDI&SOvLq%zK|13V;DSP9`?Jk?KGe*fUM!wf`X;mob63&9{nWY5s8LPuep-KXP8@BacAjJeb(fSm+RR!0N%YT~*>TS@A0Am7Ynb zO34(k4Pvwpf70CTImL@@>L>j$MJpk0M{cnm_0)R3g&&4rxY09R?5X<{O&w4`{m1h? zZ3A2S-#)K1;x3sur$VnLsRGN=x3}S!??W2n71kLWI;sXgs>JQ^`McyJ11WJCr_nF4 z6t^iCm)W5uIlTk(0V1D3d;|9tJVG zWwJKGTD7Vd$XdNBT4UytWFuA#|GU||i8H6pYPBB0)byy0P)>XX3DvX6ky1;EnmUD{ zVoI4aDMmD*6UC}jtY3hn`3g3ykslvObntL?!-cgNw0eDOwJqDS79do}8o{pKxqH*v z_$z_~-wAo=;vg#_afQW-6X%#Aw{X=Plq}dmdV&XRBd3T?*#tinSt<{Jv`NLpbVIad*}to z6oLJ1MHPNt!~h{wRvZNh|554{wT2N*mBL|%D7}J5tA87gC&*4 zCiF6dTVuXXb0=3pHZjlXryMh9e27~cb`^RA4wnR-C>^n`0<0 z%j|xXESM;Rc}`;B|5eG!_*8>yG!>r-mD)Log<-6Bp@JTY>7|}Bo~VUXVveQZha{eq zOG%fI5@U!p=6GXBJMswRj~@+LB$D~?Loq-4Sc!2x>1Y&@lpa&r4n_jGvk{g8BjaUD zKZTjn!!eB%Fe-Ac>1NF($7#%*N7ZSsQ|LzZXVCY(g=>R{p6AxInL^NnS*}uAZLv@z z-I!ldN7`+zp1yi@r^5ECYS^j1cD1j=f(_~gvjP_?WktVz9Ix0#9aeK+Ll>H;#vV&w z)zV^%EqQ`GO9j7xSM9)FULd7z;DQ_7te^s_D4u-df_sP_-pXiR~qW=$g2S|K&@8sGKQu5F!e}0t0-oi(iVw=0x3;iHzDcMKMPW)$>0oz5(%B9us$*I16vjK>X%S-dFp}{y2F4?`qhnsSgf;HnjJ}FMw3bTKyJv zxWw(RGyQv%BrlaN0cuid5&Tqc8hJppiOqqrDw_mL$r;|r>NiDDlm=g!E9S^8gecgX zcZRb-T5gALeN&vX4yUen4KYAbVc`NTAfd+;QVx|c3Hv6uLvfX^T#}O=>695QcooH6 z?IE1JzNs*jB(aHtEMkH_bW44$XHNGro$lPouq|paO2Z4I#O^tdG0syW4S9z@pXZKs zK+hT{Yoiqlc0D!CkrQ09BhB(CJ}o_BP33b%Abo~W^|8+_8a{YnKT^$d6%c|ipZ3zWG$)hKNmw!D=MZDzZU1}vC`paP4RMBNn#F^Rzsc2H`z z%oPbMhreuPbC{hoCQ4Tz(mD*R5zizJxtOBJ*mcN{?n{v2BIM1?sWmB^h}Snyhs1Gm z=sxIlD~swXNU{W?k2*wOi{7~-Ysj-<=J01cy68oGD)x(F)aPI)G7rY0@i*$ItUv1* zu<79rOfUjdX79)eQLLTX1NA5OhoZMO}anrR?)%1a+9OX-Qdfe08X1Wpp>PUy0!3{38mhf9n7LF>K|Jydz zyNNKuQ-2xN1Bou1xiu7W(_)t(WGh+C?B=?dNT9HOQ*-6yP`c3S+C#K z9x{cTN3aAG4Q$QDDY&oggr`dg+an*XBe5KoY+?^-#S7z$9gS7WY#b%l!N>;?;OzU6K#i@;`GjyIj>#cFz;FuD@9 z*D7CIa(*0F->&*~Kphq9Soup=a7l~3>>BX1l=#*UA(*fKVi$uQOrqtK{{p|t{Aw!p z%91&4HYC}&u&Fa#*b$$&v124sWmgOn8CN47<)}#+$!HVdtytF}u56E4F~x)FjC(WI zo{trp;~w8^aaGtvO@xt@AIkxBh{kB1gfp40UiYlrUb3I5Eami0(owbSq-~&FWlPBg z%d>2A0>#{AFEB}n4SmfS+SMp@;%c-!4bGJ$IKeK2;>7zE#o3*!j{4%PJCDetm~#okBk2Y^ zhL^luzgl@RUK6cRed~xYjAZB_OZKYNX-x)YOt~h%=`xVHUuG`7L&#+^ z-`jQhChwb38N%*<1-#E)>A-ubWj&wJPIzWy+OZ8)zYK+}XT@)RGw!oPBk1DHW_E?B zTJlKKE3T%$^uAi2X}`7{_B~74zW|24Xb2X=DhnQnb1q_mzB)(&n{}<1$2^Es{X|ZQ=vNuSdb|q6JS;FTeTY_5faC{nud=fzo8^&Tq_k2ajbkwIq21A3a;A2o1Yt=)3Q}-q_ zrbpoyelsF{B@uS#M>G{@hQ!o<-G)sDmsRUxXUvjr{_{WO=4MjhQuW7DbJsWySX@_S zZU7j7t0Gfy7fL-fcn4T`vZR1?$bgK;RA6yS5O@}3cW|E*WOZO5C)6Eip@AG&ft~k( z3KxPYI8B2#awjM{cV$karako)doM_GW(8XkqJk`w|03|j61kTp4Z{q*Ct^j?YUx0P 
zKqyD=01ol=SQIg9YUF&W7KB=|f|TZ2E}|54U}Fk}S<}}p^;LDGfOV!?CruMMXqbj)MG3W|jY`mlZYVh3mUjdsQXwUWk%EW($A^EYcY7CzWOGV$SdhGN zh}e~gYes<65s3unh!<#hbY?B+HGye`j+#htK~;^Sm`q!hNTByRDQAbJh>9jDgMCCW zUx$+ZGA8g?ex0R~uSk=UFnfKXM8YO0FcxF6wu`*@Fv2)I!8nYtRwg51jL4Xb*C32b z_#{54VYgEu2BnH7B5PPkJ5(4m+V_pr03%)L|CLvlB}xZ|L3ENcXk<9xTI|@4xwTa= z2?qI?RVNsS_9$KmQdPudkafv!i{Uv(V{ZaEkOV-84TwqxhqWrGj%lwltLnuUZi48c#Ky#4nJpvLDw~x5y26{nUuw8oHmzF zKv<(KCJoc~qTMMb-Wgil*gZK~qS+&s*{O9jF_wHFFl>NO5JzN%c7{0wQt3CJb~%3o z>W2WSfBlG$N#>6Sd7ttoh;*o2hPYj4CU8X+pt$sxd+3+dBzs@bfQ(p%p_r$Y*Pw66 zr4WipU!fFL(UB42nVk2ap(v7&c2}}Mnwp>z0P~?PCyfyHBBsf4%~T^*Nkw&~n$}b< zIw=*e86tADMLL>mM^`;f$dxuX{|*^u5j|QCyr!!flRa|;YQe@%=;Vsz2b0i7oda`q zm_>CZ@snP-J%I*CG{~9NrjD4{If8m1OqNV?$*pWxrtMjGd)aMaDnj%Lckzawg!x-) zT7a|?nXN>pb^4`&N@Q>9e`j}cW<{?aS&5Vge~PECF!XSOCNG;gO{CBZ8%mBu>Y+~O zf+s3YDKvgp)Dt3VZDcqF&Ln@_JdWlq3hnDk^SS62l%Cu;jp7PVJ-x_e? z%9!!lc=s5n<|?iMX)1g-|BzQ(8-*Eoh?xXs3$F|LW@x4~HBh#*;I^Ltw{iOibIZ1M zYqxWYw|M)uaVxiGdwFC4xM(&DU*NWA7Pxb}w{ZIhdz-h1OSpaOxO^*ngFCm9E25Md zdM?SFG2uPWwhQSYs<0?*%~_i^a*H*In%$?WvXiNC1q)cTP@o1Uz$Oe!*mXe~4{7p) zjzzn^6QlIxYENknEMpOG38Lqtu~&J#n_!*ZCs94wwOgx~;A)WH>v&$nwK#>J%adC z!2Bz~0ZhOJOb!Q(|G){Xzy`d)4D7%U48i`}3>+$~((rpA25i1}beJfq2IjHn^0TE& zjaK;zqFa>?i<(9XR{@3$&xwgzS+OmlVhklb$1-ze8`0y z4vV}EjqJ#e49Sr!$&*aUjBLpboXCdkz=jM~A1E_9s(ZRuJI$(1oNC2dKnb-QP{&vc zyBC&D*Thx~{|g>vrFEnf9-6`|bCzti2*v7hB5Mu1DpY}N-S z)0=~rV5MCM%~c$O6Ysd3D3xJ%@+aSNqfY0F&(DG}xa9em~3vZy)ibQ&>4;?T_;0it9 z&`4m>6a56l_5@3S(H%{!A5EPhZ42e+n3TzpaZ7?4)!8N;$CaQE;IE23Q#Ku=@@$1jYOuu=I0Q*eGY-`ZX z5DfPl{|wFW2U+da@;d|$&7Ij}2F{yxol6X-oCB1n&db!z80bT2iml2b&fbd7;k&Nl zdWUw0#@nVheYvLatj1*OUK3Q$fn2x)APH|9zv0l;8DQ0c%*U}o)dSGa2td#QFt?2D zEI!>XJYXSe#RH|?14Mw@8+`>_AO@}N+HL^buPxhSK-;zb26s>hx!ni5JqNd~+O`ec zu^rsJ%>x%DWJPTt6GwU`!qU}tFO!LSsw)b47AW{Jl1%qeon@AzY*A|93eC3=lN!B( z!*T;M!qodWaRz9>^5h?BO0RX4iYTAFj~wX5#-y z;v(+2@A}tg#jb36Q*XSN#PYs_%hm8p4tzY>nf*XmJpi38&;yXi2Vl{u-P^p4etlr6-EYEqb&Z6wQt1;)G<3 zHWL=|*(ztSE~#kOnh4TLM9oA*63jkop0D#$+4giB&;F10Z0PyLTPU@vj+4vm{cPs>`P=Q)W{}Su1 zn?M`i&gS6OTs{`gUf(@8{~_t09{V+j9~9v zj_>vUK5Agq<<+^?3N*?B7Ugj=jl4Q=@{oB$d_=(%Q zO>K2Z&~4o>h~4RO=jV78buL9L*x?^HFk>BKotm1YYUpp2)JgiH@?E=G=jgq_-||c9 zL7vB_e(J%1>6%^xs9x%mPU=-n^!U8$`0dAA&GcF=)~sNfO3fu_GV7l_|GQg)eby_e z5*XKjxy+BPzxwUt#_Z#We93Fi_HB>J5e)YbtnF`|mhH^OgzZz|KI3{P$4zg{LEiM~ z{^NkmYJYG)6eN=zw-uO_3-HEX$`$p z-^*N837ARCkLsmVgWvN{|C$ZfJg@4D+uz=>xBxK=K`La#4Dzal|4ZRQh7DmoYRj3s0FzP!!}$oPXyoqV$Uq-8Df(|LS!5|MZO@kt&wU)h1@>+RbaX zj$M9ptS-5dZx><4QHQ{4n2}}~WrQ&%7X;!$0D-z5j4%L!5CDK20BV>80FW}oFvMAE z004joH|$VC3`-m!7!y^jP{kJftKvUSZbUFXW30Kxnq62q@W4%8`%N?_h%Dns7r+SZ zi14~op}Y^KoGHRTxQg%-ECUE>7DvhoAk1)Ll#8qy##SoqYy>rl(Wt^ z=ZN!8JA84*8DIWXFi=?*p`ys&Ft}g>2O>Z;1rb_$v;!*1gUV5rOhO4#O=Dt-Q>k=n zfTsk0BC04-O*O!%s3NVDDp#j^)hes9dLTlLN+kY`i;DNr|% z^3z3YB@=*(ZVfXm4#Pq5K@v^2NaO)n{G&v2x-9V%F+mzjLW^=%!QT1cqh>)_cI@%T z9)&bA;Bk{w5=nTIBSBKB)6khryC(FeLULjpGfae=$#qPyiK=T)z3gO!PPFsH^G#Ly zggb7y?I2_Vm24y-cuaO&{ZlG15Mr^=`T4c62B{V;bLgyet1-~JT^=uE2i_SkU*o}`ZiW{d z=OqGp$!p$wo+QCSLJu|4o1VV_R=t0j0WICbWF|?tz3^$xYGDK;0Y-!(4#AH_U^~

    _DNC5m(w4UjQw(ast{WY27d~Rf&>RS)qoG0yPUBkzS8&02Iq7K| z{F?^@V6w7=q=Z}p-`FysLZXbRg^9@w+hW)zJ^3(*bUUXxWp}rA3Nf8~JCs!{l|@Pw z?ukysC#qEHhwA|HU$`0l>g^R1g|HJ|8fVQJ}!>tT-yIUIc|G>As&24Z$E8N?9HnTr`sI~BfIz9e^Sr|PhaA){Y;)0ZR z5+&!goMlpT0Tp|_Ybo&>8PkEq)Tj5FNJ9EoL8o zIzji2#+lHx)EB(sRGV1{Y~N!`VyC&_ZLlQ^UV$oVaHZn|L>8fzMeaL~i&-KUxwuDW zLA*u~TN^C#B&i*3KBqWkP__1o|D-1uw>aBD@Yc&nX2F-mT-`BCx1C@fZV#tBJG?Yk zve!ME9I5-!>5}=#|2I^fcALdra^a-IpaWlo!nirHn8X`YeV+6nSze$bG7(n$YL^VH zLYX~?9O@zqSKx=>5^)+#Q6=zL3!G_BQaC~+bdiK2yy~;wkCiN9H7-RBCQLRXE(Ms& zTo53HZuHt9z3%n&R*{Aqdk4HKW=eUBP;8N;DU;9E@F%S-fN#Q(%@4(PM$c?y#|+ zKD9Kp{8w9L`u|r+5-@5^V(+h>-sC@j`q!T`K~8A4)lBxWd)xoJHM>SZJ94`xcvB+4 zp)z%wJ7I~U2Gk-^c^J2ot-hnT!2`Vg>%R-^H!}OW!%Mtz`oB4QJi@CR$s--K$rj6d zxbsVbPbjL;Lmz8$HWU(ouu2pDx*F2c4%5>qepxi~`Yz>=z2Un!6PXL98M6P(ML z5&~p9w*M0-Sc$T@le+~px4N5{yn{f-p+LgR8x6ETQ{2GdQaFKQ1dFmZI|(}^+bmM_ zs8viB%0rA$$OX?>tN-dA99)anBSSf?L97|8Gub#n(+wU}FM{~NpMoz-kS10Ny6hu5 zuNjIzBbzJCJhZZ<2eYbaii-{TJX`Y`T@yV{stY5WwH9M46nR1BlZ4r;JtM3}S__Gr zbDxV7iW!VO&jAT55S$yU4L-aWKWqayD;B~)L`A%;#%sh!lt?o>1aT`ZMxn$?q?Aw; zs7y>i1)R1{bU=8!LC0?rVl^AcyslLm3RPgo&!r<3|MRfMxW!a*;}&ipKV$scEdnYm}vX%sR+vkpd{X zu(ZaXgEq2krFE1wv+|!0(Y2)e5Hf_Y4FNwFsk(j4m4lf#F2c!k_1H#;Jba8 zw^A%cIC(|LY{`{m$$B$EjH(mFbEBGk6V|-5n}ob;J3)oZ#XjJK4B$Q(v8JOGy$hj1 zT)8-&*#^6Ck5|~ZN5ZS=v`X{ZB_Jd$i4(r+6CZVax(u*Bxe&e~IH(<}zw?ZNuKyIv zXa%LJB~Hw7{w;G)F-|7F}38#cZ1pKoxIqZo5D{YFI^{FKha+wH2ORm_%RI|&?*{`+g8dRd2Fqjm;Owhk%f(88m9{+Gqa6&j3 z3Jlv^vl*h$8oJONjZEOuH;eoj3g}P~oeC@xEU4&A5^XIL6{rX_$qGQp6^$WUd{G=# zNZFjp+FVl{1y;z5Pzl|OZ-D?SIX;k>y>G znxYp?z&NNY74#trh_j*)7zO8|9n4@;pES$}I2A!NpHKocZF~#qvDd`13y~o{`>Z9F zQ@REVATB9Vfo+RJ3)3$VrCd9UqH?S^F%(Cw(?0wgB3M^T7nIC8~b#n7B3Jf4K8!vC-Us1v$hp+0O} zHvw3ID`<>s?N)ECDN>+>Z+)+*m0EcnG;%H4qCivULX5@WS>~e6$?yeeSTKWN5TVhL zvuzLsQrog++qPX0lEl0*;Anq{LJT|AwA z+}z|@hzi|&l~=^#B(KamU3v_qeOjq~S}8S-hT~mV_y(%IT3Iz-q~HN{J>4^Uosm)n zWf7>b!)RQvoX8q5lBf9u2M0DBNeV7(Tdy zLXfDQT)Z81-1}`^)&;!DJR-}jyMZKGxXlSpOcl_Kijoyz(JkHl-Oc4?NcmM=atc-b z#ZWrRSXJPKbxDXrFqfW^s=I;(I4B->p__8@3{mJT^F-QE(A~Sb-UUIB-(A`R>a_*S z1_m(>{|JyjF-*gR8{BEG52UE(WnSnF2Wrp;`xpo7JrD!ok?Z9VbXZ~sqT(rL;wP35 z6|f>hox)DazOCGxtxMTvVG@Qnk}am#iF8ET;( z>Y-6IqBaOl&IP*3A=*SE)crBV;{rmM0dyvWA~QTV_G%#eR5{LBke=pH#%BpK(6gi5 zS?y;nL)m{03PDy1j%@3KZdnygWQFGITwSi3W#mSd=!srv)l^73APM%&+ zJ`Y)N(j7)-ZK&R$_TJ1UTlTW)jIQNN?rb%}qn+ku=e24(dZA%f>ZEpRqlRJEc4+|# zklAKxjTRcHhBJ)4>P}tEB?yD|UE@W>RK_Ik&1%R%AuIIr?eTzJpT`?7=SV^8dCji=OEluHKf;25xBVR{n-;NHk)827|J1 z%^n6!uJ2!+A4ZnpF&-_`F{uJL|yC^v66mXl%V?=GjhT<3CK=k+bm?=S~;&Yt1vY-~~J zy06AXH(GPkwjnonc6Tm>IOk{!&+`eV^E#(+YoGJlws0FpWy|Jac|PFhK2ZYo}$WXCY04E zb`#{-7RQ8&Vhr#evS(M^5&xH9Taa@+ul8%FZJ-zWp$~T4&LKZZ6k|Nd}Te zcVZCfhtIPIr*v_b@p)fpeY<$3zW3z;;OhzHu@8>6Gx8#T7(njFAs@+uKXqE&Zic_n zI9bTk=4qS6dac*{2DWHKZ!`OFhjJi?Y+#3O0P~br`KFd-_84=?UisZUYsl7;Z8&xc zy;##u%uUVtXE*GP;`wP86rcb3Z72F*5Bk#&`sj3YGoPEK-`EgrAW%caXk`>gHvas#b5(tcheR zS0Y}0eAOX#EZMSVX~;;c)+--1mb{(&vqFUnL38g~$=jywphJS%4skKz1z`^wDj;qE zVKD>6jt>-wELp(h%9IThNQ`1L=fevWB!G+=GULTK3`>A8VBfxZ&jztr8+$g|+O}z* zz%6@s?T|}<1ON9Vd=&BFzx^=!m2CO1Q_e$XO(`8L_1kB8U$35hI(Md#zJu>0Uiipm z#;~2+2CbXVA~o>NI7SR5@2>e3)_P`MP7 zO+|(0U4<4(^<5tV5k!%PA4(LWh$E6H(MI;6h{`G*eS};nC&7qPS2JQrlo&SAC`L}v z7~~TiZ@}^{P z#z6|HZ~x3K_aJgnPD!PensPcLO-Lz~lqa_AvQ2ux%$K5lsceEus;uG%34#3uSgU`R zu$?!yF|Vl@2=s3T5pl;K8dNP+HKmyUQB2irU`<9Sp}JABK+?W z7>qyyo24}dXPYHh2s-b$;|NiTi0uK4zIaN zwEtfpH0{@_UVDtTP|F)L&NJytge~U6{f$rRrHyvoPvM=nw=G3Yuf2a~4K>U$k40v{ z0)Gj@m}TO+G?rB`+_0L0cC4}G8&_;GY8YqyS!EDswmHa!F4}|XG}k;V$_lC+9;M9* z2i>@Y*F3ZBHb<9tg}(voU?`^agv-xF)0bb-L&xeD(@itqywlr6C~uKi-^*0=eiI9I zEi8g9w)bOSi~R87i}K3ZSGTLx?BcS&@24e^I!s6B>utCFNZt>){(bWwc>PXpr7dvz 
[GIT binary patch: base85-encoded payload of binary files added by this patch — data not reproduced here]
zYiIVV$1KJpq#KB>aCJD@4)Wvky5!q`C}#0G&6Bt(Fq0AT}z3r`T(_t4%z z1PM_%$zpNIMIRRf_3@Ypb29QkZinQ zjQIlYTO=wMdHQ76tf#Jm_~6;<_wUiLvRD;1YzuKM#U0x;j-rJpB1^5x3hU)-+OuD` zF6$ch*>bHyLocV5W%-!e#+!VVdc~PDGikGrW$QMLoj%@dYV z^>R(9K#`E;#atlTNwjobz=$_P{Cy z8S~=#4x-_D4HlkwvJCUhpcWW;N{M(oC`*I8G-G8hxzqtFIUCI>W|~SabEYc<#%aSUrHu1ulC!Y$ zPA5oWsREdkW~fnt|D90huLkA!FG>+rO;Hr(;^9$MBIPP-zVLloQ%*NcY%vp8vo5gE zN<{G5&U!i4Rn%4;##YesuFV_nbnOjZUWsKF9p&EjZ5QjFafk6`bqzeZccPv4@4su^ zHtyLvGp^i7w?S7n0)HhuxzbH{7dh#a!7EmKry5&rfT`w?_E7zds$i89*4^QV7w-Lc z-)q#k;*E(%`RI&~C&45tgT8qMn>mJ_|H+e)hw_HDT4tGhm%+>(2A;uBiTS0NPLt9! zIk4#mpM#S5Xf}vG%8U3lFrRupsdznKtJCTqfTNij)?&9iDb%Gwk8<4s`w}j|K&@<@ zI$L7&5W81h$55u4TLq=^nz+f$Gm8nK-t@LN6P~AX4?!JtSZKKAl!rMjq|P^T;UHyf z$8o$74!KGvIeAbng}On9f#9>PY;bEA+Yns>J7fuR6(}>KJ00mn8D$u}?7F zU6Ke%$l>J=dKF`+(0-Rt$~N?&ZVVF1qU5n9c?_i_z29j0lsrR5B%%4_=tltwQh|yV zCN!Ir&O~Yil{$8yFmkD4lQ*XFjZ$Q>)z?o~`LCOXq)$4>=Vy^47ZRytxTKBQAXCRqOG0jQf4OGrkUlp!Od;>My~3$jjh-lP4~W+zVg+xc8W7!a{O=}fvy%re%RK@{nuIu;%eqP=UgKy175Ij zVxJc*Ru)2oG7t8mq#pP*3KzC9NK3358Fu7_YBb3WJMm>7`@A_x_Or3}V~Q~b$+-%(Xl(9h^U7ZZ>D$s}rADS>)k%8Y3#S&=_=Q8wV^G(ad+igbuO$}9{J|R6 zOxt50drNB{+1l44esPm{!t%*(0N5!LwvElmab%bF*vnqBkTsdiosOCbhJ91DZ%b`x z8(A&`TU$s5m60zq^`E;_V9qk!$&HU2?AI0hz7BJBct2gh0X`MI>s`Xz+&kcF?Q7|{ zfewQE|N9;Q2W#B@u(u~$iCnVw^112@P*Gdj_T{I4L_ zzUeG|`YTUu6Kx-tkwr++Dsn9^)$Sq&lu5E+l#ckVSTm z*Klw1=E&rp?MPb8jxlFM2z)7PXS+V`D|yl$j9jp%c4?{<-Cpf z|13;Nlx*adjw7J%qH;&4Is#96it2VM-J~zVdJrVCO$c}JLaJ}O+|A6&444#UR^W~8 zy3hM=<(Yzr9;Tz;*3bRALmq+!AMy-`{Otgt|>e(*9I~4;9~SF5CWa8AViO85)qMduJv3` z1bt5SlrFY}F0_VDqZlO!=f}2cYsp}6154`ah+-oyP-;$LOL9;LTQLVS41uKXb}Z-= zcMtiD5be_D42(hvc@eyX%Bk#T(YmS~*iQ@XLBGO}8OtNA?5>*bp&bVAKfYn$|JY?2 zb%hMm#SFC%4HfKjCN7)Q@c!U1;_}1)(h*W9P*A=m(QZdH2^fU_tM@)H&tt?;eED3TEpTg$!F?w|EEj{o`f^Fv#@)F4e0voa`|3lHV>hTkK z4)>4?jo(u%kt!mR73fcj1K)Gl57t!PjWD>ifsy0-OkG;k#GsI&pNcK zChg6uc=9`(F$OczI+*cq0Bu)<#9ga~%tA#|ju!Yh= zQ#X}70L{Pr?i`r1H!Km~Io zHBr>w^lID!F!4gyAGBZ;v9-%pnuTYPo7o;Iugwa6_6%(W;yaeb_Z6OFR z6g85vQL{lE#K|WEuFxvgQh}0FANFBAmETxYRF#sdWVFD>5&m|_91o{eGDRFC7l;a~%-YIT)7E(kfFYRc|-7ZizBC|Jw`UtV%IZR1}iPob&@YLZ?`8 z)MGz(N4YIIL31oY#Xn- z95lipbQN1N7@@FWrNJBG_RvPuJnR-X=V7cMc2gy`dyx@v24qyp=@*_$9HVk7KPP@~ zlvOm;gpOD zo5*&5ci7^r%WWBTHV!uK!YNX}7kqsvZ}(P-=hkn}S2qK20QV9=M!}^mm(x%fg!7bKbL`57fT)3j+rcI>-2(C zc3P+P+4wY8F>yRCZ?_1BEb$XulUIb>#3Oz;$0j$VNDz62lwVTPY-$(}Axv>{0bh6Y zHO8)a6XS;qR*1cqZvEznaWjd@SI~-5M0ayk2iHHS82$`zi*uQa!x2`~HCDseLcaEY zZIwMQ*I-uG5ZCx0_fgjrxLDmXW@(JY|9aME^SA>IQh)pyfWP&R37C?%%~A;YKjrj6 zJ2H`j4){ikoEw>p1NI{x#v#>nE-LwH!)=BUL;F}5W(E~*1hsYexrYOm7~R%{PPt(z zb%<3Ne6{d7$TwosZzrEvecgjVs48RG&i;KADGiNuW#lGYUF<`7AjK?ry=4HNOKkkHeui zb*;6LHv?6Nv7?p^cdl7-x^}o`Zd=Iilf)A z(*(UWfH!o4Ir+vC1Gn1yMB9xDhO;(LYq83dh=~z>sc38W$?X)WLkOxmX zUd2>Tx%rQXO(u8ydIR;7|JGI)fRr@~x1ST$w0%+DUNrDJTDs#FeRWcN$9IV{L!oc{ zZ?-YOCfcI08{s^NyYtPvzq?eQ2$#qWy(4$M+51-I5%w@|rF9iaQ@f2rrm`bYzfHG} z`)G9m9KSiNf(=4@wD~?4*(q%BacSVojryo}Vgfg*!mT!VxwXA3bE!2<s$<2?Hr(!&G}^I46M(O`YHg7s~HNyyvxoAP1eUfE|_JT-8da~kG;9oP>EOD2)npB5#8T2TIA1*kdAUp#B;NVO5U zpjI9i7YVj!zJw)wh5rg%xbl}VIg{JWNBQ;8W#xZn%h3rH-D8}fZDnu*Bpg;$-qqvq z9Gb^<6B_>w9E3bovdT3?-JMW$@F-*83;xuX+`$0*$(cyu!H&KAx0n+WKd<~cC3}u- zJ&!UT%)J~-|HqunF^*;w#?n^**u%(XxAlzq#BI`To}L67UH^<~h6V z?R>&L+z*m{1)DnD<9T;2$!Vh<2r8xKC4ZQ6n~R1YD+<)EGM~4p4+$Tm=znbV0peG# zRHaZE?BqyOtzHZp4in}O;=_r@m_eiVEu*)M+lo~T$4*|od-(F1)Q2+VzLP3hmYl~i zoy?gu|7BWyw=$>Bc-yr3e1m)z6~7q>JhqqzldCXfo?xjzj?QFUu=C1wllkq!_m@kiB z2oBK`+Rs2jp7EWviXcZ{9y-f+x|;g?cdS-`B$SW>f$0edL}Dz+kXU#Ag#uxJei`z{ z|9aU4f?%EQ%I1LeY$lu^Bsl#Nqz^(Hg4d}5WJmSpu+n}Sj)7FieVVisC<@gay?p8UAz zqEQeQBZmwM_SasOdhyu}9WwZ7Y@&|Jo@}30P+D-$nX0OAr259deYDLAU97g&wi~Uh 
zH8%pQ$nlD+tFPk6T(Q^fR$WfVwl<-6&W4Afc=4TQVS2Y}re1t+t<{3D`L)N~|7QE~ z_+TLb!uLgjArx5ch=};nn5P43ideaGMf=AbB35c-gdYBTDTo3G!Dxv?KDr`_EWT)B zW2Za}al|xIbYs3eG6ZR01(};hj1m(WAf2~RC32lko=lNNh4v&AIql42kCt9`IkQP% zmQz#CW-^nenkj)9)|*Kh6=hK+FQwH{1eJy4oWB?vXw+T>)wH3%U=pdvV^Fm%e_qoS zZ-{+?x@o49;_F$bpsqddZ>yGy+Nr7L3fXbKwyNsgmk}$huEF-bE3k3feQmCVkA}Fb z;r{(VvMVj?TC?CyKDpTYwu_!{pK41X4SJq!57?DN#s~ABeAAKa0C{Ls^p+UI} zv&=Przhy~bmP!7ZJ^PFjJ74XIB=I_m%;YOmR>gGvPD3sA%E&BIN5`~SjkSygFJbq} zT0&}9n6c5TYzf>n@+F%CPB;GM{sZR76y-H9mrki zSuEsEvo3ca&uQ*(eC>qS6VTg;VTLVQ`z$wITg@uVw4bDxsnSG}h3$$p|}TAuO;NvMdyMX7L#n*x}{0ix(c zkxHWNh{!qxF7P`$)Q>4kSwho%05fWXVBSKgIN+$#RO>lh1wp7nT=qt8BednOa@aw~ z*&~-NEM_Tb2(W6+&~m#|9k@uNL&^D2LY0~1z=GH=6p1pF+i~57l-NJd@C#&|LZ-CN zCNL?sh)-3dVl(5y#Tw0scwa=GF2LxqAnh$ zx8S5uH_DM+oa3_(9hyc9N(|S&mXVIcib<7$Fje3WDrI?^C4ur%hGmp}q(~IV>~d4L zVrr)>{Hb%ZHCA@nt(A4fYO1CRxLuM5nzGU$Fh2-h|LvxxyOJ4dDx>O33kY*rD>Npw zyz{_hW-fyV(&kyW`!=8MwXN0M!V}|40~**Dl&1sFaRq}(@ibAd3M?)>4Yp3gCJbHb zB&hIk#8`vmvv`nN$Yqxou_)j(F%B-oAu(}cvn&=$((p=^UJT=Iq~nfSLha1(C{a6x zB*)<3ifQc_+LOS<5%cTGO^EZRBr(3s{0W>PKS#0-63Qg-Q_Xj|8&26Ww3T0cG%yMNWvBqabfd0Sv%!O zp!0F8aa}z|GV-v?vOsW&lcX_|IXICm&PI%XE#vNGsm40y@oG<+q;jySC#JOtAlanQ zmwi@DL966crZ>Id2Byg_RV18B+LYZg?N9oHGEiKa@2o;v0JW-zcoL=M109DIb7r}MO)jC5JmWzXP;-GrM%lz3ngC?}0%Q`Girw*q8 zpBGqtSWc!+ia@9`czP-gR0C&P3xegeP0PAqBJ#yuzLWW;XTG~I=Gtf*e;0=#TPGpO zlMR)vCmDUgJW_Nt=q2m5ug{dd7jr}_|3HozJY2R~cc5eI=zz*=mHm`vgX1R91_{?; z=CW#^8`~YG6Bgqn(xpX(+kGmx*B>EA0Q&Z~^-R%BOEkp*H9*S0SHCWu@g7Wy5Ohv^ zD_)%{4JyAQJqLJ>JmA9-Ij%b1RJGPp^Jz`E!r6Ogy6G8jcE07rv19S~7CMHk$Dx`v z{vwW}SHF)w18DGT-U6ds_jf(8+oid@c=z6bDpIMo5AViQHz`VOOc>KQ{JNnRz1CTj z_0Ee%u{S0=++e}e)3_(>tm~M_N&`Eyn{B?E^-&!nS-;keCeVVUcoA^#lSW?)c&V)H zDYwA8-K~6OfmeM`; zI7KiAFbya#p9EA7=ZKD1DoB+<9XNs!hi9F)LA~^5p=XJeh-egedIfh%Cx|v;wSq0^ zS7xKTTv|$G2$8r+rECge1duFLh74_&h5I zF}H&XW3zN(gkf^BhhCTso25{9#D+vxhUb=cXX0aj6cg&VhF5n{8zBv@CVxlvMRv$# z05}lJ;0(^NcjabnyY*xN2vdINgudt?^Ff2mjNhik@2fP^S? 
ziAQ(>$xO3IkOmope~FMDL@Ev$OoVBetU+B58JH6oIRQ|D7TH2~nSuieUGM=auqbj; z@R0=8Xcvh_d?rICw=cUlZ_4PBKp1>gs0YIcjKmmO!qu9=SeLU|JWH2l(nljpwj-%v zluk01nx#=Kb7fVM5l=a6@sN#Xw~gClm2ZfCUdff>=nY0Dmc`|E#h8c0^=+acm)kjy zcgL1d)@}2XlKEI_%GH1!b8-L0m+R@52l-U-8D8`0kVRmh_lckRsh|7FpZ!?`Wk3l5 zDxhV+pS=R0;B}x0s{fz^N*o7@pDs|5i0PSkA$04xku*dqnyH~8S1|s_Hs7*VGsu`i zv}xCbX}*VUFzIEgxsOX2KmF8;lhun@IBI|}YSkG=*f~wImSk;kl>GD%)e~;QsXgDf zV^O)4@6ZloxSYsPM`8Dz(x#28MOww_6GHQ%cqmWV>21R$5PYJ4EcJ)FHI2X}rgMUa z@X}8F6GFISPMG7SZ~B1mNqG)nrv!kfd71!ss;3=rr+nI{2k@tZ>Hyau4u^`UiK?iJ z%BYR%sE-P%k!lW;N~w|B0V*+GkQtdM=$>#2V4O1~=@OD6IiW9CkT-~uIA>^|W|}`Z zKp_@38WSV4`Tv`=ib!*qrN_l2^EY7z;hX4)j$acOJz5F?`J>vmg}f!6f+7&zC#A79 zj@>8}PTHI*BX(6PrS5=@+%}y_I5CokVl_fi_eU~c3Z~6^rhG@FErkeYs*WC}rsatt zQdOB4XQFaCrw*sDj7g~dil>5l4T4Gu*N_bTIt$}K33ocM30tU=>aY%54dWoO6HBob zYq1xLu^BtD5lgW}fT@5cp`^&EXSSEo#YzFDk%#6sqbQ=M8dtF=swwAk_tKKS!;CW< ztG6kZ*v7MAS%f;OlRfL2iA1BDP@A>(YCoxAJ{l1=f~&k{rb<^4P`6P*BNbW4Qd7Bp zNI@3ON&hEk=#5vItzAhEeo{%{FqXNBls@S`O2eA#Mt4wiHUBe>`{S-K`eA2L2Gj(a z1EYW|h-UbzvimxsB)6vnfVhdPxQkn`0MG$N(6|5~xdT86MQ{L>I{=V-xe42#9^kp3 z3%a3epbGjyHfj~)%9CDdx~sdEs_VM#xVol0H0I#3$^sgJR+=KKcqGdMyeqi%I=Du} zM6S4@amrL6xvImLL>@Z4u*#x0awqQhj!C*&`=box=dJPNv*9{4yv9)x^1ZovM)AkA zIQmA*XMl3DF=0DB)bM_^0DjN<43fDwah9QE=C`_H1{A=*q8qyR*|`Avy??-EMs^fZ z!T)`^&@p8?o=X{|ux3Uni5Ro@w>ev~3P`6$b+7Pwxxp|CvyizT91MQ?pR-T~2N1a( z5V@0E4T4&zD0~2pTdmZC+(uR1KV^`OF-egbhW*AxFhG)`r**2^1 z3L!9hj9g%r*15As8;2foMa*Xqb33d7tICTzxvlKDgpdFL0RO@S z;L5Ii!if90mTSub>ZcA6puOA-$=JQyhf&?RtQa*SP05AyF^#9FMx zNIb=Mum^+S&;QH_0Nu|5JJ#=}G$CaAkE1fM6F&7P`)oVQPI z49%HY!GSx+%1c>x{Hi4ukjSIV_?WIA#Rqp=ggLolVzj{Az;)M#${?`~yr~HFXN5>s z$(CRhwF<0kyS;k6&7MrR(@A#oaLVcM!nthD>0HZ-i_WY}%ag0iDh#<70rB)zx9JDEuQ42)+U35FNwdXI?XUg(jX08(%e^iU0&5i0pTpU zDx3@io4Hwi&Qd)N>kPy0oVgVk&nr^T_8i6g4A7Z?*_;3hn>`Aq;MuDH+Mn$TqaE6k zPzk4P+NrJDs|^XQ-3YH8(6LR>4*jvqM8+d4nsBAtlZL90*dd!(sunzD9;u#rUA)2V zf~kpSLieJ!dah3>Q-fGoM2Nm~!GO9ICr<_v3j7D&fQ{d93`rQilSG!7W!7p-$sJ}z zg>;>c1g%@A5nfjWlUfbG91N2x!lCfE$xz>j3(Esg)hJvIx;)^lkpBR>Ji-9m0kvGt zU%j~pP}akozG@j;i?lssL4U}`)^OdOj{n##J&i0z~euD zIJrAzkKYd}$)Ryl1}V zCS8;to?yJ&YDjV1dYx_pU){?^NySRsIt}8dTa4|h4e)IUhhpBzc*w!J7Ge5!)RUc5 zgX>ebc9qVs?VQ5O&g==^=^mU6AnfUm3)TzX;tt@-jZL|Q&Fm3=tCs9!ayjc9-mYqi zHM42lypF;8?zg`FySYuc5tvnweYjlR%Q7$HU2UIMt^dndP4jt{1Wpd_;NIj=j_#qo z=Neo-m(~t?;S_D0V3uHE+AKjqiY+G-E};g9aJ&Cj*)_E#SFF1t2c{^h={{z?!3 z7LDA%o5t26yd^zQ4Y;Co9xni)2-6uLLW~$Zg8%RhqExLsQDG7>;gg9HFI`NOQR607 zwRGL`rHi8;i3byvLKOv}j)J5dO$ox%Nhg$rMix?Oat=gHqISKK)$7r1g&pvSt>vYUQF;|ep z9L)vIPDFAJ>byfbXBaK7TiiwsQ}=G(H%R>E5FB^GAH#)z2mxG!@(UY4G@oDrx(5e- z0bIwP9XlcI-Mwp{USdlXP~{s9vXdUpn*a5yV&Q_1p3{8#ne5-MkAIYVQ~dqsH!r~b z1{4s$N)CjC5&6JS!j`!7*(X8>GFYI(1{lb|2Qzd_p@bDa1hK;xyl7(y8AODEg&9;d zZbcVg#34o+W|Xl;8$FC}#4L8)aig0w!Kt^BhNP&-*?cRGL^kyJF_a)hb3(U3Fgm5m z5>=DTHQH7ih`8Kz@}!|7E2@YlN2olr%Al-jsy9?fA(F|LzN{%tMF?8)PL|AMQkbEL zG78XMlydAbuC{^(9Ywz>2OO{(+NBt%p7CbUX+R^ZgISVED=1uG+g0YFu2 zQ$pI>#4p2&;8ZQNutSxA0VqkP*8gf6o5T%OAlpI^$|Aiiv(7m41XDNCNG-J#)hJEN zMYQy?4%x;`Fqy!~&P+uNhDm0(iVcQ*Uz?ny$c(nO7Qew%m zbgW`bwUzJh{stU7!vOEQaR2Le{is)mdKGr@yNSUJw9b+>cG_sMlmQJ@zTiAI%W1^& zT0Bb{Zs#a%BhFnNP2^c+l95{uUUk`3{axGRtsP!`%UwTB+=QMf<~>aDWpzLy5AJ^M56tT%}-=2Mb*sK zD6wQ?7}StdD~f>*ZRD+3mZ8+Kz#_To1V8`=@LS+ysGY&h&^zN8U~7IAnaMaVg_2Ss zvXr$f|1Ap!Y1kb89{<5Z_YnjS&eqp7ypEJJ_MCMBfXa_*zE3 zm(`1P=lj{5*yptNg)0wBiXYPGXQwtP5rGoK+7wUoKTFz1P6V`{pgw7m2L@1*EiuYP z6iC0~bj^ZnB8macc8qeogM-^x2i`WLj&>9?9miywSS)o85;{s9Vd+dYr11^VNYfjK z(#$Ukbq2l_uA7F_6kY~{n7)~%2(GfrUvPMffxH10z{10@HUq-QX#*^e8sg>NFp*a1 zlaz829sj5Zw*N}L&P(FD9U&FEMTXAK1&Bn~7$ure+xbh4?U7i15Oy9q=CO`=1ZhS^ 
z8qz)Xkz*DLr1vB$P;DvFr7vA1A|Z)Bf=XeOHZ=`Sb!xhyMa^hW1RMO?`BRO65>5bI zqO(eA5eT9(PzS7HM7)MS+PrC#3gqOLB3MB|g@m2B%q1_skqU5nlW%+*K>}b{02i|L zg>P+Z3kl#t2Vi4`%3_Koxalghf-@PZ+RI-Lcb3K6Di~-XOc4+#n#EB>DxW#lFXm}a zdqNgj^IITJMWH{=s$_qdjUwh;R?~t;G)bpDS-vE&TGo280t~H}L)&Fq+M02q6!qOj zj|WHI+W#?*9KEAQKRQy97B`~`iUYw+`bU(0Opp+LZX#jINJe_`sx-~%PPLoe?aIW7 zr;KE0Il0wU+T^HL>)b;qDWv)}C5Hv{YEeXM-U%9Y6W0|gC`D#Vs>K(qVO0hm`ufej zT?a1)whj#wd|&{m6#*?&tKmH5Bm#I~Q>ZdaFbJC}S-qvf*N_!=K>T69^tHs^)Pdaa zW|_CW&9cf6Qz-LyhR?E&yi$B*>)uBQ9{1SCWsp~p<@IAA19>kVK=P53jN}W6@rz-s zZIp#f(bl@wqV4G|e6&2=juMy4#szbj0VX_IJjjoL%$^lb$KD)IH>O#m?$TJY#T=K2 zzW*3uV0S~iXYP{ey8^kH{5tpN@J=belPt|7K4HWo5&4n=bgF;Ji`Apa;=U`9<)ORe zg`wW^zy5uOFHzxuwzP$=rlzY64cwPhvE{@Em_;?{Vyj@pMJ?4pbpqOw7F>z*uN@#v zV1i9z!@A|xMnjk*@yQ1y(FStXiP{R%VNXF6j&>utS`k4#?W-;;wbB`-bbLgKfYa~PY!cTtmpI_>4SD(~?fmLF?J(p5nSi@_XeE7{ViHt?u3n*FyZ3sMjpa*Xd1$e-Re2|BJ@`AIn4shc( zTMI0?av^i#F;Y7_P|=DonWd0>D0&e&k25kQ(>x`ky3K2?&Vw#`i?`9jDAUt7--12m z>9+;31Ap@}*BiJh=z%aS7=!D8>M6Xwsln^}zivYd>}x~26Tdi=!#S+HI<&((#KSz) z!#$is>+?e;V+*T`xf@ig39JhugS+x;s`|Szp{q2Z^Dh4CB>AI?cmF!S{|b{y;f7yW z2f^FF7s8bbqd>S|Fjx7t20TDfDaGIe3a5ad4>LOiHAFTBDl6rP1#$THIyKZ+r?yE<0Ou!-D1w^+a;G>eVo$d2^LjWh!wxxY({ijpMB zA51#0Qc1h98$x+6AQ?dKgE|I`6+)oKQ^A!aJVa893%dG05dZ9(5$m5UaK2noM5&>S z^jp8#;FB`Js;-d?^P@#aKy~W;DLzC7SXepc62wHal+4&w?eud9;gEKpu*mQn1N&v zf^?%blFfiCF7*JIEF`$ybaC4sK;Q+2YQoP0>Xqymb{vZOne2wWXZp4F|G)Nru!5D)G%dR z6;eE~ZZykP3%9yFr>?U>=KQ{{^QIH6j=C#FiR`++$;SFL#2$MGdZ354sRut&hInAc zMSDiU^1#UB!3dkr$b6y7tj6w30CDurCFM>vGRK&L&T|n>*ecB~S|}<59VM_g-`dBG zV!eWs&Dmt5)zlB&guU$iO)Wgg-UL%IT&|`gr^5I(5^c=o3>6ULy1&957_u!Mke})t zQ|)BZ%T&`sHPh4!PlN%6MP<}R<%RMrPiH6wbN?IzSDdT4f)&X-Ctln|!01W3inV47 zw)o_%X-P!)LZ$vZ$JGFe^2(r62&$}b30dV57^DKqku1q_2oxF$vG6L*@QOw`21hVF znBzZO!27PJQ& zMMiwcQ9l9}6^+)eBn)kY%;$uw#{^2t%*=enO+noP&(t01n9_!VN7pf^q5OiWz@Hr; zf-QB;``B0c=#P92(~14ZL$z3nWk}Mq*n^DC-)uvN?jb)`NYNqOHL3=*}sU= z3SkjLu+BkcQ`<})E@hs4bJT{_C@v*WXa6XG?+Z#bj8DM$zxgCPI0ZYlB!E?YqEnsK zr!7!b4Xrm33CyvHrfHUuY}EzL3dsqg6G8?=5zIumRJbx*wlc6dT~T6_l~=ny>Ql0{ zkiO>|*=;i`KWtkh^cbBifZRK>89>3nbBDl%5Mn@w9TkK&piojFtgnn$3b00YT$hB) z0~ZK@?2DDQbG!g>SlxVBiFMMH0nO$J*e$A3=+Z773fuVs1cvoe+ zMm|KGVK7z+*0okFTU5Sa3#P2B^x&nVg&VC0W=aRhwBQpx;a^7JVSb@HV&l%$%zmX| zGP2>@z+o&(Br-1Je~E+Flv#a6F5F$>A--MUT;e7cV%}}yjGbaLtc$5DPFQJM~Gtrmq#? 
znZ_mut!ZAhX-n2=ApbI;$b-CQ0P3KY24>LYod#e5{vny}j2S(_4yI9gPzR-5D`1AJ zP7vmVM&Oz~=QEf0ka@4OAoU-HP2^?$qYF z?oQo=;&Co#h)ggRRm-bWXuyUA+PzLAM$^N-Ys5Zc)&yjM-pv;#=-&FBt4?6SVc}RF zW*ls*A3mOm4myjjU;RB$z2oT9Mh3hQ*09~_2QBHVjNe~CY7bf`Y%)npM9*H3yl#-` zoW5zC=GCPB3cW#3-=1k*LT&>ZYMy59-!5w625xA2o4BFTP9z7)8VBB9l&vj>HFbvZ zEa)~i=B*Cz^#ABgD;?`+Rvnze5kOUILFMMygl}q&>u;X#ZuaK-p6fJ)Ya8(I7~pH+ z{A&gCPzD}swsWDDZfwTRZ*U&N#SYX=sKSFRZ}2Sd#%O7Kd(uApVNcd@O}K_yVB!&1 zn1}}L)22!kr&S#SG!&oiXhXV{yyRigUy`Kh|D6V(*74}`>96o;MiFk+uI0%R@~>r- zU>fouI?w3N?T|2PO1=!)zHyNj%&fF0c<^AT4pwvMhPDZZR^5d^#y3bf=ItHt7q(gT zfHTw-4jfkRilWY#We|OK>-%m~2&ZfOw(lt3?=U3Pac<(i)`xjGtgBWnb&SUvkg^Pp z4l$@s6aTR9#MbXgr*wirJ>Ht^6I39GtFLTfIOQS z$zM%Msw@;@81evi2$W9AlD=rIaP2DZARm_oRjxz3QtfAoEEcmTCQs_%UiM~Zc4ZeL zDIaM7c5-QN?OvyKB4=t}Y7{Q_RTRo}oi%ebC-*c@BSxxHh^RHQ@D`Xk863D@Ifw5& zkMDlQbH%RrJ^$}M$7}WwLjd3ReHZk3fJY2&GLTymHRBD{G6;ER@Q1H-e1C9#&&~aK z)cK(EIycl`@N_c{Vj=*l*iZ!uCy-M=hM9$!PA9i&2PUt8@@zM0TrX~v%qygG3>)ut z|Np&d2z91=;_aUY`k)tjoxgEwH+tiC?xerzqi6b=mU&yp_8HHnO7Ze;7{AL=2KNYe z?G^7c_xi1lgg@Y661jRh(I#=@F@8c=(?(c_mv8#!XN_mlNSFA1zUzSJcRt5^fFGWL zFZkwKGlgff)XJ>}hxmx6c*C#zep^(2vv;|_%w6bsl%EffhZcrN^#U>ZR3A@gsD#eX zjGzAYqo!$0ymelW`J~Ha=9clI_w{C1YS@Qvq;GoHS9+vR`r3bOmb9Ci=6#m0X^)1y zsq@n$m>8}ecdaIGa;F38!~@8B1(|z=kjD(Kum*BF6HTmg%W-?)fonUT`}^kiy8mbF zYrgxv&wD@*a4H`3zeg#AXQ{wvk?tpd#b?d_o_NI`|Jj6CfEWdg7cNkP;=ox$2FyZ* z4HG(y2oWL0UDd|zItWSQMn;@m5sXCS(3?kqXwiwJa-~XG*TzYE<}4gLcjvy{Bo~gE zw2Rh?@k9ocn9yUtjt)Iq$`huht!~kZij*jQGAD|c>OvT*O(#hZ6-Sg5}8s@1#eZ&0v7lbNIBxzOUQS*~ZbpxUT`%e~E-Ic#ll1OIO&ga;8Q zRgNRKVw@twiOe5DkFHV4a^+Z>WABpu`tv>Bzf-R+eu$0oJAQgd18Yk%<;z#8P7`p=HcvV0og8FMiP&luz5> z10zjOVM4L>y;e-}%zUlauj{k??paO;vl2~G< zn~FH1rkvXGU8n7h2cM~=o@(BDwaizaKKM~W0e=n98lV}sDyW67XXKiah8JcS?5CGD zq#m&)0@@jf1~~}quO>d%j5$6@ORcrhV0+Pz+j86OwBCXXq+mtLRbx@2aP<|sQcek_ zyQyGRC78CcsinR5?)9a<`SLqvmuAMx%x#QvbJm)$MY>j*#av_yGNkP(=!XNjXj+AR zb{eY28*|*Ts2+iwteQG@(_6t<@>Uzk2;YWBgTfj+?6V$Y3aWOQW?VC>sqW14s;$}w ztA4X?V5@)$8u;WSc&PGnvoIeU^~^ip3?XP&FPpQoPeWT#xc^^+{jJzzlRfq@d(}v8 z+DfUN6}xYv;+4EZqIs{~T>87O-h0#CZ{Gk*wadLDpM0>5<~S^Y`9OvcVIwr(-|#Vd)l$NEF^Mg+RNH?OJ%WGsI98@QOqcpT|5LQmv6t3l$wh zleH2$5{TVt;2f6~nI$HXjYwi*`O;^SDOM4eR-~dXuP944YH>^Oi{BRoN6cdWZH#JM z<1f$X%w;YqUOrhI9ZQp^I(agWT&p1|i?Oss1pgu!;z-65XUVW>H4!X!0wF3%wM}Gk z%$=gVVA}NoTV=)$UXxi5Sypd&;#!{sz;7)b)_=aJ+W%fd*+jp zvFaxTa94w~)@lMFa2m7_noy!{Em@_U7{uTz)T(NPadh1)MhD^ywkcIB9v$quMtV}i z9`;>c51`d@+3Fm59uky4 z93?fYc}fwgR+Zn8SV1nz*S;F#uSW^2ULtI}4m$;|U}l@i4|7U1E;fs?=Sy4F!d5(MPEA_D{dluvA4S z)JRVBtzPYFacL*kp)nxE82|x`C6HFV4S&g4@RGiocN;-9KZp3*0TWm?B_qbfB>@!0HObE=tCnq(SF82p&N~8 zK|8w8bQtc4i5ole$ildlRPl<77J&-j01#=OagAjh%2G=x%yG16cRxlNA;TJ??+lrD zgBg}`N>)ic6+CML@0Op%F31t{r-jJWFyj7{`1spimg5%SFENE3h(vuJlR+I-;%qp^r%ok^X*}Awx-zfdRiI|1 zlf2cSe27D~=3J2>pX@o&ZL&@Bl7UU^n-81I#fCP|qwVu$>yKa0?zw)8K5S}>mL+2z z%9zRRZ3-owPYO+mwgv1Ab8F()h&ezJ1it5Y` zTL0MA3S9M1E1^9I&ph-sxA`Y4F(Mgoe!e0e{=1GYf8^feLb_Bc{S@Rm)Z|g#f2CXms@&$yRp&{|xs9IakzSLT-t3_s>%|@j0^REwo9fw=uz6sx z)n3v~n!zNB)A?S_dmKs7i(5zIo`vEBOp zTf4Cz*O8qNdR?qIKr`SO^##D#4O{?NU)T|vyyYFK;ots|;luTx#0g$n2^17~SmL=A zTp?frR^Z}m5NRjyUub^%!&W!?nt;o~Xb)kMMtYTyQHU`7GSs0 zm^J?1$2lJ4K}0EmjK^@Ebzz=vX=5PP5g{646mekbA)+Ei;xp}>{rp^IF=8Vk8ww^O zV)OzvT298rpy>qPRDGB-fWuJe#725K>n|}qB)C~()jiLTMMk#BQ{zit(haM ziH2|K(KhxW<_RKcIi5G7<2R+_IE4ILv+q6(%+2T!8T{K1(&Z-H-7S{E z^9g1a5E>Geq+4<%I96LxvJ-GQ989ho8giNmyw1~oTO5L=9u_4-ZQciw<531;1a_t} zmS$89;_+o5QXL{wI^|Qc9#u*iWX+Vbz2lVGqq?A8Db!weFs47s%z;^B-2g>PREv-p z)BjrZU zM`pfZdv2zyF%);j+T&oyN8q7oj%GNTCIlK~XP%`}vSw>G<$%6s>OJD%6b!-S$bym# zgEnYv%mY10XoP-);XGEF911)rm^^T(gFa}24orxOXgL^)PKf9*by13{XtR|IA2kd= 
z+L2f;V{m?0Ds9v_*w$8Tof7U_kILV5LZ^>nq>$>Hc;ZoG8l~2dAuCKKG=|?l(kNd2 z*OXG}ex7D(XdbT7kmccLe9mEqMV*yK-G2%wK2BxO0S1WD#6hlTo8rWTPUty!QU8@G zLp$69KHTY@u0ved<1eTKhr%g5;Kk61qJ|pkI~1hr(GBh$DxzYLFcH#LmZw!gP&F3qUBp5^g;J60FmyS7no|1KHZU4sX(Hqrqu$6g=slf*Ohi@eh%fx ztsEZ0=dE@gkG1No25V-Z<9=n)khn>TlAyq3s7)M-GR#*N;Yg#xL&2yXQlNu9zsbP=yuJoH7_GZ*1K^mcKEmj|Y8TZ`U7BesO5qY>XhJSDTyeNZhg|L0ChQ1} zW!d6u%`|Mi3Twzttd~}7u0GU|Wh@2S>fM&+c!I2qa_L5pschZWqO2)T$QBC;w+x-sZ@f6&)#f3$f?jKZF_-3({AYHiXd(rZPGTS)M6%k z8c%8^Y}VdE?D|0+&~74#lPxgEBiSq2qOGO!Ztn`$WiH3sK5V5TDgUj;?UzR2B--w3sI~|ZqTfgtxw4c?#!xd&B|ON-hvTl@KDt56-_tkC#->w@9@9rY- z@6OmgWg`V!?N()KlgecA8n4^pYV!UV^ER*ZdLsz8E%okgQ4y>2`I7cB=wJx&_dZ$7 zmgw%?FP)O8iF)XX`mmZpD7r%KhyE}TCvHyE#Gx`RqLlB<_OP0|!{`Dk293)Y^{=~D zD>ZoTD}dH=NH7GuX9A-duq-e*&B5%Ru^i~Z12aSiO)v(-asM3muJF=vd~mSCLhSK& ztn!Aie9~&PUD(aBBG?y!P#5fB@3j&O{vv$Oi+fF&gYn8?f;WxiawPvK)(HA`vfh zRMa33Wgm+p2?O#U+wI#DvLQ2b^}`D|c!926UExYk9^Kw4ta~)&IF9-7;4>KPZvoRkt zGXEiJx@|N6Gv7XQYSzY>l;*uU=-o1Uf?(KC^Q>ZaR2lsHl^8Z|qL{WRNmFg8yGWKaK_2$tPWSj6) z7jk7=_gH#FW@8{{Z+2&Y_IDpcHazVPc7rqw!|7>tewnsrjrLf(W6wJ3=dLTysq#=m zX2%Kl8wuGDla;qfVvYdY6>T?4bgqtu_Uw4I5wr^&) z3v+jO6X<$-_(bzaBlkjx_fdNL+=s`e33|8wp7>#{`m%5_FmD>S}Zc-nr4cB>;+cX(E# zxMz!Zd6zhev)6jd_lu|OeWCek18{nqY5%)Om|Lm=k<<8X+c-jLd4#3ZZ=0=v6ZY!n zq)yU#kt;YrH+Vn~bYwd=T}ddEBQt1F`ITGwq!%ztLS3G3IEQz+mw!1_bc2r6h?tjC zR=4+eYqgr2IxR42I4qTS<0G6yi8#%|4!F$T)%gL>xN$_YUvz~&$~bfx6yxo4)w1%P z1IwUaW+)VTk}q~RM!BLp`ZzwiS59x0yDp{Yj5A}lW^4MUb9$$Hx~N}8d7Jn7e7lRM zy3x%IN~8Om*Rav1tGB-(|WL`xunO!rQvApJ%Rf5WvYY8qY4l+qS_U{ADYB!t*=B zH@uNcTxc=7#5*^WTl{>^t(R`>jBY%~f4#?d26R;QQo4G}qac~fh{~Hg%h!Cm^X$4O z@#eB!+WF4Zua+s1p+P84ebq<2gkLO_uSVoa_Kc<)*qgreQmVFxyvW1)*`qyshxvIU3EQ7N-@|?C zF)HQKefw(Y%m?sm=Dmh?tN$V~ukNoR9Wq6CbEZ9HFw>{3c zJ3y#P^+}Y#f&&i{M7R(Zt%eSB?Nat_9lUt*=4Gr$Z=*erA2niJrwv)ehYAh0QkfE5 zyLRAyrDV90=E<6tJWbMxGnlzxiQbG6GfByi9!HTTHM%rOQ>RLy6qPzPYE`RLv1Wam z6i?4tdg_D?YDkWrWafHZZK8FpS}bqv>=`6?Zl+3%;&g%Qc5mM*e*x=#t29Po!$;S| zl|(l&8OMJ4MwT3rS^wo?mt&q9*FoRDee=d0Tg-_xs8Od)rCzn#RcqI-dBp}cn|5v5 ztZT=@rJHx@-LqHKMO~UWV>p4CCO4W_adYF(oimS)m%4Q8(EHXs9v4jGUuSAn7D`URP1C~}FNiqJX7!HYNo zNkWl0ddIk)E)+|oU=8ywYs_C%f^ExR1^s`Sv0l9I!@6Na`GkZ%@HaJzY^Gtss5z^aS0UOldKG{aq9HpxJD zO0AhAW&dE_6lPI#9*`OanW1UoMbW!ejbn{^gndMq6I z2uj#)RRrV?9%~_pYqQXj_bKaQBp1nUv%S+=b5#u2ZX5oZE6Q@|W|6X#Pi42u&h!DT zmbUn2Sm1!=1ddI9hoi0$IvFREazK%WO5x2HCzo@!3&wmqu^~ouXpk!oxxHgDrnHtx zrRfC~)M>|5=F1QSd_jvYa%cD5y~)tgn01zURGUjZr{|GXN6@99MXVj2Vaa+~g`JxjSVLPj$Or=swpu2qKJx0?Zp} zurr?0osNa6Q{C${R<@5>qhrzo8Og4sDpo;=LOkT1g?{G^%Y@26J=>WPNfxtbU<7#` zVOjAwCB2_n2t&1M;dzYq1_0vCVN45O_q_Ks^NkUV=rbet9LSY0^n?&?P{SQ?0ue{h z4=bSa;0OoU7!g8{EnD28-tbqzhTtrLml4|nzch}+5ezOEyx`<`6PpU+t4>35 zLPlyXga=EQ9q|ak;%rN7)LUW1OlLz?HV;3?h*YFtx3Lz|;y^lNP!B_b%OCDiLH`W$ zr8^ar6$2JO6inx6Q{i5NnOaEM`2?aADdwf zQA(4NwM0Qo%%x3vnNwo^vX?#8>F-F@M3`)fnvIfX6<2A^W#NITUvR1pSYS167HfMj zxWQFtkX5Z(b*o%m!3(6CHFSPY0z{!vQ=KpibF|T&LlFuZ5^)x3sO_Ws{Qsao{n

    LI;fE(CF9gDi&m6h*deS&J<8EfZnmSKt0;F4 zfe2GB6_qhfstYj&Q;xaLv>KY}4t4p{VU~lotgiq(u?Yadk8x>h!{^{Gy!BjWVQ#lG4V zul$Ug+@#`H00Oe0nKP^kLg6k3DwdH5Thp5+`Y(WK5Ke=->jOXcQO@pd!61B2XhUn# zRJQiCu*45bUD}J*VYr&7yVUJ+_aGkv5s1#bT@iVPKx2{vwl~G8Q2$Nr+loO0Xu+*i zX_cCcuV{kDJ8t8TPY~o047mtIF7lC)T!tU}_{TstGLg&hj#A7_}Fc-9chGJ4V0oA+Pl*Ji^hk=1~TBI`rXgLET$u&KP*YJ8AXI zA&yk9n$@ay^{ZhWYgxCN*0Z+ttz}IKFFb+QRM0|~;h8q8fO5?1JusUYi#ar>Ik0Um z_Otmaf;jKj7K=qMUB;BJN@|;5Z@Tja_XyHtiyO0=4fMGMUH?BoLpagJ#qp$5`cltFjC%nTN@v5$Rh!7iKGYesf@q3vd?m|)rqb)YDP^IvY~+1mhK(_TF}u5uG;=Y+1; zpacCu%n2fXie@;zRqE&rNBZ7Kr}XlQI%@w!#2cT6v5Xt-%gmVV-KOjD*TLS0@itk@ zE--tP&z|KhD_Il5VBJdPa}2q&g6wIBvXniLcfF@w$w%;e6ny=6wKCL%Zc91HLr&(B zf2&$2-+V8dA@2M$q9a2q~S)Gp5Qy~bpM+h-E>wQN$2S9t~>obsb#I} zv(ZwDt=`|YkGj-lT2*PI)W?sT1BU5$HT@OvWm;SryBf{`5kj7P90+uujY&w1GIr2tz%q`#!* zH@~llR`RnO?fK;1#=U&b^xBNUiZAtc>;R9B^<)9|>TT&%YXNZ&_zq6EQU{7m0vdR) z_sXH)CXL2~Z}^DM_?im&lu!GX&$^!L`TXt^h@tvajQVH+5m0dZlq|NQ??^yq6~%&+takmcg1Y8Jt} z4(#}X!st$|wdl>>Gz_LjkO6J4!iFb_Xodq{Ow`=aJ(xl3;70+=kSzF6e*6%wk}nX) zZwCp{*IJ>LPDWK?a2Ld55N+@go9pj*5b%63Du7T3jc^E`P0cp36PvBS)UR9SkH7dN zz$ggwP;utK&GQJ2TC4?w_{Rzf@bOF_s>Uh=#$d5Z5D>{w0x9eaA&|8ea1GnA4vkS{ zXiB#-LV2+6q$Xl_wvHGjrVqvNSxnH&9Fe?e2}=_3m=ZB2{xA|%PzS{?8w0Nr_bQ;q zj9V}f@(AOv;ISPG>;1^=Dn^m;Vopp`1m)_F^sr(KZt;9_ zF?-7Et;SIGq><4akQFdtKWIo8C(;)u@EASvBm%COZmY&9&>8V?8aI;NRMHyf2_gj% z8@ucgIHVf^#~VKc5X&(qCo%BeDcEMgDdfTxt}x{cQWMuN9z8J=>oF7^29YLjTqbKD zyUic@j|#Q0@nlgi#w17(GLUM~Y80RWmS!x|Nu9)Js^IJ)9})pYOAk5HwBGLZV8@jb z#Vy}*Cx~je*l?AeiFk-8FD>wjp7D9|&@NNbxT^7N-0CG4^Nl=VW@^$KPo^Zqf+u(K zGS6|X0O1Eyqo0&=faGyliqQR-vc8ruUM&B~38j+es*;1M5G#8wG3>(dJg% z+mad+^SD|PC+8`Sa0pZ|Lq~>lOX**?=qm69kMiGx5BQ=^oUW@v6SX+)^gNi8!^jgDQ4&u#(ilsWP=WQ|C@>}Zl+pB1Mgz52 z>l85!HCm-rQFAmm7u6-#^HFhNGXoGS{s%!5XHu249h+2-wh}hokrh6bKi{kWF6SRh zRX}Z13sJR8(UnE2DpvJ%U&;USH@&J5ua!sn5LY#HPR+w$o%KXZl7)yhVOdnhpbq!S zP@1O6r5x5xp>0R1+sv)hhG=8XL;6NwJT_`OHI>LV0DCGPZT*HP0|qdV)Im`Y^>=D(?lEAY3neF z_B3^VRVOI+iA1n!t(IzuZftjSW5Ls7s});MFl4tCNd6}%o{L+Pa7OHNNi&h$^rvOR za9!o4lWg{H%XM?+HAwjE{Q78Te>QO?bi2Z@C)YAjF9B(d6HaBwV;WHDI)P3FHI;NP zXJAxp!PaUwtvW&Eb8-Iy1PQY)y)$&3He<`yXsPvV)fR0tQ`evh3@4TQ>VSQ^v~CA# zToJ>}@ThPN>?roLZ~fLPO?7aYH)rp)@peE~eRg^k_na7a9Bq|j#jvC#GHf;XB6l@8 zHFp^DG7G@p(y>Ggmfm^)_kvR?tE<+_JD+XAwz*cK|hbSEQGEMYvzBSDdgn zd-)f_BzFy)_HtV(6f*aG^Vf9k7j)YfbStntBUWhEclA)0-)i`VWmt$|H-%;Qe>slJ zzN#VFXGyMLgu(w*fFGEFNz-ttSSggv{FLn;k+<}gmrK>HdFK<00V#_UGK8VnfDib3 zg_BW@IC8reeO*{YKXiOC_j`$$YNs|=^K^e_m@jphht*?<-|dIFflEf@e)HId7x|52 z7l~^(3&L+R?dFLU_f=T{u%HbIPxeSJ@roNbUDwsViXy@07L>Y@&S24S?|>}RIE~l% zS6FZm)Ao&57*Cbf-aIB6=8{Ai`IrA#F9}(Zf!Ri*b0v0IYJ=5~Z7f3zwRMS^bshPU z|2KedkOy>=X$oMQx%rh}6%X!aWKlMhd)IE;5oW2)+LptVg;&8)*()*YF8D?eAk>@n zHI~^}5>fw{n)UaVyN;LX*m5nGm(>t|4LX{QnTDacw2rS}b6A+?SC5%FMg16|7h0M% z)@-X;iJfn`T)CS;I;2I~0NOTXIat2($7IdfchTA9gs`&!(w#RL!A`?|kT81fxl9*V zXepGI{nwHGSr5}NpmmjUV;DUfIhZ%MnB6xa`ZQUaj;SSD;3}4(En2Fvx}r0hfBjcm zh0_*2x=clQq(eHBk+O=%IX0;Rl-ZhqM_J@NxV>T;KX226CKx$X0H-gRtjH*9CizEe zxrkjis1Go;h#&hJs}K5*6FP{Knw}u~Ipueli#bMri4o63t7jOar?cm``ib1X)G1n!6!8F`-++E4;iL)vME6z0rqf+Z&`S;Ar5R zty8(JVVvbYl@(0-KCL*J2)}(V~P8_5j;FVe7TwX0qy^| z$vq~#mwc%|T)dTfvM)TlK^(J74ZE$J!oeG~A-TLrd?!u(eD*cP-CK+9Q?BtS#$nvP z|LojqR&ZDK<1kf%sp1fTe7Oevgw6Y>54=tDm}4OP!IitpzkI^Me9`r{PD{7LryLe+ zX(Xc2Egikl2VKO;e6;m>Xv6B9>N(BTT%-}W#m}0YaTd-OPtLPcrbiE+UmLyT8LttS zgav%a34FK>9LWXSq&Qo{p!}#iLD*>s!!BX5Gd;{FUC}k2%H`DHBHGcpd_hhHkEi_E zw_2lTx6Fx_$e(YVKKisvJpj5Hn^8T_SDn?Fjf>eimBlzejk!U{-T|md(+P$(!3t@RXFD-R9WfLxJF#o52@8 z+xyt!g}&%1JHs8mxJ8T7GuvW~UfU0x-`&aE&HS3l>El5@q|M#jPd(io0|3|E;~n?Y!QJY^I-N*eq_@7ez2EzHVDoi5?BiSH{pvK?Al1cH{aG$J 
z!7}yjd88izErak6*85CDefDYJ$Za1W+{~$ShhV{j1`{PSH_MNrKM)ZadN`3{s9x4G zV#C%e)TT#^Lb6)6v7|6k7ArZ$2coK91zYBlv!R7+U1LfzW4t4pseX>mh$u53AFqLTm0L|U}1+oN#bB2-HE zjX*Vc(TowpcduW*!5E&QLO9A3BSH`-Uc7_GjK?roNG?G_1BS~H9w=DA*?{NI1wxA! zK)ST)(E&HHL|k?HW)0ptLT0mqc`i{s)Gw3PP{m9->1fP(6>+DJaa{^ z5b;f2c-|{KIBaIH9YJ@{;Jt?*J-)p7^Bi~_N_~8~2|(F3OPFsNa|p^QCgW(#{|J)* zh5;xAc71(O;DKrUg86;Z$y z5bZ}_1Lu*~WuSu=YAB)T0s7~n7BmXN3zer|dC!BMz@#UDKUB{)Vza`%w6{I( zdCz;`yAwSimy9AB@rX!Fq7s+L#3nlNiBLr1HKs_#Dq8W1Sj?gpx5&jSYGQQ%BUu4; zA~Mh&V~im@;01lxx(|BrfkcCuv4(ZJI`Ry67X;b{JGe$Srtyy&D^Sx=m_ik@@Q{vi zVb!dLH5v+F2REDz4s)12A6n8ln^6B7J=m3iPI~f_o}_>%M@h<3mNJy4Or-D?@+xMr;d#_( zBJ35)EVQzo*kXVw+h!kEiq4j@RGcnt**NED$C+*=b!}9mTQbN_Zrp+^@eHXvg-XIt z(bJwt?N2@#RxWc@;7^9~5U%V~H9>*NU=4lfz#x#B7*>?2%%c=O z*P2w0cC@G?jH_Iec8Upcu2r7{;5CEVzi;Z4uQ`QjOl>N`a=z2AkI5_PVQ zou@ezw*|KrvaM`wVN>}T)zV03Old+B4tZEmt^P2fcxtFWAR#vhi47{v5;U4$Ff`xEkor_#^lCy%%Ev^Rl=oAU>cEFF#Z5xi-UyWuq zt@%Bdc*hISd(j4aSY@w4#pK@j+E~8oqHlOr3|os*Es+@IZ;Ai=Rsnz5Xkk2Xf_;l% z>pHl&YsBz`&w1G9O4+zAGLmA;@{1087{nn4^N6)uSt6TQsX+d2iV>EsMz$DiZHn=F zyB88X%y_HeOYMzuoZ~hp>b^ZrahmrVC#l;-uWChM!1pS)WvLz&8T zVeW~ftZ55JSa>`Q8d-|rWhX1z)FmV_cMa{`HSbu4;o(7fuNr6d&IvvY6~bw`x@SJS z*1o`gb)fN^U+@2RSICD>_NhOLKiNlA6&FFTefr9LiB-$>zZce=2iwlt`> zvB7eX0~y&ywYr%^<}wdi$gOs&uwgyDIMMpvq@A+~#DIXVI#SnYGXr#l*lS-0`{4Bz zw!EDxgCDE8(8re8yOo{jNrTMUd!n7Qe(BbaJXmp;``g(L&bO%Z?q-QOi2A;4-jDh3pWm*|gFJXZ zOTFH<)^~8m!1(5OHBLN$e4gtud8%8#>dt#A<}?5BK||d0fd{?#dk=O6FC%~g1THo8 z@bUKh-qfG|BkZQ@`mkhsY;s<`i$9*s~Z}jCE zj;DOL=6n>mc#%haeU^cgcYWA*d9GG9vc-8K*m+t0jueGcs5MpSbQ-vU zTlhR4*noq#h+;T`COC?TXokHPc1p%FY6xlT2ZseHhpjVQg5Ictrq^+)LXI1Oe&!g5>S&KM2#@gxlC>9) z1Nk>TiyAlH$wcrongcIgI)7^HT5*p6s7RCXti)SwC>l9C^E zh6cit!RV41g_K1(lx)TTV-Ro^u#-HQm&$m7KiP;0DU=PlgmIaTB{+PfsE_|lxr)-( zU@qu}R9TfAiIP?slD3zJA4!=YWD|3!6k`d1u%LI6>5&N1B#FaIrdgV(nVN}H1ck$z zfAgBKX*VYTOtPdm4se^enVY(~o4nbZyUCY98JvJAXqwl3GI^AR`I2rqky43939)fg ziI$KVmYYe3cZiv?Xql|Td!4xr?z9%t8J_GGKl)Ri=6RmznV#gip6WAweQA7uDVKp+ zoP&v+5P6u0Nh;0RiuzfRZJ?IZ8KB}Rntg$t2AZ9-Xfpyz5Pb-9j0K(q3ZP{cMHE`0 z78(o~nxPuHp&Z(w!JwKT8ltLs00al0Cc1qnI-fIXl=exO$%&u&`Jeyb2!s8}g2DBl z8=0fkxqIqnK=6m4750t~#GMU_dk-3!_BejVR7^~|q)z%vP^wEyAUL^HrL|t{BdY?&|nEpvT<>#0Ks*+(Q zb2}QEe=3qt5iQp#sN4w+^2j^pMhsvPp%S{BZJ8Z#d7N*`gwYs1cu9c(pr!>WoNlV3 zL|LDN=Ax6ToJ!eNfuNX+$$m+q3D8+UkGiLUx}ZqPqi4rDIs+oLsGT%Jq=*WDib`rf z5vw}Nl#ptwiWsMq@`2NMshB#6JISe@`l-wcs`N>zr23-B`j-FhmI(VfO{sdP#9FMA zNvnYBl^zC`l?hbTDQ+9tnH!{{>vAGo?Y_ZmkU9HW^yaTKtBf_Y#cvGm%s=Xwhx99gvDT}yxJF9z}x)7?jV0NIkI=EFkvW1%p zGGV&*nz)>cuZ?@Tb2+w<3$15MxjM_cyPLT#>$7lMTi*)0%p{eg+maP)yV0w_%YskIybY_T;wzyIoVWjhMV+Ucy#qXlyo$ifroi556fKM( zxBJ70xw#dbuNDlm7@Wb5OT0#W#L!5#r=i44{JtLu#mUOY^t-J`yM|EN zjj#HKJuJO29L8RZx`6w%u6vbk#h}k1yNksVi0sC2JjC56$0ur#U_~Aq3^4J@#7!K+ zBP_)+`N!ZVx`s^1<`&GQrzx&Hk(7DC2oX!7`y2w^p%hnlFUo4Vfyo;XT%c2I%jZ-kp-ptLjtQZA+ znd6GXw2Y_9K*@`W6W4{w^;^uP`p)ni&vd-Z^_<7{+;E{voYH*K`+U%OD#)#<(m3jD z=Vvn7JktuD$Th9BPW#O(VHIZF(BrHm6kWi=CF2oYNY!);0}1=LFn0ywKeE2E|Ru$h|Q^tlG=n-1Pa`{It}KG~HZC z+8yoOeJ#~33)`C$&;tG1-+jVtY#}H}YtCGJIC_DoK52Ro!3gu-+{8?)D7h?F5_5!)GVu*8d5S#8OVZ6OsTx%w5{Qj zE#WtQ=4u|~J}urGdDdtR;!8f_O&;T@jm#*X;-PG_!dvA8{^!}s+OdsjqPx`^-QYc* z-t~In)Ficwl3Ux`{M$@JzOz!094%g7E>{6}(du|UN z?deH<<)%L7_Ks0FKIo@f=#Jn7WZuq*KH&tY?c@FJ;6A_vPjTd4={p=(nV#;m=VL3wRLU>{7k9 z!&&V8YVm$9@$|0GBX7Tge&7hMbu=FG{{HjL{^}22y)9qx-tP2`oTLd)WD9?_N^|bO zK7ue##1kLopndG3+~+&b^IISBD*ex?t}@duao5}tYmfAY&f^ZwdS*WN+)mbenDXMj z&AIUMY26eLY`*_lAM7U`!tzeJ5BTn4|KAvo&t=c-D@YQ{&M4d@Pt2x^ls&cuf94z_KyGfkdO9VKJ|D@R7U=ejn#U9bm3c^RfT3qnziLTKl$7_PF1+5nK7< zFY<+U@vKB8Du~PKfeN%_eoxy_L97b$7upmpG z9^pB($j}_hl@(vctj2O?#+@Oj0j;_7-OzLwpDvC1vt-tjIg3sm8@6e{eD~V^i(B{Z 
zx3si@`$kUq@NKEklJ1*)tJURKX*!1<{rPm2)kBul_;KWR3>>&i1fTFi1oGu2oJapa z{R4pn$1j90UcUGF-6!79j{p00>-<@2?~l5dY8r65K5S`bDV{`=UyIDM!eE^755j7EQAQhSBvG~= zX+xwnAyvEWM-@#JG07D3>I;uuehVi`;;5lTvlTsjFiS0^`;f~G2@EjImc|4yzm4ca z(?0#ujIX}*8hBH^_6Ab3PCG$(kInGb^lv{u|07dOK?mK+kq8ykq(KdJnvkgqaneCU zFOi`1ODvbfRLLWe+?3N!i{vp>QAceNuhm}r)W{uUWR*xCL1hxwSUtTo#9XNHCO9d9 zqcX57E$wnrFe4>aKx2y~G|)25{FBc;-E4C`ISDH0&S$T6R?lUb-4;;nh6T4km`?xM zDM51=G-C<}A!UQnMwfXM*<`m2R?-e@%`{(G^`#ZxQgb9V;D1#$Rp3_-K2=AA7oIg$ ze*N`LSBZhk#aDdk1vcYwkDa&UKF`e7&uHTudA&KS&C^+M_uNF; z4eQVeg*heNA2I|Q<9jt8dQyjfeR$ub8)kZ7fM+xKX@XBJ*j1;chC1t38Sb}ful-#* zVqSOUW*5tlwwGvmIc8htZr_Fos%Ez>nPj^qIQj0BSH>1(zyDrg?vK578!DZBidVyO z-%VU-$Lp;$?V{0Mx@*got~@cUuU2?!%{jL^>V#z#{p+JKhnz!La3y=VUsC^jJoUC? zr+s#~znf$-!CkIf+Vt|?J9xhPCLZv>V=jDVni0Lk3kxqiyym9}mt5`0tCu}<)V1f{ zbMLk0`uos9cWwOe&EFpE%u`3B71oC<=NyU)yI9!dH@^RNm|HHsci-8gmj8y=KmHl- zc*s*0^%OX$L_`l=&XW`?YKFM!1k-FXZ5g#MZiR5F<$Sv!CrSm_N7iZh(3dV0nNCEdmyBcLzjZ0vG5pO)PFHH^Ujw z+|{VYT`-5%+u-^_7(*_Sj)Y&NnhV9JA~1pwhF#Q+`+x&K*FENgSiJvY?Rcm^9~$w8 zdK=;$iC9NI?hlFi3xyMHR>3G{;Xn%ViK2EW$2h_fjcF<28_yWRGFDQIpfe--vSvC? zb}x&$VbSW=$VPA|!;OhqWFr~*I3>=ph_Cct9)qaIKK{{;eheIlULY=^NKhCP>_yKC znIQ)@MUko0A@&@p%*jR4NRmvS>SXg6Y7WVgV;orST6jrJ;%J22gy!j%IhR{-gOqL5 z)Hsvr%2wi%mbcu%EG;0*J=PMQ26QL>6j;n)`pk-gq@W>riA-jGvY=-);R-X!K2>S6 zF%JdjCL>xJh)xun7VYFC4ayuQsZoBUe8wp?xJp&Bb8z>3XFdOYM^9L$l#co2VFQ5) zuS5vcQv9r9OxLwh98yxElMLsT$Y;%q{`8yI{2UB7r_g9tZltN2Vf@HuoVzTOpd~fw zN$rW!y0O%yM}(YS|0O$W<{(X)U7=A=@zAV)TOe}Q*_m2H-YL!jCxX| z5p3W4Xh_wb>eH}U6>C0Kno{z3HJ)GPDp->^Suy1Vpbs1-q%NDFoE}!4Z~g33_gX8w z>UB01WoloII@%_!wy1D*s%XEW3c-FfIM7gqTb_zo-X=D(kZqG=fjeAVN|v&{{3S4< z=(yw}vaHs*V)_S8h5es zoR4sc>)x;??~u*y$#b8FUqn4mr&pb>bjNEKs1#|VNd1H;w2NJ$l17C?3$JzWYTBt) zm%NM&>?m7TCH1nmo%QwYR)J_I`MwvQCk}CYj~j?71{AuGnyE4U`(gmI?Ro+p@OLH5 z;1?-a6{!I74Yl@TSZtHRLCrB>657;E_SrNsp? 
zm;%XP&N>2u#%dxWds5T2UOB){j;(W!kyN zy!C&f4eJhaTcg-)3`YH6?LJpq+x(t(fz2D`Qo0e2&W3oz(LLXmvOCfgU$jl+t!W<| z8o8bp1|gToR#8XWzsZgk0%{*GMVPcLcL5&kNNx?Ht?&w9(b?q|Gj$P&dkuZGKp^PHC* z#Ed=rV%3guvme)&9B(!+>F#2QcmCvs*ZaR=(|5qHJi=rh0R&_f2e zB}V%j+uret)&1^)zIL={JZ8goAcO2~hRw$P_IFP{Y<=`>)mPo`s^)Rj^$-31gP#6| zdj{>}6;BvD@AEw0<2j788{VV5q09dcL$U>8<1h8gJa0)WwIVqhtFixky6QtY!lO39 z)K=Kp1cT+Ziia>u_p!-s@#`(SX<3M1+ zBv5L-{^J)E6r;|uKUdqjzUr_ebh9mBL32W{A`6S}o53ygxf*Q1w)+p@6QrZ-I5E68 zP(N3mbin@c!Y@QAZ)u`( z!7nlt3b@O!W9zsBguy9v!>Uq2eUU>u)22AQzdPhQ4@$uygsWuw1Z7CP4Wo-m6vS2R zy`OWBR zwn>0J*_)p%yfQAd#`2q|@yoq!G{k#?z+X~CaNM+9_d>UAyGTG7`(=c%f?sa#zyN#U96=kFqCk#MGDkFhZM(K z~#*1V_QOrlD+sFwUIC%ugWE9CK^h9avHkw;Uwt77)oS4`{20WlfyBkPZ9K_-Y z0BXs;oYcvk+f00000e*$R%hYSP* z9R>y>BPJ#v9w!+VDJv{13gcM@30XO-e&SN`;6^J2y*APD@Qt zOh`sdOiWEqPfbr!O^Az5O-oNjLQhXmPf$@$P*YG*Q&3V=P>YUHM@3OlPf=4;QBzh@ ziHuT>k5g1uR8UP+Q&Ut`R#a74RZU4%R##P5SyhjaRgjWal9X2~BUe{fS6EwERZv)1 zTUc3JSdx`lT3cCKU0PRGT3cRPm6%&zU|dyCTvt5JVq;`tV`pPrR%2ynW1XO6WoTtzT4iQwWoT<< zpQ2`GYG-6(XlZL{XlH3`Z)u~ZYHMw3YjJ9&scU6oY-L|#>soS&bepp${2qotypm!+ntrj~rBo06%ltgEM^ ztF5oBer>Fti>+o{t(twUu(GkMrLv!dva`0csh+d8x3;&qw!Of)y1l!>#Js=3y=`E< zyu7}4Xui3(!o$VH#mB|V%*SzL$H>Xf%*@Wu)6vt`)7IJ5*xK0I-P_*b;^O7$=H~0` z>+kUJ^!4}o`uqC({QUj={r&v@{{8;`{r~^~000000000000{p8_vy1Iu3)-@<;;OA zXsw~cY}_ImoTzZ&!)O>YQcQ?2p|_74$%*7RvZTjt5G8`_h%%)~mMbg5oXJt0&5rDD z>cqLT=ec)4-39fzPN>nNIf*8tXS)vH*uYTe4UtJkk!!-^eCHXprt zXwTML%eJjswQ%FgOfO8WWXQiN0}FOZcxB0!g%b}}d~q@1#*Cjyo=jOI zWXqK?U*0_VDjYkAJc&|7IuINl8#r88jlxEf*L(`emXyfRBRYBFw0^*VH-z6dVq>~Q zJk+OD$6<46G@P~}HDQ=bzatyWbR9EZWLGj|`Elf2P`CdM|Jjv!KB%2gDI=UV^5nH0 zLDv!|Hm>{M5Bd7VfB!K5vV4c%e*g~1Ol8EVqDwFYvI5J25JuC>EB!Ip;D!G|1I>cK zba=~$nM@+$h^ut?4pmi^rQ(V#w&>!ESUnYwQ1Z-ZR6XG3UUI9Xs-xq zN*|_eVx*Evf_4Uy&Dkd9AyfKcC3{yu+1_XCsmI-yJ`^El4`M7)!kOBk*@BwgS>Yy_ zyOGer2da%A=XP+mIVYQT(z)iHaMtN(1rz+{n+q%;D#D@-9O@_n5`e&{q#9U2!K097 zTB)U+ZaOJznjT7kXKhef3W;ZIM`)a|O*h*frn&zXsiWArT3m8(##h`N*4f%CDxZY$ z8V$rE@q>5og_kCIot#JAvQ`3nYm~uaSsQ+Ylvm)k<^>oFw+3F6Ac*@Zh}pOnVwi4* z8-B;3gQ!B{$u7NEqgIMA=Bw|%{EoHbPe0`-aF0O|?4vo%OlZo&qfr>*k)|@4AFONW zO0C7Znwf_uS_+4ymwASX!xl^|+vlEqz8NygEFZfvpMnA^tEw1)x~idx!k}mko%Za2 z0-=6-X`wgcJgd(_6MaC?qgvG-!=tS+M9nwz+(dM*G8eUcgH}zQtx;lqF|WRoZ5*%Z z70aHqm#9o8bnc~c$FNV7clLYDhHWh`e53#8L?YYzf?T=48VGJ?=oZ*m;_C+PjD`$) z`|iWH$y*{Z?4a21=A3t~?>pwOGw|pJm#*Wulv~p9kXouPv27J&EPJ-KomNFA%uu=I z$HsO!rpR^6+%gbpgW)#J!@t~eYA65cn#(a8?X}K37af80lp?M4^_Y5~z0FLM8VHe= zOKmf&Ur)Dn8F8{!Yu0Ahj_lp(6PNz4WMk+2)5p&r3feB)(!}DHD*Nya7m4UnDlpU> zY#DAq0ip%sf^;7cor^+^JD}nqCku6{B6XIlms>nX!V;RWD|L|(=}gDMra)~;5ere0 zUe_ciU93v9yAtspR2q-br*~Nj9`XN1Hbhn>FM!5V8P%2;Gd($RdF~S)&8)}0C<QC8T(T5h9_qCo^6=qRLJ#t@LR!=Vn1Ws6&0iIRs* zWEFm>Oy?ajWP!4t$>^3gmN5~1)!bqgJ%zM8%NKsx7Zzv={Y_Nc$^G$)x@Sevu-6iI&86OY%z56JG& zK?@1!Tap{4C}xq72#FGc7u4vs9@j2ZzUvr8iWedo>to?IKzp~F`BWQ z-Rr76xhl?fehq~2{M0<*=em1Zk*)H?WIyBg&w$Rgf5Su*+!m_LtR)L<5e<-0p;J); z1vaA=g9loKs^L$k zU|Zg$Q5hSh@qmpjZ3peAa5zHT|%GjZ~mO9ZEoj9t)&toYG39V>PN7}9R(w9SBt$6n- zKaSpps39v85TW)KYC>s4|l_r_*aXK#+?+(Nqbt=Vns zXs*kq?DA^6IrJ`d!8;q{(C=9)=D~RxncgS)b-nI^sB!wHWe7^bvizmse#2bB3en3$ z114~R4UFJ&NX7rbaBeV!pAj$#PxvVmgj!B@X~UjsSblw8n=wH=+qW4y#Y8-@@=Bad z-`dv2!jmhyA*vaNkWO1Qvn1iqs`E|3kcmB#Atpgd#nB}&)?IUQzD%`Q{ zA-R)G9$IirhpXtur~St6roKDTSI>HG=N(n%>07G$3vQ&t&fMhso8JIeZaOnvaGW*# zt!`Sfl&ovpAnOpx)loIZ%Ps3UY zFpruKYOh8!SbA>Q%0~yzAaSdlze(^Xz)P zp-OhKfBnUj*4d5Io?}n3U3>_qDqvT~ovuB+y9~!FlLg5(QJ-zRYyB(W3t9av2mbK& zS-AhXTV7@%MaU)4mOS|(uR$`${BS_O1!h@lmd@M%b23U8o$t(lJUji|r#Eh?r(uW( zXs^d~P)1wrW+u3Ids?S^p_Y3P$aUiJdz{jHWfy#C7jVT#DC@*d$Ol{ohh103d=Y4U zf6xQH6-eI&P;oGhjm*= 
zffML}i{edEWr0~GCmBdqXUA#%rgp~nft^NgU`2w|Wg8|aMJTu)UT0o;CwEFFgU|mb zNiYaNe#BQD2Yv8Ec!Fhk5>+=o)-6FegeU3mLc6n_@f14+T zvWP-+W`$RXg-B6)pT<+TF?V3tM}W3^tgvooc!p@GF(l@Ecrtr%$a}-0j4KvLb4Z7E zc!wicW5maS$AxJY@Q3X~D{_=$-z3#TFnXdsXSIgbR{i&o-nm!y!(=4`6yiL0nePdJMd85Z)84*&8K z*+2~5aErKT6j~@l0f>;l2s<_OLlZ(V^uY?qFhf*Fg9wF;k9J!Rs6~wShF1R-fibpw z<5Z2nw~ZV4J)6dj-Ux_5R)RlPJPy?`Y>*}=vyO~tSCLq2GI)J9xP3Wl0`gmK)=fct@BnELl>OXG(>oi>k9y_EM2{NsB>27#{hNIYBQXxr@N~ zO9Xh3RJRLcXe|IpXve5jZir&hD1nVOjp68%Ke>}RS((NaRx@^XL&;;Ll7P1*c2^U3 z6v8H{28o{Rsq4f&1ox>ofqLxpnc$Gbl)ExQCSf`IJTmU*ly67zb-q8Jmk( zb+d+L1R9>zm5&J2pq_{=zIm8v>5`~5oNMVg6Eir-S!QynU&^3h&v}Qib5qm(?nIS*SvN zX<@u5qbnDN>-MNc@{$l5u9M1`m->vKpqht9vxoBObp)56&1kCp30I-f z2XD8GrFo^UN^x20pvN?;E$4S%T5;-ygSqM@yBbu5H;Ry$cpb%fzv-rO3a7;ir!I%A zinD9H$%K0vLVgOU&>F4PRwL{14Yp_z*vgmbAc|+HsNeqzNjHk4ub`v7BdL&iXscPN z6-cCN=!}{QpY=L4Z0D)IN3R0shfex-`zZtzH(mdFn)Vo=X~T&2(~iFtg9yu43oBdR zfvXT3p_N#xUdd_|6+ss}8Nf-H8(WVGQgX#voX(I*Nyuj9)2AgmZ7R!oEK9Bb0ueE5 zok%Al2DzOy6qv<{>8RXX~@*x{M8Ysp~qLmU^`D>Q&h&PV(BcOlzN_DiAzNL{OS< zlUcQ^@;l|IaIX`zQtGuCcQ)BWa{cJ5>*uKP<#>~uro=I^zF48%q8XG|IW=pR8>NyEPXunxR~^)beqNU_8#B5hih8eAH<1hUNPE{+wE(JQ^rYQom$ za~jbO5rGsftU}&~onFYewz?kTIlhmZnB@O@u0cesY^cMMd7kZysfqZ*-}JtoYs5(_ zx<{OLML9+Mi?!ZmluBu(GYnPJXREJbnX)mq&kLrqDaL3j3te``6AY|FNI}Tk#=P{# zaC}*yFot7jer(2Ux;T;bhsSaDdEWqf3D#|O_Q%>fvx2dVPVs*RyLuiRL_P-_*ff^cTHuX2ZhOJt6*i^@ut#Q-xv%)eQ)ViZ-Y zT*?Pw`^8PBm9pH*JBXXHuv%$+3zwnG8QQPT+QE`%Si_-m=GTfDg3*1;ktnOoaK^H9 z<`B><%}a68)ZDES?Q!6l$S%FjikYc04bI>H49ScMJVH9tmMkmiyv~}O&hzZf$@R&5 zGPV9o%Cl?A45)~y*0t@(sux%Yi^Osu2hdb)kF^`CB!jknMwJfqwlf#ErO2oD^viL4 z(H^EGsmPFLP1eZZ4uA{NoRycD^ALGiFeaT8x=_slD8u1frsvg^IIPX)YST1bwDD%x z%~;Mr3ylKDxx)uEK;6^JRRE>yPTee&;uyt$QbkE!(C7%6T${?bdCzVV%d^~%)l!nT zX~qG)wvJns7Hh$S?8cavXBF+im>_1vypd*xobgA(*)T$KJ=gyNbSjJy*XnJ4EVJ2~ ze|vqiVXS(K3)l`kxjHHc!x+3&n zrlirTlv%~iVA1&_gt{aDmDea8-Pb(g*ZtC*NVdk}OQ*}ohppK6X5KEQ)A@QdkNl&c z>u>4Z-s{cY2td?z5{Ti*x>9S}Pz>5J*_v6cH&8XzrEO3HZAgk_(1Ta51rD)Pxi<;E zroBnqwQbE0-l2&{i27wD>GF_k{eD7LB6S(p8D2su3JirC4dnn2-mp?24&8jsxY*g! zZ`z%K9jV+6CRf1XF2061U9^t>ty@D=<3YTn=#8Y1&Y7VKzxaBYrYhvN_rwSaQA?d3 z`NNK|i#`YqUvbi_2#nfVx{qY6(Eqe>T3$?C?&Y#>ryd;U9Ib>LRJXTIB$xw*ZQgUs zt%Y%}*Kd92bwRVz-MD;yR5l#Jm3X>xT$Zxv7)=q&iUdcF4&X4`*`#agb0p#la z-k6PjrK+z4ySb<8D$*$01)I%HQe*~teN()4W~=g?4a% zj&6x<-{u^o?b_{dq~0+9f4@7vcAk6{h>}dVXVjOd?xO5Bg95Po%&_msI-*{En5u)T zF5#B=-xIUd!7I!D9xfQ`+YD~om^IJ_pYRD!;qhnTzwoiV{-fG;(Fs+Di8AFoAQg?+S6V@H0-e>&#{3$-Y@>`iO#7rZ}T_)Jwb`d zlk$NZfS*=zD2PcYa@22$P4BD9(woR{PwY=~8Ua?fa;8#!Z4GqUx z-t}JJkjHkNsY5TCXZ9qd$1ZEAeSGZ9(A;n@_uGm~b`S3z+w7j3O6}Yb?encgx7#=vdvart68OP#k#H>t%@lzj-43CjzLLj8xdO5h0dWwahV{Y!e=cPCs*3Cwad4!Un@$M zX!#4run)vy>Xzv%m@s3=j~CyVf#ro>#XvAerrX(0Ud%swjHW!|1BuHmRGSvjTEpuN zuw&154a2toY}&PRQ?M`r!tV#bg$Hj?ocD3uAYdanz+3`y0yUdEpdR4@>k7kj-zn7H z(09C~uUAK*BT17m;O{!d?!rc3FzUI>pA)~+r&OZOq#lyKJ1YGA0FA#(S~2iHKh{d9 zKmZH;&Oit+@$Rt5(pqS}xI)no!UcD+=EG>R5s?^@O3dR#vq(RmCHUx#mUsE$Izf{z?^+jIqYc@i)C_FYoyb=@^*$zVl zaYPd>K`}*d5oysy8Dk8l++S#9CBz#)66sxdM*R)aa{0Z$&0;+GK%+c*Fk~f>y2y9g-95hbQrHTW-Y^7p}X})H^?U%T=2f zd0FJdB>ZkcgkEOg)lu;smzm|qa8*KP9(((>{Bq2FiYJ{dZA_;qDc!NrsVkF8nBmh? 
zKi%Q8F`4*vwmp{9u-Q3o3{W#kPT7q;dk@d-$v6|6)VmI(mEQZS@+}FjO9o)T*ye`BmEB!`5t_ds>QMMxxiS2WrMjmCvUEGVhhz4Dd+IS__W zUz>YU=R_nrgTVtFAzH`6T&TJ%d4+JT`wZ`3w}r>V4m3LKnC-X|s~rY!ce%5S5KZPM zBI<5tOB|omy4O7AJ+V~J6I%49$AT+Tk9*Yv)%LddwAaXrY3F;Q(%4s;@!V>C;A<8e z=9j;%8IL>2x&sj3LkJBi$XR-H*54#FCpxkTf*g7X-RdR^1tQXb2XtT~1Gm7)u_6!~ z8<^ung24>-Xoe^85IjWqNl=cEbBQydIaEjsrl^ogF+?2%fEOOxx`<~xhg z&WE+D-w;Jpr?DOC3=->uwB~|DA{?=ZC4<>)n3%LEMw5I~ydD)RWkoIjZt+t^3zZkY z7{>2mjf~JVjooh@}V)S&aj6nu~hfx_GwT8by-p7y8VrPIF_zOc?qkxGV zq(2`ljvA<8qV1TdoX0vz#BrBPb>LRZR= zhP0F#4_(^N9`+JYFFU6k{S-1}Ci7&>1m<;;S*)g!)0)tfA~msj&F5wDi`$HesJiJ* z1ynJe;vA=NOjS;`5DzuDyV?&=)lOeNE1tmu8}azqpgA@Kj{A&hLCH11#Cg!$hr616`)UsZ?tTB7>wsL?~(ZH8tv>Sd1}#xWXH8Gxy) z*5`Ll3@)nJGE+%Sm7QMwsx4QgT>Y^uoMjDb9{b2ES=`f)`6QxS1xUAn)|HTU^&{YX zK}SmfLzWAw;Vx`(!5m>SUY70TVePxobSM@}1bfmq9&6H+QudYRuq?4KOU(a`Rl%Lz z|I)&R@VsTZhCDSK-Vek88eG#}Do}1D+R+vf=nNq-Oc2a>e%hC*OmcdwPuxF`i#GW2c&p>5u zgfYBl^Jt1@%#$XEVSDJHNp-}_DX|8ay4!5_)~S+i6`^BnJ~VE%#*8@Q(%>RmArC6l z@3d~I*Orj+y6um-^RAebtVk!vI@OQd#3p^T2Z=?LBN_>8CO0TKB7s@hm^+F&(p<_j zTZOR!W^(nU=5Kw=siMl;##;UMa&J6`KO$FsM_w@hwWMogwVe7~YCAG}b*bG#2F$%4 zl`=x15=Wy;x>vK87~8SV7)AcU!jfOp@$*TYnDpY!5x@ z$zyucpZ@xDR~q(Fr!CcqvuJlW`qAFbx4kL<&Eod35xHp`xQ*rWiSS`(;MYYv!GqV* zvp)zvVj|aC6>3kvJz|29)o&d8l=E3y{K-_#5WUnzA~ZE_bRp@rB@;g{YnCmraTO>-w{vt9u|=fa!N%MfLN92Nm2(>uM^lCY+^F#cdY7gH4&yTI5( zH>Gnu*`mGLvpw8nw0GM*qvJr|JHdB@G3W|AZBs7c0=uybG}BN!_gSs9s}|^6wF&7D zKWHH9%cqOmJ_6!AH$sE&6DBrjJ0crDz$w2>aIYz=IP)?E?jyYR6D$rwhIMd>`Kv$r zTL+f#hRLJ6{R0tYL#6)%zyKsdk2ApkiD|al^n2 zl)XUIKy~v#K@33>13?iWK@n@W45&90^gvkICDuwV7u>Y4a~8pvK_H8mAY!}cyDlB% zgB2tsJ+e3eikt0&5`of>w=hCj{DN5gKJpSjRYt2##$ zM7r5a$D)G7btA{yYP1tmG$@M16ePas;Yz%5L85^B1#dIx-3FU z-LpD+YJ8C$8YQpQ&ABIDUxB?OP z(wNl(%7L`Sz0*ea8kdEmq+SFlXGjK+EQtg{3SvCTlYELYV@Vad24!4{f0;>|W5YPC zOv}_so_sjj8Ol5qEzhLDJnTT=sK7uwG(;55*Rx7?oXV$c%1mKIK{UrgQ%!oT3$Fvq z!0<|ce6FuswLYVt0^Ftlb}CD(WH`K9$Q*3QO5zszkuJN8wj~@tjk`s^%bUQ_n2szw zXShNQQi=CU2l*>T#562bYD_S6xiPd7_i7=@q{%fLKsdz7&%?|*^f`VUx;*pA_W`Q( z5duFf#|ttGwSych^E^hoZEmvf+; z@kC6;JWnHyOp{vAnUhBMq|8q{i_4@>DZL$x;l`Yrz@=-A|Kv8f=3ADQ*fK2XcbvxrPE8iQ(5`S)o35N*w#QT z$Wes8<%1BJN~cgLp~it$0ck`dCyMF!Ee!s$!Y z-AIW8Ox7L7l6>9zW5$;w)l(8FX6!$&rODz=!r!ggK$TT`I$J8uPwh1{xGhc8oVVuv z)iHHS={-&ECDwSf-s{C)tNYvS-NWwX&67=80`-q@`cLxh&42^7J?)x&j6MwspAkKf zgEKfKjNiJeLe4FwB79U_yWdy!*>cUhrqw=mNd}JF(a{B3O!~;fRNw^`sRkB@Glby( zWXoM9{g(bMD_VMeKfltli474jO1(<$(`IK=42+;zWJrN`juj$E#(@5)XK$TQ~NdZ!dH3~ z;JAWXZHU@qa9uKXJa-5~QgzAC$yzr~MmA>BH?GWu_D6rR?l>}H_O=3}K9e6u4({@|V7Pjk-csSE1#X->f% zBJ-WZ#l;pxykK5U^%?Brc+#=hpqZfSeeX2~Y+ z? 
zXo$gFIO-e-TJy!H{YcW=n~@BWdU)&M#+>3_rCjc-2#yB-8!)}DNm)YJ`0Qw*J1(xF zQtCG1?0!J+ZQF|V?mnhl@rLZk&N^>KWb__qo^~hpCg=I?X-28bU2R9-Bl4ol%}d5_ zqc&~G#nVspQ`+|La&hgeHs#nJaN3U4p7q}Z*QBsMPPMv7vxX@Dsip7~N$VlWaKr*} z4&T}jH@1-)@r?c)R>dq7pH*a1amwvRXLhX`=WZn@$HiXh@K)!bmg$&&NA#}jnM!Z% z74jg*!>#;j&!$%Rp#qvh>R|FL6SJReu6-FhVm$9Up{*3I(%1D zPuEqaVrUlxq9yQ7Ht?OL$Og|YI!J4iSlW6ma{ONyfC+YC=NDq1ywN%K5O3o< zcWyjiuV&x#4j%dGcJXS5Rzf#fZJ*ec-}Xe;*c{KgOZ-Ph*Dak+_e#fk2!m5}KkwRP z@@BT=7_6>(ANqnE^&rD{XsM)C4DfFWc!7uYS-V_=cS2dDmxbrl2dBWHhDw%Xo39YJe#P30`)O|9I%0(&uw0l27sbMCVG@?rX>Pz~5$_ zfnFr1Q}B&>7sk(`3Kg55^dHap`JQ~SDZwKD@e@vIJf^{be3e&es4M(p{r19TbTjRgNS=(Q6o99Cd_a?Y z_NV;cd~eK;d2({jmz{SfCuXK5b+77gr3Z*usT^5S_UvFUgsdI~q7%o=8i;!);`x)u zA;wk$HF8Adv7;DlZ*MOE09Dm4)YJIf83+FS`g`!- zf7w3}0SY)E5d{`uUVV*bz-aM=adV01Fo3!%Dz1*SoNo)eg0i!xfyFN#Up*o-rZ*y3fFA_1Cc8(s=x5~hW^ z1g9ag2HS88Ca0Wk6}|>usuEfV-E+&?3Tt=cA@|*c;vJA{uh;$R-mn!8VIH!-6-TT9 z&B8a;sLF1D;SAMgcp$dd0@xq7-9nfxxEW4+;eXM}>guZ%sA}zLCu(+IW;B{O+K0zF zq2fm=b~wWsjUGtGseo?GS4JCn7P!QcgQ_G1ZPY%rN5(&vS9Zj9NSQY>m)E?^v{> zNh>`VVUrH&B-DwS^eJflX_}&jpmu6*xu!~s>a4f6ifgRkojWV7)ZL1OaP4{@ng#D{7YxMU&KCp`lq^POlpZa`8ueJy@uDcjthh`p&Ep9;q^CRT^$AjV+7q6< zx4lF$O^i)bosgm?wW(RneCVmqyRybUu6fNgWs}b1#+EY5ifwFk%6P?=E>=X?9VLlMyhdS4CKh(IX^PIAA~KYx zh-sQ4h_T$_(V8|rF!robV~mD5%y>pxIwD@Gd!rlQSjW$WkUn-?73Aa>D?M^BY#Rh* z+Z2gUKkl=Uz&a!%^@+f}JrWEC4O}6M)i_dK5|a(RWQj1RPEN+No(ANcYBrNOcX6p-_!v|O2u)}KS3|gmGS-3- zO{E7NI@eDw)-p7LYxyp#$`!U~qxo`SjtFMfm2&o_x7>(LC3>bEPBW&dWdjhQbR~`D zluMt`=`oRMuw_C~slH7a7@5b4FGckjIm^b+klQ_*(PA4oV+vlpDcyAFjI3h?A1yzp zS+#D8t)_A-cRe-9ik`}@YBmoF}P{i{0pOt+A zfCDVq#_|WV8X5sZF*@1EX4aIbgjZTw84C8*5|#)-X-Y|}VbMM8v?7t$>7E2aJM^+i zVlYGh$9OSYToBW?z94Qtffe+GS&SUGxz5wezFk^%iPm6+cj-m&@x?FRS=z&wcK7Lf`4HLg@=w`?7agjnuCd z>WSvWMi9&I8?b;ad93Z~`OfZJ@0S^TxrkO-!df#YiZ;t((uFp|9!gz>PwU}`y^bYv zXhVtJu!SkfcD5}>kuZ;uTiw!_7R=N!72%=k-X=AAo}lI}a`a>6B7c13uVkp`_SMVm&rI(X5zgz3`+GBwwJ*s`Iv z9m7B!Cg0K88Pga_G4IqAR-<|zf0+lxt>To7w@hG9g|SpIT(nhOZx`Lj^;mbUwB-7_ z$pn#durV|2C?lIR8CAA;ncZw3@Vp9&i}nM9eqJ)ScaUYywyzM&?QW}0uy2Nz&C5k@ zawGWN=q50q*=?VyE4b^>F0Z?hWo(V+D^nn?*OOUl;a>uAs0>*rmEmwhmr9y)tgx-6 zE%sU;{vw(|3?^-H+xMD~>EX_ByTR6hvWo9R9!iS`#EZ_Slm>NAiZf_KMBViF>Z)t#cjh*Y!1k)+n&^^_11k;eca9D>}(M4ZVD zj~%j%#Tg)u0frtn69W2$JS?7@xEid19OogItpQChgr3ld8j)q-(qvNyM$M*)pdXZy z8eO99mEd?82+rlvvH=<(9T5HG+zbAm4B8;^t)c+=ks`5&^xYu11tGn74R>@R^cf)! 
zCgGI$oS)f8X}zBN`Qk7-A+d#G4CY+DnV+I**cA?pnHXcZ{M5ez+!e_LC|n{n3Q6qr zL2Ti|<^^6H{-49a7+jc%kJuQ;!O0&|-W^5-<#FKT^&R`CX zXEG3IexGq97`Q}e5oV@;flFdOB{RC_ON6M2;-rRp-DXBs9ML98*rxe8Pod0cHp1qA zF-A3_p}{>B;93C_G5GUYusfa#;kXBEUMrZJ>V?9isTPh$Q zwj;0}O?814TwW;#;-eor6~gSFp^)J$M7)?U}LsxpKhdq4qu=O>bx@lsJ%ichXPt=R;Yn>Mzwv0(qUOHKI(dW z=vtjsrY5YyS}KWZs=1`16o%lSsb=n-n3UXUVqza_qG~m&DhRSFFX1Lp0@E6nEKLbQ zMLZxm&S7-wh?P=BTh1dq*ux)Ur?Aq)Hso6ZjwP~EX$Fy@E;QoE zdBS^ktFLk3RH13OimN$&l$;voCgLeFwxB3-q`ST=*#2p~(kr?YYT4pzzUnKQ@#~fC z(Y=l)Z>EP{U856Dl{#_A6!fD}R zgr-HtOZ6!KC0tcPRp+ju$_mV(0B`|gE&?NrIhG@?g{9~gV6W$cnrk%Q{4uEr57 zLawf|4i6y$(}g7ya%N2Q=q%|0WX{}O)B5hyBJG|Wf}5IeXI*Wvk#B(B+;=H240i3F zeytBbub&375I-oPo-OtAp!NR$VDmvxNygj2DpXP`*7s6@Xu_@S)rP)6J0 z{%yQIh{yI&0;{na`|ob;W1Es8{h};Gu<=QeXa2&R89IVcA?e5P ztW|WCkbVPNKB5VKRdynT>vnMC$?hx2QcWZRIIdw^SgRNPu{a#93d1K0L#?#pRP&&a zOcHOoqMP7qEZoste)fS)^6-GFD-cs#^s23cO0N-@rb#AhO0LGyN!ykQozS5VZS=>h z7;M<77NlV+Gjp*s`(&j)EZ`0<>jlVa`JAp&t}@lJ>~wP`f?B1T0w|D-1!``@U1M!c zu2rs{DzFuaEu4%6qE>+a>yo)}T+~cF+S&v%+{f6OJI3xc$*dq#MO)NGKrX{=k>gBU zGITL;RLMjN3uFsVw0%a`UD&6i&9Jc5FAaA|D)+52iU{ID?kJbXdNpq;&T=g~FG~~g zExYs)Q`>~L#}X&#K!tBo&IZ6XXuolmaAeEkcCi-=b!(n74zH683Z>&5NNbVUGrRK^ ze)A_xbq+16wWM=wArqF^R5mMVtCka-wzI`jGCUW>u@17%TJ!_v$yZ(UloEyGS@7!u zw3%}7S;MT~MXTS%Y5*_v`auJks6!x{!%ZM9=>6^|S9C>V9!HNWDGM(!)TsQHYN=3e z`Ka^Bwr>y1a!U{YacB4GVZC%NzwKu7vQ@j60X3+31y~W|U~1SOO*Wkg1vPEgb{9`Z z`Mz=vv+MYQwFE(RGcT?hr7F(h3uZ@dacVOdrchX0B3UN`k`6Rp6Xas2Zdh4QKRbn< zU~pBqOa(6rcT*8JNp8oO=fdeuU}GEuCed4v=eAO`V#l|9d#mBTUDZm&Za((aMl)qs zK^_!}-kR(#Oxb4N24|mXFb}I@# zS1Vu|t;h`jHaSFTRM_+BW+~7fG!{uNO%TnkE`tSYgFt8%oxC@E&$nBvv-#Z);xPaSBX;KWhOKcqkn>g8wjsb84N7t!O{^yhiwEPPk>lG+kAVOX47%k*j8r zwyCl>Q5(qJqOTmaZBi5C!Q$^0gSfxBxPg74)<_focH?rVpN_(Wj#q8BS9CT(H18Vu90ng5UW{jglzHISQ>{`o9l(As+@t6jfkd*mOJk`D&stH6Pug5 z$fOSP$N0j#M@esj1w8ta_=;OWgh?~Mw~+@{`Y_&G>yY63lwr>`49F7aR2FGSRJO94 z$-_Ie??^YQgS-Udg(nkvk)zZ?q(n6-S0oy+DI$3Wy*OianDA;$NthgxM|y*{PbqjPn+-!^A6|a zzvc#qkBAQZIR`=5qL(sF^6~NKyuN2IZ;R^919h`&8uNFU^;>lku(|c5t}c0SZ;FkG7-!&N@#LX z!|;ktoi&|M@wo-dnJzwGVVt>hCLcd|`o!V!v<8c)LYnB9dX;L{s#7$kU>LK-@c2Rm z2$CU_Xz>EGj|g296&^NSB5@^vjP44-8{w{Tgo6IfZC9vVEJj)d!kyxQ6|^ zGv&}v6q-@F2Ki4+mzF1AXyaqM zjjB`pQ1j`xigiCXcb%?M^-tc3mjG2^h8bOC!9~De5?tz&0}oj)!3Y)fWf*6Yu}~Xr zf^i0#4(CY>M12rj=NWDYG|`(Bt%(MlZCXr+opoeP2gMdk^adH1Cb1;{n;6-tCqx^M z;l>;o-8u3@5tR%PpL(9z1(c$0krKgWh7s_ZUslWp7;9j0&XQ7Yd`KA;Uok`AXrqdV_V<#K|`DxMg96s$%gofIod zQ>irV6x6#!<&?0y0u!uMzyJVIKgT}nfCE?~3l6l*O5n`1TEi=?r&vos zjkH{a!!^^{u)8il4OSCv35Ap$OxV?cTNbk_igS*k=CFk>Q7U4x4!5oDXcbcNGC9dl zMEh(A&pq9{4L%6_o7X=4^plRie4CvwK&5(lCSM6b1r{X5_art8_5v0<>19drB91}g^ z63}zYG^opOiX1v+qKiKIOpi@OC=f+@1{Tb3$F;)I=?GmE>t4~zx`(Q%Cbzpv#kQ1e zOfM~4F22ql^{i0Y-c;?l{W1(yx>rpM>{~0_dsb>c&7BA05rd#%0`UG|t*9rferNDth2o2!%FRFoQ& zkbpf0u;0~1qEKO*4aSeZ3nz3~!-uC?Ug2wqr1)hfFK%=H%^Q(c2m73pZtkxn;z98Bk_cTmt_1jFkx!99?$LM4%NbV)6R^{oh`DDze0b+SN2WBm{JV zD&0bG$2HbzjaOgm8sWlLK?_Q1Y#Bt`2G`cXx4G?ua%s5f7| z>V?G0oUMjK!N+XPa)P-WaNcz+>kQ#pnmNj}C?~BE60ur$0N3sk7&>IlhHBrTPPk47 zl+@{II)5q{7JDeV*wxEFQo9zy!UzX`*{(g!5?=7uc(8yita-*mo*c!Iq$Po9di?X4 z(auq%>ZObr1Kd%_kQAg6p-g2gOP`45agmut>3B2$TN6j>2fI1FDP#W=nkE6LqGJg1 zb`KKB(-^qGQeLfrlVg_GfOtU(w$f8@JEGZIm_rcK@|K^{MPis2ezGiU&>jrwvU9GOQ)h*gLjii}=}@`y1#^3jhD zJ=eLO*N*TZ#*-;ZaSW#p1jq9oAZWDo)YO%=8K z;$8XEIjxbAKA|L@bg)#QfEHG;1g((r4w?;xR!kjRa*_PjbTp0y5KSGi1vtKU(R*aH zXocjcJGl5L#pCdJCZ4E*a1Ci;Eo8;||Jq8My zK8|3+z1DG!J1Cm0cL+PF<&uRO9PDQ0y7@h?tHi?HDDTqBrOK{S!z#=t$EM5QovMiP zrd2Z|cglS0TY9@%#G(odzVRI$=zKFQwz`?RjuY^Ee%h^T{xHrybfr*)yVL&SYoBpN z<&SA{9ed%F!Uqekq{Z+%3MK53FF78D2nw+dJ)^_Y>+BxOXJQ_k_{4ZrF_PeM+4R-8 zcDv<`SMAOe6z{T`9Np@U<<0q}a#()p 
z-L$nAs=4eXwx#-Iw{SZ!;a##b&zxrOlvmAo=IUAZJLd-uCAwN|TsZgq#3pY}oPY*! z803q=pp-|IXw|853I1RNTIbP3saK-uwVlU;aVhYVV;r6LuxPm2kR#SusZE`|j-_^G z@Oa1h@aS>ug}l|Pb<&hvq2b^aXvR9T1@(0H2xRD%*S^*`kv?YFnlNS9Lq7Cz|0Z5t znUdKTT;{s3-Q3E>aw=Dzrn%t)ZmL6_R9PpCmr>a5XxkFp;y&hh$sL+||0>{VPqvi3 z2{{H&8_j;xny!M__Whp!UB7d+O0kpOgNmYz+bo+%QYM|> z-b|tpqtM^Etzs8Bjmd761_ll`-cq=Q=ZXccSgCh5x#t`r6|6%eq*Y2r|l`(TgG$Ib}xzX@Sf$~kz zkZztDPVo{h(mvtv_`|W*Nb)8wwkpr!Fb_m>ETXI-k>o)GNl*0d!2|C|{E4#*hH=iY_VdII9ONh$QjCK4~O959_WEdnRY)2x94 zqeo40M#NHP14obqKhP3Guo6MA5>-zTWd;Rd&en2ml%7N*U@!)Ej7b@DE(V=Io3mKK3jw0+2bz%~}AiaFpr)%1)jJO96Lp z0T(b7O347ps3sgw!V)1sT&@!@55>@D9yW{BL=bugG85(DM*?y2+9Mxj#}8$pedclUF0!DO=NjbU5`lyR zWuy`R7b*1mNF7d#v_4A%apn|;N4$1a5d5it^ZRLe2@k|5{PJ}IWO9`Z;Ea~}9==17Yk$N@%-gc3U{GG{FX z{fbR;1lG0$BTR8KIdikV!=LtB!+wjzUC(=~a~+msPDpKmw+ z5nv~bvk8wd`%cs*hmt7C4EfmXD9LR&yHj13F59LkIq9k$#n3d*Z3gRz?~^_qGG`?6 zK1Yr*PwpW5vob#qiQR8H}f^!W4`Zf0bPq%jwXUHjDaOc5bku%u{D z6h0CaL-J77{hBNm4UB|X#@m(%EwRQbjRH+K?6IaNiS%_;v)1Z0yo zg|h2Vl~Zbx`)u?^U6t%!Rp55DVA(7tqtaHj5oUq27L`;EAvSEr$_;a)?!eM%TJ`AI z@tIx+ORvw1eLOioA`>A)VweMPCtT1g`{}C^}74-_Tj^@+!&Y@f@@m$?h zlPXR?HE_i;u>#wbUTMwt=EntLVH@%lGx^nD|24<-C+HF-$l~e1`nHt+cF-mk7FjFR z2bnKoU$bK~RR|S#MrHFQSU@*BHgbovEv$}JO?JFcHf49EiH5ahUDoZG^j1NYC4+Nj zqc$AXQAx*TXNzO2WQeWIWetdyL5kEWuNG@j z4=}lQdE1me!?tU83*|b`dDGS+UraLDc5ZWI73LOEgQjl7carjU(kc`kjdGig4DB?u zVgF7Hd%dqGOJ@D^VoK}_Tp6d%!0@0Mv)XPhonj0P-312=Zj zw|(7rx-izx*@HC z&PUNJHn$ja7MF{G1B{zuGltS)X|#)lF{;qGWJ9!cwRGwKZYYYYa*L}19Fr4AA%$4& zSlyD89I=sC!C6>ek~syXjseeLsj`h5EqFzTg)fbPk`)&mLbN{OT1yyPx!_9=*?2c( zl3&<9FL@xlwt6>NYd!0eb2yZF_#&s|#Tf7fNrGp(R~aChPlLFXUHOk-nTgV9Z`oxe z(a~YoHF z4_J|Pni-gM99W#oC7e|naDnUl{tXY>nf=<>(G1z2<+&KWLy79yoKYB%lR)Aye&+3|f;B`eHhHV?J4j~yh-raK|0p`6QMn@}l%xM>eS)or zM4Apqx~Y8{1&)oeSK1dDciv`NMxCyvB?Fn08K<+(i~Scd<}h@P)T+m^lx{%>#yI-? zZH|w6isjB7NjH!6c$`l-X9<_+wwVdoWk}O`g6JtNXKiSwaD^LBtj9W^&6;YfHmy12 zTfvsC2{K0BI!@_t6N0+Iu zk+ns;7O#4oiS=fS+NLi&I<50qrRWK8TN-uSbUD|z|2KH6`*3PqA${rKol1gE9PIE8 z>`VnlGGQz)m*IR!?|Wzjh6{RRz7~_`A$zd0vVNhZR4p5|yNjp)x5b5YS3TD` z{n)@WT*KF|Sb8;ArTLG;+1{{Dg2^10xrHRA(Jb9rtK%804clhem&r2%pCHVUul2QV z9H4XD$h9ZBLr+hgcU?dI$kBEpnFKPSm5~SJK_g<-X@=&koYt>AkPbVGx_mu6Q^9Td zL&N-}Q@YH|@N=PUCL0IJuz07ZZ>S0UR2>}4|K^-z?>n0DJkQnl&1IT}2>lBoxK{jI z?O2n~v-8hs7NyY)wn1a2#v>3$)0y=4o#P$2;XTq#Jkr@oE@zyKZ2S{*+<88I5|LcG zFN(TBeW7s})hUvcBN`=|{MBbn))kY=YrUhdoM8Qn%Xb}$=b5*Ieawel+09&*UK-p3 zoQKZ*C;8Xe^J34h&9kF$+f~TikQr&q18F?cAJwC9#Ez z-Fq|(MH9vE$>AqN%WWfJq}tKfGm;m}9w9xFT6nU^Vbsc21Un2)$KEfiTdr|1-o^Xj&SJ_xJY@XzaTIXT0&PKeefBrY6vMoA1=w%-1|76i!@nF>hcN4Hmpnhn_ zbfhWxEFW$pub$sK9V7o;_m8}}$=>(l!Qc|U2-K%7u^raK-03ysBLy|5vIyk3>ixDhZB(b5x2}BxCko1taX40dLfAA<0#Sx91 zFLX#^VY6mS$vk}27(@j#l+2kh|A*Q1g)1jWGfoIya>dJ4nKMM;98~xM#|$1}KIshl zG~^wl1!X}Mb~WqGU|el31BQ&8*s>Ze zhHCWbCzBwSv||^9jhi4GrazVph{!h`(v@9QJDPrm$t0{iXb_n$$4{0>f$qhJ z;CTJ{w;y584WS%w4y8b0|AiObPyz}Y9QXl-OHriMe+y>lfQZ$Juw04*>Bb_9Je(Mu zZ%J)fL5vnvM1@Ha+Spu=2;J7&j>5UrVj&5Mcia*-W@u3yuZ0AOA|tus(MnpR#1kTY zwB(3WG125@SB!z8iX%mJ5=%}&Vw8xGg#gt`RFQS*C6{q(l?F7fkVeohpnqKk{3XNl8ayBP0mU()Xqnlac2{E2>;+bixuBuuz zuDQoF8?Me0givKR!% z-33efhEg2;Qs--PKQ26>ybu!k5<*BdOhmoLkwof~9Z6N&l?7{QaKvbWDcPoTx;An! 
zW@2d&aJ@DHXPiGpX3H&>NruTmY3hkp&wKLO=c0guBj{R)C5kAbccI28S;ssYXSL3;E*JmIyh2>8cAXiJsr>km2*zi_r|v z;+yw!`?@%<|G%zxcp{D=v-_Z~4ATyA#79!>;oeNrk#X=*-kQho9%lq{?jw_Im8Nu_ zIp-lC=gj5XU)HJ7V`_5Ev(G=1&5db87foo=NT1)9IAf*5e*5mL|8!tc?=$JEpqY9Z zsGH)ywNs#eJ&Um+q7-K-+u7`=wzVm6RBdX*+^}FgxGl_X6g${%Eay3Y-U( z#Vie;jzb|N*S5&TxW?V9a*>ln3vFNpd z3sa%q|CW+dmFsdTA34nr_G3A@k3iYXO4t7n+SF+OI(~{BFc!*JK8K2Tzp$lr~*ex zL=k#&%-L`_K_|%A5s!Iv2H2FLDu;%We18li`j)o7LiQyr=woDR45$m6jY&>8a|I=l z|IxKf5Re#|++-&`xyM}u!iTH?ASsb2L0eJto~m4BD~G5Y4nQQ9$Z1#SI*6hV_7ZU| z1e`B_dCU>caH=npzGF>s#i^-RjZoJ|H@G} zb6!;gvBD}=ze?9%(@d+u)`~1|y0wkl)X*(IcieM+(>W6a%6nb;R*9)M322<-W4mZJ zDy0>;2ew0ER)C#2}+e(QWXW9@g6_PHkr~Cd<^6;PUHDSp5kV{12ZpBT?XKFiV&OY zr7}{dyH(7A7j-f0VFgs^-N$8Da$6QqR=G7^@@}rESH>?{-?l(r70>@mf|q;?TZWQtd!3LTjd#PEkgaIwsYQciiIxella74e}^=G2Br~xPdb^ zg=-URJG%MwLV88#i!Ns8C`%c=>8*1Kt(?RxL$%9Z26K16Yr-;b*_hhy>Y5+)%r}3t zKHwxPvlq-tY0me~P1G~&{?!F~)*F`O)UYX9LTD)lT-olWjXe3xU`2m8pF7(MuI~6@ zq0lkI3X=(C=g1oauYne}kX}?ety!)>LwtTIxuY(H@|2sJ)G05=HAGF0LCG&_@;FD; zP<{<#GsD=iRwE|`xQbg#5^S)rh1!6Da8M&Kl-dh6afcm1UYALV|F^@CljquO9$kIf zQ%+brqgkysi?y5Owl242BqHugRqbLHZ&cN6!)QPEUC<hV0bQ`Z^Qm16bfX8qmGgsyUCMXM0aQtV*Yt`f(bAn*&` z+f>zR+wG5Cau49#bN_m`MdxJ~wrC>I9`omS^i^LKf^9RB{}g>@czm@_6|;DZH*aL5 zWSh1nUerGr$ONc|ft*G%xB(}Qp)nYfd=+*^(pN^0*FZk8c^PJXXW&1vmNG%ndavht zp+-^j&}zIVDBrLbyaj8pKs2P07aG+*pS3iFlonNk8Z1=`-#|bgH%3LFKejf6pj0+R zw?MD*NJ}S5u+SMq_6ylZY{wQ`RCh&x)i1-bMY95BZpC#PR0U&WD*W?X?00WR7I^Mw zfZMfIz0rZ5vqJAdOfz(U__sm&!EOq|e+gI|BDKxag%m}x(IrVcX5=K2swc@ zIFVT**EU)6QhdY=CDwAxXmg0fKSsw|zjuUr0XFAQTGeJePX5 zH5sPIg|n7mW7CCA2Zl^%hv4&crF1hIc!A%?Hy^@WHvosSvvq&db<;7316c&sws%yP za6f=$l?aI9(j=(IfNJ+b^tX0p){rBT9_0mA($cp(lc}kt?JZM+G&DL%0f4xP1~QVL-`?XtHp*Xi#?$a!`SIVuDA{ z_ch}43lp^$hJl1OXASCDgr`xBNO*FT!WYii|Bd3{40ec67f6MBpfy^kH5eJwj^)Zwvf|7kr%mb;pLcQR*`7t zegjfV`xkcufo_xNiI-SqSm#8PRe1OH5D2(BWp$7z35j^OZ%vj?j?j{hmsp0-H)UXJ z-RFJd$d*S5i@V8iJIQgs`J0w@Q=R4-GFg|p=wukz6v%gm!&pEHFjq6Z*&uDwk zc$Hrfm+$anEH)2bsg2tRdpXB+Jhe4cXcTFgYc}}`jj>WpCtM07QqU({a7mXOc!sSw zd3mXPat2L>M}GR{b*33+hlx%0R-lWS|DX~%kqlZcj~QKImpG33h`!@y?h=s#36ecD ziX%Ci^rm-z*qMa4nyh)7uX&1)M}C}8bhc@MZMlj=35>nzn>}fxHpWmwS&jxqlp!;E z1jbJgk)sm!XkKwrxqzgdWOAWGG-+{a)_I*)d5uaKG%)va+317Z$$NYhbqWProR(u| z6FegIp4-52WcZdfMP#B>oc2MXC3|rm>}t)9}1$MGm`k$q5H*x{?=QtNt%uKnQb(a zFS?4cD5Kbeigp=cw`fPJCvmyC|6xWMq&&qGLu#Hy>QK*Fr8!8R6J=sNSe4!=k5A(c zOk<_n_&z_FrRzX^Ja-eZii^i*n{%2_P?&qqP>VY;rC=drrqBy)N|)dYK6iO&!v;p8 zSDb;#6M1-0G5M%-)u-);nD@$i*X~WF!^q>8jGo#o4!@8s*0*3sH3-{t0x#etaz(Ix||Is3$RFx zvv`L|`jlePN6l!R;CZansg+-;QDVWfJ{zT>rVQc;QX>A-H-sDy}`54c$7fEETmj zg*Hwnbmy8=+Lt`dLaB6R|FG!>wgGvXc9*aE8mRxeufOE4{Hlm!TYm)$sd0<2b9=BJ z!dC0`unAbA2I;kYYF@EPMSL@=c%}rRN3A$YQc;0n*2<$wD_gHxj6F98sXDSk0i*!u zvL_=GxWKU!b#y0ngw+Warj?b!7JH=ea!s>7^iY+;M}{Fwt(jLw+t;EkT5FKNwD>c% zX!Ae8hJA7QP_QdO9Py#-M|XA0k?UfwV|#Caimzr1z0s?-h3KG)!**q7cB^(?s5!9? 
(base85-encoded binary patch payload omitted)
literal 0
HcmV?d00001

diff --git a/resources/open3d_visual.gif b/resources/open3d_visual.gif
new file mode 100755
index 0000000000000000000000000000000000000000..02b1f869777023f0766ac64e442b0a1c70d44def
GIT binary patch
literal 947665
(base85-encoded GIF payload omitted)
zBt)$Y<8e@_Jv*Mgeq(sUub-1_`m4Q z8GmexUSmO?a1ColQA4tJGlvBLK_~q0SFWF(*hEWH+1TpczE)<5ctjr_3;6_ardDtg zwW)>&xw<;)O8x(@?z$#Li_CM0xICRWa)i}vgK|lsZ6ar)_&BRI-<+js_VQpg9Xl^= ztkD3I^q$`3ND8@-wB1!#98k0^Xxya=cXtYRcX#*T5?q73ySux)Yk=So+}%Am1PGxz zNAA6+dvrhcOZ|X3X01^*_TK9o{}PFbc*HOj>$PHBU{Hrb5=HKqLJNPev!rG{U1eiy zbWAKVyAr@w>>kCiy5kil+@uuBkd$kDVYW?C++7yxsjm`6z0j>U;v`|Ct<)5kY1@fP ziqMb4t8xS-0#Nf5LDA^_`tK0f1m2qX2S3AxUZxE1n(UoIUzeZT&~e%iQq8X6sq{Q$ zpQOf~eFPSY0WvK;!f_#X{J%~PwoKtc(}Bpc*4Jt^#taQst%fVG@x>P#qe zpqlp%4ZF@GUe1JiO@S^G(bsMr!D6%-pr9C5)RPMX$eh`)U*O5>q2b6TLCh2p+ZTx=1g~{&jZMWjB1Mfqr(T$|8V=)92#h$R!av{W02=ntRefe^a z7;BA{9LL3+tN~!-t2%Cj2R8RrQD1d~h<2soujG}O62Dp?Jtw~n^EZaKqE<2dRpJmj z=+R>tHA#8hW$o?P4ra|>Q+!zj*3R@?x2qR#O*N4x6X$OR3lyGrytf|qQ~VIJnE0Et zrr@svMX!<{87M76P7Lhuxih_vF|krhdFVQ_L3dSaq+)x>PPA zD%4)7;Q(JPrsuZ|U$O@hRA}{PI2d#~9WWE=0uBsAzX9K`&gPD`^+Txm%NPL^ z`(2T9P&jpqC);7DOn8!l_zueK`DGz(C|n#G1&FZ(`QGynBE<#i-%dVUTUTodog7aX z{A-g@y|=_agEqgk~&3tDi4m20n2T;cTWnWJoQ#+)031}dr!!Qx6X zmqf=gGLr|s9z?RlDJ5& z6l1#RiJ}?~FVw}mnwUn=Ly;@f-$;>Lirbw;r=R=6xU5=~A;lmp6}gSXYx&opjsB0Y zkE@W`5US3$OMPfklUAUR7HI$)L;4C6x=#ZcDmApdlB+~`Rk6PF{n=__h(o3#ybu`c ztFoUS=5lFWpVn06iz@Om<5%wz?Tsx93-*CJkVKm4^@dY-CxfX z{mf-ar0(-voegP|Sk=Xc6rsN@Fi|lOXPv9AuGS)uQe?%M!YiK7;?;l5(a%MIT=k;M z^_|+j($;@tOa>~v*hx~aIDKCq4AB48zR}Qfl561KOUVkXks|fKk=Gl^_`dWu<8@}a z)pP~AizpN|9Fx7sWeiEU?+tVG70uE0VOXr*keD!Cp!pJGM-EPX;E`L+3KVD{=DH7Shr4$k~N!-KD_s_0Hs)q$3KpXum-vQaU^#zOM%gTtOdpcN6N*iv~-?TDlL z8iK-kfT-5hcZjFzP4E@}W}ZB%x4BjddMWp4H?4Jiz0LxDIVoEIl`Op}DjYv}7x6-) zUqnq=h#Lt*jXx9XmW$fGMq$e7^AT&gNQP@1Di&w_Xb|h)YL$Ao-3)mxjP3(wK*-(r z&hT>I4kBBGI3|I4n|sS4@ECFK6?aNgIZm5Y^Ly_N{3(@zC6KuO$x zu!b}eQuB)BEqA`(#8^J1 z7_k{Vk%yME3di)xVvnAV$f`HU9A9m6d8RaPeUz059;j#yN3q34wdPTC(Wj$yQHODl zE9{XY-n_DBI&ZV&|IRBHCV67kO0M$)y_<_uW7dr#)`f#EWq83+9H2c>4Y5Q}bSIGq z0pg`i_NOV1H|u59u9reASzHEOvm6%eQe25O#K6mBW0J+5I&LAZfFU;wlp3P#AwSN$ zeK0AA*tegqtD@h!@#d~jtD2tXW0ZHAGWPA;kmjAi!a<^^(sBHC4l=)=XED@aQ$(|^ zR+1bypn|@v3+4%6SQtBN&dl@WEq+?B^K2fveqGRDGER$62b1Z6etLT{HPZf=(qik~g z{crWf1hczg6A_19CzXPIm3EQ_Ren;Uv}(=VR9nP&t|ez{x%mmR{8w2fD!0UT%IapS zOB3Q-m*Kg3AJlK+_I5R{=SbDR40U?_^4JNXQ3AbUuEe1hp`#tOV#1kc?kgh1yX)dl>MYG`S?0P7`M7Ef@)3W*7s`RRfXJXm* z=09JD{*lrSpf;%&F^`R8v!~S8Ldejxb#CgtM7wvb`?jO7C{NSgcd`dzbzt`;=b2r^r^!#d+kc;8(Xx z@49b0=!S~No)N|cx4Acl2;u_FQc5tg2NNfqDDrPl#cL%u9c4e6m}%uEV5fvj7#pmw zIucaWp+^VNcbCmdW6VbBQ00Z??aAOYz=lU3yN$4Fll4%}K1eWDP4ngDA4j498VgE2!8zESB&7uAGl1$wRO8Dze1wlopnl2k>pSucI3wEw zI=bM1#`*`r_=yvNJlH6ikazezj>Drac#pfR+3}Hi9tL= zN&Xz^vnK?>j5|`i1?&@q_mR0$urdussf9sBRCFCHW__ahd{VDwGNl#F)XY2z-MM=Vw$@!-Qt!V`&wKWNLB&R)ksrIaQgbj-*uiU(gy&pfRB8RPrKRxCkITjbwsr z`<^;_lsqk2VsT_czg#maRQ%2k=ZzKz*l=&J;)RCDZfgoarjSjVH@UHN05p4UR@Fy= z(6&OLsA-`!tWhg#1ZkX(`esvk?YCFw{8%Fx_|eSh;$kNr2)GbvHWoFmrSKCa@RKDJ z-qmKGPRPS)0DZv%uZ(nHSS88!7JoyR}i;(j&LZ(q@zy>vJf1se^MEfo`#C31&Md%0@VoLHZ|b$7r8At zeEI`*5{+e;9D^GI0@e^7aR>&P71_1z7d#Ea^KopIVTzsMk># zHu~w|<|F-|4{h_oGr(tQMd7NuYQbu*jYs*z6zzQmr!kl>uRqaI8^}3ANCU&skJv12 zArU4Z*eJzW*?us;58*(|xMnzuLAx@c0AA9lgbZjoYg38mngr?61nNr!mZn_|>#0*h z__uQUW;l7lGAX^0eA#YQK*5k z!fjFu%c2i3(!pcA6qMgC89no`CIEIao~f%3sgWtR&ILgjk|U%km?GvDe3Z=8DJ)Dl z<1Q`Xs5`U*Yi!$~=?_*KXbHG9M@l(I3U^MK=UGk4z9)t{w3ImX>DACp;m6>JlD~`Y zTH(-<0jiB5><2i9Rem!jr*h+7kHRMjB6jD%lLd0^ui{Y=id1g$?}^kt9J+Id@X_jj zq*MF{pBTX1vYOdZ>g7}x>rK+E=^Ck2#0t}#RPKU<07d#I$x(MqrMnaT6l%S8BY(;u zg8Cv?IlSN+lKbR_7%i6@j;fo*!qd2KD9k)8C8VHZQP@z`QA!E@5Y6r3rq3lP_EL>C zT2!IoEA2;=S`QFkg?!@-r)HeoU6J@JM zlVdu7tEN|(Ip3c#>i{R-35%C~X`1gAe6DoH=wUx?*#&U`6ioYwaE)nnb>hXtoQ-1) zjO`qOP;~388cQq|b1|s&)DDtq9Q7}1Gsz&i8ES4EtDOw?4A-wmINg`kWDmcb4p*tJ 
z&)2}OGn((KTHG*)A4A5k@LzxYV9}+%C28g<`gw-bI;a@!lQuD|h-1!!P&3sal2WE3 zzHHEa=(^(MyV>D7?Mk)O0%}I0HG=|Sn?dcpc%c8V?vS+l3*bAQ6411u!7i~TuHE?3 z9&uS-z$!`ca>EDsR{wO0iwcLh=OGOXiQ98T98}aLFV|XjYFF1GB#vNSeC#T#>|u9! z*o62?9@lq(OA-69lpPqxonmDA+aJi~b93Tk1<&u15wKHBdy%YrglU6r#UG?i)fdc{ z5GCd(I*+u zf?Z7$o3RHuKrf4@y0ejhN&I?}ip-408<#5O*F2Oig0-d1FSd$vMzN2@{UGeSmLspp z5|_7Hq8xH5n09}Vs+0n=ub78~oVh(8Pi%jtC?`C0@2i0vhcCx4V{?K>JCpimQ}~J( zIT9K^;cd9Y34hQ<#2I|k`E!TiLSWc$&yKJ{oC#60SkLEilXW>BqE&?;2!xIwW#TR& z^myhPGYsOq?#|-Eyjsr#RU(5W97RYneF>>^I&G5DdLX`V0r9Xk-gF%br|MAV$Rsvw z39Tp3bPuM?YpR8cBeq*afsii#*CbLz6>Sh%@*8~Mks0&73x%C24VhQoa2GY$@`vAd zE(`T*bFmkRpK;XPw&dd8l4rMJTM|ZFOd6Wqc?#m+$YBUj*LJ;JGYrfou37d|>>+>( z88;)MMbKG+m06W*hVD8{MGh1&kk;S>)%@ddv#uKp{34T9>-+uIE;;(YW8P68U9*eN&1p^ z_$z~x!(#rpINNEIjMz+VDG}y`X{X^9aZ6Jg9-t7P_ zRZQ4(oAt#Mfs6yHmnWC{I(#B;toM0cjx7jcq}T*01f62vWbZu-4Bg~3;1jVB#zc~) zS`efNm)W?%=w6VCI;rLeEBQX+a z&dMYzi67i8!evqKB*u*{PBfFNd(h6Ny=`+iUx2Gr4(1z3o#ohV+tmjZW9y5s_IM=c zfV6c&y|xAQa6EJ>bGtK3ng~vxJ|?uXzZ{$lxh+}wbM!8V2U~V4zr8}fLS+8C$|7n= zW9?)520zoA%S*N>mGYH&RWH9p$-NSHV2Qz2*KCVD!#~|1)h~~dzf%P3;wP?IRkt4h z1|`EaVU7sFyvCdHE&&eg-Q?Dx&H`e`96t|b5JUq}RsRqn_)Bp8t<6ZTM8n+nDnvEV z1&w9OkGg<)%+)MeFR{9z!L}RM9wk?nm1~DGM;5EP3;btxPCMMif6XX=)w@3rY)OMo z%ww)S|AKAl+QZ7-_XF_=jnxF*gnYzuabDYu%8>9iAbvor%28NoxKN3vGha&Wuf-21 z^4Y(>)ZC#$7rMgu7IYnh)0C1r?xy1Jy9Td{ac`tI6#y4sknKD=ytJk3TEZ z3KZ}(^dPN=PY!AJKE=v*SBuA%f(%m1S!2!mk0hqzBrZP02X+m<{5s&Fb5gjMv+(BV zEwB!UKrp!_7H9b(Nkmb^%DNR55+C&zHwQ3w+EYB_3OTg01>^j@Fhbe=_o8T&M2>3f zsCoU-m+*RAxU5KuSS{(&0PX1~U!qqaJLlg>AMCR#b219Dk!dQNYem54z}?sHI{O<5 z(xbpa-rucz{i=s7&c*RCr-&vGau^?->Aj&4ha2C6VUP&~dGB^wl`ROkua_@~heL1i zgwuFk_BkSAg1+y0deb3f zVkZF)SQe%x^ZL-!X1?>3e)fwuV2C_vVMmqk} zFR@Bjheam+1c!Oo&vPHXubfT$L(*c4+cqtx6aRvsy;;_^ln)1BrUpkfpU5XL5wpBR zb)^o4TWE5p*)J8_4A&X-2A~RbTyL}?X2k0ET7D6Oj)W`M$31aEmU;sKthdl35C&a3 zISn#?4Fmht1@8Xr*6ei>v-o`~oG4s*G{YvwPxI(aVL6>hu3Z7X!}m?g z)w4alUi5)yTh^5TlXdJVpiyUi2}Kh|0GozkD9f&^_WgY{a3vy{E{h$lETZ)QHKem9jcauKOp6MWmJ( zQpnp@R}{q9Dof($oaTjN@XY7B3>??g!lB}7>ted_G8XYHlxs#hsSR7#j4f)WWU^Bm z5VX>8Y3HV8O2I7omeOdRNZtZrzz#}KB*>0khY7w_8p`eZ_8P=FvKsXlIBm<$?OE1I zIUNV5S4t6!mKhqNUM@`CM+^~l5@5p|yoz8sL|etb^`{uENSJNMwo^EYO@jnZGcM!x z)LgX=9mCU(y`u^Cr7=GoXkGJ(Ffwl&NIO{=lc&Zchm`Y)xQdqDnkD1YTu{L$A%wIW%RJfc=kDp)cZ@CCr z@+DZg+3e0*CGhXdoQ0$M_*{7Z@l5EB;OSRoox@2DP0%uLAwUL*!#ehNxIJa!~|U z=;0<9e^dlsCiD!Yl{2J>W+cUNb-cj8GC^{5yAxL^2*) zYMs61EW1wQXRr*NNgo9z5K20QPu(sCWIhENVb2v^NVjC)d1EdGkj_fWyX6&Og=aT+ z0Tamm;<9d{qJP?xT*au-Td1&8W$t-VWzPMe*4l=v(pP<;SKcV+=II#Mta(VNFr<0y ziv%f!GkldZQ)xN&CQ}=5D}s4maO`{gdHSKAJk&$|^V?)C0db;|-Gv!7GDDNam&t^f z`wx0|g;`NOo)I9}qQ7P%CP1R8o{BCG@Qyj`g4Kpe)h+z=(lOzS6@C|+-4xC$zI-_3k3PyER>2ofms%BC zBg{SFUaE$AjtY2|)whv8p0K-V9ds0fKmmxh1ea`M549Xgw3bVCo!5NJ_1tQXFOPe8 z-~ea&oz*8SZ*a|CUMX=OUCf5Te7BK|D_0>F6g4-Y^D{}vfdx%Ro=z5bEHG)k2h++q zbz71{uhvGXBg5<^%~z2;DGD=taP`*BA(%hc2?cynjfTLt%DKJ^%Ul;Bs!Gg-8OSz-NE;OLj1|TK#Lb0&P@gLeh&LacpDUoIPV9D z&N2!SS*5to{s({5nO z%`MXmu1CcC;f!_r^oHl9#ekkQTKDwxWpN^3G6+Z#6SvCTrF?{NPNu+5%i>aRvUtqL z2tfS53(f0I#(_!Ik#KFb(`v`?E{av%%skYLbW{FLABk~pmcJmx=g|?ucwX*;5F{aJ z;7__Yr{Qo6&qGNs)#+3oQ#5wE!cz`xo)$x|y*B*%oTCriR;1)TQUPE5Gpnt)ZC%Gx z#sglZA%Ua~_C#&w`-Jaz@TBvf-aSkFx(ki%DlS9lgro#K=KfkdYeQ~|1Ow-@BBuj*@4;tux()4C>l2o zNjZyCUh8mC4L0nwpg{B%$?;|W@l$&S7qRDJFXSc7Fc(b%NN3+Nz`af|eHM_+?3>j% z{NHifIURq4)o({sF05wm2*tEJ@O;cTEcOQ)?V=V8bc6)Y0$J%|FD`~z!66R^JoREq zG~Xm)Df}dD!kNg?C|v!@*RWD?y_v*J;pyC5FkCKI{o$>Z6*?)C(yZXs!*!xCmm~O> zQuxQq3B;Dd=+)S~@kIJcWeCm{onRwF&oQ+#!rN&OdV?TgX)$56{q-eeF+CQ~PmBYW>L?HiuBB`-iw1h$@REFX_2 z-4HojgvPd)W84UNa}&1r5Ei~`?6NE#)wnP=CQn5dsFOCTzE~}`MXczYNeJ5daw3ru 
zgThossKX}I9@oxoDjcSScO~sCsZOcUwo)VP+Yib zX%^5t-F%fcLO9xWoSL(!i^I9hx7{UpriygFDouwk?}8=fjmaAdFrqnS$(^>Sx-TT`)i2P};Vs7%HwQ4q3K%J4>K z{+o1hxKa6tx%k0Y8Mr~{8EK*}pf4?eKL=Um zCpo>{xgh}LpVqmVlU#cU`9X+A<;i$#Vx+FonZCS)K@d1X^BxFd$>EVQr5HvF>G{3c zxF&kajrV4^h=kr$`1v@zgdMEYE2*&LXfp>ID$dE_@L>5F*Sy5T0(`cx7rBhB;QT?{ z95i|I9ek-b@;r$pF|Zz>H^~8K5CP{7CXZe*%-pM(EwEOuxH+3|bpr#nrv&?}SD<;0 zJC|1*IodHkRx<$k-+o27)nqn$!3Q#ECHTViE?m~^GdR_`(nz9IByz; zOtog2)w{7JKTYyQC00rdMt2<0O-0ow$Jm8FEe&>`HrXYK06;I)$eY$M*8|#A`y1 zwCoTHt_>CNvr*`F4pq$SlJ5$`!gEH}34^SX1N;TJdjos=4{Ya08d80`{eL&qE0DMQ zU|w|F&v63RU5Ip!;o%O@&96G~<}*{@Gny1KS`_*)L&&_`S|WP8>BzCzuiAlw@DRL! zPt`hZ49H%Ej^SQ=GT**EyTX@D%+}uozo|HNG#fZEA#nwOe+SWngCIAz+JdGD=8*<| zD-`47M8@PeaZC}140d^Lbs{W)DLzfG6JyB(w~}ia+g`5*mtQ+1QmMkPFbkkWz8`cq zK0vHo^@2tFPsm#Bh7B6u2B{UhkraU5V=)v6HJoZ+x)rc{w|dKXaM6Px9g=$-ut*&Q zfP(hJ0na@mAJlYtsotwjZ!`rp5ADcNgZ?3qsN?1^xx;Le9lE`R;(n%LaiiG!)v+5n zy*fHVE}go?btSP1)4{mJ6dokKqYm3mZ3;5_X5Z5>((AFCF73wPO|f}-BDQ0huuyay zK@;-T-!-12rNwn581QtkVhy)2rXcx`@4YEsBsIHKPz5KluU*$RrPGJ9E*Hu|i|bBn zVmF7J9Uu`Xx9jW`)$69gLRoc_%^>HDX}qVA?^05Y?CAqwQEa6?MwL8SJXN|I$;qEc zlFba$I#MH(A7rAvdS0u2Zc#JuCC$jMw!N(XR;Xb2dl}b#2-iH-&iIMzgJwlAhA^>! zHZfHmAVOJStWA0|y2-cp2*TWEiD^N76a875k;y+GnpZ{j@~G+Vz6nDy2(&TF2fuHx zuH;i4io1D$gX=lWcr~}{j`~tgdv6l&=4oDREEG>{n<}gjz!Zi!!MoxDeZJZ8bOltg zlPr|5&=rBvh#UR~1JpA134xo97YB~${IAr=BJQ6IOk5C3%QOn3f=6OA?$t`uT|eD= z8W)uyuCXcPNH7_}B|Nd56l>KO5(FgP|4uBYO!G)ccM#r&bo0GI zmy9;f^$4P5CA+y8K07ix51hsKp2v!-RpQfph|{6*!Em|tDm|KQ2ld%9CWLjNy?#@)P6`&= zLqB(D-Dk&jt4(3ZN`V^m13(Z{iu1=l`s2f{9`F!HD@kx+ z1LI=`G;4+7aBQ914J*OqViEHYH++xZOho^hT49=!G0){-VcRqFU3g(D-m-oG{*l9aBLAf)gDESaDHosxQ zZV1#zg9~ncJtwF!nx0w=azm~RS}JmMHJ!HzqDHo-_+b=0rG#jLlHdr!7+@}kKDktK(X(W;P`7iC-#__<^5;bHG~i9c_YZh+WT#)=eKEOp5H=f^2dt$HwyF z?F7m>#!yJc?Q$j#*)NsA4#ZsLUCli@y43ARN_iT#g11}tVR_TU^ze5j?-j$YaGwsB z#yRE#@Fv`#dp+%L2L4DGiCs8LZG4X@Rg3(M)3Gj^?Xo5uV;P_7z#GsXhyfYooDk~% zwPoOasE`-UD2-L3r2P)&D+?a`1Nzx~3wV?EQ)q^Q>0?F2^74ZV04H?2k1|{IAx)0e zZ-`cw&6O*iCezUb z>e_q!Y-KUNL!=Rny{1B2g@LPoOwDj$Cy8V`0`{edXb4&cqrv)}=}0s-i`Dk}z4-*` zRmn+JYt_KNVtbLhJs26f0MkB?FoSJJop+rPk&Ld2);(P*=PHc0UL3Z&J#I7JuJtP0 z2<@1b^SGuvN|SINcqF(A|Lu@{W9gHT@8iRK~ywkb41{|w2 z)E-W_LwTM!o(p+Vyy)KraLG;U-#_23YWD^tZbe}*HJ!!a2)An*-azu_ALUaJzDVlJ2Dk`PATWSV{;xjReY7`Wd{;Y~j>mmcB~Boxo_0};icALV0Mg`s+F zSj0dOVTqy2jCNyqx$X~gbfsw+73+RWRb2~s@Aa5@f;n|E5wEhB!ccH!Hn_+jfMil{ z8i=uj{wU2&ng1$|eSAvvV_bZS|GlN;qNFDrJ)EuuB?*&KbXi%N(Bnx>mh$I0@!sG_ z6!U)1_Bp5sgb_I({@OJ<5EYZhFoEzfuc5AO;PI@bOQ`8o25#l-w_!J+@1g##Unb$U z5{RMRns=rS%dKsZs`X9J3e}vwOrPb1b1gGor%EeMB%+O%sN6ayrO;u@z&t-GiO_WVU9L`0C0h&(`dj4&>kA2HD zw*hs!Qp_}!@CalWL@K4QuVc?uM{Y@0@kxqCj$BFtIyg^lz}I;gIWlKaUL+*-Hd^IS zUv$Vibjf*&u``dmmZgQ2X>T!u<8fL7*WbN?LCdowJ2aNN2(gAh5o&R98^*GVvQqcUC-^o*=7c-4m^8ZyeD+}_uDf0`f&t-S zWX&ZZuVd-#r)o*&aYyrgQ6%qLCyR6yLxQAGAA4WBY|N+$1dp#u1*%wr2$tq0jC zlqpFC!%|u$Z~14_V-395P05lddEK!^wa%42LCRQ8k97oC2dkrX&@*$izh<(pF@t zi__e@t7RiJ3RX+YnVy>Ab#=*z1fxea;lR|%j>@;Wk5pAqQYYw6L7@-_s#8Xo@I7cX z@z1E*&%e&r-BqWATGqXKJ)w`t!vrL?Qdu(mc`)Sudq$nBH=nZ@Mtfmzqr^DP z{1WATQhWW?$ZjeZg0q9@;?m9`cO1;5>|zex@8EQgC!18X()q)q;w8%0P+8i;$VbZ2 z?Xk*m&8Wj`%iYl5LllM;nt@{F<{vD(1)WzN}uPrvx z?eR|jU#m)fZ|Z!<*}e$)`^OjpKFcE#%Q^UM+7Oy{J>*HtIrK5o2wsNQ50K#;&g5r` zayHkCf67X>wZ(wssXM~@R@cQXXogiul?sq|iIu<|#pu-?Q@=9%`a3KGeNK0LG?kgk zx_7&Ks`|SC%|jX5t4<`sTA-*LLQKrZvD}H5clJX8TTB}6&9#WwzerqZI3mr_K33B^pM@Q0?CYI;`;MhKG;Aj}&M1bLCq{70ic>e#{X>`tB!IsyM*1N}h9XW*Bo47C zMsdwcDJV{DC{AN9PU|O57bi}iC(ckO&e$gonh|H(7H7T|XZaZJ)f>Wvkq}tI!=n0u zeE?4Bx<@8tQ)Of|5lYl zJgwp=a)~eP*28KH)Yi5xMJXEt8PNni$Z0UEDXtW|9q;@7m<6K-OhR-u8tA+GYvJe% 
zi*X`YzfySLTFhC4Bqv&}p%(dlf>i{RE)-ys@htB0o12xr)N&+Eg4rPrB^i9JVK;l{ zM=}L{#Ac6~S|K9}?m}N?SxY0)3z>mpig@gM0AuIi-|CjZnm-*SV*Iq4=7gRmj*KNB zA^&HBYLM6vwEvkP7_mYpO-qShGr? z3%{PTivvh)Or<4X*T3n&{w_-i$2pP;TQP(A<%TZwFmI+R`ajTGUq~}ZJP6YNThIvq zJ7`J_zb@4hi5MIfvl(4g)9`pA(txK4m*YX@d|4|jxrn$yS8DnJU!7u{a0V>xm^w09 z)edB0ZoRVC#(#f*ULdNP)QgoROQe}BpqqY<#3N$Auu;9wi8G->yl4@MQ4$K*!*?Sw z=paE11TIM`7DuX-4%7ZTHR|fTCFMX5zdlh=NJFW#gShVhEAt<94x<4WpUGlw+WR-Osm-f)R*6cW2?UPn9!bHvF$*Keh0|@Bi;&J2s7$-IxZm2Y>lj zZ2S#3G0cGp2^E0#u{ku}(>iXl(7(uWg?pj^VTci&HbCK$4NnyYJyCTf2b^E2D%O9t zB@zXxFt7msr`chUc8~xF-2a{pEvm);nhnRaycyJFI^_C_mQ-_ew=WV1!C;S}c~9Pl z%wX#d>I&Wrcc5|9?D{=t3i^-eIMm8Z2RI?_w6b1I(S#xolp(u18=+c(;Y1nJaDxT2 zNFxlQC2Q>D^bk)s_+ivgn8W(4jJ#Y~H6b*~_g-*cNwMm| z-nNJR?TOn#@f|FtCig4s;D0s}rJXfQZixck|IG2f8%b7Qni)SFG&-OOh!%s}!zaAD z!~TgP$^}xTIbXm`Msg7Qj+^tEBEvF~?Z-VhKmB`a7vJjX?qG?UA_`GrI`F8}I?;Pt zQ;$pwxMb^WJg|ZNvQtW%m`xMdie_;q ziC96AOVTsR)k%c|0iat%3z`yuP&iJXcdIi}kVNo^q$qz5pp3^0_k8CMEg%Cy@dXVH zg$FiAnGDWP#*DBTNkfP|Vf@T2uTmd4J)%n77NAd9Y zJxu>H+~Ne)c~`E1m2)k@2SA z_uKL*D7oRF$M4u}iEe+kHHgWd_$v29keJT*olyB{lR^Pi$54i^6Gc< z$$#m4Uvu#vh%5k<3INPmgy;(}mxc{?yOH{&B=r`TiXYhWIuz zuGeYvEyLj!_8(u3!fr!oH7eCf>WHdsa}a(KytW|%dl60+h!;V37}XNq$1y3aR8F1C*?dl1WeT19C@^Wdo!A^=B!oMZqT%#7o7;5mqseO1Z2l8YwIaUP>Tlla2Wq2qgC%6C@aH(R}y z<)fXH86W)e^}JY>`!|i)LUQzZY0_!dYKayf?mxczl9$ebNILVPvM45MBPGu4n{`!H z1(Jz5baVDvbSt%p2qS)59H*=sHY#wJD zscBBq1>ueKOTiF2Rr9h6+zist{25P6=^LxK##?--&nuCrv^MFw#PQE-uFq9Eu(2Ty z+(sI6S+p{og}+{u-#vQ1NilwR_^krNnfW^mX9ev4dyl_-MaMnB)B^4x z>{LTAn%~Yt74FcMT!J^Y`i-3UKzzT!!l^UfNQ?Z* z;y;qe#)Xjajay2LUl`ZNw&<{jJhVU_El>+C4n&VtbfiIM<~58!4uya$Q=@DpNx^XF zh*rGf4=I$zEmHCydfX!r$wbNRN$!3qbYmuSG%8yyrjR9>V?=gX#WNa&hvLa16OojW zVg7@P6q+EDZUoE85tE3s#L?K|*vXerNhLIVOjdZfN`vTelt%Pn4;@p(PF_$Toa>%J zS|||6DKMH(8XH!s$r6klv70L?2RNzp|HDq|k%`9?r!UKSOj8DGgXnZ7Lm~%D@mUj^ z@YKy&s-z)YcEp}m1g1qJVo#0QbCnqBr)Ko1Pi_`OpX6*L^m4~J&pm^K^@M0dW0Rt- z@vS8oO`<>;*|l6&#R5M>q&PF8Q%P!Zi^kMb799glKL+t|M3L!CVI#P^oQQWhy`n&p zNKmPA^o&gOAvaeURdd>TS^~ zh>qg(BRtg3eE_kGa#j(j5$H@g*XYePrd3ap)vZkU!_MJ8sdXU%r73~hHKSl8sc!v9 z56@DLu=211wK!u*cY)EP;8v}--ECZbyAWlX_gGpiWfO;a)a3@Whbl!SfjipEItZF3X954bd>cVg+j9DzHT#6GXC2{c?$e zd=LU6xmY_q?GH-|L@g#a|H2$f>K>QNky1Y37H1R$AR@r$#1a4l0_i4*3#C&D<3`HI z)bK{nJS%-RGS5jYDH6HdW;o-QvQS?15`7o}Kw}^+7{EYv{UV45XjH>Z^zuN$`&@g< z2+@po7C;4I3}Xbs8{W_dlLbBG8ZXtkns#&q2KV6=bK2BKNn(u!+*_x;SBsJCp{8S< zs&NY931qNiAqu=NoyRd-NnUwnGR=^vri2jU@pOCm)A07AS z3{P-fjdW4n=Y3k@ZCsRKMg&d_xN$(0YBuz73%3har+Vk-82a!H7p4)Xg@FeFVG`yI zfuIlU@C2n5bOd2w2sk2hg%Alwe)v^m4hUld;X}RDPmDJaGz4fnNPGW)3wuBSg4a?S5pJec z5Frl2KnXx_aqU%!7=lYHv=DOkNHErOOw><-5D1`HP#n>N8iysCCk~%BN?r121OR=1 zfnv7UAkW}c;WS|kp@~dVL&BGYNHVX8=RoKg#l1xzq;3m*0li$P`;AdS-) zAdCea*#h8n#tl_z_j7iid?rK=lBC00`pX2Lxbq>*xo276Crjkp3cc3==+4 z|0GcIMr4f$e5dtZILKv)G!nzcUZG?aExCg%;Rm;{MSf6SqPQ^EWHG11f(3DKYiD8* z(NzWaVob(Uy+?%hR}eFilyV3WOlblBum|yXmh(~-9O9P#B0kl45ESTFs@9V##a%JT zh$K-C<*0Hm=cObasJ>B z7{L$xpbz~}G+YLm48f8X@C^h23yhgCwUu%NftvX8N>=zwJNQwPHg*l6bs?D&dnS1O zK$`!c55@@&wxNoKSrL5T2R{UTuW2x~HI&=uUl&t{)#Q7+DR%wDo7t#bt0R*b|8WlA z@Ctk|gnw3Z_qC3ym>8!Co2R8yzh+(5d7atmZJ^|enip&XnG#huZ0UJ*GRb6vWfQV> z3)NMf1T&piG-C9bmlAV9h4PDO_)C`w6@W*e7^V}F=1T>$az(X7bT)poxt#rCJa$+p zj;54YlxnnT5^h$YcLSmW`46k85l~j5Cb};udN_xsL>akL7y3ptN|rQMq8iqnHhD0T z(^7##MJ)PAF1n+8MWrxPq$RpTHCk~68Lkd&!R>J zXpseo5ILoJ?H8@~;yJszRn6C**A-~g8WE35u7U?p-a0+dbU&SNuFR5FQnsd*6k{Ta zUoyBcbg80UmUBkgt|!HelT?K4Iuy0@5AtX(O-FzbnpIP`MDhwS3%gaII7uAFZWH^R z@VIh<29G^)KNU%;&|*qE)SX_2QGw8(g(@;7L?LWABmE_e!w3^c{}f;#dT_l3vO9rd zG_gF~v$2kPefpZC?UhsEpb!2~qRl!olR2>FijZOz3;J+)`e?H)wqas-X-*SnLXi>5 
zREPM}E|@nXX5?SfR{{L6vO!Jc!>u&5vV@waCIhcWlSn%Ssci>(FTr**X<9%@vcSqI>Q@wE+d1LKE)NQK zTqOue+Yd~8ktZXv;<0mn=~q6ttS2FG^>w$fcCex~LBZ2Gv;njNaeMzzxMn6Vh-)>V z=nwp$x@`-oCc`}vD!W378?|+10$W8uC#nU}RH@WO!%I2b|I<(vU}PNPc#p>~M%y+0 z;I&v9Gs0zxOQ(9ShDgLHw07B4+PiA_d$;adLZVxh&E!zFw>-t0FX_7$u@DRK*1k1^ zP^;&MMwFlD*IU=yaw9gWUCX}^krAP5xD^07FNH8?J5RVU6OPL>(Hnx|#E7h$q-()l zF2SxzNJ{2-!4(0*Emd9+!>y@=rsRq;Z1IN1xj3A!5gmxA-JOJN0&Cc_Gng`>Js0TR;y=k?@2sJ+%3B|wkvozIb2N$sfaeDgC2;z zO!jD)++CwZN+9{HL1Csf2~DlcEjf%q7~#6~6R1!spB|UB$MkGMDVs3Sy<@D(`WT%$ zQ5!bWFd%Yd3yL^p_YdL>S0B8MGAgKhw1(1lWfl+!;h<7BVaGUoTAXU5$D9z_(HQ&e zEeQg}C#omIi$R>+$(?h1&MJx|SE0WMcHW@C-dVeooP^m*X_mVam?J2e(aYiDz{&Kg z`(n=h>NMixQ22v;<^x%*Dy}xFuQ!-TC%exU|NMyq$z~qi5Q2mee!!CdoGnc{c{vR( zUn&rGh&peGLJGaF32}%A0hS7SU20aVDS(>TGd*`%kxxf6@A9kt+a5kNLxrPlJY(w!SHf;$is6vze9 zJ%Ei6PUlPcSZ_!7tO6&oIe}=x9I0EI)fEr{&e3q6wq*?=viAZaKV2>58@S7a*y9`4 zPmNz37NRRA5P=X@jrEakixW~N%TI3p(fq;66YRNWhZW!?lieLfQ|Mn9z!3WiK3s+|WxFFtM^|bIJ*iLgm1%uKd zj7|jMPz%?^aT`fp_{TX>Qc3s(OWhOYpa{Mz-h!|PeSmoU5FT=clclXI9IQ}G9WX%a zP-ccYC(YVNxL`w!y{k49FVgBbCVs+OZj@>u`WvOQYiG|{()(4nD)eru7@2f74A?M29eSt z`QsqVUX;BXw7ta-Pz&T83wMAPQCS zUqpPhx>0}{x(kUF0SOFfxFI#q|JdRUjw)d5<{A;={GzZ%txgs`xOa}{NEFQzsFUSH z)upkQYt~$!1_-rq4xtt(f>aKOC37!+>2b2?I!dU>eJ%8ZKV|*F#$(hwJ`yN3eiliv zNqTBQN6cWMQYw{AyFdVlrC7Vb7|PCMRW>8J?&@#?>-2*Z?kzB7XY03F$nm)AyxyH1 z$4$^Sbs9Tf10jVk<)cPA>n&mI(aa&_4W5hed`5Ovk)^!+dH^x~5_ z@D8=HXWNIaEyqrSQe&yDL7VQKlh7$T-xUuR3#MGWwP_0BoLv2kXd&$}ZYX?w6Ez{u zHBK&qThbE0+9>g8?3d|}|55Hc3+%FM^QYs$W=-u}k?C$`Lk>aefY6#;_5cLk?*Rho z55*+cj`YRSJGoBdk3RAxA$&T?=Zrk_rZj02Y5`v01u#q*vqo4z-w+QF4&b1QfdC6q z*Y4YbP&1M3^OD7wMdNhuw_4So{X}Fdw^W(7lb++}VsAyz_7Hub53TS3-oOvah$Xr1 z^nH>Mmj$+F3@@Vn!JotQj`4=I9*>JEt3e5OiOlD<&>i=2vZwf@KlBc~{24JC5q)3_ ziLVdfz>bc;+|~lN$_(quf|I*GIaUn(WZlvvk%r2qnIG9>%J^rU=5Xb3STbp@zf=K0 z5P^^%6tVjd4M&*X|F8$mdGs>N`#M4d^+B>>36!E#S4)Uwc4P+YfA?cQ}*jsSrO|8^0$ z^)%!&r3aDL#hY01V#W$hzN3lwS*M&d8(+qpS@Y(Dgb%)BsuZ!^wGgKo)CyPUR0~wo zT8QxAV8L<`{|2_M&^GGYXOvox<{UgFAOa5vV2DtwAsMC>I-LYAT>5m)Av@``%&^n- zg4Df#2OqwINu8I=(S>LstHFO)TTi8mp#q;hWibqlARjK=wcQWS^@0o_2#QWT#{6^0 zj|d{L%AxJ9>#RTnC!}ztcWx35!PyF8u0jt#1TjR1s45Vy_NcN-weu>-FM@*taq$5R z_#j{x^-|Lcq4SJ+XD;1DRII)18Y-ujs~Y+zB*%bcvPs97D{LW}N^-JFE4}-NI|&=} z5jca^62!=aP-Ia)K?1Rb0G(=yfBh%miABv_1o7q@v8YP*G#y)e#~4?MUFj=5k95|t3p?EIr03#gmov~h=~c5Nox(R? zf%9!{yq;D%kE<}%n^UWR8Cok>&&=DBB(jAe@Egy6bLS@gi{x1uns!tEhhf2+xY2W+E? z|3*Fx5{H1VHY8}wxH=h-R-YM4ajo-&293F=N)mc(sCaD~sDgNUEN-{&p0J?7?5?Uj zWs6i#Pe)MSqw#sj=T5j1;oQ474@LWN>7%)}5A^xWy3-s|E?RpcM46@C0 zHa6)X^`

    (_eQoOQaSeEa0g$-;cxu;ee1|61?4)?1vBnAp~0OS)oc^m>Wnjjf)s_i>zjoHe*3+PG3R@(cD%L zi+#m#OaxB|zw?jy!O@P=Num!E_7M`*q<91Dq5me;pm?>XVSf~!TkbR%B3eu;xoKHx z_$RO;X^vq=VcQ*FQabREq$xhsWGFi$5>Qs;c~(^5TXLhC2AwBx>v2h#P-Yrx9Vv=@ zlvAaygEOpvX@FDYnLcJ!O6(-$ItLRaMM_r_B{Jqg1yfxzr3sPAEQOa7|EU{H_V^L_ zq)LJODW)q^r5vFtt3VU$C3d>6v$4Z)y<$ zggA&vp}F+KbKLVuhtBtppw$Yda3tb*oI}ut)NY^^p%O`QQn!^VbaUQw=S>~LNs;`7 zT08giSZDK^Q6cg2PM2M_U~G-{LJ|xvLgGG1u#XOBNpcph*bV$UZkXH!6=f{ zP$tPR5MoK%+Q(Du1PdYmG-_ZMvM*jaR3s$z$!V&k#{3bls;FvB{}w5t(f_bzCE%>f zH65}R4D=xo1M$Pv^7XH&1Oj_t9pX)bhSv1i6|gNa33F;HP&$f_bWsv)Ze{j9mE;sC z1X@uWO;i^_hDkM&mB`tYHPuOJXR;MIPAt3;2-&b!I*o$lsmN8-v;kDNBBT;!km(Y& zZK-?+{cd_aq`QIekc|$j7@xw#8uTjdJwc%(NG5B6y|khgvJn-raJxq#8bzko9UD!o za}uC*Xd>rTZ-hN`J1#1xkEk75e+Kvuiy>HE_=DPMxvtO3M ziEVunP>{6F!xF}^Pkxietrdi_Q3(?k+tT6jL8z(nliEOH|DlgQnB~ToVJ1Q-@q~fU z5GMY)&~=_@FlpY5nKj9YgNK;9U$MtOdOLEC)oh^WrO<5k8%>aZcZ&x!7Rv3zZADg8 zfmnQYL4kQyHNQ8(=NN;p4-qRF@1@ZAWwV+T97@_ol0>4CkB;MAbEd<3=%FBTWWO5? zEFZuJKBxt&iGf+NN`t05+y~H!!meD$GZ_%NuD8wkNJ-N6WL+)OIiZ12o`n*UgUZfy ze${lc6H?yItjIsk7ILWLk}YxY!A7SxN`A)^w==Z{fOS@>pzPWiOcO@7(0SNfX_ujC zGJ7&NJPD?Mk|YP0Sa2o=b{}~a054Z3Ga*fE8*(NKVITAz! zcEM}AkjI)4QEgMi3RK>Tkm7>!@~p!uZ=Z-pDYA`iefPa`?+Efg0iR`5BEScZ_yI)- zrlXS53=^brSmJ>qIigi-teH+^wRHiFQti0G!Z6q_Ak=l~Le6rke@AfuA9BnQA`T4H zBHpgm2qGK=2*fTPpmmo#|Hg?%xy^iO2c$;v>m?@HJVL~*-_P{4 zp-WNv>3$ldy)~tETGFX^U%!3x^v59L1U2z200=&u-Atf_s8Pf;e7#dB=&&WDu|jX; z(FBXG>8s3>GZ@sR01emwqA~6hz@qp!j4~2e39f46gRJS8Dk{0cD6@)ykjU$;?dUl1 zz&GU!J(%z<+F^?K!;YJ|z|AoT5MmT#JBTFO4x-DygAj*r*+cPj4-6Zuk_Zh6 z*(KkJ3wLk_<@hM-U_s5enFj+s%~G$&*e6S2pGfhK(}ToZGzw>%6-o)UWyvb2qMwLp zxQFF(pBalE6jLBRySwM01*zAETl#s)&f;u*l+@ zo_G#xWQn!-6gc^g55%K`q9)l24am5>SadoMgeIC`Hg5FC#K52MV+#~XMTl6Kxrl{+ zU@n#GjO-h%6eK$>`YJP`4(r&)|Cm6D(7VWZnw7BB5e-z13Y@PP!>Ve3i!nFw^LLkJmadXR(5(IS62XC;m%xEVo>A=FEKp>gAuhh8L znvz{ROSptVN>L?ojHSLY!Y2R%Fbgw~;LA^1Oq*zlweSaiP)MajH6we>wUaB6kU}R> zlIX(>dgG%4`HNia~3-Kz-gtdNjAzHO5d1hFgsY&7#k&?VZvJ>jOP@Qq{Fvk zf=3Tw&cwhF0`150VYcpQ#ROGR@u)s>%D_%|0vuYZ6f}qs4J+b9CJO{X5BUdi2)Fk{ zQAoqhW*pL>@VxWcC-z`EnPSKlby5Yzt0%}ybU+L(Q$G2u66E8z4s?=gGdMR{h??3L z2-(hxe7?K5O^uV#|C4w-dCbCod(t>n(6@u7q)@)!ypnHO3(;CO z`P3&6^N!0?N4yv>EBzB{1gI31(@8aul30&^;D-u8%r_DZg}}-Ri99aFljE=oKQImx zgCD0W7(gAL2~~(6#Uw}MN%!dvbOTUI#np;C7;+IN#7sWhLF^fSVLG{I9Y2d5JDE&vB+l4)oqPN%i^=HM2D-0mol9W@99=yksR45hg;}_ zb07f2S*8*!(gOjGXE3;1AhlZsDi>r^HW5U(}MsBBnT zONeUohgWEYeh9EUiK}Blio841zEuqMOc-w}KwKTqbD0WUK-e~f2=X}B>tef}}5PH+>*St)uOy8i4gH$`2%__z9T9OVF9l+7JI*+fw- zS60mzzQBN8n2%8d4&kLq#b{fd@TqL%11=x~Q2`))p)q<}iU)ZPQc61SonE52rKo`* z

    ?P6gjDq)qd!H1w$>UrS%d^f!un71PIITvrjj@-#!f7Dl?l~p1dR*2-o z|Ka`98HNg5nU5NQuJV&cXfld(y);7s!M%Xu4_k`!m8}%S&gLLjEfxy(^rNmojW4FC zCY4h`Molea3NEOCNyrH$*-^2pL(t=2VbKtAz}FEz+&PT24~>#R-rQRNWf8K@rD`3x zVU7=CI6_v8>K#aj;iJbYjWEVlI7Qgdxnx+w#@?(;AYQoavyNvB zif0}TG@CSjiJw)JWxM#_la!bgGOq5ZvObb!#YKo6C`;VRP=`&F8=}vcP!fMFrJ&_9 zB+kd+@w-1}-wEC1Y?izM<|9P5qN-||_G;a}-KBTP*NhBdk!_4o>=|7%GT#(X|CpfM zPkjlR9cG!Z=%bj89_G9e+@ZK#f$2l| zI1?!Gxif zF@C|bp3!K$4wyDLVKTU_MpIEHmV=Siq3N4VyfhmX=AKSr>$14PVz6ozALmu)x&AGV zf+SyJ7F$lExLDnlHqT1-)Va$%YvhAOXoPR50DdSxhF*-IhP5amx`?2Q|CVvq%l2V> zvgTlg3BGlZQ4G&P$v4kl3<|^L0j`!u*^kv7;MA^X#4S|c{0AjC2z>yZe=xXJR#oC2?kmC?Z2f=EhD%k_;pr z1LuNsxY?N&ij7Vb`yMMG5FX&c#_-N<9<~Ir1XO@T=cub1A08d)2%@4)z?$Zq# zdcy$p!Hj~hq0C#EIoZy54u_sKVF1$w&kP3#xs=#5St8l|3|@3^M0bR^7z%atV1ZrBZm}}#vaFT zuz+yjqKBbC3e0G1lIw!130b#fd`r__$Mej%C=xmKE#V4FLeE?rYFGymMhB6VvM^j| zp-OKER(*535VYU4R(aGpXfJl@sNSpE3WDMfOVVmgQq7RV*k58fyXS=9=L&!-Ax&KZuB0VZ_lD=`n(}~ zsQ3z5mx$PGo0&#_-7hke;uo|N3GLy5NVGWuuna{FN&m<4&%% zhy}-pXDh^Z={T69N9&b%kj~EVTr(zK8q8gyml?->Cb=aVv>%dgFD@28g%3k%kBG5Y z$>`z!#M+{+GJWXB$TnY$q{>{iEvW0qV&?09JOPMWwf+&TWno~23IzoTQb;f%#0ups z2(nP_VZ@9YH*)Og@gvBPB1e)eX%gbQcPA5;OXx3QLm-`2a#X33Wy1=AaFr8C^QXyY z2|L{c3iMqkGSB`QHA-gazh`8g3eEWL=}?GRD?BUO^efn~V#ks#Yxe9$gBSl9L|8#k zEw}`kW;9sA;9RtN_wwz_5eOMC6*vjDK=eQ-Nvqs(aVMC@22V#uKkgcG#t#|Y89TD=^r(PMydTZQw#CN4N zqgGxz^-Shmqa&|w{W^B!6KUUtYr&z$?YQ%1FK@nNZni)aJ$nM&DCdmcaaAau6FKID z()a#T89E|pl<`N`Go)o?;DHIncOHWcI{2VQ0zK50AhjiA*FSPu2qA~%31p05g58x; zQ=Q~zoFMdh^WBN7MWxkh(veo$b7n<{lxJjk#ngWt`uHP|WsxXSTx-FV-eI?uqhXLu zYF7ae6^Q7C7m1a%9Yk^M*57`uQDv2L|6PjpRF9uo1z?R%s<|ec`jKH`i)pd=j|CJx zB%XM2wz=n7##CU;JBs*8CUquV^;w*?9Jd>DO7)hef&ykl-bj!p^v_n8Ue@1p3-b9X zs1G8wl%C-o#8jOZX^AR$p}JbuOhDAKBvwydCYeX~tyW+}WU7W^LQ09*nWbflWT|Dn zX8J0#&04o1NOCa*Q$ce&do6qlK?ItD{Gs+@ug0>5?3tJ?8x%y(2$iWVq6pKbi%#vBGkx<-LFeJ)F z8;#mT7HQb?j{uK+v_}tBl`&Qf&omX&A61pLM>;pxqt_d~V~n3aDR%Rj3Wp6f+if>h z5qGWv>$XS{f9CMaUFQu{&@|Cy^U%G`z4k_-DH_(*=`yty*K|Ancu9=|X*c8>VU4lN zV~Jywa$@_Ib5n}kt7}P_16?`lsaH<0S>?RPtm$Ei!v`OmWAHjnRU=o_!$8#%NFaRI zU9jrK8;`BZ8jVVmmWRLk+>oArbxQ@e(8UKJ3GI{5J|Qu;RCSoePEswg?DQQ?rW1ZV z`|a-wv)FG+4wFLPIUj1!|Dk0ZxLCIkkP{yWfPsYI0}S?92T6#B8XApdN$(+DQxxdF z2~x00$`MoN!nP!Yc;|khI!tH^B^C?}P$TvLfE6&oKmBNCe7u?8M^y3*AQa<#Ex}z# zF7_D-Rg)U3VkO!UR!jq6tClx5+2~{uzFfb!s2jXF5)+e)wcmW8P z&`CoexDvGeu#9Hxkh)@)Mk1xhHZ6h6^CIJiJ_Iiu5INyCR6qn?WTXPV|Br}AP8WHLTlQouEtYUG>owBHx{wwVO&a=TsBEuC>@Cf z{J^4%0)QU(fCwx+Km=ldG*qAyNjZv>5L`5Lg(|(O50_*u+f)dq1+_|k+*KMe7AZfq zd`ne*Gq(e!|MeqzA>9e-kx_`G!UI_#Mjg~@MX}6pl34vKkS^FE47KR2V;L3Gl=D@I z)CnRRjMG`W+O&RQWu79L%ZX0-4x<7{1T)w~Tgf6l)krm!98s6LehS#tiba;%td2nt z3fVfn)^AcnmQ;w6Q;QtLup5bNXjNk=Y8on2u5*aKh`QOiUJ4T*(84`<>!i|HHX?_6 zQ)VD_Cb zlGSt@ImPY0^fgOuAiAlrcI8x963K}r@*rMiuEGpu$*C|)7d$?L1tO4#Jxle&vc~3gEE=4%ZSe|ZEfBs<(L23y{EF(lw(cB$z&7!D& zko3aHeaH!Yp~o1I0Uu|u0Rr*BkKoFhw`RY_a#HIC{8#ebg8-6 z;{r|ag*@mHgfryB24J8^K5{~8T$bm^k3tE#GtMTvpxc|L8cR|%5?-6e+pSCyxT9Iq zYFV0QCM$+;MtGR#jo2}G{5Yb)9&!d|&;uX%(1klqUyaqx@pO4|3k7O zghu2a+uH|S_Hhq-$b;??iG>C9QJh|Dtyy#t4p1WP_t{rRWPonT#}fiw zO8$|T&?hJNK5R`x*qN;?Wv%+zOFu-0nM7-SBXlv}?yFzAIC|zMOQ${f`7)8b6>)ecR*AoE2`k_T$l+#-%0{j#n{a{9HAz{jNMF_RviuKIQokRf6#drzA0}ucKJV3Wa zL;>!^TXY*5o<;8fjT@4L&>YzU!XYWHgw*ASL1f)Qbb_o9V(jHj{5{!YMcoNj%qN~i zSIiCN9iA&%UnBm*l0-m4073*1Km-&LW&jTfa>Q1MoA7i?0x@8)fP^wRV<}!E<5k4o zlvP|XL_@>{V=UrH0N+YXAwlp8F16TBNnnJqV@I^#NxeyQh+UaX|DQ-G;Y$1>FhXJp z*#bEH4@i7REbv1ma)mW6;3htwVWUHqg@d_c5T@d+e1|r6;E|P+uDzVt$k4{r z6xUUV6V}D!Sy4jn-OO;yWO!I==;M-LfH5}49aMlYc7*dtRToxPV3ni$P2>82%|iy| zS4fC#bk#&w$asCn6Oe&>wA&bIOL9?;Ck~B)a8s7>R6@mH>z!n)s0608irBdbd0>_W zU_d1H0S4>=T7E=s;(<7$TnZ&A5+#@^${gyHT~xr34cUGr 
z2gOh!3zbUHcqLVyOqOU%ZkS32)`b6oMZ{c2ne1icq}^OV|6(8X0RgmvKHS0Wd<1Wq zPeX`~Ua%xxs#m(`jDa*1UfQK;a3)_uAd_T?onS^qRK#}#Cn2?n)%D`Q9Sq+g7fJ4; zAiCoU>dh#k?Us(UeZ(mI-nj60^-WJrt+{GX-H0i3`^tHV#;A$?Chn7rJ#9KMJ#CN6Jc3H6h|yD zLP4U7JuQ=ZHelf_s78i*W|WdogGQ_^8=gfE^K}2#SPP%u|s){}|_3N_>!^l7c9df`y27hlbIY z7}iNF=z~9G$Wl@QeFf1PbxnV!OVu=v)omR`yjNwy=Hd0ZCn|4VPREjz%fDO{rfh6#5uMEc63zNJNIb z6m^7`)Z`jhn5mn71jl7f@*wJ1a9K%W$3INeEtC*c(T`)qnMo|2>@m#cb!h1rsuqnF z-JB|MadDJh2< z(q*ZVerzTPy`%p8tp7z+k67pARt{o;#pMz!)80jskT0Km48f2^3rsW)J6~bKawvr4DCCUEA(TeE!b)1EL?(#~EYqiAT zsVys68cETo80ilT5f2Z7qS*LeaxjGq1xT<=#d?9n#_Fo!mTv{|N^Kkp@;X-fQtCv_h{BDQ@I|KqgAMZ8`zlFsDmjvLF)Zmg%ZRQB^kjo3MORf6UjDkxEJFxo%QHbHL zNn9?;K??>CFIn`&w773_bcJt9|CV5_mWzf260PoA4qNUt1@B-S6?@HeCP*%NB|)68 zhx*_897YY>@bMs4ez;azmFl4RFwH_i(3E^E?WRT8&xSJ}}eu%S$) zkLR4L^WiSVDoo-u1=^OxDkrk)q!9p^5qP!LqD`i~-ZCBk7H|eyCXWO* z4cRPh*k1`VSRgBuh_Vhh|MTf84@jJ{#j2<5C2%Nfh4^wrB}t+aJV6L&^U;L!c(Fz9 zrK9kMvtwzMMK13LhXnkVL$(fEFP5!J&7$}5Qv=5wphj$(@ZpCAhym|vNW7{ra)B|H z4M8JP+e-9M$;Gg$@AuNgverp%5V4)mlJdS3SU}3=NTM-b>ObrP0c=iQe>LP_bWQYUTfIgQQt(-u z^F(h*n8L)<;RJk5|Es17iMTLy@e~D7Pf8lz8WppvQEc(aa)emeW zwO(LI=LYW%dt)Y!O!DZ1KBOWrZm{HN6)*#ZQCkxjcZrc9clRBrX!$Lxb%Z$dgFf)X z1N^9VMgSj7F>OE2Hur_2ah`5NRb>NX^`e9kyG}k0;PDo>@|N?u8EHIojJrtpjNa}j zpGBG=%V>`;`%OwDzJl(x!V`T1sfo5&I1jPD0CjjU#CNo}MXujXYpSe4 zaqo4lRSi^Khs7p3>BTrHeS0*1GbD45oYTjw z%$q;pUd$}(8<*Fr{zT2h(3m3bb-c9Stjif*=#%cZj4-2j-`Z!-1xH@W_wn5&d4uYG zr%;K52La2Q2zi|&CTOed)KWTM#Y_c9d1F=J=Z1qmU@H3k`esM?qNAo)uae1>1sNu% z)ha6aI`d`Gdf~?2Gd#cpkW|dlq@q|(GDzj~}wm}&4#oMKs{-*lQzbU*L|Klr*o zTz7_)|Fau&xQQM2Q5@`+jC+co#p|hHfS*o%!^rh=`itj|68M2Q@Wa1tVFh$`hC1r2 zI}T`kyHPY-gtNq^4*RcdDMavtH}ol5NrX2d`TST~P^S);xFSY}`aJ9Ku_a4A(~K&M ztp)f)_V_`ZsCF1XyvGzLzQeREo;OQSX+&7J;G%4x#*$)xRhfYa(Ic(`fe|$;M z|9$6Xk(ivL8KHU34=Phv)j)HKQyk%ColUoYcb*fQj`Td1JyLkR5kmk^IIR0VZZ>St zyZU}3??uKEUkbE?bu?EfGdi`;CV%5deblJ3730m~lZDg3J}#09Y<(lse6B_On$at- zzL30pxpi;6E-N`H^gB8|o2vtoOJYaiCb|%p7=QGWMRNDXC0CzT75lEWrJe*jKvXCv z&_cOzfh-98x3EG(hvfuWVCaw{#DwL_{iC>%V@Ho4L537Ll4MDfCsC4o_s- zCLs!{RZw8WTD1iIlqyl8+J+VdDO@Plf`vfd`b~UjQD8=nd@;J^I+$=_!y?};ZJE@i zLozl&{v<3WFR=t`v>Ss?o>HQei=f7GN%r5+A zp;l&-f*|_E7_qPHau&quR!bYW?6<@-`fklJBJ%0i4YL$YvawClPGdXuD3de!hSE8D z{s?&_d-Wnsmz+HM8PBEcvFF#npMU?9?|8xty|Uuc4KCdp8f!q2#1ad)xT2G;!Jme^ zD7wF(bIw13D)8c&gfAD9FgShy-lP4R5R92_c2lu>t@qurVt3 zQXEf46*Y@7ClxOP$;FOdBLl&Pi0W}87DMB6J)xiiQ_w*PEp)ak4K%VAznY{7LMK1s ztu~U5!);LwE0oL7Kmv&fQbP&*=czHvbjT(9{ETcS&{7@Ayvj1uvsH=!-RP;OW;J!r zk#^-suTF9WRoG#P&8j}1C>ra?f0C@rr`#TDG^()tnhmXll+7&D3xy3wpNooJO(Pfu z`^TnUJ94Ymj_k`R#h*ZXiM*H6)a+R9Jc6u?PI$^CmwiAA_M`2%{{s}=a1l;eVUId_ z>?dU-gm^5Uj>G68wc_ecsz*DbO;b)t*r&c(S3c}&g>8wa2*rSCS4wO4GCtKH4@hFlBGJ5e`ljjuY)6+z^v~%o;C1yB- zJHZEL+*C4WGJ5UFDfiE!-WZa)9EkNgZqbV!itFpB3iO!Uj-)QT*oJK?2N!Q*8NKo$ zd2VVZ$Ip`q@bcEoOfbGBQ;S}{t$J;vF0%0A47rm&m|>Iv|1E0v>8bDUas+eZJhQPN zQjkKcxvmJrOCdcyT%Opwv-;D%yRRk30QF8yHZ_BqvNbhZ3ir1?J7amLa3?ui2~b_@ zGvEPHLz_hL#x~$XkJTC?JG$sBX%wuS1rf-V_5o`&?E~C9+Lu0_For2NW7A}Y2b33a zFe>Ka48JPi31H#MWhoI8ca-L~4DOJJDN!Jj5T(5VjYUb#c}oS(622ZTF+aL<-g)@M z!Wo^%UO@RycvO_a7VU2%aH9+voY4&Ms3b=yF&BWC$3!)*@j1&t zwb-bdv*gi4y4zo4Y=tU!Q8X1eELb*w)gyl-AydU01W17j-OKG|))gfP^9iq&{EtY&@7Xhi5 zrvL*C%d=z`e;LT3(8`s~oLnd02udXq2RL*A4ngp!$&+b?V`Oor3zs$`{DI~tI4NgO z0tb|ra8NX9q?uH__##hm;$`ibiVj5r$lE!Sp9)h}ZES<6PWr~873xF+I3C@^8yuNJ*c=7;hK$XAx&fn-2pBtWJAveNjM=~j5uYdnrd5&{HL zMTL9IF%l^q+Dw)ub6iXb)Hxlp&hrEaSq=kAfN=6vnq=~ETit67n;K4sNO7g>amrW6 z8e08aFLS)fNKHAi*0y$pb8yvIl|;voinS$=4myhkh7pJ*oy2o~Eek?->q?5=GM1Yi zV`Cu`Rpd67RCWqb!$R5-cXsfFsq~4TmXbz0!LKIL%V%U=Dq8Vsg|r&UjVA&6k$&)F zwMBaCc{vmji2U}iuT^6b(Nc?8RDcnI9m#yF1^?hGy|$Ik++}BdWw(856qi(-PvNxF 
zHZ*ESq>SvMS19Y#KnAK-hJ_7%KAYk3j(A!|iqL~o+ zh85UwVM$^!a|Mo>JCce+qFTO`7ISJ*mXYdG$`;R&s3?=vwfIq`;D#hDZf4+*k9bET zYjHGmyB7>)ro!g-B+9X29hE^_Pwb$loobm+y-v!%q9Az8oi^yQPz*#+f5ad9;KxMz zp^sQ#?!|gh*0rjH#I!uD#&u2@e$ldDT>tp@Pzq_|K#b(qYQp-PE{Swx=oP40@-8V% zmKUiU{TN7tB-5!WsdOVezkPO)X5RJmw|OhgAEMbKr9O4kVxf<`z%{)Bd9~EK^jADW zi-)&iRzhOI4_wssBY+mLQJ>@M1mUKiXk?^P=@&Ud%A_$oX-qDq{Aj2gyFc%#^gElJ zL&Dl;so?fFpT!NzP+QlN2SgeWF;FM7vCNBd)FbvrFSbg`H?3uJgWP$T0%#xg!!4ZsN_TJC-pVy15IK(s>EpE`#<~_2D|((gTxq3g(8> z{Vc|tT+z*cyR4-R`+2PkM}v5yQzq9(1zK;GfjH2-yx|lbuVi1K9`LSJvH%dspa`6I zjAJ|Zx9R2$tR??*7@Fb&l zW+tj_`^2ieKo9h|M*OOy?Ee~12twcp&c+bt&c=cgfX*Z2 zZs+t)lT6~Tp zh=`zQ!}qSEc7geeAvJVrue?HvA#wTCrp50vGrpFFYj{-HhLEtjCa}H*m}deeK73>(>tD zYYsvo0PZ?iMo`*DivNtlg*>e28qUE$2)f3lrJinJ9o7%s6QgCQ1a zJoIOsZY8`@F=19w$dm)3&@d!gt2zV$4p+{o>>?cUFt3EZ95=ImQRJvZ-=J2Q9ut z5&Wyq%HkLW(ISCR5b<%yCT2RkqN>0o@iM8FUIZtcEb%DHxfCuOHLxSE%CHiPUbYcV zLd=gq2!G^60{@NYXmU~~Rnf$-XIXr*-Tq7c+Akni%OB9;O4d)eET~eDgdr^kTb#vw zX6H8M=p7V6b|li*hOiKg@S5~)2q&!+3B?h~(kS>uOuA746$8Q|r(wQvBvNuPNKyLK z$%~(qW<1uE4Q-$AgJF;$s&>Q2wMX!3niFH3cKz`G!U+^3M;ylLLP^tebnNd z>=Mi-&Msr(%W^0tI!hsyb5Oo+bDmQz&TwQdryuN4I#+PVtfL$dLIxM4dz^zWZc8EX zP&^Bz7XK8%7zU6-mSnylvc=%DA_cJ^cdtfQqpBEmKiZOudP;n}1 z=SUGQ;R^J2>V&!^u&H9hL>ts{9`vlxkSRUFLW`;VgED=?4bh8g0wER*BpPGHgcR_z zJTY@$DK48-Yzt)-C6v_wQ+s1Ti%-Z!a&XeQ*byZilOY)M*{2WPltyZ3H7@Hk5Xi1?_%|7~`zXxaMVF&Hhw3C>w|p z9^-Q0td97!Sm>xB6mcw8Rrm64XxXR97*`sTO>&e<+At}c^mAP7B-y~15&uKuRt962 zo;BgV)_0)pvu?G5OjvT`mr#1^Q_JuKV-c+&#DUPE{2C}1%@icac7RFgbW~0dfPg5x z;Jm`Jl-5Xa3GUZ)k6Hv)dO6a7lI&6gkBhU{07D3Ak#_hnS2ryUMIsezPw0iXjDt58 zJTkb3PorD|cgQ~FlzKzn1_Dgm$|%!S7hBe#FefZ-<8Ym$S1mvh$bb@**cd#|S!69o znFe|Fj6=)L=YkAzS7YfKcu>4Zbg!#jU|8Xx_H&)EU4~EaLc>Plm?PMOPkcuU9qFCe z7jolLjt2!wG&3t_Oes}XbOg5?*6J}Kq&nvIBqqj_3fW?0E`pr09RG{~2!I1^Mf4(! z=)Sh8E@HtJg^lMrxr4shlR55>Sme@1a_ic%VkaY&6YG~i`6RrHGh$L5Zz7&%X-qP& zxReQ(1%)hjc}#nm|4ge>(GMNa)Wj^JE>utyZ&fnYk8;h10os-4gcE$=3w*;)lfGCfgkvR zJO_ePfS5u5N!H}zhPyH$z692w*?q#opv%He>n#b}=y}6gB&wEaK3S))O}bRZdOtZf z1kZzNhb0+?UqqR9Tyjq0go=nxHkP8KIIyJQ$F9MoIr!R9qGO?c2x5}5jTB|z7CNX- zsX;WQZUwq8fWQlKPem4)F!{k25ps=Yj7`_CU#r3*cn>eHtCbN(X+wH&L@Fjlm|+1( zX-D|8QagKPsctOqKM6_O>f1B$JL!t9gM`8>bUR#Ut+$`U%%(v0iZ@z`DF3_yXY*)_!;lH3Ya};#q!HD< zJt%xZkS1*8UNVa+SY(AYBNAT;h7<#wW;mY!1T&WQzkNi`1pKF1X;E74C)SI{P6jz} z;cVPBB^q2)2tsw!RL(*N*v6D>DWV@b)Oa_Q*9_`GKpZPPCdiy<9979&B9^4KHpCKk zR7%@%VPuwLBC=rPKx+cSy36vKk0SB}rJm=7jgE!)1XXk+F?4!Tj?Bn?1WS`>B1-fh zh$D{J6|P>;8xS+x;_RnrTGli(EZo)@57&Hr_U6iBvs+`n9ylsY>r%)JK5yd*3L;OW zb$!^;r5xA8payAcV_}0S&p|iowK#4LZl+g=rnzd^zZaYs6*C-ZHje^Yw~Al>{EXt}YptqG1g2%M76FH^g+{sC zH$-(B#E4>VYRU@Oyd`&mJD@2gR#k@}>Ua0%_e@90vg?q&`~k|Zd2ELf?lLAsfbGE_ z#0#DPvtNoe&l0;B4gv98KxP?)?&T&&QaP+1CIp>UN?hWNi!WVS((XplT%5ZcJ@Cd^ zc zRiPNy#S6~gIboa7JDk3QZ6vghwWPd?>RFq$(|WV`1%w>GQuBqL`{r%>n=+j2F>b}t zJ$7&$iQCbhbhz{TE(A*achvNvn46jJ$7hHOk|NVRO<7H@eB+e7q4l2AhBaqmU`>kq zL+%F5pmCwaM7{CrgxPW0eI=jRi(NpZZqPfH>npF~wXPJIV%Mt?yx_R_=7!_YKlZJ{ z@6U5m@VZ_L`O~|`pQj|DWz~WJLXd?Dg3A5-SIdIIe-#>5C@1hC!-fR~RhZZiqJ@DK zV);Wj(4fPSBukn+i85tMh!PW4sQ)FfV#W#_Roc9%vYdrl3?W(+BxqPWV?fK{Jc=}_ z(xprr9_-YRjM9JaPLd&0_2NIMSD$J1TA}N~e_*e6eKz*syR~fDrmgU!Snzegkuid$d?OxWb+-GhBuU$MBaYMOx8^%R!*Ky;*4!x7~zZZM= z`DfEu=4FLlf(kCkU}mIE#Q)MiK2@~SgGa$+VMYS6R@zacg(OI8ssWXdh$KDt(1sZT z#gIi>HJB7jwHOrABSl6KySCuX=|6k<^f zQ755AuBqfwZW|J`P<1vXwBbS;76g+>CQ(RJh8gXok!*RMq)?z7-8j&WJ|2l=i9L}R zNFe2O;ze_tz6zXL+i^$WNKA@1UXAGW%?S^IepcVop-aU0z(V_i)ZSGl-j!olXSqsV^RN=(&R`1=LF~`AO5z zCDEoSN#y_%s=GQJy(vra##EC-qm}!oM!PZC=}SAA%Hz&zH#zOaN1D5)`(fl@NOs)-iA{^r*r=onx|^fHK!wu}!IJ-8*kck;rTODuMaApK zc^$^3TvQedpImhL_4lB__NQE2N9oO1NfD#9G4}=134HnHpA_^#r*YO#EUksu`bs2P 
z9K~5ADcr`ugD&FD$7CDqq4_X} zs2Ax&9{^jQ=X`Ro`eg=*>N3%6R8&6{NeB?A0|bTuK>!flD}f7IprR=8DM@9@c1beI zMsT4pAOHIBDNw4J47F!Gwd@LKCsfO?pa;FSI+<*4$wLj?uA~?`B86`}JQI5`IXIP& zA^(9BBc`p^5*BSzjAL$Vs7p5$v~pH}AN)9Ar2aQQbn*;=v}>ml^+lWt_<;|6vcM4k z!G}~@?l$Q>$!YvaKSgn@1oiTnC3VW1;1U+LR11#z39Di4REyROWKdgu5$ALYqXv3v zAnq(0jRjW3Bn5MK2I7khP{)+vqYVu~u`MKm5@|se#lwxBs~x zWq&Hmsv$}cw;;eg9})o!?7}dZ?2t1`4&jGB{2?zF zfwxCM3F?Z?Wg?mIfv6QI?*jrc5O2J~fb~5}6X{8l`Ra?IPm~B!80A_Djuv2MD^7^d zM-~sd(N>Kip)q~yy<}<`q-|4KT_wY@3r}REkm(gw1j{y#752$&woIRt*bi?60)CZ5 z(mP*#C?^I{5aJ+!XE6|gQ1wF}{qV!;4Cv2|Fw~6o@)tX!Hm+HwHhxTLj{iS!(c;~} zugwlE*@c5gAhnW^r5Ux(3;WZ;WfEr1f=Sj*{sO*ihAA&?#EF3t%;{V=1vI5W@^)dw zC=x%BIC|C)Zju`45Cy^;{a}bVB2XJaX{MgedF<)}8c<2&#Mkz%a7oZsi&#`(&PSrQ zuGL1B_U!o|HbH=690(?9gR?&ORqEFC_#!C%=aUwd0Y zTl~?8h^@Jb9IAh@;77RxkxDb(cT(mSh~+3KWRO6bIockPk()v=8Cd`wDFVV*556|> zsMRqNow@R^*Sx%4_&pOY)0YKvZ(E1dCSa+uvBZSt#*LY_FmF0R=Kr-~SV`M}ldUsQ zDZ2~v(i6uKAV3B1Nsy*`JTPPThdxy2zz2?8et_!sWYCthca`X_PTv_X1CB9|;u^v1xFV!kJ^_j#xbeu4bjX0AEWSH(ft|Yzgl&`AO9Hv$LefQVFJ_sL%whLr` zQtTaXnAyD%i&%t;tCrKR*p7zUCD*b-Jn~89V}3)(2QNN;w>dHUPIu-(mwF%lHAl;2 zYKk!gOT+ZN^2fUG+e<%R8!?D+tWFY_a8)Fj{f9V2zxI@j-ACsB$Jq}Sk|5vY_0?&Y zIy+_ihVgg$c2BqS37j&iyvf}P4jCTvC^92s6U;>`VL|kZs9+-%VXoalR zFj}*HCO0-R(kHvf9WN1oJn;;!LW>R~Jf9dmZNofYkydAM8N~G^s+baOxQTX{gUhEl zc%evT1&*!QjOTb01+f|JH+H^gh)c1EPuGaqMr0&H5rJZejM6~!NEx9bS_z>DFNlt} z!-t!AJb^S(eP>K(fl}MUQ5{7ezxE%jh!oz@iqG{I9b}Le#d`zkk^6)rslk3IVS$nn zkMW3wkr*!@Xggw~HP`VcVJHa3@PHplkj!O}GPgr+vs^gmCF}8jfyX61SbR&-hB4TY zbeM;^Vv|j&83|!1U1$;|X_CE2l88o+e4>&~R{tp(bRAsv3=G4MPKhTolZ0);O@QTY zAQM->1YE%tkxIFZjd7MDm5NLuC5j}G1lCPt>6iGWV(oa9@3?DLi9T4ll8tyI!T~`| z6DrFA6hLrGf4MDbxL0VIf51Xk!NZpC@qYj~EPLrf^mcd^R$u~2NY`Rln`ljaNSRHk zNmBWQgjtwGra@DQm}b+6EK*ug;}Wq|P?u2b`BjSB}Y z&*>LfAz-MnnP>?amsy-nNtLrng-J1X{A3ij`Gi|(o~jXBjhP}#gB<@Q8Q0UD&M1S> z7(7vtAN*%01Q?e~!7>nYGdy=O&{tLmGXI~i*(XQyos{8mNOV7A$tZ!5p4rBpW)m9k zq%^^Kpt=No+=+kcW}tXNYxTwy-m#H}V^LW_M{MMppypBLxS>jT8f3Jf40;^0@e{s? zo*yVnwIqz1r8<@ACEyG%Xg9XH;zVXlMxCe zQ0O2W(xqECn-ZFv6ncdvxmEWQOYo_sJ+wGEIc92EY7Byn$Tt~?L7XRXU~Ci~%eNG= zB4us*kx*Hql@Spp5^P&Zp#>G8TnK5If)PvgK&*pRg1ScCk)3FHoSHd`*zu4B_?#!& zfYO8-i_xbu_%^?isg_EMF%h1H>HnJsl%A~xREpYUXX>S8hRP8SF0~crefNt!}h4K!GRVK4*DQ& zy=tv)3XZ}0T-_slqo{KOXfyEvk<|knF=U+3sugpQr_U&=>H3<&v7^Y*8}&4!ky@tO z$4)OHRUCm1ys9q%tFC;AWtu8X+}L}6CzSNMnJh$j&v}%nxlG$s9=#H;#It;dB(c6~ zdtrA~QA0luk*nX@qhvHjPIVj{0kKWPvn#txdFi2i_lM?DJ#p1;!$*7pD3==*e05o( zQ6Xk}Q6;nTrx)w9mfBA=(f_Sis1rgmvx{|AxXQ5T!!PlI4mF`@B73!+xFk+XV2J}v zeVR;cp>yrwoRtBceu1x(5g6%Y7&!A3|0;9#xwcyhD47N{R5vNA_Y(aeFtl2;%HgU` z#bi$vvm1dwh5NGuQY{<9JSEkfYFl_g$3mhjVfdDV874EdSUnoFCH zF~9mrr}taHGK7bJ%CCepOepIW9Gt<2RbGHr5cfpDmEkS02OY2^Wa>$~6@U)mz^lCx z5mx1@@FJE3Cyh$;2;JUVH#; z3xP_qxfG_NC}^V-I6_^AI6Eyi94_m|{Ai2#2q_j22tEwHUtB((XuVlRug26M$ERG^ zVWO2`O&XaUt;xr_6vpDPMT6E41Rw|xkc*L4yDRyQ#Rx8!GH#NheeWs6x%zEC@tB|! zT8+F*C1Z-W2LDX|x;;S$7gvl|FmxG)(FvZ&w?M1PYE)kH)FPWPXy7nP5%2(_sUfu4 zp7&)r^HnYbQE@;TZ9GA|zQIL|CJ5eeyXRzD*VfCpm$GZPVEb3U4T!8zY8`rE9UYQ? 
z2x6+_$gdUq&5_e*=Owch@O=dE02uH95da8#S&5TIg%~0l(991XanAyQJ7OBd*X$ye zlPWMVnfGI>^PDpqoTcBGvDf&i$x6#ujJnq0cy6P-8qSVjc&hba@k zaYDsLeg7w(MO3Gx&jaxa`oIr;25rB_qw?r_HDM7+EFl~cP`|;}_{AC$B!&SgToqLE$JmW0ja_4gAvzDS&k~7bJ+>%jTv}s`KEJVPz1uVg zga0A?#35Bt&hFjBc-^@gMke5MW^fgnv<)27f;|jUrBvO6|C?p+LAVHxBz;x^TV2zB z;Ai%X6TqooXgu1+2tPfYeU(xoF>Aqu?cvKItj>d5qllb3_$p7TteJ5*7KLGcrQw{p z-br2K@4eE%+z=!#PYWk6zZQEQ*uK46o&yUy&;4CVzPUdMd92%{`NHAzk;^c`F(#E^ z@ zJ*82ki@dJKu1lNlh&D`^fejNY*h%ps=+rpu8t(TU;z*C0FyxUP>pd(PjwRW|!UDHL zTW|If7z_F!l3~wXl!L*4pD!R&N4r9EHKf?rcyC4j;m8357Et+u(66vK?X8nhbi=wzcX6}5_w*cZixJbgvqqS<>W5lqprNWZBZ?OPDcbrYu*X<4l}6 zb?)TZ)8|j1K}S-2Rv}_aiXh)TB-6B$NR{v2b!wXM=~AI5BUGprNTEJ&0{s!RV4_0&k1kIdl^I!7GES2siU;o~`>&45@uV>%>E>XuT z4bR;ue02q>8ILavIa)AT3mWR@I_x6gBdP4Rd#R(Nk|OFf)KVkPJqjzd5T)8K)Nn%% zPfF`3`974ZsK$;9uY!$~d#$6g`uJv{n-EVFEFs)S+*gv&kj@h41*Y8kUkf+C0jO`R}$GDHdU z)Ra7_)ErEr$~F~s)JiFGDyF1PEl;DSfQXg1_2ln8npGZSXlUGlntWQ~V<0O`( zrUXs*-C&d2udug3jCP?T%L0S|aj?bUGKVM<NZ))b7Wf;?f9WmRolDW!x4{6C#TNBk|3+HZIGL106L` zAFvRLEpO_{A>UySX07R{spju~lsG9y;4s{0?t>@sm_@mvP87 zgfD7}sj~a7`wlnx*1aD9=Wp34@+X!%FxXg9N}YW?Ce%G;RL%p zU~E6emf&7(;zu7`9`NIIU0JJW_t~3w{&@ukCAL!o}aq$qss z2ml1Z>=0&2TI~~t{(bo4H;*mbTbc`F%ImtX{eVF(6I*Th%{vb2($EB!z8x6_LXlBi zt&G$_`9)BI6ZA>&5*QJzknJZ_npmG4^);-C!yf}$Q&P(DfRKr)W8#rk{@5fF_EAuV zGo&F;;?kWEVXPArp$heQB9e^FL}y)U6#th1w!ITRpcZCYVN@&^CI+I)a5aP&`&LIq zEaK%^{JUFPEGHEW&V)Fjf?IYBBrppALSzKNzykzF5JA);btah-ab$*?EI!PNdE}#> zEcFyj(QGmE(uuSba*>pw@d1#^jvBXB3j##ojU0<(;}#>g`~cEuF6?6`JsCkS%8ewo zGsqkqm)s{e%WGb;s(_4kAt;n4R|KJm3Rpm?%UUT)kTXd9 zJ*6sO?BHvKqE+h#wRY8o%S0-6m5Nn~jCS!D9}F@kc{<9aGg6Rq`nR2fG`EYmD^+)> z6q2g!@Fp~qZFYT9o6-6=phPKWg!0>%6LrcjHKAu&I%a|I&9@*FRY-(CA_yQ@z!(8$ zRB4NOEc}!vAy`7ulmzUOPF?YhSu9qG?RAY1a?B}zQV=x;!W-V$gVzKBPkELXk`(~NQxT@k z1#x(`!nsHcZ9)~fDF1}0Pc8|e)J*5FN-sIZoJ&!Tv)sFgqho4}MQ8MJhXb(}q7D*7 z$oK$NnA*5Jle@?h-)!XEau?5==CoOv0%)~LFMGQ5%PuB`jta!00^h;~m%>NM)UlGL zkX8sngj`4~!60!V?kkuJKTC^t2w{AZJhvu*3v?JHj!xu$P?}$@LmL zG8b>lQ``d|IMkXOWs&&ux&^T?KhE+GFil6x;V{@`Yidoll%+1<{xwTlCucj}8K~GL zIGsb;ZwJLj;r~Fvngvu{fkk;|gkuSZK4LMnc#%G(m@u?paVAAx|U zKVnWA1cl`@5qN+iOr+nl>&1uMEVt?qfBJ&o#LX}H>dH&?L0sXeGux9f1_g1lr*YGUz@6iU@tFh>Fm| z7hnA1SO0sT{$Y#^TYwV7FXrA1=u{XJq|o~4#AkI5^tRGI{Nv~U>B;%m!f_NJcmj?& zDH{?KY>S1yZzRDF-hI5;+>W2*DAqi4{o;me4EW2o%0Ko`8g$#FVH(T-ZU1Ws5AL7;>yIY6?AH$`cphL|}tvH)8FgcV#7l6yfV zd@@m6tl5&Ixa$l~@PhclARct8n@FC8Xh21&J#U~m>?pHa(~|C6LNn|t3_L6tqd`H4 zg>X=WvH&9XD<&+gC`y62c(b~s;2LkhIC{gS*Z4S)J3~UmvxKrO7^{glg0EWWJs{CE zDgOdIY}*BKnLZL4AauBcN%5Ln8pFO|kYzf$LL5cca*)Pg9x<6Dj!`bT_zo|4g3>FS zt|P*ZP&ZVIH!ztmf-u5kIS2^^3=8TAvLi)egfb@)rHi|^B^n^mvH;|AKK8na2Vl3$ z(?v#vqkJNa;Q75`>_#+{n26Y&(SSRIX%t1^gl2dGEzByfS)zUbhX{a<{+YVW5F8_P zl-%G(Z`?TS`y#i~Vy-w)CW9WTscDg;v7_bT||%6e=J(l3UP@jsVLeB#j0P z2d-!iMw|`TYfHrJqhV^kC+s=02!tm%MIp)v(Rz!O>q&SWVaga#zO7khoMuDqxP@{;{ z4Slpv2VI<|Q_XE;5dZWL61kkDu+8ldN=(jF7hwL=Tw~?QszHv@2_2N;DA!aacAB zpgZ!&Pamz)v>DQ2{0#$43?t<>+f=u5uuV!t2%pmxos-Yq2-2dnQZ{uN7mLJmGOId@ z5F527>6{UpiqiBt@z9KI8jY))lC_uPeq*YGp0_RFWbb9 zTG$2i={uM6(=7l zu=PoiiOEv)YtRcB*L^)$Uo}NyH7i5fp%)E0wo4LtWr%7;Sc{FGgo@SKA~5gpSEb=N zBE49VJr5vdPJ~($L|mFnouZL#S&~htgNu>3urvt$RhPY4VE6NcNT*zfd8G4qG>!O`l)5twroAuea0k+L0iNeiX(M5@)g%?ZN*@eX1 z(Oun?;HI*@6COg{dS%_i1DgBRUHEJSYZj<>D^vgTpnzt(07TOTL1kC?&MzI9StYrHnS;RO{o}- z1&s1F-+hgX>J_{AeO|hG-Px7jn;jkU!F2$>m>{?UnQ$330fE-{RdN@gf3V zNCQUN`t^ugAb=Vvt~eEykDOo+4y5<>h|3_#Mw6_(Jzj}*;14$6pS6h#J{U6Nm*Ps` z6|P~X$yTD|OMJ>;%M~Z~wPAN%&q#{9ytBGmAd_MV;B=u<)x@H@&h7bP70nbl1!Iw=hC9q|tg`V{W@@(7n;2Ew{K~cUk59ghimhgF z-X2DNG#Q4mwiDNBwu|Zcvv6+bdAx-k0Ndro4VqyyS9WI))-&gAyL?Psmc?g)zMJU@ 
z;gh&uJwXf=4rm~D5{r1vnT;A6-rc5E=z!+8lS&PN)nJ7HTpmnliUwjJ@)Hir8SfR% zNd9O8Mz$yM4_=lggGOnX-mL*1iJM%rZvT#HoNf{b4j6XEqn!R}Em|0fSt&KCV4yzg zG~-#Z&03^x>g!_Zj1Cy1c515rAw;FVt4^DjuIe5gDx&simf32qMo~c)=i(q}l?Cgu z23KTuQ?*`eVD;acMUsey>vk>cwBcyG-fLXrYnN^7Ha=;;eo%BSikrUSw;pV=rmbIz zV8woHarSD+o@|!FRLZ_=q}gK3-fXk2YtH^`{gkt)2JO!->Z~Sh)NZ?$_N3KLZ41O} zPXcV$Chc#Y?Y5Nd_knHOCdNZ_?77};nheg^y=SaVY~WrxD>=N<_) z9PY*OrE+lA=YB!4*1yPjjM`ytxBscX1X0!Oc0v5yTTk;8-6g1+DvO$+&hl15-5x6# z!5{Dz^ zTv(Kofh+8mi4y@2#g%a?qH)0{724WR4TnH+@oC>sOOpE!)s z7!Dhs^7zqf#>p8p*&DCx@d(EW!!C}9Fm5o9pEoUXkyel;-y1Y>a&rm_ObHxmtkz1c=c-MZ+IWK$3@&+zjqYp^-34_eP_ai4)*XB z=`v4tK3+FE2jf~e3!Vb|u&?2zq(Oc@`*sG947V$( zXM5H?&>q)!x%cHFiX#g^hebK?yMJZ8v8Hm+JYWBNSY``>#14m4aer`l!#{k)N03ob5U>6Z z=6397UG3lg-v0#^@0VoY4}b1`e)5-E;XnUh>VCvNa`oq<;(!0ly`k}!|K2r!`|n@( z&wu{!e}JeUa3H~g1`i@ksBj^}h7KP>j3{v;#fla$V$7&@XYC(oWfe*z6EbSTlHMvo#*s&pySrcR$ijVg62)v8vn zV$G^`E7z`Gzk&@L_1&{~V$Y&Yt9C70qn*f@Z7X*!-MV(~KK%DC?z3lj{{jvyc(B!f zZ{re9tavfw#xVc=6ZotXV#k&*W6rEuA>_!LKZ6b}S}^3jqfetwt-9;s)vjN|j_nij zz{!|;l__6Pp*79^X9T`JC81Xdf?X7uVc^NwYX{R-oJx) zT0A`Y^5)N@Pp^JG`}XeN!;de2KK=Uk@8i#}e?R~J{{I6QpnwAsSfGIiBA8%*-6`0h zgN=3f4jG+1SfPc1Nl0M@?{wnVg&%^L7HtA`7@~w`rS)`FiBB>r(wfFh* zr_Y~2gW8-)RG>_uNRujE%CxD|r%zxwQsepO<39`{=;M@JcIl-}#mU%V1z{%n<(X)vIg)NxzE?q;9;zwl zoOCX9T7$|}aHp7Y*6HV;a^^;6k2FbFS$Tjas%V-iVx-}MryZvS}r=^e9q3Y_Z5gz|&po*~8YU_Nh9+X>+xAy95cLdOC(4oH; zYwU2h2%F$&ydLZ9v#;f+X=Sxt2ko`kLRJ~Ir&*irx8N>@nz!MWYwo$|rmOC{?6&Lf zyYR*<@4WQZYwx}I=Bw|%{PyebzW@g;@W2EYZ1BMdC#>+o3^(lX!w^R-@x&BYZ1Key zXRPtY9Cz&T#~_C+^2j8YZ1Tw{r>yeIEVu0P%P_|*^UO5YZ1c@H=dAP2JooJL&p-z) z^u3b~ZFF}+9<8)?V?DU^)37z&C)8BC=H%2>XU*BwPipP8XSbc`^w(quM%#vBV_o*z zdF^`Hf|nJ|_S}DgrzdHh(yg~%ttS7Mw?%62EjUo4Q-VPGfKHJx()!)IFTY0d&u{>>jP(5r;6v6I7t;wafd3qPx7n7#~dkb@u$p$JDfz3`EcdYvG{JNoj#6N)c|D|BEBT}ZO71rQz+zk7%i*0}ql%HkPAnY_z~9 z*=IqAOp=mZl8!(s14y^1%`a>0BSF|$i`^hflGI5EUs{PuBH1ljO|m8U<|r1pq;ZY4 zh#O3c!aYKXa)QlDdR3aU<|5_FFTWokjwX;62Lb0E-UUsVN?K($4b3>Da_SAEGX z(=9jm+ER`Gq%oaOC1)Bl!q7E>v#kK?WE=Bpo|WR2rP&*+YFR`XQ8i1g zMDeU6CpFHGI#ju4)om@`DH_T|AOdo|Y)GSe*9yFoeU^16NpC5TEdsEkEqUT{pEEpZ zy*7;nrR*a!YRgedmAw{WYIpg70Rm&7z_$RzcW*P=&SvJg$BiaJ@%l*9W)OSJaj#1* zJm2R`<|KF&??Co@rCKDDoE0`LMbHd>k>hTdmqEr5ZrJ^HtBo^jZi%4N-%c*JC`ikCqR$cPe=(cc28 zo6{I+-p&}fV-~Yja?WP{U|%B41{WIa+` zpu-Ko0JwE!hL zVF7)3Lmxr^#dWeTn_?3myvJWerUfb^n%|BeM&)E=re*9C3 z6a)~+0EAe?f**k>M>(I|q4YM@LrP5Ox^HpYr#iA05x~F$I&R)JE3_cYZmKPx`tD;Y z(db8iQNBq_PRtrKuS#~ZdGhYyqj;I<3<>H3!j8;@hWXJ100c?zsLr0FQP3+LX+X=# z__zBr?g6^|LEarygMw7c`jw$&jVl)=vXR{cK&n91DG0GgK%op>{Br(Y%cnka)rp)t z=3S(DgY;IBj)ZEPLyuvWuk@$zJ%}0=+-aIHm89jsz%It_?x&u;AW%K8jP0IAz$=7z z)82jmjtD3cT5ejAb#0oc_fb?=RCkp~l$Tl$p+h4gBzdA;?k)K})qaT(Q6jPq1n2M^^KMBzaD)n#M}3G$K~;OT;0=MG56|{Be`d%~(bt5~mt+nBX$Tl-R7Y01)Qo@ljN3tEAEZeM z(TW{1SEOfI>F07Ir4f)QiABLjG9i7sfq=w_5QaB(NhJ|fbc*sAA-n@qn50;_q!9R6 zb$TR90yT?fh=?F{5&7qYvgdA=6?R9+fgQ+L6Cn=5zy*pBQt=p(>A^d`14N4dWo^|p zIbld0Kvi(<1aRJfV!zd6-$(%AD3gVl8-Sp5+?0?EksAd@0IydRL`IVlqKXtLO^k<- zq{oxWXdc!k6MBRT6)=>zm39}=dd0>$;Y3-*c8e;p3wToje!zi3hLr@eMZR;92*FtG zhgtRWcbGv-yFrE_Cug=rZZFn#d4z6$Aa(@cY%H-0!D1RZCzob6GR_z>7p6mpVODgA zX|SkK_-I|^HH+H#5hGb_Ay;^z1qcpDnJaM%IwuIv(|HRP;19r{O}LCp_nQ00F(I*1b`0SmoXf*p&Ys~{{xz^lzYDgn1?2g6S0j@84}T_qWZuO z;s6fc&YauNaRQmLfCPCD0M86pcX)ve6R<> za$tj(n^&k3esGt`l71bddS4hPT_`d#^*8zTX8$=-1DX^H_5ce1=9;hRSy);U`4<7V z5RU0-F{+15rKv#rX)?*9d6BvjUT2gTld1psXyeJLJfn(xWt?h8 zbflK3J~0-M8WgD-F|Ha_`&FD@8m1UCks8Jqprxh-!E?;ItGr1z!DQm#|E*nT_GjXTv zNJ#aXF~{>z+&U)C_nP4P67vdB(Dtt;!LY}2Z)x{a>3Mu`;jk9-L!jh#Zv~hkcTfmB zhNM=W77JbX$}UjIUwEXP?k0bOh%>4Ps!6vS!!>M;1W*V6%T0 zC8I-pNrXHmnBQ4yjH 
zVOD_45Fsixe`|GfYl>A7L#y~Nk+vdT+ch)MS&Q_MiF+~=c}|5oo_7?wCD{_llu)UXsf2Z zeL=iN(Oz;y6yP_x|6){86;KJ8ud_sHUc;Xy)p?htex&6{@LLk6##V2pQna;e9LhC@ z+Z&%dPk<#bg-B&BHHJ}#VO^>!)?S5{NN*byEa4Q7bypzLSn%2tFBFaS0TK{ph^*|A&C6xJHHdfGfcxT zb;Sk4k^tCNmdp|X2KLA1-;Qd5L&=0u4ywQcqULzA(6bOhR#$!@lLD9UoGFqqR(;q?A{Gy6nWEAeD(9^a!;PhEKTat-5i=XGd z7%8_;{UrYAhH70*)toPL)p=?e5?XC9Wg%Y~0aJO2((H=Jas^cz`AGvsYUqb+D?!Vv zTbP@>qaT}A1_RB>j7xTuGQYF5*t5>h(AQQqs53p9=>=4bSdHxj$UEs0?Hjfm=w%qn zJMuBDv{9z=X>k56YO1(cP87FOfi2QxV&s0D(%%- z)!BYDmO_%40}OoGHDeOG*mC<=;UR-N5h+P#0S^G#?`tqu=|s^(-UTCHn!OwH)t{KO zX#_=Ao7S-n#apq&uPp(|0K&gK5g!k*3nkbfp3^(s3oz06#!_xF(o^Bf{5ya(Q`ZfE z$-B2+$VY*I7$z0oE>Vcpy3ExZu6z)Vrcq!77B&v?kO=d#EWVph($zLz0Wvk=4pFGP zC0jo3PS?8=qa}OBkVnS<&=W*K07mF~77zh&{uay(-={*4l%6jn9TO-m5GMVN2~{*~ zcUN~u+r?XHKwtq+faly(4r5FJCiV*K2FQ#)IV;4_G9E6MJ}>fxS|`Fg0|8iDu5hAi zY9SX}DdrRTRZdUvyggw7?(FJ-&q&mhe1(4x~Qt?C&X)bfN8f%WT~4iZAu zZd2ElnmbpyjubKfICPI*4t#L$ZXPF-aT@Om@vy=d&g}69Q^7=Z(#n(DBY|6N%6Q$) zQfFm((FztW$46@92Y2Rz^PXh~0V$<%SXAYmisJK^T@V1DGJxgI7%}cAPZADhri-;{ zx%>8ttxG;?PhyY6Rx7iDaA#b)3wO2)2Qg)jnK(`^^bvCR2O;pBUg0CNfCE8OI&}8H zUJ|gVnpB8lu-ADZ4!wG=LP5VAh<_5$lz@-4ATy_K%1K~jw?@Bq6j8n?TK$ku62*>bVrPCk}7AY5LLbZNNI(G9eeRl@> zF+Utis}lh6?AgBx1q(i_@Xp{uh7BD)gcwocM2ZzHUc{JD<3^4hJvw|wW+1_W|4c4C zS@0dfe+XBS{7CU%xe68pm7{6ULIpt;O#b{?%R()l1O=ruSPqC6GJ=jGjrdMsOsZ8M z+AQa^Akm5sYVG1&-(8a-4gRCMm~LadYG)R7 zO7o^r%@r!&G%bh^0X`V~@Np^#&``;xZN4LaHo9xrs)t$>=Z@PR(5IhU3Nt7>k}TV= zK?fg%NV(+%V+}$JFT^l7#ya^aFuD>mPN0Oy08YAvZaR@P@&wT=tD1JfAOf*0aIb>) z2#^sgsKf#Sh|u)Quq~lbwCI)y{Mds)18;#!Dxeq)GD;~wlJKFB23qLKDYxXZON}-( ziLtsktWYnUVsogL7AgCwr&=Pwak7F40A!bP+LP=ln8Z@#86C^?QaEuQfH8tQ_~2vz z7o?s!tRyF^3^dX}A#LbNqypqB0M`#yE09q(qE5=n0x!Inj8bZ2Qg#;t2&hGzS94lOD#F~NP}z6??Tf(;8QK?M zgAcCCxLt>97f91on@23jRXMOs)UhG@1Zvt384iAeLrq6ve^0)b`!UUB6O z3pSj7tRTIQe?&THTc32uv2%JW3cT>9X~Nc4iN*T00)4(|g@9Yi=|>*|oZ6`}k4a(? z2$FSW8|a=a_Z*`QJ={)ix?c}&eAiMptHRdFXP?*KO2o3Gtd4>{G+Ise$iJUtLwl>8 zz{3eoh!==(RA62)Y8(MUlgJk zg5d0fC*;(J;wQgTU4|!`>DcNvvJ)0frhg2X%xv~&fd@Qbfv@6&Np3->|D6OWa(V~U zJa{tLWM(p?;RvJ5VjKmb@Gm<$0||2#^^x+WyRISgVLqD+gJ27{yjAqd0>G9*;#i6gU_ zUe|sXkvIy`R}t8SAbjXRh6p7kCJ|Xv!a24@GHWN{2!uEKG0!h`=b5BzrbEi2CFn4w zm;ps7bTA1bpQ)sw5CK_uV1go&lnRZ@e26KDa!ZP=GBQo$Wrh>sAsLNDNQu= z!QN6gQahQ>?K@)OV;=+eLZKslfjd5GkS5BF!>VT za5il_J*Gmc<3_iRwX7vX)}LjaAyEFq$vjcfV8L@P)6j&LgPzC*CDz-$;wvx*37b#? 
zL4Z4W!y9u9;=v$kYeB>U7Y$3x&^Sms=t8mr)7^-IfAbPOuTUV*oprkR^FIeFF~eo4 z-AuY`!!g>B*zh`bVk{yQZ|z|ZKn(STrZY4Dmo{)MAqlOIDY=+$*9F}<-VV6^YSqRp zgp2CNICP*#<0rOB*H0r&uRRIu)(`}m7%>|L01*L0l{w%LGN_zSWkEmpIZO|(dC<(a z7~_0OZXdbQ!y$sDNxGMq8ArNsK$eoF+xp{ijQ2l$`0*=RheSWONUI*e6Mg98Jui0< zOn@U?cTy#nH$HJdCA9YHs1@rUd9M)}yr7$NDCu{PP|o+t=00+m!>XjTjVwmj;Aoc5 zuWeNY0zs?ZP;1~$t$;BUk<Z^eYJhv%1v(M6ATc(F9=!TMp7&zShOcb+b7PdzkPJgYCq4_ zjkr`U&qeqj42;kZlTW!g>1&Kq!#v{Q@a_%s0#%dm<80q1}U_V zQ6dOzh;mq!p70@-qcO|!kJ6*Lb{s`wLbKSF-eHPKtD)S#YrT_j4&;E z6D~|lms$V^i-D2KNtm5dt45;5hk%~==&i;W#=OWZ$B3ec`;CD(HaU|;ZOl7KO2780 z4#8MN=30qAYz)CbwKXcndTfmb{F z7%8ucz=<*`y(42AgSsUBvdC>ri=U7|Dcnd`97R9;ymAb^o^ioxc@)V~AHhH?OcAsI zBe{{JO0|f(+8L7F@`vWB0G1Rp3yevLNS9iIh&NL%qPbZfebx{ zbBNX?$Jqo=wpgl>N~sE&pCjz3Q<dI0JrJFUx^#}3RKY|1lyEf77i_3=Lo4cw(il}x^5CEK+6na3vnRlbK+p$%ur;I* z&BUmSpW`|I=W@PagtKqDt*!FX<`}rkj1qk@GWWVt*Exv+^a+aV;#dBc=G5Px+ct*XYJ?Th$C*~$eR>X zJvi2Y85KIbR;5<7=ntBUiS$qjGVp@t6g7mfzMUi~xw#}Ai_e`zY}4(6*A!-I(&6j>3ox4-BR zJdHkHDLn%uno_e@)iezJJk_%ps9mka_{k}fMcU_}4jYn1N|+fyIZ=9S$eQD=lH)ni zfkzGG5~vW(|1;K}g_k}3ztKw;hnkM-GK+KL*Q8}z=P0^Kt$-&0EPtR$erXA!*%XFs zIJF|$F2SQ%=})v3v?~k}>9oDgx-$}brq8RpUu@gQwGc;qf(c#M>~kt5#YkX$&R1L( zaj+3HdyIqq%Y%qnjeX9GL^ueOB*&PJ!^l`$VN%GI-3E~vMK}t72nT)ehQ5+l0tH+D z)qK3=la^aZl(TU;R6NU1&A!qFA(OkVNW8eub6xRC2zfl#*~MPrI8N{w3i|Yh#q-t` zwY!DHnX9|rKp_CBsRca5fHNzsIOCx64GtSKvJ^}UwQ-0>i6*);$n5oBx5Zn+(v6`| z6QmKedEJN`gwB6SUqJW=-T(`zs3J`I-MN^<2WFBd94241I3arybN%0YG+Ej8TJxw3 zq40*txXD7?68V*s&Ty4G`905g3~Dl5(=iAWdWRvhkpmGyOp0EtB?E#0BcTi7zJQ){ zIpU-^igFNB6%HHBg%ByyT285wTaX$)xC2|bgID1+Q6sg44L)c!2`&&bJ~)v73}&tk z=1)FDtB<_dCGLyhaWZEGPaM0gn$QPyDANW@)(XA|gZh^Yg$drMmwDNPZ(vE*h`Y0` z%)%4nU;GIr-~$8dj3C0x)cv+!486P1mnxRywJ0&xGE^>;-MX9&_;0;^=2MnOkerVsc8Mn5x)7B8pRg)V&IEQaw4>UH-E^OoU5tC5nk5u*v*s=+` z0%W|yF#&So6u!-gz@&e&&xU-Lb6W^E#D!L{jd1t}0w@#B3S;Kgh|zOClM_<8Apn19 zg+1s8SmDXo5gng|Fco!XzAzSHhT2SZiWapLu#61k@glNI{`OZyC=U4Ctbsk1MjX0lpu$M66fL4q4+97r`%!96G zw(4JFOe*#)4q^&Mg`nuO%Hf~c45`^PJ6aWG`Dk$Ni!<%KhPVYKm;_is=@81o{UV9* z$xy!>Uz+BN5=$Sg=|QNo&&&!K*fn>z@kHVHRuUs-v8FUHUqjAW+V z)=UB;!SERZAOkc02V}k}efwp7d!P4FiMxeTu@-1(4jHcC(Ys=;v=+=}1_}5<(~48R zopz`I4ePrhWP5oY39NtvkqFHiICm}%as$%6^_k8PhD1P=j6e3na`CSk8s0b3g-ckF=`Aa>8u5Yv;YtX2w$grRjri=?}v?YFq zL`Vc_;sP$n*Vd4ty>(Zc36rwjNW()6uv4&`TVUE~Nv;lDP&T<7Kii+Qz7W?7u=)u~ z^Q7LY6UChCijTU5G_6LCVfnt%D|UYv2xjzA}-hgI;&*fhU7=yLgt7#OKT*}It*3> zx~FA}o)#!Hl|~g3?h+DnJm(w&R*vivjYBPUSyHO4a3JRi!@iKx&kwdi|R*$aX z%a;OmuwMj@NkC)v_=a!jj9Ulh0G760-`r`civSAWhe#P?pQ&SuW9f5fN0CR$?9&^( zSZ4nV)i5nR(v17|J`Ia<53>Irtu{=n8%wxNxKYae3_s4 z7WAGc4f2F3`tIbh0@TaJ{q&aS(3}_&tpXdm z!OE$+AcVK6YvyKXk|tr`0ww4Nf6&c4(q2rD7C9vOhJc#7;=H@n?!Fc7spx4&H$=Nj zdW=|2^41S&t=|r}nn~1|l{L1=NRa@)-KSdUhuDBtFeCOyGK>we&^qWxMs=~QI zLNCFWEkRQ4AcwFc&gfy%KGSHyI?(hF2NDFUYix%Q?R?1+ju@kpM(MjEf z4gTinHG9cJXA>tTqzO{sznl)=Jxd4^A;*sbW#;_% zQfAAc708fzmh>jns8Xj=t!niu)~rxTma9;dq=i}&YvHuXkfb0%<#GzL;MJ_wxN_&x zy~&g%T!-T7LWL-iFJOg6g?8#0wVa^;eij0iL!8&*OQ02mPJViGrOt*u4Oe&v^XFuR zDiNwQx{iXAC3b3?Ee)}FMW_VMfB!s)&v%@p@=s63heO|JE$ zN~cr>g$oo&(8;xzLCa-Yv13cqpSdF}+_Y#*(4BxgZ}0v+{HlV;zOB&qZLQnvLzd*- z7W;nj2?!rYOUcwzfCGB8RA&h)xDa%yRfQNjZdKsecA6C>RdEzqR@qA4DFhQlhCTBV zQ!0JdzQHlr=h3IB*I{GN27mis}Ili6wTYzoR)>T=9M*1nJ9eLpu zoIDNG6kQgebDeMWffOi1WOzzZqj)BSR7W|TNLfcRrC2AZ!3rB+E%eO@AXmsCROy(p z*$3dP!%92kcov4qnM!>5S#En!6SkHd9bSaaO2T zKsqEVz$Kg9nzOTcW$!@$h|yMozW)5fY*=od46k?pRwR;j-^L{}5dZMQk4Rn`wcu)P zd4XzF5cw1>P%3TI({V=qbmBl$kIYoYH+%i{Rr8T)Cd+FUr4w5voeg%d2r?J#e1hmx z0WN|VL#kFUb@D`7bEg$%xPaAiB2Q9cbmd(v0jl^=?k43lWtJss?ox#T%QohjTZHCp zYE>#$T50iSvu(SPuKAl)M2);R<10QLPW3 zM-#}MAw;5xOC^sw^wB$%a+xJb8rI)yw=8zer%ONNiLu+}oiknlK?&YhNlCjvO;+q; 
z;H`Q0dtDtq^U;a_Oy_mMy42+ z%rTWR#ygBb3Ho7Vgf=0VKyXne3%Ww#aG}BY7{-Xg$w|jXSR9?zWlL51NuhYsk*0j;d1gE$ zsO~4ll+kQ-3Y=Gd(B?oy_NY?-P}ry{u(Z)|2qr74lt{c$f$K0aIg-6$r zlN8KC3O5n|@{n*MBMc!WbNQ~Ll!YRQkqs!J^{n=h%6qy5m0J|?3`H0N2v3-wRP?xz zxP_x11Z)o;6H**0Vx=_8Q&Djcb)2hsP$`u8jPlCDLQfvEozv=&F%`l&X063pznlpl z-RUTF%?vw9LM5QA#1IBHY*72i$3C*6N?rwrf94`&EDcu|@|fx)`P3&yg%wYE*6cs_ zS_2B8WPR(?uNlqLPdw|B z<65O_V%j^Sy~U zZm}Tm0^p|87{MIKE45TlDY3PJ!sH109~ zV!cN~nIVEg&?9TYMcR8D43c*|L4i-4F@ph$TnzKL$FAYXOSSpoo0#cjuv&?XrE&`b z++rep5U@kUYEyv_Lj;i6#)sJWo_~f-AWH&kL+DBMOEP#wgO_U0H4p+i4k-H#Vm6FD^-eO z3_F5h*_rey~`&0P4Z1{@c=*c!2;gk2Z_m4qJeM&0Pmm& zt96126Qsi1_116&fG~_<=;*6WizvG@uJMNC^C+*q(0pv>nCeYfSe51VMPPvef%GG5 zsyt8yz;JnaDfMMegP8@cVk>{Jk!5G7-B z^s3-Gz3*m>jaxoGm*vQ+sCDApy#hZLYXR(jT91l2FhBsDJlkJyfk-F+3cihJus{Uz zu!jjCu?~75+~=>MHNFK!>mj$y0__MyEdG@AFwdFZe!qS6QYKi}9ECJml6#w!b|7pzfub$HfnLGdLmWf})S0o^%>Bq*6Zk?lm_ZPnK|a_37%132<=9z>QthDs!|V}H2R>m-vEGn` zAnAY-3C^8PloV?Lo74!zFR|CNoKF3<513_G?Zt!^&c!b1P^aL=Gk`%8EP@%-!3I!) z2n2x%7=jVX4{_WCE-0ZAN=0ZvAt5qENx2(*ut}ahMyp)mO>79JEyNm?5i>>7{Vl{8 z+D93_$Qd%o8hxNb7?Pme5Ho;*25?Fppg|TuLN&l0YxvKL%^kK#ML7f_Mf^i6K?z+k zT_I*1BD#m@yxRb^MY04&ssR;q_}^qi1X(EF^pKH_g^5GO%6VBuet_B8;Kn;p!Up`w zKGZ=ryaPUPA8UY{#wZF^+?to!9{ibCC<+A)_Tr@dVtbhXj`z4sN@XHn{0r9*g;_j< z7rcX?d|WZ^lLryTB$)=Am>b7sb!QYQz}Iq(!WtovaZ$5lL2L;LKp% z3HD@8SU@;@WBR8>Sc z^hrGS5u#LyXk?AJXkrEELqCv325N*_>O>&aPWIsc&d(Gjd_11_eb!!{CW}}h75@=)e~J(eGj!s4B{EMczil5|WH@RUf?u2O+WJbvUh6_UHT>Mmc=uLE-(0W`NP{gBmctdB7 zVrDpGLRd#oMq^8rN8Yq!NLIzUjOmW1DPMpRYt4sK(L`H@XCIO##2_g~MAG`b25|uy zPdFZfq?HUpjZiHF88k+gs_0jZ&cDE3EGXJ@>Y8<4g^b=MfzpKBSp;R|sG4r-ji?Px zKpjm)QU$b5-9%_h6;>>P(A9zA`7y+8(#XuPBVeeGY%xfPsuGkmrv8=NhAKpNQiO%X zf;XrI<5dKeN@iR(rgI=6r#5Rw^ypa*V91#XI_LvqC~I0OCAxSa8@}H{oM?|?2y3)U z)*MGqY!eG{#8_I1vUY_f1tUj+j&A_}Yb;pci()F!IfsYM)gunfyg_I$z z6zEiF>cz3_1_CV1f**JeU`X%?T08;Ym?~X}T~kn4g^~4$F^ct|CtAEQI4(v67ftbM$kkA5WoY7MDt;yRZ8tTzN5e5RODc1c|}E? zw36Rm>ep7T-1rhRa4~Ow38-6p^fwske3wv&Kr+S}s&l ztc_6SSr)AIU>cAKBC4R7qEId?>5Y{fN9%goS2^RdsIN~o@7#Ifu?Ejjw5?Oj$wo}C zMp%IEVnCyMLj+*JE|9F3Ehk#i8eEZ6^iHRw-Y6}ZDgIJ#FUH9Akc!w`N^IcB()CLvWZ?=05^ovj#6cQ z=}{hudNl9u*w*enR0vNa9FWayw=_t2I+{Zj4VsYA?JxnLRbH<5H89^13 z403)fhCc9v6i2bWkqmVm^D=VHtkmr@wponq4{}iRjo~i~^~wjkhsY?w13Z8zzd{96 zbP45WTX+NUZsM!V*j-Ml_nG4v#@0j(;;CG+heU}=>nKa(22`D;lXPcER8wy>N!%`D zY#M83+(n#uO4}v~TC|z+RwD`-b!9a4YGfCwRlqBN10MvOu|84-3@ch{V5EleWM9Ob zt;iY5i8fXLCxbi;l1H#bgzYTX+M%Z1jUI@GDRD z<$&}~aA9yQ)`5m8(moKaUiX6xtacZ1YPkusj6|`aps8*t-IDK_=?!ytFk$0ldw)sn zbnJcX3rka2obldgsfUz=N4P|eXamJZ{PFiBwSrp={xWz5G4UH6GLL}Vi-u8flmk;d zfkxN=&s`A3lu(WPmJC@GOK0i>dj^#GJ%l-139lGV?SY3Lqm}bO#j%=njVqN;V+D@O zt&Xp6$JR$v&Fcnd1)B)MXi!4X5XZ(~E9T)-=9Ed3H0pjvHb@?LS(7O&Q)6&C+EV~H zp&8CmsmnQ`hPljFWxaTq3lNZXcc05-;s(aC z%4(4m5yJ|SIC(1)USzu*F~#dr$s-4BPe=Qap)lIUyI|;Kfx`?}2gT!o6chI*Ds*nx~wfQi%E~p|0 zE?;ib#|5%C(0){*GABJ3-S1jSpyvp$bwKSo-WZ{K7MJ~Filbw~`)2Z-2%lbJ7tfQSQ9u% zX8Eq{{5nGkXn=VNw-x9g((sJF)aWb{SxO|QKGUy0RkZ#gXJZt4L8~B}q~W5hyS!SE z+vET`S^ltimc3;t4t9y9XsCi9u^FOr@ukp#MmqtO*okUXk^-HsBzZLd<|7OQ2snAN zbZA+#XVIoryOwQRw{OiNOqsKs1(UTb)Jj>a*1@?2{i3aiF=GXj2>Z@mS#EI0XJnqu zwX4|ey$kl5E3)r3H$f2%3ykfjC>cR_Pii)q$~9f8_H3!ww|` zfH2HFoOp5L$B`$`{0=ouybb^1)yp%mK)}$=4$hcZu0z9{$p7GHRzA_={w86)E+WVs@Q01ZK&Ad6G)s}2zVeCeR?6I2Mp%R zP{R#5>`+7IW&_GQ=kjvsIq3c}NUou@Q?a`d2Lex`iYy{;#p)>kG7YZ!;Co1@20aW? 
zBFknM=m1J~_&C}L1lgu$B&{a z%c%F*D#s53_^5ynupWrOj|T{N4N*f;O;y!ZS9-@N>^>T_rswn%WJJJZ4X;M>CL0t@ zhy2lpAG|zj%&=T1YO&C?kY&|bkp6M?H6Y1i@~BfM{STtj7+o-_M{9!YBXPdb$D0Vi z+T)G~Zdvr%cj1jU)n?hE)kcC`ToxiaJ31_-LVwi{9dpP38xKW&_Z^R+iV#NYy)dyN z17dm0DsAG3`1`5Biy?&xV3-s%%c+dRRqY?%_NYJ~efkJh9}B#^Z9s}?uDQ5(x?Ppt zwidEAIy-f((ccy8f{{;)EWkzBi;mrSAby9oE3>F3{t-D>vCU9Xpt4T1Kd)utc*xT# zI7;Gd%{u#_nYB{ex(fQawjNgUZ1L7mf63sIJbF z-F5Sb_QQ;HkgQIQN3m~S>d&DNa`niE%#O7skVIpfjP+zk8@`0JOIwZgT)g)R!Dct=gQo9-gu^~svinVr#6!V;?Q163X+Q^^?0wMh=jfjpT|qr-c^toaJ`4 z0E8b1qLN2dQlAw?6^K?vIvXMfBeyhG9?dx-bkK5>=AVi=eHnEB?|7DugRiSdJ65Hr;m?qDBVUUFg8B8Do zVUmhS^_D(c9r+Y?m<%Qbp-QF6-fq*NG!>JlJ#8U?%F3*TLeUss?IGVNYF53QIQr2KhHNKlEe#JYb!i-|-RG-&wTXEyQ^=156rcs_SV5nX8k}mS zvNDMuLw_jIgHF)0twj!CHw4=|_T`Rw$<9Tb)|jdWD;!}}sX+Gl*m=sfErMI!OI@2N zgk<)3qYaS8o~Sk>CB;2JWE*S4hTSApGFshCZF0e@HRNh&yoS+8Q3q2CbDCv(g(?T* z`nAH2TqLXXJepoUay@@}=yTzn2}V8E|4sbdjw@_sid)&5v6oEuHY_>bFkc0&0NN&8 zNR3q>=jUGyhqo$S!%I;IqLG6Tq_h&D4vZQDCxnb=U?v>YdkrQ!{YG}f&U!GkaQ7$% zp>2rE9I=m$$jHd})Q6kRE6!T8!|rIei!tG%g=-vTnZ~9Q^m=dO4zgJM%B5&!`LOrk z>$u;p@_i`=sloOuB*G+4%CSOn*LwIZa2-h~0WD~C3(DXi7nsP&oZvtD$=VVVg}cl- zYBbxNXo@jMgYLzn_v9&_Kk;PHM-!o+?E8?EQUtzDLou3(YaG0g;d_cs6j`}DqPvdL zJ}J43k$|}1Zt6&Fn9^3Gif5Ec|D)A*sjf4WO&#nn>N35o`!MAa($X#u1tXr`8E(z| z+mIB7#zN}L6KEUk^F_-l-;K#Z`^-84i~A=#U1l-)jL=Fx#;Fd$thZ0SAUg*UQ@0cL zy}@W6)X)f&0@=<*N+eGhQFzxiStnmt~as0ceEropR(c)x02%k$Nwpm-Q0-X(*{QMK+zef}s(`#$vdcyzw$N z*LZ8>T+xjsS}A1uxQ4SPcj|Qf-%Z}e7dtD-4}|K>Hpt&EMORh!@Og+E7CEW+_)4eMK9%e|f0GM2i48dyg; zEq4(cX(Z>?S=yHcux09;gf4jD33Zh)lbh@@BOyKQrFYhcLFx9vY)(Iuq4ZqlPM2Qs zU5me+3_AU;s-O#e!#7EjT-Y5~*6DareIsPk9}oF55=g*Rw<$13MAq9#!3#?u{qQG# zn?D0o_@?)MkRa>5#I2rs!1dnVL#&sZOI1G~t-L&0h`rpGX|mSw2K#%j@BDQ8Vy7LY zW;2d;>g@B1u}IEw8c`l4TAuKa3lBVMoO zAn-fP@BQ{A%@&O$|NezB@T>Pqi;%AGg0jNweoQv1j+qW<)>2}~q+(I9j)y3NBow4Q zPDO}7CI+|Y*A(yqnQ%DxEh0EDdJaY+3gRRNPW?`2{&=Zo`l|_11UF&=s18J2ZczE2 zZp6sYprA|fk_`0X$87ee6F9*c$_5EXu*ghn^bpMp`LHIQP}BM%y&Qt!Tn`Iz?hgCV zvdD<+SVDAeM<_tU_h2UnHRb>zu?*YLi^@ZqhAbI40k3R=$r#aN777Uy@f2^%)}+Z)kk4&y;#+jEJvO5$nj!)rgUQ4YC)mvuA^{8~ zOx_R=22F7m|EX~$P%nH~5wP;21KIBG)-UB~#DlUey!@}^BQSPkCCT=%8u<{Hun`Nj@H_0{dqj*QWHGr4VjKm- zPoSsYz>Ax#0?zn@!gz5KbIN&4W17h70Fy77mav8l?>5e$pZL*>XwV*uuL23OAY%@c zPNd}!@*hyhA7X;v@&bCM(B{68l$y=9&_X%}v5jo;&_u7hqJ$CS(IXo|`VP=Cez7DY z@a*z~Wd5%Ry$BAYk@#kEEc44Evd!&6;$FIACqd{BF@h(AG9^L~A|s7hQW1PM&?x;& zF1pPg|2NVY+0n-W2^b^KDj%Z)qEcMwQJDCWrc92awnCbs(d=ZBEYt1}%`2i>&LpN# zAb3ZG$`ZZWNJZw7AcWGj5~D9_0SJa}Ciu-eKCB};$_2Y=-smk!L?kM*;`oZ_D(YjJ zD#bDc2NEmJ4bd?*0PGy?qn|RXphmF~xz9E&lO|PBp73j{I8&ECQ*^EovKGcpV)G%* zs>?*^5^QBwstOi0A~r9=B3+9waS=LY!`>v39T~+Piw8K5aVtndH*quj{Gz)s#gM|V zrhJLcq=_m_@wE1_GN)6~cETb-3_Az0S>7m)I1S4nOD>p(CmwB|l&VDf!UDX39b`x* z|6VgK#UnGrQNxaI``%*=ok|_^6L;DpBtz0bT%td3>>vFy)Ebk})W!$l&;dmc4z=<@ zOYh36b3)}WKoLfkYGrzgtvf!P5vu|LFRe;Ujjr!kn`!$Au*xiawWj%HCiGa}}QPl67f|E3g9 z(yK)lQgN&k5dp{bE~0xPCp0_f!`KclZOc}oYA+2hEZMCNVPiKp13TKSR#(+P5R*Uk z$5`R#I3s2xCqk4`O~)ehSXB-)la)dl^&|YDAN;|pEP{9VVM8aiMLfb>WieB^v?2N; z5MqHJa6wE#f=pi%DMLv8Vo|VY^G5x$QYfrjq9tT(6*}bgNN`mdmu^T16@KF5Y=G5( zo-D4I!zF%`$^;hQ2(|+iwR5!a34=xG4ujuPtUJEv71K{!YbG`GVnRB0bwssHTSc4_M=H7vV5X|Bx)n-jLm5 zYG@O!XqokEleQu8VnV`!%QRy3Kv1mgsbbMjpXRRFTCZVVb6gX`PgImLI09)}Gr5jV z0#h;q-6!JKc2&v9Z6)S*YGZadBuE2Iv+_1UrO7rO@POpP83nh-uCa50w^^;RaaIPU z1fj*o)p5BDI<&*zWN}aQNkv9diDbLCik0ClP8|w84}|owl;ga7B;1jeDWd?|J9TnyK5SuN$Kz^ zGEi52(bpa4)rVg9edAMoxae-{u*!sDk+L#Lo&q(>G5eC!f0d_TQIGDB_Nfllcvl87 zHpv?p_$TcPFVKM>!oilHM@*sjmo_yP1%V8n;6%a7MpBe>O>;}jlp~bV7BQ=A-S>9m zR&8OpFjY`vbd!ZQ>xSu;E9)#qhYT@u5%GIH#5d=gO4zinoXOzKw!g2Q^Q9+ zdR2axMH_~RtJ&YY;r_naw#p%sJa zmFaq7&G<g>(7W@0vx 
zv!evQ;~P~9)`8~&YNf{Sm=v*MBoMrrjiRum=DLgnG=#Pz(%7gXu9=++gN#ihasDED zdb*XT{CdHoxtGJxax7lTBO{wKH_G>*`FDSAqRM=kpBpqeu0_DeDl&8sb<_kh6tsUO zY@h!FR&N}~*IE$&<$Y>zJ^F!;9=RNX#hu#diWMu;BsNceuD99baHvDYJm_t}31_^Z z?p#SyNAtNk)2H!S3YGgwZ^mA220lAlxp=Kj+t#bm1G4Eiv}}jQIfKX^De?#;tC^El zI}$qe7$W{B!~0xJigvHx51u1pCsBGJ|8N1Z!T}fV!qEXv#l&bK6z6aX!`gu>mWL8v zjO(3ebI`dQA_QApwHLS1m^k3}@Em(S$^$_}uQYUA`4ngF&oZej_6>M7fFy{BF|nixfl(uqJ4BDgW#h2uGDVP$TKb+jW8q;;i1zo;0u zgwsCVk>&}ijO`VC<U(=jmJ8<|W+>)m|AX|KS^U%86@bcZf zmGLk+(jQkHElB2Cri;N~ZFCi+HqUCb_mQ+SpF^kvB~7Hq*Q5SMM6f_zS$D$pt@G^~ z2h!D=w%!Exo$c-Fx&i>g7@1E0$_cW7u)@E86&gBx2r;6>i4-eZyhw2&!-@X{)iPMF z0!WDbyaf`((1OB^wJg+P|0qc0MvOEqcB*i*!n>I@d;0v@Q{yu-1K(lP_>L&f3Pi~m zCA#z9P^VIfKKrNi*-)H0eOi5H6sSe14WBh6Xtsjcv}VbYNlVrt(VJg`O6}TEX;`s* z`}+M0II!Ts4F$z)sBvbwgbT;=G>lMCxq=6A`7=D(pyYoo5e5pGxG~0-pecJi*l=>= zgOVL88ps)QLxeAR626Ta;^K0(dP`R6&|si&i#IbgD6#@(+qob9>*REA^XWh}G97pn zsZzQ{tu9PTW~E;6I-%p-+OsKk!bo#EO&fk=c!g%?(|Rtsw?B_@(_E!Fal7f%2Igldab zFd}mW&Nv!MF?rNmLoJnPTw$9XRNIVU-6SK81tzASe;F+_6;4@Qx6*YxO@)?T=5>=*+Vzs2ET!=1_hUjh`2|@`aFbYCcV;H8^C7qrse(vTnPuUJSY8f^*(QL`ZAlZ8 zZdxU4w%Tq>|C2!$sFon4o$3apju{4WnM@&yo7*jbw zR8X5ztcA2FO0jZF-9O)k7~(wpQ!V=FGue(I8MbHz z=y2F!qK5IWdNzEh&ode{DZqBlbfRXH(WsQ!1PayE&JiIcQLP&3 zP2NsdKD8{$7u5|XUp%!b^Rg`ii&%Yabvc(&ShCEfbT`Y5dFIU-6r#|D@rIycvH_f- z)+G%r|6DB3%Ij0xNiWCQ>Iq`#1<2n1Yw|+gE{dw=3HPRGYoa0N_Phed?fZ1!{fDeX zLgfvc#vnK8_k8c5_Y7S%sVDeP#uMqEd}TU&D16?jwI0X9=7nYQ?!V91w;p+poJR<{ zEmDJ1`?H!#69fFNc(JZ04a-p0bf+8yMWkK-qJ%NB7L&Co#{#&(g-Q$tKZ_-YH#?FT zjDmC~no=##3;xjE0-pyJq`%(#?2t|mj>st|X$hSg*#Z#52bQR!-KK$V=m1O8N4dGwaS{IoF z|AOm83Y22${^5n$NQPjqq683Br4Mfar;1Vh%ZuK&FqizXVn69j)F6_*oj9?Ng3-># z)Q6^B(JD+W49``dbV>VYg>g&C-hXtZ9>XmUk$DMATJSO^=LFF^ciCYN@A4&TF>;p4 z^B-_JWxvjBW_KI;M?b0CvvVjsJRjBLI}HtRWdvRm8f4F2~p?J>{^tRCp=*|6-&y~ zAz;Z~DDkNkesb?vOueU2uV>J-fT>KWlo=)Kwm0N)j6Uo1sads!oJN(cU6Z*QxODd? 
zXNCwJxHw}^!Wfy1DwB$L!fWV;CP>+B#2^0Xhd!)HOh&Smia%LQU_nTj6T#JM7BI%E z))LdRrZ8m9&lTHh%Ffhbp$ceo6grZ4TxSma7I!B2gNg8#uD4Y@swPMuy?C?9+)2CLx z^qqY^)nxwUVSXIaJTL`xStl%FUBo(;h1QC`94-)Y3gQx|ZNxaO0obYX^bddNgCEw} zBOfh-*R5%!P$_NZ9Azxh$gDR|{s@OPV^qR+O>nx0w9Dp}L%QC$q!U0uZ-;;S(EB3p zh%S5~D^JGC|IqNVtUb>TzqCpHJT+K~6f;EdvXQ;OX(ok>7ObMoo$y31J5NF3lkYqo z;gRN0ltC`gT649;5H>N6Dh)Mof=>RF zWJ^>5Tu_V*KqxYv#bO<*a*R3atgr(A8!{MP_P4B+Z4SxV7%@MxvmD9E4ZFIMq+Bal zYT{UZ9(n3y1LngW5#ef{R=TuWheXy1^oz-bq50WWZ4PRX2wm1GUTUg$A}y7sFj!u) zJna@ndEIZ%VUGx>Ef81Yc14fE>oMe`HVM#ZqNkhm_T>C*hol5j%5E~N15Cm)l=8^ z+@XgZB)K!yZvxw~S<6p*>bl`yM~YK%sOIh|PDx_o$l4nc6%5b)x4a(x-Cr~+P#1=a ziLMxGY@Y4660)O{5%Hl%gabH*_OY=v8 zT9gtu1tXauUxbu%dlh{!R}qf&K_t>346|M@@_~0UKb@gb2-7(!kr0>{Dlh^BPjD$b zLL=>!WDbKBUO+JwNP0U}e1cUhbpv+46*{jcYoYNa1*T5|$b<~na8Ut#=7Dy3V=DzU z5qps{y8<3VcyS)*R3Hb0c$QWlBs(wCY-K|ekkVamq;eNgC;-zK-u8J$(_PVJ8!z%_ zg%>B#7K7(^8W>UwcXT5NG$WnoF4lw#{QyS)(t>3uWz#Z&6_FU8L{vo;YeM=Pi17-?lXATE;RcMMkfk^gYE2}sWj^l6oFg9 z5H7P4fr1WKBvc{TC%?FoFh(0J$3`c0|BQOV7}*5~7=QtQz-|$+jn3#PC^j!K@p8JO z840wFM}itHQCEEyiAp(%;6a9bW-KvDBHRODAmxNyffwlrb~EuMy4I5sfgb%t5kM&) z(&382!eBkMZ%m??co}lq##jqsSp9%DojGY$*?fc%PEjK~Jb_nhdk5_;mBox)hSSz-#I6ipLv=ZSCVV|tdzZ(5c~ ze%T>Y<#03^b`{rkXHjsfXoWh_oxmnLBLfxS0hAq=Zx>3T^tBtv7bPmj50KFqItMie z3XFmgPCJ@ijwgK(Q793B0pMa7gLVr95Htem7mj6bhjyS5(LwXGpnJ#~%|RN%s1VRG zMxr;P_(m%)+LE}!e_IElW?_@JMmZ1`ikazf>veXgWf3yEfVXl)vGg8hDvOomm}AOe z>++xrQCMIEO(ddS`VxnO5qP4}U4*Bl2~iFNAP$YO7~!A~{m`TjplQv*7gCCbFjl2* z^dqqeE(>%u??xD$Lyd>h{|Qf^2*@d@lNf|(su!EsPlCBZH~D{D0fiEgm{V9JKUooP zhoNw19W&IApOlof%1FS25^R_%`m(4rNgxQJJCRzXF_&%qatnMg6BZB%cYpx|;0F&d zbM+~Ij)jX4N-m6QrEmz6sT!{?GNsG59GbyJ^udqN8iYIfte&Gco#>dLcs*Qkld*T0 z!aAbCQY3zgCdJo)nL;<_S$fC@u$pxm8f6=M)}xI2qs;*%$0(_8M5zO*5PWc{4H1@m zU<*GM0c~|{hnA=bG#jQSZxFdfJt8{LRtYhdU*!~f&n6elE#&r5|Se# z4&qt?d_VvZ;2GY43m7o1EZ10xdV$!NuiiH<;>LkNYf;}O7R8fRfO~Xh+ZRL$j^?u- ztTR<_O0{iU9%cryF@+u|DwO0i7P{pmxVBnJ`IpNnxjxlW454CrM5KR5BdF1>5m{r5 zx3U#rk%ah}elWAP8B*>l8gAtfMRs(E+bxPaG}GD_pN0-6p%PPpXOEF;#W&Fod zhnZY^|Cvm5jyLp)L*lEj=Z-rJz9dRsSz9eHBa3VCCg12#7V5>TY&N2jp9r&EHQH{3 zN0mDE58;Zbf}o6g`-`AVQV7f{u^S7jLwQ$2hto%|R@@hkdl;&cWFu?L{a6*M490i? 
zm^@US>u6wK28HVEPYafvk0}>)9F#vBXHDomm&m&7%t%N}Ad=E>G#E_+8`fx!Q!nhXUZN@mtVnMWFs)1r^&``EpuVh0u`J$Ukofu)Yb9yjY z3v%9BU&p2vlH z|89-Dujl+WkOI_!kr*fQreK^UV3N7)OS=}4%jZGJz(QJ-D!IfRBL7G{9-BhHqmSwZqzt)EGnI&K(@O`H|e+I~W9*AEnh3q3e_wL2;+27e_RStL47O&5Z(j zPOm#VM15BneaYwas3P>YfTbsWH$hkJNAd$SFYykFGT7j~YI=#;L&e=7^W2<$|6o5+ z!V6Y*K4H_?lqEO)!m#0513gL0t=|-?ts>3WSRL96gx*iB-W3k2acNxFCrF!u(0y$v zyTl?(JmC;-WeoluPFrP&@_(^c&-}#3Tv4JuQK#}q6V}a6t4F4uyW+L_c^D3iuS?2> z?bImSg5o_JTVpPgk%zfG7#%@Uq@xJmJ><8dmt)<*xMF6rtgy9g%e9x^+F7RrF*h$> zWvbZU43Q+yy5$t;Sl4IXMvl<6t(!{@X@DE$;Wj*TSvqXFXRQi+PnTG8uEDt`%&*)j zNg`MX`w%(AR9|<`XAY;HOWg!cdcVa(cf4v$F`kM}p+Sn|s(R;vtK?AK|L0B4t`wn5 zoMIY8a~rF25SB&i$lc(0ZP~c0$1pv$VlmVEyl?Q)ot1RQ!U88>xN6FsKDn-x2f}AE za_XVF;kQlLt`2_Kc0s*!NZZ67A>!rG9^`t=<@q6I6uY^9!J@!=n|q;qIOA(Ol@lT3 zy|y=Ja9I;rgDvROjf@A>?xM**lHt z5T+}|oLj9(chONk=H9`gE_@IVgdK?L7yj=`_Gf6B?QBEv%UB8tL@p?d%`~vx^m8#c@c%ssocX%i##uh;)3UtZBd-o|BNo*>WUHb-gr&dvbx^a4Ve#JZ4K%kdLY@2M8|NrACpRo7$>gok__*+=zN|BHpP z#Kc?m$muN*S@Y(iv#ct0c+Vvo1C9x6p=o~`;LG-WLB4-+N^jN}1r`-_Kd_k{wTxeZ zlH%|4?OpvNZ`2<1f{(ILEp9quFyGFWz$yB15}iUXi}}^PNH46{iJcj5qD)2gbv#?> zQ(APNm*E-jwSRBAr1GYp@XuH1eGWpb-@sXY_!h-YfiO&$;-$^*uakwG`1kN3lJ3q{|)?C!C-}Q7A(~IR}i7X zat;sHQpivuLO}}~9yIt*p&)enVsSKRf#gY)DOIjy+0x}pm@#F}q*>Eu!-8bUbn5i( zCC-1(4ocMdOy^OgM|tv0+H_~ncR(-FeAkK0!Dmx#8YJU0<;Ds+aaJ%YG$zmrOgn95 z+t%${xN+sqr3+JQTZ$FI3jEkHVY!0|H98a=k)vRQdn-^(yb@#}lO-Plu3OpiWz3md z0)1DNjM>dN-(5u+^`FmYOr170*y&kksdr_1)@d-S+|Mh)@*G=Mrqt0o*J>tS-1u?i z$!iMMD>0)5Dk#{ZLF*TvUk{v-J4uSup9d0j|InnMTFTY0XMY*3)tx_a8a-H> zY;4Y*pS|u38^7Y3I)jh2PGrMutg0#i>z&trYp5#t+LLfX3M-tjoPjDhZ?EmfGB3U6 zu+u9rg%Z0cuZZp%$S{LyVXP$d476}Y8rPGGtoU4tEw!+a`tL{l;-am#*8sFB!L;Nn zXtV+yGHN)xfYOOU&}y{uN-VQHY{iD2E6<>e1WECva5P+~pMFHt=%5ZS!q5Wh?n+3( ze_rG=qs3&@a!)?p0?McyLjw-bDNh2jG6s)I%QQV>8tovjblj*vuY&98tlpmNb5l+` zwG2Zs$3kqOFpV@Q9KH0>=b!AVD{;(;ATo8j|A^k?sGu(q@^n{TqkJ$zUr%!_sG}lH z&NJK4g3GF#Cf(`5qpFm2BjGOX^;&F+6Ne8zcB$;m6ge#KIXY89EFyg#8jP=XyMxnI z4C$mu%y}0oG0trB%@(cHtd-AL_@pIHKmtvxtfxi|opjiSrZjfihODycVSOvMc&2SJ zhyVi&V1+9@Wn<0tyTd9P$RB<9Y3^Lb@*+7_!@RR~pvV}y_-0O(EmTR81{C-*+FFaM zF3>2(bjc!jb`U4{{Kffcs14ed4?q?YhX4yqt!dtn+3X9w^iV=fpbF3l2QrJ=+!9U} zNdmPY6e)@&oG=AyJ8HaXL`x|pgQ^cH|D)*&deMa0B5kYQKKsY2pCL|*@w_9~cj{f< zZHSKu@MQ~BI<*3MPL38T`Mj7%e6{5ePhzajkN(LhkP0Acv2e+2H*TW=`##Mpq@b*8 zNTF$;i9WLY6s>}&nB-_b-I?4kaNDD=acXfEi&Ir`FtEVmnLrI!b+$L++)UBI48Ni5 z1|rDKFz*HrqlyHh`8?_CZ%?X#p}#dYpO9*ttsz|nVZxgUl>SGPjrc7%UIEn6sdW{WWsF8Gl`^QEU8e*x1O!^e^r)uJ4MAWsE=04DLc z?}~$r%ieZ!K#~AwNKSK95sl~?Mp*_)_o>T+!tx~1R56ib`HXJ}`AN;(f&k?zM+Eqw zu|T}yl-tu9_@Xlv&ZQANG6Yi~QnryWmB%3?b6G3l2PZQLqK1DYTc|*JOt`#9QNj9~ zQuw2i{`AW!kh~G1l*mZAfTb(X@}){{l$r=4Q=D@l03SBOtq4S*54Yq42W5$p7y;-_ z(Q(O(Fyak<;EHq{VwW*h|MDi2%<*es0s;)jdC-|i1yG3G;tjWfK#MN zC9Bj3ki~6Pzz?j%$t^I@L6@wrMdx!#Ea5gT>lsu{l*!de0J<1Z&C5&%p+r~8de|oR z28x~0O>7P_oF)P3M+0jh;fjMWgD}Wg3uGqan1@)-lSVMcz5h~@75A5hrs`Tb9OVR!0cRH2cs1{u~0@b(-38XibF8X5l_ZkZgxp}qX9YP zXMcKV10A~_a25=yHmRnFG(zyFpz~1x8GYyVT{U= z`N9cgdGTk1P23S^MYOsNT`)L(#~wm03W>!z9Evl`u#8HjHas~oXhqq%>`l0$^`t2o zftkiK#w53V`I2B_;SH0#%T{CT@oyVaC8?sssYMY^dlDtU3d;qNU>UORK8oEcCzV_O zsGg9+oD$3Z{}YY!2QPz#FpC35W!2tD`dt$=9007~7jtt{OaHzjcV>QQhKETn4=jX1Zs z#Yq^dn28}zikXF;GGFX$bd#uU8P-aY-6Dus1j2RObC;>pjaX&z$TmZhcA>8k8qW_(g za?j_f|A2e0eXx0%Qp(T*PasQEIkOXO_pHl8EK0d1m6jEY{MQK)PsEEY)68;{Cg@;e zQN1HNs-oZLz?$W z?@=HAmdw3!d+GRU#b-P$Fnq@on?Wq(K>ot0|Edtr z@B$#X10}2>Q!++}NRJAdh;#}YD*PMcagutJ$6et#BXmfKbU=rYm>wJp=QBDBSwjoJ zfLGwWjCg=@RF>N#0C3QUG`T%uoC*6##fbC5iR=}Kq>u!O$C!ji2yDp2{}K(sn~_C& z2JzqnE#eE26v%^kfL1`s??XsQv&sG82vNMrK54SztcXm!B&7W?kOQmCN^?oRYz?qPj^L=d1Of{2phGqB 
zgIhU3j$ll$0Y90LOU;Z;*&IC~OgQ0#P2{);E9@M_@B?AY3)H;CHxd!lA`wrViKt9C zb(~G+Y)))plJrv+i&Tz(@HN!ii;n!wPE1O*tN?!ShTL=f;7hnXobfc3stFr2XL>1$Ph`XO~ko~v?@9-UQA1hK+6h1nS5#p#jJpiF-SmQO7hIZbyQABaYqH+ld?$A6eTO^e9xT} z5Ma@aiz*xntwW;B&|3nK{!}CM7#=J<&&HC7_iNFc$vGnxIRop%@FNfy6%qnj8V)f{ z^FcV?ya){N1Bnn15oHN$I|(68Mo5XkBrQ`jEzhhpuC*|W4U~&XVp3SVh@w;w?+8AL zC`&-N)9L8aR)eB5{Zl}#kz=&ZAx%V83(cz{5TwdR#%v)A|L_)LJP15>jyi3kfQi%r zB~myIR8H;GmY~0#jKwGNA2+=wg7YZsJj@GnO=ZDF3_!@0+B4nQDq}$qaN5fe8a_|8 zkOJM+d)teh3stnUMk2~KBkY=vOv{9b0K^QL2Y6M3tQ-Zr6|ShfD+@db&y0!6LehmCNDT>2bL=Z+DTgm=k&O`tikMcC_#aKVJaE;Q!0cDI z3mKR!)QBMqpK(Wk`PS}Cj|eD7!7@sN_##2z#oxTcaqS!i(#avMLV)d9GYuKYK}5zH zkW%y=buA=iDUUfdN2W9*mO87T8c#>7rd}Pzq`TOf|Mgg%eaBo4Q*uqcS4*@?Av9JLn zcLgy@Xbn6p7v$Mimmn0^cqT~MRII&Q41~6}tGcIX(3C(_%AAWiqAD2u2FU26m0Gc{lIWxjk{-%(_m&j^mb zwT~h>4%WrJ?V*{}+KZA*%YUev*5ysWC@6dlzlj50Q_(hq0hGXPU>J_Nkl|M)=^sb5 zzT=<>KiCd1$%qbqybt~dQF5nE$}QIO7S=@MJ<`i$&EyeC=H)a4S&cnj zXj#*7PNvFnkw8$wUqrzH+&lynqZ^Utj!;?{8#9bN#47`hZw6<7-paU~U^899-t9-wH{%4R@s-E3rag-A9rDe8g zh+N#?Z}0}D+q-_y2X8<~=#^)Q|ELpP(hxCmkHYER*|6J?4(f2uRY*eNeZH|5ZqNPI zMGJU4#v`^L(?BK`e?7-Yh;>XTpdkKCDXUaMCi2uH>x36Nal@(o3<8_T=asY z$h3ipkKLHO1KMV8mdeZ2Yrf7)$mD7iQ=ZCwrQwAmI4AC^D2n{mc>Yv@djJK7w8SqNMvucxLx>e@PJi}7=sv!_y=V7yvj9Jm#0k8sV3?g)S2@MNwN0W6b;*afPlNemxy zFyC$IDwaYm5q}5-T*zJvF!FWHMDm&|$~eFRbYoobhFAz2qB~*<88?j%b3aetg<+~> ztmqa(kNf6bGFV&E|CWmX69mU|xYM*x^;B}|#MN_RIdWqr`{QGR#z zkav22c6Z!8KGcHxF7_=9LC ztRGDdM~5F32M#JK3K4s<|NH1Eaz;Go;g$;8`}AgU;m*;Xu(=upH_C>noc5{!gPRO2 zVQ|3D{HHlC=nU0SqX^R*(5)i4hS2-+*_JL}?0;A*owfjOqvz!4rqR}X+RvA4?Bp_k zCDEbaW=Y^z=XuJw1wQEVz0-TXNDsMRZ#~)X+HZb7$wnGZ$Br1WtRXi_a}f57xXZ~! z%HV_K|Njrx$1Mx6QssAk^RJz*iT_qY3yOy2aF;rQhp*RA91PK(!raul2pDRiOc^sP z*REc_f(GZOo(4qfs@ezcz0$`?2{UVim zD}n_AhtnEdsTVTk%9bx<&a8Pe=gfTt;#4SC;LMe5^V?Ex$WG^osT0=u6#N3=FYX6X4~nf#L@%b8DDr8XWh4O8xq(hy8;1m zM-L0~gMsnw&UELZelCAL{rdLrd z5fDKH6|&l^ufGDjs*jltS`c2-ZK_d{O5Re=jQ9N+thCcoTdinf?#bGSD`8hDtgmuf z(X!au7cGCp}`5pBCN39ZzH`o1~8KvYW5I`|eofGt`~wWkdghsh@THBAl?o z3!^lwfnhG2ZG4f+$*{#2W2{`4bQR1On$g93DwVQkT(Ze0gXOBZtpPe$$0F1F(U2(9 zT(iwJ5_>66mIjOOM|iz#v(Q89>o2+xtL9R;sru|ur9?v=HNx&5otn4b|NaUzK`~3+ zwbwLP$+cPqJ&o7dtLk5r z9y@cbv#UDXJw`6O?z?+PJ4S%psS@Je@m{>~S6U8Hya3lJJmn<`e!TS4n>D)f|K`Q> zmY!P=QuNe=A3j&F=ZS6Mz+b64^Co>SzWeWsgnr;+UcLRmhKEHy{QL7CH+;{}4_o!a z(su9%mhSa0f&A-|_R5Dk1afF5!y`-b6llTybtyXzl-101q!P4U|1g9i#KGYL9p{VILbwwDzgw77wU^uJmEae)yvt z4*=CcItVNsIx>_Aj3gfdafb@vA{LgkAA-Im=^$O(ks-gg5x{k3F#4g$dm|G;|X+d--M?2~jPx-|`H2IWLF#42pqQ;6IT`5ajWh+vy zhyZ{Q<~h#>lssC7U@hJ03q7S0+N8;xJvygB6iJzXA&Z4PU8)6RVymL?VF$v1oScjb znF8U&C0=AISE=XJNG4(uh6B)YfQHkqqSdPh%gOp!CY>9KC0=JtD_t=tm*r*ahM%$O zSY`s0w$?SU;>#gj;sZ)`UI!>mO8{R$pud^D1nbzrZfC0!S^w-;K>FCrf=?wmDOmEt z#>kJR6SJbd9QVl3r~-BDJJ|v zqQ~CaDTFLkxXNvzYvn}Dr;rPT)!OKCtLvPKp^mQ*p)9aeYTfT%mxB$wR)Ef`D)WLj zz00|+Th;5{HHH>Yxl8URCHvm{S|+}}O$dD*B{%KjH^83ZC&T`$K>;JUL;6iHgI(w@ zz&1F-KBU@9(AziyOE|;$c~5`ag<%bY*giV-6?;Q$V$4*`!Y5v_?$AV#4zoDM#e1-Q zWo+Xn{_ADDc3q&xL=JArD z+o%s_Rn1va2~%`)o826m&sib#sF(U=)D2oMs$O+N;wdA4cmp^Bp>$TT_dM#>x}z0) zbaD3t<_q8EAG<+^K4LK*QKKYUK=W?0S;}nR#)R8*n#rDPvgg}?x&neQ)kw;QJp+lk z+oIjib3Hs0O8i;8A_`K0&CpJnj@$iugD!s>!3*Mt8&3g z!uKug3J}|+IIlTuft$LNBW9rFTsY9(tQFFX>EuLr@3;HqQ@}KV0?ve&qaW6`&3K>><&Xa=`S3LNZu}7r3I*T$={0?1!w-2|se35D(aki~k6`vo z=Jv>6{lRN-aA~fa{Rvp>Ae*{~pR|$R&k3N3iJO_}pN)uLZnRqZEntI*4SO}9*(FKr zRiKTrR{@UO`?*#J2H5Q7oCumA31ZglrJx3mpbCDU)IA3lH6>8yt0Ud=jML+Zd z%u(PLlHrG7pdRaVIH1@T;xVf)Iu!af;8cw z9ulHRY~U5j5&aEfx)@y{&fCZI-mul*!jzpOw$#T61y)4G7_LjnSz=wWPH9M5!+l~Y zdJhV&L~d}RDY9bXd0s2RB1tqBMf}^`$Y3nm+6yYg5&BmS;v(v4M7-(VE&gIL8smvg zpTbn)F@{nyCd@K2VM1NDd*0WTZ(}ppbN=O1k7r 
zGMGTZWY~e^OrG9?9iL75U{2~}Px|Cf0%Z@?WKh=JP!grY$>dQgdAjo%brcUwr#nf{TltOG4}1>z=I1P z&J*vg-HXj0|2LVq^XJf`OP_d_KyT&(`INQJ-1qeF;KPgmm#tm9TDxe^iXP9tz5Dlp zZ{0H6m~q~-j3Hl#&%eL_|55FQ*Ifc(m)v5@c?aNv3^wQ>N@m%$Uww^n*W7~`W~kwY z5NVfOcLsWB-h~{NXySt;r&8Xv!JlfV`Zv(B^SB^av zY2=ZX;i#KN;{3x{cSuGl<&<*$NE3xCdPe1zTy`myk_J|=+FrVOY37+|21Q?D8QF!` zYU(MN=A3laX%b&z+ILr3Vu~Z5mUadz=%5uPhG3InCPtr}gf{Bvqo4ViU_o#K3hAYo z+Ig0I&1HlZiJ68f>Y0!&YLsh?jjHOZUEY?YH2DEAF`DmTT_0=%%ag zy6m>=?z`~DEAPDY)@$#*_~xtczWny<@4o;CEbzbt7i{ps2q&!Y!VEX;@WT*CEb+t? zS8Vac7-y{U#vFI-@y8&CEb_=Cmu&LMD5tFQ$}G3+^2;#CEc47X*UMuZT-zFcN zxq({t+Na6KSgKDl&Gy`*X(yk2L*7jH-i6u!eRq~}M?^K>gimF+((`S-(UpZaPL%^Umd;?|)R4`=0L8S)Q z5Q4BkEe0pWZ~X&oP1KA=%5g;t2!f1REJql-C@lerX>6IHqCu{R#WNBFi#DnuM^wm0 zGuetc#^EDaj%cY`SfB!AJjfaCD9EdDv5$@d&3tmx$gP}8AW*ba7O8f~D;7wPg4-D+ zF;yMVKt?i!NeUXXm_@Z!Ad6e{jRi0wh+0IEfO7hW=Q=n_Q9;f{<^knO9$AuAn(>TK zEQc8Zb;d(p2qUwEW+p|56-#xGcBv!7l6na#?Lf^*u4Ks(S#r!oA_b8ah^E}mSPMZc zDI03EhoG>PY&v_L~A8m1u*+g_54Q}QCg06#xj%cB#}&M+0ITTGpbZ2kxdE0Q3qyI zejLh|Q5 zn5=QE(%B21VrPXCMaRvfzk#4Rvz7tmnf0TI~66`@#K1ErRt z7s~A?85uYjLZq^h{KY> zuVsA<*qaT`hbGomI$vqqsX7(BB!w|O7u?9)S|9>jyP{H2o6f1`^`T6|Z*wzDQ~M5f zc1cE;l@kn0dA{=hz>~o$b2waOTVgpQ4}GX1*9!>fnt34Pr7oatxnbTG1fMZXv$)#H z;xs}u!DW;$h~@lB5L+a-%H&RbBfZ`SU~#2snJUw$`)7fOnmOu3i1ZXqE@kGnwz$BB zKj@lgLTj-=(w)q$`IuXASl}#x0K@|xfPoqlci8IHe zjEIFd^bv?9>xRwuj4VjQ^HlR z2?TG0m$V1}-}qzQnqYzoc_N3zfCUI(TUIg`iUa^e1eCqSXcr{Vq3!Z9;dw%nikwUQ zeq~#2YvnDzx#bAfO@YfjljfEeWz(7srxm+ykyt`?%v_tzkOYow@0)g1J zOttLVYo<<#miH2pL6UZjxE0C;DtCcF@7!h9WU3UYrLhH?(vzDJ19N9*EDv(pwS%`G z7SM3X*G* zu|*bu3ut6ZP6rXEQ&{O&C!f<*A+dad1!SpJU@W#}1aV-@aeoqF0cJ5@aRqreb_d|l z2Rmncxg{sNSAcW&43wZ*3fEc>=q1@Bfg-VdoMsp7kT3EM& z+jb5XH)kyNaEtUZ)J(V+2u*cz5)nirR*1sKc19P5R)K+BmRbL% zS)aIo(YFhHAP7fz6pr=}Shfo?mxXe7OrI5Q645>TloZCeeyF%1H;7g(CVL8zYm3N; zk+x*@HG>p!MFg;0N8xH$L}P%Lfe_(ipEz0)fm2$djLY~S`GY;cXF_*XN1rBEtVj@| z7ia&Fdx$iDu*4Cm=WAu?X6%F~DM*A(6PWB5tXHhNH23$ajei-(OCAqcj(Notj2>$P5t*AnHhjedfC$jDz3i6qwlwNbtS zglA_Ecr-pbcwfo~XCzS$H0El%z+_i;WytjaeDDBls1h-m9(=Ho74Tm-c_jb!ITa~4 zz9mA^=vsJKCbKkgO=fZ*F>{=CNLgr3;!tc65S1xW3j{E1ez0D&n3Y>{IyAISWq1&n z_LmS5lxw$gF6Lb$QFw?qcLae_7(f7VnRE(~l7f(2E2x)ULXpql8wKVYTNYRA6Nag# zeFzy6Mu~ciR}&e57m*o}6=93HbrzS2nN&iN*b_KaMKmbIdNDQ;pC@^wh!TK+n%995 z{@@RF0hw}%67mrUba)GlNt;rlnQB8vOOj(}s769agcwMHDIo~N`49d7zz_b=58|f} z{%}fpxe|WB2f4Qg=MbG0V{}0?Jvb+BoKUb+GEQd@!sh!LD5CEzYfN)5Ou?KrV0L7+W5rCI4kzTVz zbG1h?Kcin(GdnsdTFr!n8W&Mk$rM?{kSV%eLid9ox)Q@RbGJBd5hEI7Cz&BLKdLiC zVKPtc#%Q7UIfl2TNOmNlF_CuR z%5jpo6p)H8kw&d6Ws;wCTK}3ezZW>LCzrcrOWGL|_i7L!$83PAH`)ZM-BME4>V!b( zr6`49ljk(N6dRu?vi9U=!l%3^D^f-&1Mr&9rmrlu^1W0z${ z+lCI!^rtIPdj?fdR#$1j+7YK{sUR|Z?ow3|OH^-4OXe7LTg$cdlOqYmvo6L`vv_bX z5luhXkCWP)ZrisAVT?gL6Wx=r=5k@A$FB$xk3hDDZ>use6GX5zY!KX-{><`U6_ zrC+$X%^{BB1WHS)lNFFn^+KsKg}Jd2kKbx*o$I+^Rbr-gNW18h=`>j}8@5!Nvy)-8 zCqYP^VieMYO*cq8JOwW_WpkE#uzQP6oZC356C2(Cg>RiYt5bTk4{?d;w5SU=M(FAh zyAisKV-(l(Rn~j6r@InO%e;L$8Qf^T9+^bh z6uQZnKiC99-IG!87%%^Jb{%+p&2~oO*QshqH-GW8UJ0(dH?SD@RQjqC&t`BWf`=Mm zu@<2by#qq2BR$!qI#;>FQtL0G_g5WEj^@}8R7h^y%QngQkfDpO)^=b3YP99s5-&`{ zM%#gmTM-y>e(3j*__RNslfn5i!WiaTSmX^Mv{t!V0bpz(+sb_t=DPM6QQ}KZ@>jlL za>dN2#X5{Vv?Im|v$+^XQvRS1{P30=a+tkjLmy8QHi4 zOGecyj0yG<@ria7%p5(>jE9DknzY;I4+RnvoXmW-+L>A= zK2Q@Hw!AH#`qJ*Z)CaT3s6;{Y%U?cX!eXI;W7__*I@+X7 z2Jy8Zf!W&$XlVJOLcvPL;DA7(2%Yc*-)#ZlKmfU?53c}U4za@&DdX4T9az3Esw@yT zt=cyw66JWij$URS4&{;17Rtku^cRo|E49Lgks>wb?V%Y0*JB zP7-W}a(>EW$h1tr!DudR0sMe^7;rb#R2S)}>9c}l7!kJ}!%JwM7WJb|A&!4TSH8#E zf+-eth)x^%dQ9oI3wvMzwg4JjcMyF68smMH7b-6r0ptCJ>lkzC0-@~$;pHZATBp8v z1}A@ZG>@UQ=R=Waq_u7s@Cp_X4i69-2C*0KR_}=I=hq%Dw{Fvv`!L=X25 
z`&ffpnmmh9(PkH|kVC7uo4s-Qhdm(Vw9JZ6KTV_>Y>tedeaZ{DWlL`db zli7pS^&6?|_g+eHKE2(mS+{VeptS0PMB=H9fUMQ#wUxOE#_YE3@UjURA+6gs}Q#k2$KE)fq)C0Q4l|0CqIl6#;p+mUomiJjO*Py zB#&mlei6t2dZvbnYTchud2yG+8KMy&Y~J=4pZE@rh8bpzY7cP>v7isF@BsSI4{sPI zKN~3LY!d+S@83Hzohlp@NYLOxh7BD)gcwocM2ZzHUc{JD<3^4hJ$?k4@fn$C|0;|; zShC@}cPF2PtZ1@=g@}U6nXEzEJqUvQ=d;^ zUd8$`ry!;ocLn;k=Z-#q1a?_%`R=K}tZm)Cg&S9{Mu7ucB20L9?q0rq{r&|EljXs8 zom2*9SgWF-3OKno{uecJ1q=^}9wgwWTm__?XR?J#QxPS`I}xAG*wiydKN0v4h}G&J z0n>#4)z-z0TK8_=hzB!F_)Kqa;>C?0M-JFRZHP@x`sB&L3V#@FL12i_{Ge0#ZbZv4~_e?4?f5M6*u(a&zc6hV-bffb zJm@5%3Mz+sii#@>Z~+H9Kx(J|&Usi2kYYKedVK4>Db0?D9a zj1tmp`ZZUKr2LM~NLSnOM_LyOS6p|qvX#{L7J}?W0N=$oUypVzsIibr>#DmDBW#zV z5JOxmSu$P%<`W}SDwNTHWhmWW}Kuv{)OPh-9aVQ>|04%<{oq>iX?s$En65L-6B zY^N_7g3_3tv<2GaoHpC+yN&*vamL1g44db_5H$`y7$Z_QNVo5n+1`PEhG}e^q{~TL zpBQWVp)VC95nbTKB(dDk2$<<046HTjwXduCa=Pw}p(#wfA@c6% z_=7w(D8kwsvT3U&S8r+ehWOrIH(Qaj%RPI`XCKyH|7$5hBsFrh(O{bXyW6EZ_=Mx5!_=StRgH*vz$tXE-M+hj@-E`Fqjgc}&*1oqRS;p97k z{HmDV7?I%F1c%c3$Khl-zPU{#CxJ-dn)q6KkLn?*L;`2f)npccG zi4}$1#w0H>sYSZFNF;?)v^RwbXXo@;>cVPxZd&6{hTA_tekX7i&4_EcmAH;{G)Yfw z8f6|32&t;cw*(RhW28!y_Y!7C#vNhLgfq$W$?7Ar@~S;;^W5qdI62N?81+E2*W?NH zjiSt@M#jb!zh=@Q2B8Fm{dSUBES~)Rq ztVhrIF^Dz)IN7?)2R{5%B1>Mq-;nU9t+7+_j5B-^F-zLl5D63>F-fKiHaZi zu%%u2I50Cdv`4U2XA1x#0;fKXlQXP{shsmFwzchXAtN z#^TcLRa@qh>zlbJgkXes$cyw~H9E(jXA0ad_^@fDBoLx;+4z$|QjS2}q1v~2fWfgH z%)9hD%&jUnMZownD#vH%`C2YsAP%Ruj4?0%b>*%_K6S?m^{1Y?$-RLH2n+}Ua#(t~ zTR58%bY~%f@cgTUVWII;mqN#Vs?4#T7zz!Vk&V z233qxwEidpPzyqQxYXKHlHsyvWIcer63!&g$iAAaCXANSlqec|uPlXerL&8w5pqCTISvMOu8h*LKLGAc{d}esgp1mq7WFx2|OW@3G`B~&nt)(`iU{Qu#B*a zh`_nMc^SitFf>ex47xiJU2_XpQ?$;5J}VTnv|zrOkvl*;y|ycej7vi~1jPW!BRV3j zY;lTyh%9}01?;GRW-G*p7?{W@L_^!9c7Z~z06+!`ko_P-yCJT!OFC6CE|ict+S!{y z>!oE`u6s%g1FR3F3&m(G5AhJCozNeD;6s_f812D`z50@(+MpP8!Y{lekmwG7@P|n8 zL5`3eGW@{3pdyq~G5SD2w^@~X6pR|lERaJ8xND?~TSsXWNMHe%KN=c3Q5HE9FE1HK zG4#bP`Xzsf4=n_u3eXt;6C14Z05yF4i~IsMn=v$&3CR2C6`7*Qj9@okc|w7LHdiC3 zeDcVFlu6!5s%B!sdW$WJ@Q`!az=()R!Vtkc;fU^-7%@Sd?8!)#CMmH1$f_X4!J=0)h4k+OX@KJMXLmouGuli|CNJ63x?yM59gP578taTeAvrsk(7x4A1R$6X*4&C$R?H3A^DG}=$ufh4=?b7b^(yU+z1M*!e`r?jC-{n4G?>h zBGVy*lvLBJu*%la5feE>Df|uG*cIT^&^gsqAN3b0F_ew-0wDM)cl#w-Q^6sz3wnvf z2!*d1Rk6wY4W`Tw>Qt`X(Y!-(D*9}U=K91<^;J4uh%(`}S_lMO*i8C3QzC&lGHgLV z9gu%W!dnmr%rrWg$gsFOR4(NU(F2Vm0gWIrsoVLX_C&;Zpi=MDqZR9`uXUR8oZKISfU=S7McZISfP* zK&vH=TS};~1;VWngjmo+%M`kb#Sinu7mnfBbLfLwP`0&QR4h~4b1!^z#Wm z*n@BQ23s&%hG;HcJ5|64wpLSu9mtMN`&dBJvMjq>Hq2D#{fh)clt?)!9}+}OC7(6I z1;Wjt)3hnKJ*%2|3bfUS@!i8)(Ac+YK;I491?{ZKO|pKF7B0~aMv}q07|hGn-_v*} z_n@e)*c8Bpsr4ZxpFjs>X;F7en;vONk!#q7z&R%(0DbU=e|Ufi7DJiZkIH?UZiT{0 ze5Fj`;8rM$wZlKEwWqq-+!AgrhpQ-1nZN_4&F5?_#&`#knTY-{j0N@LdYWDTTmj#5 zAb@^&TrG(R9fY{`owD9tLqgTziy?ql&{(f3m@C8`ahpfkwBo+#j++S$@)$EaO-R~w zR@t(EB2rw=tq%;FNb^WzaS@8h<%4P=6e{CNS%t@|Y_*CdfKEt+Jd<>T`=gSwC=#e75!DI3zU~I&sURW$j}o7M%B#Pp&%}YT{sF?$)FIViq3F8erh^KTJB?@zn~h~V2rlRZCFl(@`ekzhJe zMyrr5)2pt4-2=g=;KzCTk+}OAE{kQj<_$^FszZ)1 zYLpN=dd%pRkcpksM~*?N9&5;1tS{PInL(6mZLAD(87C8KjabJ2m;C1jT~lEy3AS-6 zG972njtix!Sw-;?S}yI;c91mFR2sw5>57jX5C$a>hS!K~dG%*}Vbi31pWoF8+%5^$ zj*0g;I^YyK;6UCvQJvx=t=S1&uB+00ikdW=6`s?Fv=>W4-}q31E!lxy;D=X8 z&%E?%cWK903d02r(Vn^FUb>wMFDboHvh#n1{zh#}f5{C9??f72aFuQLhbY5Qq1?Ppl1VHsGL~;v zFOPGni_;RN=3J0b%C5u|PQuzaBPXLH*K7!%ra?~u)jw|c>I{&=rEgxA4GMUd5`%|IXQTTaCiBo)gam z(Rl9MBVgoOS=N_x`O6TkR zjL?hJSN*24dRJ$A-o{I&jf>91{Lfv~a8}g+-S&K#&w816jN5XpJPKerguepplXip) zgb9S~JtNX#dZk1$^ORhwS2zK$`rbzqlA|lpkC|jiydg4u?e7S^P=vSGhhM+Y9gPc{ z@CFiue2j=t-uDQ_Z4e$=m!Dm~N27o82Z#y+2NEo3@F2p33Jun>Q10Krh7Kz>GI{vPEWj408$I4L408B;lsJ`U8jL$-uXOO 
zPLMwfij0L^Nk(BOqD2b=4N4H`Gm#<>zI#XJ>(sDf$C52;_AFYH4J&M=2(s3{iX#Oc z1<6*QK9PdSdo{{tW|-A%L=L`D5xU8 zC!Zc1>hhgm)KC3(>O@#Gnbr!sKHDt#HtyWIcLP4kb|l8Qed&Vi`_G?0t!o9f)8Hq4V$W z|9@_tSyaJCjg|8eT%PUqP;&zb0@8p0N;u(Fg#|?&N*|Sji$D*eHr9oaoi*7u_!f z3D{YbM_MTuVYS(I(v#0zQUN>vPXIApbZkk9%YxwnuunB6ka40<)ZwSwQ#PjL-i%wC zw3>_W8ib>ci-FaNp|)yzEV9WS#@t9x zdS(G!RmwFft;$*}m`Bf0q7-4q7>iPt2cdeCn)mEe0W%^LLQhQnc@LABh3>XRN#H5pF=5hDU5 z?6&mOoX_!D)O#d#^$$@0Da(f8JNc4IY>y=~d^67eE%xoBj&??uUkS^FY0gErWN`(? z>=>J`Fv(UixfS4Y6nI5VM$bJ1!6E`C_MD4S)6d!W8qrRTMkcVD;W`pTDHBBOy++GD z_g}=K7H`FlHndk;0$R!--GQ@YaV;V$;sw2K-}xs)1bGAD#sJXMu|TPepo!Ne`KdTy zEK65VYc>zIkiv?eH&Dz^k;b#&t-B7>%4HYS5L|8%_|H+%dgkcsy(?r45Frs>0ZlK( zXeU7$Mnqub?_mA*2%VU_P-eX1T^3MAeb;gDyCNudkLj`XvyB&;&UrpNf0Od zQ&CVZBq-@Tt$rT-AX+X}AdlbNp3&gUEzH58_pY>_?KQsYEOBn#?+!caMCy zzym(KfC-3^4~Q`EIV|&2*r<|3N4@B5hUrS5VmOc!`e=n#yy8?Uaw&)yXMM<`Srf5n z5GYECWdqq?LZsrrh*T{lHmufUxbco^Kq4Q8Xn`Rxp$XS$f`t{_jlO=uIjfkDf0{XD ztl}0cGI&vpj(p_oI>H<$F{qKpqM(@|DI>Pr1XnrgQJ+|sL9@J(RwQ}`3q&9fdzb(c z)9?itz_397)}^J2CZW$IW)@I-O#nEf$)ZtAOh5r#7H4|0xfcro2a$Trk!dTj>c%%QM9dewM9`W0R&;y z2iWDGT8SB+EIJ_-`Xmtj;LuGprV{chx4rH?(h0c|MujY>Sy`PMdvPKT4BUbM+^s+# zxH83m%r;MVGb&mT%-3@0!$9Cc@CDy1;R#0?^#j%H4~OLz4>-pj8gmfl z>L^bDjW2Y9yySxb;R)>E2zZp762;jV$6ofVM1q=|);Kqu6(iw6nvyr8nRzNkMzWR{ z0x)5v6%@oiU}Xqy2W%^ zB^ZbR9~z@+gw*N4Pwo$WuwXzC1Q_b43GL4*!Ywc5GR>(Y(z>BGCz%-gG zcxs{`p7DfF-P$1J{@=0wLEhALwr)OBL`NV5YTew~CHB7e$A@}R+vK_APXEigDRj)J zQ{-m=KM9ks;-1?0r51$pF+y%GC59vFAN!L^&liI5!FU1Up4oWarc%9@443S0lcGV= z{`9>U?d@fglnV=<;Qio<>p^=$Rt&U0DiOruiE29NaMCtNH;Edk`T_@Ong`o-T%&W&Jt;ybkv+tV6f>F`*O@rc*z0?* zkU-EJ_P(P@w^>?}kwd@!$)ep{OdlnWFdlohnNM3Z6LGW=c%a%v#9sjtNeb%7Y}2394UF#0ZNmSyc32B9;UO;-Pcc zlHKUfBvMsvs0ck7(ph zv_%9zV8(qd{hhf(Ty1c;i6e1v(m>7J5#g2%cwj&_VbEI&_AB08%E!BaRTt%6!fl z@shE4fftBGV*nwNRGv|A1V?zoKYHGl1R%1I(S1S!lL@{ zk0HjN*Rj_|B!!tc9$yY)c)%hyiiK6CQAymS`KhI5LI={gWQKMB8${p&E&xws2Ih2-0VSx{B39vW zd}Cc2kDO$XRScvXedR(p1b93EcX3#HRRDIt;VW9^8PTRr_{m&U;rRkKm-_>0iKp7I-`6B28Ik|WroEmYN&p`C`#;(!_38`OyMma z1xU^pXK{*-Ca7i*i)aB!0@j=>%3eaO==??hVid(D8}>(MU;qaA<~QKNuT|mCpb9^r zibP3ek4$D}R-)1giaENC^C4*i!YG>Jgx~pKWSS5cIm<1w#S>5hL_nDbg4w98(1J z0R|92E9j;|D48aL;m?fJYhtJ&(PA}*+(?cpwF1VZIEZWJUq^&lD>eir6dg#_=YW|V zdFA1K@y!NqDsfd%Fn%Y1SO5Y1Rt0?jgFYCm?|DQm{1&Px1?p@{X53`YSr$qFY)q_J zwH|EjMTuQ_pG$gTa}I>X$+?y9H0#%isenEA&? 
zyqhfK$}}A zLKtmb%zO*Trq0RU()BzHP!TD8QqLM%ken=;xV{7>yxSe9tRJ{+Vo4!b2Erw@6$z**u0%>ea)QRCyNLAbj2Gqj4!GdAo z;@~=_GZJNgj3e2UMhC&F4*DqUYNqTOp~5g^E0#lgp@UYYSD+!T@{*2JsMVe_gi+~7 zbn2{6K+%PKZJgZ7KWG{sz`}ZR2_7~En(*UF#BW&+Cqkf+s96Q!rPk(945Nn9_`c}S z%nq~AsDo6JF=-@jKIEcK)7Gue)2_sUAlO5RWUXjWql!%h{ex8zR))NfCSr$7-mDDW zNL`Ytr(T?yPLa%HAOmOraFJ~AnVrxdHpg(K7vPMAj#5QAlEqZ91sTQgWjcoD^2R@i z1A?tZL+ArPM6Medo!ic&<|N$rbk7ijj=K6wxFRuHL}yqeNq-V;@<>uv8IyuUac(G$ zr~rZXnxchj@iiu+>)fIsVCfiQ9RN{4uD__3Q>Qp#2X+=3QO7S$1rvTn99xSe`fmo) z%pjI(L0oGjMO*wu3MC=(R7i>tNHlTeiOQ#$sD$hyu^;~BM$VO%(J9{z>BQHKMijK-Q%@zDkaQBbl;tkyPAQQK9x z$}Kgr1h+|4`I>KoxTXIdh%v2j*(g>nPAoC8b&GW=OGK{l_3|&H^T@q%X4aGTNo|u7 z6_6$~aS<6|zx6xO|C8Kal)R;z5-)Lsgf=z{G~AVh5PI|L*sPx&` zfy4qX%y#(&@t8)@moi%|&n;<1o>byW=o~Y8%LvS%)3g$IMsc)W{o6}~L?SvIG&i?# zj741(2NhmpYzXeEv9{#B9WPhFB~;K=uT@*G6x`m(dwdTpQzEAhBvF{;T){U|{dILa z%g>;iDQ{;XlFotm1Kaijy?1Hscm#3t zFVe_*%NhNd|D6*+sS{+jhWDFZ3s-*oZBxU9q;wZbtJxtwOOCuq_tlb;_YRE5xJ!-+ zJ^wG5Uff9a_|{fpYO#5 zgCj(LT~!3c!j(P21Mopsm%0_r@o|qhI`;0YOId$M_xflUiKj0h)7OcTMSKbGBVndP z(bAzn|4^oPH&r#1QMfDtM5%vSz%ID#E%>6fYf>}g&4}C1#`;BwA0l5AZ@J%y&B*2K z(FKBHAFIg7@454N6{+Qj{(zes!1j&xf zmej(q3WPqy0tP%lD=00Gv;1uk=nBc0MI8LS0zIKWdya;!+-K5Xy3?bRZTP`0zyj9`Lm@x%Ct|b`j+n(wxa_}cu zF5{VW%sIIAt9L7V?HjiDl=s}Fu~WQ;^kRJ=1cDzt03bZTn2W#g_YUztw@FDgKvWPo zkYK?I7HSEyAgDsQauo#W(^u}_KYs%S3HoQSVL63@C|2Ca5hF>HCrxf7gR(-Wkt|`x zBuM6&Oq(|gzUxHh9Zr@NJ5}ImFsR9*9D8!CS#W8mo=XKnW%=wVL6QGDRUP=!|EEBj z0{w)mps#{AKNbW&5I0cj*RF5j#+5slZe6=~@#dXdk!n)CZ!>Z|=}}?Be;yZ>3rF@P z*$TBPaCP`GWW|^s{|XeDS*}j0oMDa}iq|q3r;)wB)hZL}yU#`=zw>)@wNEcQ9>A(# z3++MQR@;IWKAd=QXhil}4AvM?`0%rH{lqj0B8Gu4s6 zMe}=2Dp(WM3sy~mxYNIZOam0xWtp`y&n|PM^)ko`vMw&@I(n8Q$Uefwn|}I1XFQOu zg6Kw>NSt+8&SVQRzF?chu*rf(BPr3^Rx1iBLy>D#E-r(F3dnc~|1Nk#ZM#JHvhk#o z6TP<OQsvT2k4rM2=}o znXoDf-jR9k+2^?0wTUo_Jd!gqhT>9a*j5cQS0OkvuGZM+C<|%BP8R(c%Xy0$II5Eb zh56-}zh;@Bo$D`hl=~il|w5O{jvCXP0S1yJqDu_gKO&mxfQd>|k~3BNFx#G56Zwxqlw*(f(j^ps|b-1XOgz7@cA z(Z{K}PW%Ju0#84Dzd8^Nz7_Nn|BRuypCssS-X{=)d z+|ffQ0iFE4?=JBXiu5Amo_PsHY&nUEpt{#R?bRhIRVvV1ZWa@^iI0ILG~s*XN5IO| zPH!^e2>$?skf==uHN{JzcnX+24pE|JCuB{P=pz*Nb>%fAY{>%^B@pf<>1+6@3He%g z!A&&>b$kkq`8eXW(IE?qKJ?-j>5>(}jY>`lDG9a;0=Tz4#A=s;$e9o(K*;UL0%&O- z7-fq~ za^xW;5VRMgx`sYhE|Zx^c}MLwLmsPa26t#Ah#I>Bz{QEGaStILG*OwQShT5`B#B?2 z>SM(AbWLYqvx*_dR?9nikX|_o+7&%W70e|e30jE40^uXb^9htb$uwp;5$Z$wMQ|h1 zJjf=wf)I$z50p4j;QT(~zmZ6)A!brn&%)7AR|aP!~#%eVs$IE4C|mQW)P)z#X3}NPY-c5M9nZ(uYJ5Me8!5b zp@eOEE{#l0L;JFoJn@Oiq^D2lBQUIR!WGs1i9q#Z%z}RJwdJiFI2%f?+TJj*4H@i2 zk@gngx{kQ!B?!w*3ZKl9VI*3KE-uHG|JIs(>ORz+iOFv27n%T48JrMGegRZhoEXTt z_O)=q78*_Uj%vOWePgv8dSSU7m89cEFeB*&$(OFUdPfYcN{)EMA1(5x$y5&+ToH)} zuMMv=0dEAoqD2u8xl2c7Rcwi7F7~nnI*4tEc*;dt#(UdYm>xd9W|N8_pf#YNgiTX$yhsHIZnX7bR$x;Eg#DS+7O@4%n z-3fA*uA^Ql&eVs*RByXD=ruB-XUGqI43EhjI}BR*w=pv9wwbk{EwJNK<(ClpAd+RJ ziBqcPnS{BfGTl$JTMVW>4S2w{IW~49tsi3JwO)_5;EuT+@yyU~g>wS;l3`-yJR_EQ zXt`07$~Q7&vEc?!E^6^6D*k;012UPFBP6_I$>QA{3ccO!K1& zTeq|lH~QkFdE`s-HF^Eu2e}2OacDl~ApDyxyIC2YTzg@SSJh<25@M!Z-+Qd3&LS?? 
z<3K*{2{li8;N&KEZRK`)|F|9!V~0PjAE2AsRx|5SmX98IStottgkySrp}b)k+7Hm^ zCLDH{>F&osCKk?kZ^lLRbv2FDlJ=ftK^=b5oCifEuw;ANKl{MagDmb@20YHL(&6Fe zN3MH6u-w}dAi4{F+xJ6itMmM7mh?j({Wz6E>PNq;#1?6FEG4Th4@P(Y!;HAlhd0I^ z*!q!4jaMb~M#59#cc9tl6YQE5xm0?zr+y_Wf!=*+f^&QI+&M^c3p*{BViDez_x*nk zl37hsScSt*!XNtKBDP{I%x&W~$*+9lBe+5#Jg|g%Z>``3Grz9+^rU@Fc#E=$(Rbth{o#(YtSl261(cg4$8!Q?7p0Gz(D`>gQ;n{NUmi3FJ>xq^&)&QT59E+xm$*;W!3<;6V2s8R?l$ZiiN zzCt8cZXfxPm5}WBfG`M!4_%N?+{DEjw}T(}Y9TAaRdC4p-b(}-Nb=lm3MFqaHgN+t zkRui6BEtg=!%!u5aN+2Mk35BQ{*8jbkp;oB!jy|9^a+K|23acQdP2?3;3)y`tCnbS zCwq}H|9sM$DnbzJi|NG8AoYRqRAs4L1w<4`5L87v!bEQb@@H@-w-n(WaA!Kaj<2qf z13NN}5JJ~1a+w_H;M_%ngoruH5+zFy9MNygP~v7x(>PSBo-D0{XmTe0!~84+%ZAEB z;E*qW<{y&^2+>O*3FG&oY%Js?9K7M7VlRyCB8`<4>jvJ;PRGV~(oKNQI+24)3Y6X!|~=VT`~_b@IV$<4})ChB8Yv}j)F@~2oT zCR{_2#!xtY#uxQ$03|9IHFGf&A{JsHPLPi}ASz6X#xZe$TSAbwG-+zwhbr|(5uO1E z|90&}95nenF&i#idF+{7jrXq$dWQxc%;?t#E6M9_YR&ET7UhjEYYbd76mE4mY zHKX4KWkQl=NLtY;1cp5o)LI1xnlt*Z^ zQur(8jE(4IO48D&40FP=f=)N9^mTf20DtlyiOXp6>`g$z0L6t|iV{VH$8pTaL?VVw zt11)QrvkAh10~ZSB=A%`?)aLDP_>g~{yQsnRk$LuXbqbY7Me3DgW|BK8Y0Z~ic z3qq03@Ep?{Dl{;6E6VbfB^Hp@Ktd58Lte|ohm_`VxiE6GcKfo@MPC$gHFr!%3^_a_ zUc_)O#Y#tjgg!!uW5;aF>~n4RHqAyBFKfcHwrtbV>PEH9NC>2su$6BqHI{-BaZQ4tD`V}t)WZ^GOR-sX-zr`$m?=zE62z%6c$Fy zQ#jfsM}~`FPDhalD(@gpTA!x9CI>0alb2Ed;6Ved5Cl|A4??oiTE;whPJk zA_uKS%QLoOmWaNDZLh?*97%rd!)#+i_0aalpn}e1@OhFXQsXwjTBrS9BJ^aDD42tN z6F6S{YFz=>B$(15h!AmIr5wCvob+K;oHrBe2wxW$B?#kL6cNxqswB{@gle~iafo}N z6IkOE8MTm&78W!-rF7_TWHv*0b1yE@>`;MBWmVBn^%BmQHCx4|=W3Ui_{Gf>EPh`$ zirg@YGlk-iN~r{Q-Dq`;_^0qP!s${KkE2XzHELL}`czYl9E6%r{^iv5YfrF;}^h1_&`C=0rs4eZ)wtMrtG? z<|O2eXgd=za#=6hMUMutS@w3pk zWEIv}c%l?%JUJIMR+35UW?3E1m=8?gc9?Y^3~V9QQe|R@8we`)g-EJdFl14)dQ5&x zQkX@$I7H-zRMc}m#5{JD>1Hb<(4iluvCtH^lP?lY|LBJajc2wdml76?o{QFfX!K-U zC^~8jm-K-=fYtl<#vPq`B|_R#O7p3OO}Qd%C{&l4DMck<@Jrw<+U%l9gie>xs({nIZ*XC1-Rp%vlZYcVc9{i-%df&(aEJZ0GJ$9lMx-Mq99#cbuP?NN~ z%(AqZQ~=qvt>m5q%Xl$Ql$>`8B_fQE)+i5fFc)#wa?PfnugW^X3l!lA77YBH(5$tF z`Zgw17?Op*WBZEQmxTj!ytZHxdVeKE40S5#|8B;qRkNYVJ9Cm1nh}mI{NgD#XoHf1 z{sbgYOL3s++fPPmqI?m9LJ^!# zJU$tW6pdk0g`BqXjFY-JUde}qjfu%xmYf<)th!=nPMY;syVt3ukoe0$%RQMxtAnJH zTcWiA9WNU9g^CtdW@}xmu=qZXw=1Sq|6}%_tAk8}5W~}?Hw3{LnB!Eq?g?yrJjz^a zOvTCpP-C7G9)7$T@;lqwvFvE-n`dYwnB!JyujG5PG zJLhh)&2hN0A$7WZ#a=82Yr^u6|19GuV4TacN76p+CSMWuKIiU(UPGwY=yi4S0<%Q+ zJ2Hqb96mx3jS-z6{e@Ur+7TDXFe)+L!3#KUPd90K5YZbRbeT>>BLKG7N}Vng5uoml z@WJSGka@M){DfDj3Q(3CDoCyFH*^&0?>@mNR0$U8OaGmE{AFlz0Tk zN>Fe2z-*t-jqAA{%q;<;LIpBpDm;58upmK$ojM(C2(h8Sckd!TdnYiXMveb4Zp?^K zBN>ks{v{+S5aCFbo!|2eSW$C(*lzI;YdCe58Zd;0tdG^o&_M2i|dTC|*?au!OO z+$gAm1q6eGcBH#c|el#{f| z)bE~w&IudzLDvmo!wy-Y;=+v`FMbS361TyWyi;l(E%2$%q5oDd#|%BX^y$YPp;pp>7EbrlI|PBoTyvg%7FtZC9rsyjsd*R^83#4zT1K|* zmJvj==_b>2PCdn;P&tA$5JDtk6q1Su1_|PYMjnY|l0J2{9ZcVuR8@X9eTSAn`sg!d zQ}hj`9+OsC&=pp5!Nt{rV{Q~4m{IAMk$?V4g%x^A0v6YKTpehYR?&sm9#(P*0t67} zZ8>M5LcN5VkR9%rkdPUQ_-IZgRg|J?ujS;>h^@`WDMgRg2BSk1wT4n|HkO)^M~bG( zn}^6XdTCI-!D?u&w%+;|QwZX9*OYy6<=tIqE%*{Y{Ja%jd0ZYgm0mE_qL`F)g%?$L z=go9jwQOqS|4#+|@Doc@ec_7KU<-ojo?LiN_ZVY*9%c+B>XJ)uPd9z$o2)2qL=Z-i zs%X<{PB{l~bC<@Z(WtZ~3S$K=xkTeg8VOeuP5*F6@x=p?gy=vq<}_JLKV`^azxAGs zGQDe3JLpr0$pt2_70d+_n_v>kQ<}{Vx{fOgr_@W%YQ!D7PwH6{T})l;lg=@3o7hrR4eOn>AF=E> zRdUAV|5jAvE>31wPBRvjx>I#+D4$l(9LRW)sm1!qk8UP5qbd4JV!-+TJ%54GlF@C{p+vp7SgEKTZd zKEHci;r~yvTjqti%lRcP74U}Am{K9u0gy_EnOOtzQWrxF<|)aF3Od-~j_<6mULD(z z*bqmV8!_x`&H>F$2!^lRXk~>kX$`}c(~=fqk2Epb3Eyh+nj4OdM;9ZUPk!gY4GM9H zN5hU@?9vt0C5Li8X$#Ko_qfcd3wGGa4zt|Sv(?#UDr@0KI0!Qz#hHs&VFDAh;H0{y z{~$(5RYPLdI^qQ&Fo=k_Qriv7x5GwBEMTd*k#l@>LOsUjd7!G^!&<1D7?DInHcZG& z?pQI0i6l82BMPNNM8`~KGArEC;8*Tb#i4}ff6Uq$=X&D0*qzCZkom}b2*|U4AO>o3 
zB3HJm5-q8$M=?(mh+5X8BnMe75V*KYy85KaTOmqPVxvhP2UaORD(`H)X^|k|7NbKl z&xN6KNe%6WxJo*+kt!m|9>p0v@g;Me>T8@-7IUNiB`|+Gk&<~zDXm%_O)d+<91@du z%Cr13LEGwN(NfmW{&7)ExpbNZ;s~d~Ajx*=!peAXLJ458vyLzcB2bd|Aw>l&|8UeK zm_y=eK8kQEdeVDJMHm*Ne5FJuG@OaVb`nk(C9gz1l<7x1(i&!Nbf{kY)6JL|ChVlg z0>Mhl%$P`&YGvh~OYxsRlZ6$YIR;GP(qg^9v(U4c%TH=l(^?Q{q%ejj9JsImF8bjQ zW5|&)M75P2KL`3`rF`ZbCRXo~AP9wQTjS+;8ki_FZ z6mqz_YL;Hs($)W{azyr#m4EsK3Ic&eI>q>tOio>;Ms7iX2Vmzar7hHB)DkWG_$ecs zOJbESL@c{hpsl#DiM49G*$bHneU=TUNBKG$K;GnFfIY~lnpa4TXp=69{{$y`WJ12m zHkKnjEEuVbf+CM5cfE6(YW+GV8RR-9YVr#VR|S$4rMUK(YE{9MaxQBEuz(}bl?O!7 zXjFS6f_PUKE*?>$@{#Xlh{hG&ZN)%3wrhD`+^QQoUxL2nDsTDUGs8vCYO;r-N}?t;^kS(bR@$7!L9u0M;i}qD6kcJWIHU#N!iRilgd=E z#KswoPE4pv!nk`w(cxq;OB&QL*J9u^8LqE=nXdGBKm6%OTeVcnyxcONjX8`$T?6w)^tKN|)<_xo7M57BNWOxEo&uwaKW5{3)`o|HLDC@v`YCN0L@d{zH%s z4{bj5D|J(2-O5ZvVK0p7+-A~Uht)DTKbdyN>~;}}xTfQ+k}TG%j`v6}MX4F-k3iom zZcQiLEJ6cI&125m1o7b(d%=Z1_#uu0SM|V1si2secyt1lNh!S2w37ccWh3W@XjAJ* z!6BS?C6PPp8gpAGpxbq=2)e|ySFAQUURXu$WZE3J7j8_bYq~Q#;;AqGlBBk!mmJq> zNzA8_1h_?3pN_tL@i|`tafdzZfdCk2me7sDGF6jV$@!-0wUQ-d{<)1j4=@#h5DTzSU|<1$N7XuEGJ$k8U#(p<*C$b~J$#<8^>Pk%0GNJ%!~%g>x}$ zmWYOka#p4hjD`;K!EZb$Ag6_R@gaSwRw44n|9uhAEa9LJ{SbosLxwE)6jSqVN|YS% z2RQ^46sX2HVMK53fmWheDCVmUa#=$tE7ye+ExDEJrCQk%PbWiL&=-C9gG%@{YS!luZlx3vFkq^KOpz9l z0+k=(0FkB>CP@Qxre=(2@|1PxE?pHYg5U)QORGECAwI%$(l^Adc(2NYBRj0tL)DPKt@ zknQ0gv7nuOGI0NedGjchW-&oXM2ySlmc!|d_tJ?xu`SdkUc{y_D^+KMV@(=y-cx5y@ z4g!^gq9C1V6lw)C%E%-Fs$tDxSiFWC5BQ9hg@h8CD%f*{O<14b^g|)RTqM&dGo_c6 z^K7JPqCpBBOp+}7^?5sYV7Ks`4@!$2C`Axfp?hHx{cwu!bSLaLWaU>SdbomuC{YX& zMnYPVHnD~5xlAc!H?h&9f01@E1fz}>6gcXMureWQxhy>Z*T%|BgK&QJfQ)<7TLbnv{>pnF{tfq4%gj87=)3sj%7})Z%_o z;RUbSj9uB1HjyZ9Ga?7^BJ~Ft6Y{Kf1BFBZq&1scVjMo@Xj@H|w;JI$#( z2#||wvIhxak6RPMm?o+Nl=`+EWNQI|A}E?$yEXQM+e&TM>bV5_q6Dgh*LqmYi(bGH zuHl-Ic3C(WfxXRZu3;!Nxx16QOQBgi6C{g%g7G;4=TB)7WXP)sX-mH8`D|?)BG@{; zbJC`WMHIM+{}i~PiPrOtViPhNo4=Tff#GJWfq{N#ffwxCxLC`O!Ru6tMlFB0jG1A* z4U8Fb3B5-+8KDclM-s6#W)}GCH#PCMaLT|0J3`_r!pta`M;gLUnoFnyuNds8l;>VG zI5SW;d1jHqH;l7&xt`Rfsg2^odxpIuaumO3!`cI9ovRbafwUci6a1^h<|r9DY_&tS zTiziRy;~nZ9FHG(vWy{1Tmf2UwYp#&#v|O7c=a(G62(Cwstyyy%4VCEp~aPhPWti? znRUl{TDV(eq-KFtkbzeEgh1%$#?Ba=y?ZmA#m#fVjq%%%xkD z$&M_g>T)%)&_G=k#N>=J9R?qU9()pil~-AMXd^q5VNb# z=*ek2T#QPWt6a8KY|NSV^u~VSwQ`{k29(BXl+P1QFC^U0`^OU21jq$Vw?Yd&EyboK zslAqXgyC8iUa5!>hthkw%d#3Z>uZMXQE$RLfv5*SImx(Ci#y`tOHT9C&Frj-%xi!% z|4GubDG}kYv58_9eVg4B7;N`L*Hs>T+fjKr)eTHQi|ZcnK^O>xOme*!MY<*9rg=`; z6u6Mg!noG^e42pUHX3=b)%&8MD$(FH&N%f`Hzqy6L(0-d&40bIeDZ#XDlYWlAatG7 zbB)(IN2Pd5?3Ylv3*3_VF9{u1goV{E5ieH;U6TLDl zQ$rP4rN$C|qPAV&R`-lwL=NQ3ireB?)xO4N0wSsiju|Br#j0(3Hy*w>2Hsgd zL=+APauIo6b>Z`v7r2Fo8{Lxy1R(9vKqBrZ@qCcaoi$1k2>K8;PsQd-qs7SLPX&ukstn0YyBwaMe(QY z+v5fIfAbi5!ToT&t zqJm5A8w6W7A{p`xdG0o{qCQ z-)P+)*Os|P7+pkQp^khF)#tL}EicNL64@m7%pCVpFg3K^4IMPK|8NCSSdWdd7!l)2 z-#gA}Ie_qFfdB}x744`*=cGOpS|%=Hq40n1C7VrTky+wU%~j!00dizUVe0iM-IZOe z6Sfi1*QHYZ2b&Eo@<5T$CC?kaNv%O`ku8z(eSb19UjYGz0a#&+uMl}ZshQl~I_V1+ zox>E~;NiP9N3^6%m)yA89zVGhvy{(4&fKHB{+cljUUCE0RSaH(JRJv%>?u+EltS;a zpEAz5>`dVYt#FI~@StaZG-Q0PN-5R2&@Fuq9|*Ew#LZe+CGJ^+RU2;n8PCN0yG5>z%iAZ0Vk2@~PXH5%(a{+O|b;_wdv1`vP*|NZml?_UKz5y~B`(9q#S zhyx$ivQXh4Lx>qGloKRq;lEmfDp;UcK@iD{8c&KWsd3}UhPD26tYy+r>q z=TD$Pg$^ZJ)aX&9NtG_W`0v>>GGsbE>(r?s)u$Sp{z<# zTHrhqrO%L|MP@A5+2hED9w(@`-{%%9r1> z7RQDtTSk5gKl^_G0~D~Qi_iiNHw2?n?xDGU!;Lxtq55h%wygUtBj29c3aO^}dTOl! 
zLlkku>TV%mF^A+U2M`Pt^Uo)d*!zhi3uJrduO*(Ui{O>&v8= z+v`g%6BTt-;kH9joW$TvN4%>{P@)k?T1gdl*w>r_ z>Olf8;*GD}{-esFP-nBW0tj7tN2+VPA}-pWjD03sV#5{pFT+rEG1#6a`>{RrV0DP1 zS(BO%*UGl42}qpW9d}=T-74^^X)}td$}Wqm;5KeyBU7z}1rz5M4iEYgTWZUMh|>x% zj`E=C`t|r@pUhR&t&Whp6Fxo*<&eJfEMoAa%2WnkOv0 zx>_QL;NIw(y3(R;A0o3W1v{?OLu#w7`RZ`d6{^M?+eNwEz9tKYYa-vRC^D?mRy!p! zjHR|iubNGEX{+Rx%|M5=UP>H5II|#v|0g#j_1UPV(P)epa?m{%>&t^2jhl>ww=x)6gdd~Zoj?XUsT@~jF8+E_Qz{%RFHetjIky|aEjhdou28sL z0#yV!FE~Sbs0|N%h&QRMUXJ^mvDcsGd$&&!r59Djc@3FKu5Kcc{UuOX`cj(s2G%Wi zJrFQUW0-aL2BZqaf)P(ZUD1w~w9?6sfL{5MRi=_W0x~2m3S{BFywMMhzzc2yOeqzsW^Gki@#FsKhaeglt9p-2?(qNx<7 zM1b*w;6a*$MnsyaJa_U7AZD3rU>e0nsc}S)uvN*cV)6&>?svr9Pg> znRST?R$FK)dA1RW_im5s;-BailE(mQo>GVDLemo0FVOl5Y1IUiF>?= zAY_-yM=}Is1zO7#K?potW|KDOu~A>V6hv)?b3h&1<=nc$EMHzsjto2BFn1xljPS!C z6v4nPwgNL~2F03Ux!q;5{~}HnV)H-U1ZY9irYu+HNm(D-8No{U#+-pEKo@yHMIz7& zhk%if7O3Z3@cGBwY0sar#E=ScX3*kg3z8^hX-lP4&Mx^=H&sax5h)cSki^1o7{DkD z16i~ac{ET&!6HOD2|uThw535Y&s5W~i^6;X?0{so_Z#a!!?NU1iy!bQV%|yvdRpNkov;69F(JnQF0@+u+;0jCp+_#0X&(pn$F_7$@| zn=68UcuHtTYd>Cm)UWK2-(= zPf!gn@wWBdq-@uL0PZh%DVe*736Hp>T_NcH0QTtlgD8J=wWD6YL z#xajt;QVljH^$k692Yxy(-1@KbrL_+w}1~gEHX`bVedTgzL51SicJh&N4f-YKPGa1 zQ7j=67t1Tix#vkRJmVT)gdem82}g4b5`o|sg1q%{kc(_rI`-$wWB$n@6Fj&O5ssGy z6a*jYdy_k8|L!tP35PfO@R4T9PRq$6u(cLUWvH-tSozQbB{-XaB(&MyHiCyJ*Oi!BUW-`C&ts&ZkeT z=}n9X*wb6&@~(Z&XD_K(p%Uux-5jDJtBO&vO;IbW@yNX=K0wyYt_-I0bZf(f+Q=dP z^%|-63J20wJO0a@R{ir{uc9~+r znfa|B|2v7w*ZH6;4OE6+7VA1SKh7iexe`8ztbACD$BP65?oG{}55`NJ&aweSU*Ek@k7hh994`3NL;D48YIwGKI1TzNiYc z8;bNAA>p{ERQnE8YLAMrIzHf<%P0pRP>ZC|zG(7^nL0Ri<30f#K@!}&xNy0X6CYyJ zC)fEudFdZjd%%t842XyTtLqaBtTNj<3K0Y$Xi<)(`Ic6)3Z(135hYKcwIY*)gnoYc~kItqO>M!+^VTcn6BeK$nRQegh0^GMGx@nQcL{Bz!|S z#67eSJkc{QCcKyyOue5tt;d5fXqzwAo59;s4BfFi3pgPY^uCDElxs1cM^PFB|KY$m zoJ8?Eu&PM1xk#$#Sdiqp3O(FI43M2_W3_sFGJL537AeEvsff=gE)gn=Ow5aaX)y&$ zAxgYOIFyb%Gz-x>oDb2eb|NGB)3XAy08-qjCzC!4;Es|KgePdlPDv_UQ49@B_b5HD@|3dwIawLn4>x2q3V%-P12( zvqp=-9aZc>aAcmgNymV+tPSxxLyMbZyQ%7!2|$>-Siqx3s|+uMHlTVKX0*GAz#v$p z!e~O6y5I@FDoBzX!Ol`NxPY2%oQ*cgkqjaw44}50vzKD3zIVaLB_Y2S|I;G;8Mu=C zNzhZBm0Yt1(T(e1wH&F43g8EH;0#;f2QUf=K!Cv~0)&ydKGb*(eFGuTlB^R0O0e`h zP*b#jaUc}bj*-9*Z>RutxQIF9;u~NWLs6O2SJ% zJBzNE!`UDvFBF7hdZCMuEj(H##@jL7sSoYB7cK;h(V8Ei^QVL&%!2$&pb*W#6tuyF z!a|xKtZ+f1qKN5Jrqv*br6jC7IER#gtq;Nz%dE1@)UTC7%(ep_4=YW+@W|2}Keho) zpSwNX(=TzTwg~u!xD2F7(+6Des0YZ+lqik5RLZHsF6`T#;~dY<|FaF$jHd2MIjkUz zi|88*=oK0(tAFSRJc=HV(lh+2JC2+v;UTeAGEVXgP%;axlVYo$R89`53C_?5otnDk zpwF?103|Yr`LY0GxDvjY4DZAa-XgU1^g#g)QK7R9<;x;yTTS2W!jqUXiogY^l+V(b zP*tlaFW8N=x;NSAq=f=V5$)07)2_253aWVqFGxRya;6$#2#R=0`Gh%{AP!3S8w=Qp z%}h*bq)zbkQ7x@Ri~!D~m?E{>iR@|$Sz$gedOf3Q2WNTi^!~nGvz}!-C zt0JI_zkhf}ATS0C@PaW_HDmOs-n5s9)Q96})84d>qGFp8{|!u)veTd}yQe@ndjSF< zs8K}#0@jQvwfcyD=!cvGANuqLZ%B`#5r~}ZPdgXbU6<<4Kl4*vmApUqm}Ixt3|IaX;s0K$uuB?EW$vF9tD zb9jJ2@F~?}2|n@&YqMH7ty8V7TYw6S+=!M7Jf4w=jEy1#cHoD1pagVnyQN)-xNHSC zeLX=42Yt{ef$$N#o!qbti_$VtKCw1~WdwG>BdMsWXgrAL6a-0o+}O#?umA+d4TqG7 z+_iHi`2qv?^@U0&He*YhN# z-O;ItWQWexi7|zWDE%A)(6F*XUYNLu*ily!|B*!IE#IgTi`}rAG@7B04PhS+C%@XoWck`tQUpy}VY`@%BTCYS;Dd^q;p0W3KI)E&%U?#FA>eWDY#L%r)3GMVMAK#Z?Nh+E*;nUIx)khWD0 z=Dus?OVlg*>?dYnBPFik<2{kMP(GCiHNE{MWy3 zY$f@j0v!shz?*~S%AZCON``62-t2M02myA9PvMDNHRg}O;?d@8)W$5C!bGJRN8CAW z;$fi6PHoyY+jb_(>@&S=qT|}`ZT5pJ)^@Q>LC7!bYu~PIZJz6JE-1!!3#vYD=oVzb zW-FeRCCyd}fsStN_R5bYiu!rr(Emx!X3Hh{Vk*U*>G?r#GxDa!g|JqBVo^zZpTa0my{+df|m^+^X` zisz1Szn)|TFW^9a@4m=z4c~C`-k&j6?=K$l=$>svvP0NT8WoRmA?;{*E^wwOaT$+f z<2H`o$svS_>>LmBq?qtz$(Y2cZ&pqm_ZD&{zn|;|Hi;RW4xg*(wvOg*@~=LUDL79~Z|Nf;^O%Zq;*M?1d+`vT@-jE`DE~h~dh)elZ*j%(IEUTBsdCc3 
z3O8RM0sqcDZ|pu7^Gp-*(*MpD6esIKH}t74JXDFV)^7Ae5xqyxbfs<(|jytdbTp+A_Aw>My@mw19dI#%!XUA|* zEyQro(J&E8C-{Po_)%%-#qtqzXGgB#1Dmu(Bky>L|M-G7cYC_&U8uMiBY=(1h&iNo zkZ*Y%0xj+U3AhCW!~Y-vjW4&g&~KOD`F#;7RB5J?ISiDSxIoGaMdx{>zjV;L1sN-3 z>LdE1H=lSP5rI$oY8R<J zLkt2)1TL_jOl%8q2aw2;nz!$JTLfPv`0&(xY~gW2J@@-k=h_b;w^p9|-#$!;M|>^)EMm=il^)<^7@%{af<$=jZj% zUvj((bQW6s>;Dg7EGqErU#iUie)Em=d}JxoH~+dN{@zt5Aoh>)XMe4Ae~vdC0O0_1`i@ksBj^}h7KP>j3|+zK!Ou5V$7&@XYC(oWfe*z8K^5s9FMlljSs&pySrcR$ijVcxBJ2IW7 zDou)YE7z`Gzk&@bHe=PR@19v4t9C8hwr=0TjeAt9!A@k-%FU~HFWXV+NY^8JWVFKZ6b}dNgT;H6xb>N;oy^)~;WJq-$_B zY?)V4SYE9;+9XNHLiR)^X9O7JMRcRI`!(-p-#U(F}wEe-oH2H z>=``y^5)O)JlmP|JIUzZ!;i01cm<;3TazJEp`0KK<>UhxpnxJ(Wz~8Gy(Ca4|Md4?1%f!)5^X_=}#lxS-;-C53^Hwv5Si6%{1 zAF|GZ8?KPE!bvV`g8j2?x_qwcTjR1B|ERiC)-PJNZo=KueuQa z3+uZhm6b2bH;W3bMritqAh0m8`VXGP!WvSXAmW^~q0lYb5zC>)J1oQ?kNoVfHvcP~ zwXHn6sB+Isno0G#+Gbj@)@Ot3U`F;0H0{1c36f;cu43wu&S>Mk;@uB%Tw231N6hDb zMdw|(#(O)|wA;UaJR!CdYD=-Dn!OOk}9$G-!n2!I;7o$6kMzz14MftO1b(l9e5141x^0+L{~Fc=~ZYX2~VwgRCD zOK8FqqA-QmF`f!r=t7}zEpsoNAq`O?y9%hyH>9c|4|_-w$kk$Yj`AT9i#QQU>Bvq- zTp|-Y*q^u|D0EGnA{CePL>~FWA4#kt7mY}iC*r9V`p^f_x@blo)+k3j(uX(rF}T^C zF^(v#(duv!h*$(Mcyioh319TFSpiRf#i~~qfW03>BzoFYh7Q|qjOmJ z$VZTlrLgA&Z>G_sh=+zAQ0L(64ClXRboCN)v^kMLD9oBx%Qp*Fke&2NHp zEzJxkIe&D_a-y?B=S(L%+v(1C!ZV&nNv1sOIUsu4GoR)er#}1HF@E|ppvzgOKnrTn zgCaDc3SB5e8|u)9LNuZhov1RoNzsd9CYKo9Xk|(_M2>>=kVV;HNK48XqCC^2D%Hy> zSL#y3z{H|2ooQQMqEVXS^erFFDNnxw(4GR-DL)M=QRm{yq9TCJontIzD{IfCTK23@%`8}_ z%Gs!H^|Lt*t!PVYT6lW)v{dz$>p2pmYTpuvL(6DnNDu%W|;5F<*QNU@^De;EH++{m$`$B!UGiX2I@q{)*gQ>t9a zvZc$HFkxn#nK7o#n>cgo+{v@2&!0ep3LQF;CdPp^i7H*nw5ijlP@_tnN_An$(S>1-prY=(a(xCb{21&>}vO}n=3 z+qfyN=EH|T>suAXxo-V8bnfHGlPgEfTeU#h|6U6(K6|)5l~fhAif& z*v*m0MAmUw2KKh$5C?hAC!G z7Q*(|lLATk<(O=?=_X2A0>|DzAA$%Zj&J6v=bi-$hvr|=`Pn3cd=_fxp$x?dsD4`- ziRhz{B3fp5PwGcgq?l%^XPl~WH{y(%hAOI?Wi}_yQ3MEQ=BTW;sw9Q068{ultghB- ztBUZ^n&6+e_Uh|{{pp%!Z@(66Y=8y=NsxNSHtTG8@0m!@lgd78?X|cuJMC`5V(aa< zqfxtCZ!tOt?z!l4c8hQBRh#a+@Ww0ey!6&<@4fiutM9)2_UrGz00%7azyud;@WBWt ztnk7NH|+4k5JxQW#1vO-@x>TttntPickJ=UAcrjS$Rw9+^2sQttn$h%x9sxEFvl$O z%rw_*^UXNttnr`uo_GfimUJMl z>mA@$IV$+#Vu9x#t%xh+H{+Cl)gN!L0jIb`kyq}yQ^C4sXXl@n&Q`FOyKDODc72#? 
zYu&OgyIZc$I{WRd`=-0-9KG`n?!YhgUU(A5E*?V9bV9~6!8b2de#GgWIF$swLk1b0 zFz@{KJw0jQmeV8j&NIJrZ$3|~wR`^h?6>d!`~8v*|NJ=lej@$&3uS#L_vi0_Pw&Xy z{{~3F0vhmu2uxtFw5GrYHY9oFGYA76NI`@sFM<-JUIjPk5c_fPgCGpyQsgJX5-!Mu zC{zdp36;VY4&)ijbD<0m;=$NxaE2|MAPy_2!yOLr8QSxq3;*{gL>Z0`g9s{O4oRp) z0(OFbJ#1q8ZYYrJNwI;GD@YBm2(TzR=0;59qP^^98ZypFj0ekF7zL9>KvD0AX!O_H z4nxHxVughlj3d9+HKjV@MQt{$qiE;?#ExuEkE~i3?2NNU#CQ!wiz!Dr1R;o8Jjaep zGb18fRVjh2t8Xlg?humbSd;^@<&?uBL@!~}?w7GIv zGL@zrNG(uFm&if#DzB8~m|#OJ;lz?9b4inl17O*7M)+q?Z950afl-wxA3IC~f!6rKCoJ>~ssm^~$a+noJ zB_-<#oq??Kns&2*KqgsCKRP6Nop?t;`DrMB)~z+VEQ&d8NJ_F`q@D`3W-$MON{F6t zo5Qq>MkFebN>XW!$-Cb5g2>TPdGsGdvZzO(lN4WCAfDq1X+knGOo)D_l7h&ZK(={G zd;W!{0%7G%{RkMHW^ao<4HfiWqaT!j(V(bQWi`7g)S_slrC?O6OUpu4cwQ4JlOza5 zU3tU(0j7|E%4J)_YMx4^if|_Z<0Mr%5Q#E0pF=$rn~?q6ZE1ScwAvFRTE!+~YdXwo zp3;&I)oNw(C=nQXwY8!mm-G^gky_AHm4~w|JCT}OiVPE#30>%K!F#yLYPOQAgllqx zIn}6E2Cz##O8^B3-BHy^B??7lJm(4A&6q6*2V2&E-MMyU6yil zkQHw1T`&3H3fPse1hHawH~g;@AGNbb70rGRN8%E*^28K5svqiR%A;C2l~&%xUK7el z8|&4*$t{RylPF|{z?aDRLaB9EN#cbfxBtrWJc@|}8)ZR6fB+Eytu<}>4=e+Mr4X%g zXy93p(LN`~HNERb4RYF+tPstHg6B3JyUL7IGNGwd%%y9x=I#vEAgr_i282UVA4j#% z0*Qb)_L^7CBKpTj@iAWe`00U^wk6Dk^q#DXU1G8{xP+#pJS9CROR~454{>Y+Kw4w$ zrVdDN1$ z03ZMn0SiO`0~P>Io9|lVnp`@$H~)QbMoL{BZwtIu1PApgE${?J#KIf;U^ojQYF!^o zJT&W;Upza0XAFprH?>%>1zPi9R-Za4Kksa|_iFFjMkJgsKa_?E;_fFr;i!Q?M~5Gr z!Mx7-(D+PtEf~D)Qvo{T^@L3-fg5P2mN>N8WUqsvn%1Xtj-Xk8(-TYLZiV)ivE&ua zf2(U{y^Fh#5K-kW{2&Mn&{)4%KI*7BeofC({XZxFtY|PF&(8Pz?^_SNj)WZZ#`-+8 z9uB5{_npvIGG66Oy)nBH0&xO(91xN;F+ArcN>_CF;h zGY(A5a<`Hpra??qat?+~k(YFQPKa#K`0LDhgKDM5P7vtw-^Y+Kn6hYU8v_5ADD>)QbW)1X=DT#cZg51 z_gTMWWNF1s^;Z#<1RD+3h^kZ$-tY}T6#;>URS5xAk|YTI5DT%O4`iTFzjzjKD2xwc zfEi>YE0rOi2q?T(f^&ybt#oNUCJ}-t4sc~}*aZkQ;s+RTYHO$v&zEuJ7zjh?jSz7o ztQU;zC?HQnKFCIA;n-UvHHO^fX`R(!+Jsa`WqZQ55gsRl{l-#*Hfp=T2Uzxg3K5FV z7hZAk8D%z+Z2uCG-b9PQgk*NN5c61UnTA>Q#E`PKUHd2zg3yvxH4|$#Of4n|c;{xDj(H4#0F! 
zl;m+R_5cb-l`F9e7yt+$QizLjW`ubq_EV7>MLj<@P@M%&4aE@2C|VGgO`92)7Qu?G z(Qy)Wcz|F{4*&?az=#s55P*OI#Y1_Eag(VDCOXBHR5E--m3trQj!` z0LI5keg8QV$yuCNs1j5500IGRE_IkZ^_*6cJUJzuUq*m57+T8JIkm}HzZPL6F_#^u zSOj2n=a~`@a0`?)V@(F1Yyv~tBUPsMc^VOpp;c;VDQ4$qO8bWrycwWigaLSVp(1gh z1V$qZ%AjN7J1|6Rrq-0QmR63KXxLemA6A=9KQF6Dp3mraE9No z2O5|%(Zo2)Q%Hlyb$y4OCpTJ5)R!w^q?slT-|!8$@Ctjd2e$M8QMVFi`T&8j2ev%NEgFO`xk;f&Y48nkOcAO#sZ6L}A7|>!50Ar;H6QY?)eBcL+>M`h~S(pem zaAZICS*3JAhTk_7HBpLICTw{660*361VE@{s;b}Qk*q@*b*gAoqH+WX6ti`BD%BIH zvAah7JG_FNcK2UTN6=tWDwNR!A zu4YApm!z&c6H7bUs$X+GQQCrbmtJa>Wcb<;i{W9)C8AO@IlkmZ8CE$k#6L&$PO#^# z;U#Mf2Uurk$&{Ax9)m1~LSa}y;O!^dS-7ZV)2eZKU5A^#h$ zCp#}AyMOU?kQK*?Y;#0L$UH>_TGi-E!={%vi;y5zXo`VoOS?-3o3lDQog1oyo#sxn z={ZDXB$cF4cDHgqmXHN|5F6S}O$N18TM|_hwapSqXKRJR6m>?jwp){0*{WBjhZjgM=kLLnQEmPSTppLu8yf~#4V#5xz0T{>2Sd{>UK;8^sgupa?aw3f9z zhJ-(1M%;!l>E*bWTXsJOw1E?`DymVUHFlrdlV=EkFcrEcK~Q4Yq{);TxDxCfHl9kX?S6KQl&cQ&ba*DLFeyp*#%2YmyjINJW&W`=8%MiN&F|B7C?GTv6ZjMiv>V{6jFA zR&gxslwj7ls3<#RB!YCYnE*H?iv?c8Td*5CybA)g@5@?%gj!D&orn2E=<6>q^}!SR z#g;ovjjI^uR;(rrLH{wa(}`u27=IG>!OBO(32}C}6~sN}V2hWtM*m?y&b7uBkXnW0 zT-QUs2t!jj#=^(LX115Af-@7_7DKa(m=rdp{kIsQM`(aM5rhoDN723x_OWhN6V5fM zlbk)*^E;9}$&XAvjyy0;*i=;*NZRL^Y$;~=N2DpqpPejQM(~($OOv4Oxti;K9BcE-$BW}Zl8Pb7EMy)u z&}2X*J=f3@$5KX%g1;09iPsXJ_{td8c_}47E*BDQnlFfkp%p;WHjUN63O6}L%hmS} z@SM>G5vj*JxBVl9&u3u*fph0Jxx}^9iCwxqDTY45U;BJ4n=KIh;E-Fed?J+_a1M@- z@rtXs39}$=6Jxw(?D0$H9TVw;5g1SoLB)Y6?j{w6g6(^CI5OlMbH`Q0ZR|Z?%H7iB zEp;_pO^bD8fl%9AXUr;bb|*9wT-*}~whNdRgA6pW@FK_lxl2l3GRVx$=4`^h6P;3S zi(_4uh>hicdww_#6LW@&TiE9}fe{k23k0x;sQ)x-om0CT4k>)CTaCUlUd=p_YLOYy z;_R}xs$5K z9L&*x?ti(8cQCmV`E5z>KnXwq1e8#`-f>H2YI%0nH-n|eu`cUl;n(E+Ud%)19%0v5 zHYcBcWz&A809X#k?gdZH6Xl=?*kdNq0ZbreQ5vyrn%+fu%-)gyF+p^`@5MjYE)x3q zWIHTwR|e8;9+L`X4CdSu)&&G)zy*A;O=)TY`p^$SWfn6L&JkPin$p}gEY1OJGa4~? z75Nvqjy(t+xS~~EA4!S;57$4jt3R=3jQcs_%qj7~l_+65EN`at-kn&!zkZ+Y#-Z8?ZxT_(e^_=Ecc@m1ofu)?M&aHP6otxmRt^LZ3%C#?H>5~>L>V@0_rIc! zV&59#c zQGbW6alWy1zZ$H?IhqSq@yvz&08v5UK!ODg9z>W>;X;ND9X^Cu;orZA6(LTfSmD_- zi4z6Bd-l%aNRlN@o&XXSISLj{}=}A!8Jyz>V)ZaU}SR4BDV=E!I*h&?;9y7%c1( z6xZM_3lRv^B|xAm#f@^ix<#B=@#2Gr87CYY)8^QlWJEs3oLTc`&Y3$|-btosr+1e{ z3luH0G-9hnjS}5@HK|0Ug8yV&sBnu<1q^=pR4r#AY48eJnOg87(t`1Zt2;BcT}v!n zib!oI^m7yghF!HDh;zL3`FQf>$$JEOxoctb?cKkJ-}dvn(4;Sa6ls>N+j34{7qo4n zrkpCLKZ%gSfRCmscmM-Hwj*CRxOB zrK6sDQZI%c&_^EvV$tUu0vuE88IPg@s!BE2Oer#%#G8*zIp?IaP5?DbsK>mRtffDX zBEaCp2pZ&Lq9sp63jZpMf)fsi{#3LRCAjjs(jjrU^fH!yZh62lvz9_LQc+RT&%DS& z>PVnCM`g9u8+D>`Jft8k2(GOL`lqP@x3T~b0xH;}K@5(H08qFFG%h$}mMc!fG+m8I zI%Xf@%a2wt@Mcp#Vns7nXulmuvd+3%&0BEKMVGTu#UsgFk`|=0siYQ_*C>hZvP+=8 zAoPQPW0~y8HbIPWw8wNI5|P>n_;}@kJ#?$8pwmF|3DkoxuE4p?(0g;NnlJ`A>4Shmb0hNDT;+Nc2rrtQQRIs@EQf$WAMn** zgHAimuxE{?djF)NU!thgv7)ZJYp)LiNv)Fa8_B>4T`Ov3lWa{@D6OpP6R4k!#GsZ~ zzS)C-x^pw}*xIt%%9w#|gL&$&vA(*Th4}zvmj)BY^&gygGV^fD8;S~Sk*GBJa?e3l z3nzCOk`5|n=atvxiu^3@%%R|(t)r`oN=mo~Z~?Owool%RoIN7g)2RTP?e7Unul~H- zXUiE<*d-ei$)=k$qGuJU~t&y5CZ{8DgO~le;!Dbu^@;5!tqOB1c8oLm^=znfP{iVzBqN&A%t$g)Oi0Vr_b87@ z2Tz47+m#ZPrn$U^D5Wz@Q7}Lx+yH7!|KJ0Wib6kwsn7z(AX);yMxiEAN011SOip^@ z6X|W_kXdvI7#EYCgp_G1tAb=E%|pqCgzhF?oZ1&3S&;s4<2B`P*R5!e%q0<*{!@mw+!*pwoY$f&(Zl$t!4*zQA_3|5A0 z(Er3HW=c~gcX3ma6)7D>aMKlA0i|~5BOgTIg&}##@*r@Oh&cX2C{7I%g3vF z3fD4qUgsaTsM%AjGcSXf(lB`e0vTrI5k7`AWPU=21;XK{iyq`Niwu`dB}$U;33W4S zv8lL*x>UE6<}{P}rdwV)kfh-0l^#LjQ*t%Vo)QFJTJ0*Iw6c|%Ndz+6^ny{|K@`A! 
zG%sOw)q?b40eu*9B&jI~CYf3iOO^*C2o2?%_BvRVC>5X&ab!Z@Ia6_}bgNe?W&eLr zCLP(VP$2>0sY4beoYx%2DS{M9YWk@!*uYUKC9K+j!V!pYP=_R)`wVH^Mp(#%=Zc&e z3!2tNTi%lDED&{wetx1@gGiRE!^2HhkkY#9O!kL(iim+StB^(QQ8sa@p;--*lboF8 z9nzxZKyky;s*!L(u~Ug&qsr77vBy;ZNbFw)+gtjcq@bR($wT3a5T3qOmIi_&R#7I| zsYb*$(?y(X4(!z*4KSbdBg=3@WDp|_6u20BbbMJUojH(oO*;QQSRUxaYHp)tbgdG8ILGz+a*(o^62!UgCJPhB4 zFp4)17?dA$vtK*eO&{UVYD)kr8e?vb&qn*0l_#01&3W0;t?ChRW;kOq|44L5f|E!0I#K}= zqfylMR$vaea1@aau6eW^;kgr>=+$nt6=LTIed8NR+S$cWh$UrLBy@(Rh-E7BF-U6c zuomf7MLeRDn24r(zJ~XjD7CWStdY_c!tAk`q~nGhMTsogiA{jo5dS0nY7Y>KSh}Ku z6mP^rIJe%eyA4fb-mXh|B}sRc;melQoH*VePY8=Mqr$_~JFCQg8CH(X2MO_eV>v-h zL<$a4<|Q0mpcIa##4@sXfWABa}t?XMu9&^_}z< zf_Z?;A44&psK&rXi9iKn;f-*Fo#{7{+#)md!?Vv>RiZ%^(lrT6x!1O!?J)1_Xmnbc6+A;~UEATos7Z_(K&&miGewIt=JDAI`$l_Z*I z4UzQF{q8U+G2d0srT{oBtzt(2c~*cAc2OH+U;=ZGKmQ(w7Vw5X{s9Qey9y?# z5lQ9=+V-7LFDg8FH#Vi%dD4vjAwT6}SEz*djKO{N-3)!N;BczeQL$=RK8x5?LK(T;8@B?uO!F+)1$;xVp)yd}LCfepl87x51eM4b!b|KyKa|DiSSQ015EIHL8H#{i02V|f zpp{5FcjL7x`-wae4nKGRmIFhLNW~Y42}Wwh2y-`|^Tcz5KI+jd{5zLK6QxtkGL?{~ ze}KCNl&|QU#dEw7B`l}Mf*EP4g{%9GzxX9Sv!eVO#%1b>b`vB<;VftSBsMX}hQL1W zYDG^Bmqn_aw*$A?;-ENWq%(}gb_qs`;s`UzkN@35$BeWQbySqC0;%Mph;QhJe?Yu1 z#4)jW7NxS7XdDQ%U`dtwx9$1}HL<*xG^jIls8;NvgFvd8Ac#1jjPTLMZy~@l^n+9Yd28~oxb|UrzFf5u|TP$sgAHpe((o> zfXB>(2-JBJMLftI+z2fM8>BjkxF`ojaFn4OL7HGBQW2XrbT@>EEsq1r=Cdf!j1-K( ziN>=@hH#AL6Pu9OCW^Vl!sJa32|Sc@E&qcMhmU%Q%*!zrGzbt9z=W`g?rF~b`mBBk z2dT@yo#2QyWQj1WiBwe1?~d<(vpk+@J_u(mrJg z5+b1~Nw!~HrcQVQDg{1&z*3-4PDRR?9IP0!$vfvGi37?NGkp&UQq#n+K7$~cA*03( ztrF~Wi89Hu)kM>2bjeNi(^gdsxNwaZazBfR2``w4D9j2rYf!Sd3Fkx$F;tZZwA1~v zRKJPSc9AgiAh(5Rx2Iax%HdNSDXLPHK)ZymY7NS2b=7U9#o1UhDP1NYxE1MmhC|H= zzcVp$!Kk9(G&x$I3hugz({T-Sj~`@UrLEt zv5i`#OwGv{r1BJ?V%37M7yo|Hhd_9%di|8ts}h_9ss@BuI+@TZA`A&so=z>;lx2*o z(7KU$2f=xicc27CD5!i)6UnfNiS;U^=!br&I={NnfW(p5f;sKky>5GvksX)%;3VZ4 zOqGRNHW3YXG#^p|*SCArIcqeNcsSR%SL*t!f$%yyaSphIsxO+8Xaz=(xWk|lA5o3k zxZMn$I9Q;N*(g=i#uzfCZ81>uw9x#kjaWmSjStflM)65j9BHT*<-CUwLJeA^Q0!8< zrCd8Hhiwvww0eRkz#Uu&ET7~i{)i$3ywSDb*fq%_Y-Oi99XbWG22uu^I09+_ghwD#IMOqiJ&Iy7$!i^7cpu)fakCN4| z3Cb@*b4H(I3_{Z_YFrA6($S}xoYO7e&7dp-`&!;4%Vv>|Sisoe7^UNFURGRB+-wzi z#ET(84;je_MKTkV9go-TS58%prCho$io-dSK=WK+@~GPNi3@+5VTNQKL5KxK6)0wP zqUq%@_Q26^xjNwS13(#!?t`icdZ-rptKZ^VlyKCJXr$H*9U?qF!F5UI&0)-tEP@D- zW+UG(-eGU>VgKuKq#Gea1!fhN!+?`|fL)-ytO??b$i4@r)z(~1wLJ)~8I1rv*Ppss zH~wPIC^K9sC9bHAjGf_W6)3w=R6-58;fhEtLyf0}mK)-OT`-Y<@PlXrA^pqa_af3F zjHe|N;s^}S(bz#k?$zwMZj8OtU=!d+?BXxep zMG`pNWx$m_YLy^oT&W*isT2pJjkMY5F8i=6=?STBD4vMGikXR!ZI7Zon9jMO-q{0v zK!@}TtQDq-C&moPHA4xc3VmpWSD=c7iYn3Ux|skapD2x{>}$=qw8$afHp1l(nVYMn zT>rRuZM5zQ5#zz>vEIeN=gTu1lKjG3cm;3>yMWdni6cR3MhtJt7=j$K&l?kPAON4W z>EH@Iu4_)Sv?!&T<=N&B^r|tv5HQ1YN7=Er9F`*Ku?a5Vg5YjRV8yMYou1@AsbNtv z45;Y@YN!U>HIfFn5ys1?XihP_g+vgEh$*y+WRazeEwL0p5Rz^5<_z;%3P#Co2=b4t zFeIipYzUSLK7eplVT*#MO^Rql69$)@#UsDyjt2Q|?MY44i@yJ`jhE&<*I`@Bgs* zRe}_mcH@X;CXxv74TdQ2>&C`XHR^8UM1&v!E|`Qd5%Zq<81B7ES6)N!dSx%4@`W%E zfhZvZ@u(09H8Hvs&q8#N+ol&iJ~DuCnV^c)9x<_1%Bnfnfykp$7l-CZ&FFERq1@?A zkyGg;OUzD5r6>C6%-WZZCFeJ|K6&2sw*l;%20F*A6WTlHp!&50mmp z0a_3UqKvl+Jg&kS{E>e5Rf%1kT zi|&to#L(@iplVnmFaGkVjRKJ+)zq{&;DQKMfsm&?n~1A3_?rfZ3IYcbENJi`LV@-G zY*UeiUcQ79Cr$|QV~Cqkf|RAR*X&gR!nWa#RZEK_yT*C#{2j_mt4@ZiFS zKP^`ltU!>0I14;})lYPcbX z*=;7>K*|v`5_kq}7S>5 z16F8dmzYw--INQ)H_&&jb=lcKPATQon0&hWDy-NY2WLnET?f*26QzX~QMs}hE3m-H z##Bl(1}hUrp0>3RJ@*U*iwKz5beWq@_mlJ^mzvJW^VKRTdKFRA?d; z3-FFfB#CQ5xJeZCv{4}UNJSPPQ4JG@t3~EHXqHf$5TZ!vC#uN{YKlp+`iZu=vIA$& z)UrrkHHnnf3@dE+R3_(sj4dn!LJkRgB%=M(QKz6#P)oQU+! 
zStqs8IvkGoZ_7ko& z6GQ8FQb={!2LFz=I$~JIh7G{!Ye|4y*D8{ctH8!qHoUt@B0Ha&Pef^zXpIsbEtT3_ z{FKaVn6l%FZM$-i&JYSSf^7K+g=FcA-)M9c%%#S=;vd!WbHCl_XJTkF$gDJA`Lw)h117kM;y0ltm=W2`EK+s7%^ZzxrQJ2+(y7TB5Kvl87tw zpZWW38pa(E;-~-yA`n2RXg$j>7LMzuQi}#gr^&yJ|6;AL|NR4pl7C7Vz9_}VG*K5J zP~nK*Tb#r$5J37_zym};uAEOz;6&5`6{%EUb9jR{FcP`-1e)OylJTDiew}7?$Y*2{ zMPyTL3?N=?*8K<^&AmlA@IeH`0zbThp$!UZ86BJOUB=)7Kg2?GOiBR~o$F!X2qs}f z_5Y3Y1jokx(}alJb+MIMv|tk&j*OKK{#8Xe5Wx8K10NXLLA2TUxro^z$ZHtG6NH2K za7ISm-*73R9CFB?5sbgwju`%i&lwk3?BTv>RL?wL3$EYXa7stG4ia`A2K0j$+Cl|{ zVO=EG1Zr1ZXax?5iv&5t26{#22%bUAMk3CkDBfQgz6#p8l&?5p;<+DT{F$5>5OtJK zp%vlG$e4Us8(c7A1=zyyh0gS?$v$bGPe4@)aRd-t0>`9R+C7CZj-oUEhEvtcDY{=s z2%@8%#7q>K4*}D4nMg@^U)X>n&DbJBF&13(1?N>j@kvB8l1IX27pL4>bO3=fWdD&H zvYtNS+tNKFKT6az@{#>jA$%x>d1zxzcv*BnhcO_@a_EI~1dqPVjL1la>0yO95WqSD z#sf%1fUL;}zJ<*R!Y~L#AoN2&(4zW)PSEuuOI~B%MaDlq65lva;K^Da>7dJi1#4kd zLW-JD2qc6_(bu#_=w(G1(jXxg#MjhhhJ<1=v4pZ%z$pcS4NiwtT-Zvsq*lVk`?bgb zM?kp0hp=Knbjx+TWYU1gXUxV5mV-(#8CV^JHJ-#3(u+mlkx#|NEvTO(;RHcsR$Oj~ z55m~$T#m+6zy^i`T&7mKIOSHhq|R~3?m&^hn8ZW&A7R`PiVa|jbk#w8mJe~3y)gew zi#cWeSuWNpWr6V&>-kP>Q#lVz*4nY0BDA zbwq+;#82V{FEtxN6yYq+%lynxJ01j2iUmqe1XC1HSkNI>?A;mm%f=8yQksc}S(H6N+z371o$cP9T$nx%h1GOKW;tJV4h2Rqg7Xkz zz2w?2LIpZ_13y>*E(B*nf!zFbqQ)GoVwQ|jl7R$a&30uPN8YKI>Zz-m;%Z_SoIH?+ zT4MqthiUZ$5HMMh4Jh)U!!inBnF5Dm#Yd@egxa)*g@05KJA*|jX-{7iG@V9TA8idq@LN>ymn?u_+dgA z&&&G8H^JOX^b%|UQ&v6&i$)C@6hk=RLeDN^zbvO9Fe;CR1j$N8m!=qQRz(PojOy4% z;Ibps9&Ve=S=yZlauy9p;6gZrLqFg`(dyUMri3CCmXuE0p!ENa&NO6ZVaM&U56cvn zxfO*z#!%sUm@zcT6^(6lq=lmvS}6%cIF!WRj?5MUTXytYs#xq*0x3cvF7ajvb>wHZ z3J`1Qg#`#9@|AMY= zQWyiucFA`%tU&ZbEHJD@RA5qCwPedBAZ0<}mnV@Vv#uZ`#VPJVEBdC+jfq=Yanrix%Vw;mWZ54lOohz=*I1 zD(wlq>y1HWt}SaS@siF2XIYt71Ykh=F{PXc4Uh7};2`RN;q0R>9MH{h#tyCxE2V(Q zunylbTPUdyvy&vL>yBolTBWFc8L``5N|QW^X#CCn0RVzQMAF`zuov~#goGjvPpE6aF)Jfx9pAAm?~;mC*B;*vN%&85 zFr@)S@2@?n;HAzF#Y7Y@5+L%8Bg+RmK?$p{gyq~38(SVE#}=S;&PM)=A7DUViY)_W zunS8@adoFu6rTO8@<#%S%(x8r`7(J@>MXx=#svS6;(pfp<%Nh4GY%P0h?Zqo$rYd= zBtaMh5Ij)8KrvyU-H+`?BE|=REz!)mSd0njR(KWxVC0$F0s(AfY#EQnP$eqW1e(>` zH21BP80Rp8s2#(z#@c5>YSW4YPb_Be@@z&)@Y^SbraSdBHFMdq;`5GX*>4{7Hhs!I zyJ$uf$x#y*-pOQ7bY;eb1p!0=0oVd4VQl_*@yXsfw ziJ>|h+MO`&M#p9VGC?f`mqp!IAQc}K;8+sIiEQmLHHxYDE?%42ui%61sL~g z(`#JZ=vV)34X<(yzl@z)4vch)Xm>Y4tR^tyi*o8sS{OI^e9ZDD2wMIaTzidmcgMG} zG(L7?s^A~$iZe6L2edKQdke>;L_l%3!XA_oA0*vS7*Se;Lj~9ev&O|VZ}uD2a5HMk z#cE1_dpGe$B+hi~9#@BIFRJt4&M|k%dJhb8h)g^AU|0C;{`gS25EpM>MKr!KCLJrA z{DUz(KpzAG1`IeZ6nN602uTPb8MyxtS>iW=0SM7uTSQxUE-BnNw=;%|_K6y#zD#B; zI*|nc4lj*xl&;JBAxFtwb{u*Nc?*tay0|pQR+HKP*AfSUk&~(lKvqM)g zflY?}6w-^ui4%2YLB@@~y+zDqQdd_?#VP?H_(3c1LtyOrb;QE$_IY)<#(+R9M}A8s zei&zawuD~=k~_NowfRIp<}EK#x5Spa1jcK~vC_sZ2a#>`492%f(+tN)y(`!T%(BKYQ%Dpih20)1@c8W2S5j@fH!wT)_pp$-SDy(0ZZ; zv;xkOzVO;&-36YIc#8k1VVp+U^=Iqc^j7Hn5IG*X&pq}bOcCAvTBv>r@6AXoFR5=+ zMaBi^RC=z&1{rji)^;`7tS<3{@IT0{KKMh^d!X=Gte$jh%QyH?VV~>NaDoYia49QQ zz!5arwCRg3<8$gS6HWKu$=+C#HnG#u`^H)aYf-Q|OLqs{bG+i$`1T*oNr)O%@&ocD z^CaeD{{QIy-#kE6sPK%8zzUt-{WG}GU?+qX6fS%>abhQf1Pclz^Ne9fj~y+36giS) zNs}j0lAM^5WlNVYVaAj>lV;7CoifU-Rmq5ib# z(dg1z<$6MWN9OXOo5lf5e$krXZ&fe;!5sQ>jp> z0!=@SDm87nx=XPljjDJw#HtLRh4#$y-$dHOk&pCh0V2A%bL)ECD)1r7oWdFE)!XnQ zV!(wDQzu=q!r1JMJC5CmJ!R$O@8QRnKcD``t=m6xocz-0&WQbWE9$h>1gfi?*(|F{ z5J7TV?X`ve367|yDrgY3`bwHmK8MN*&ZXrbswEJ8=2Fd|>Go==I*&qoMk>QTx(>XY z5~f@}I}1TGKp~6cWTITt@8Y9|qaV>!LBU>*%Ky5yK0{hR`x> zEQ`ittg#<|M5#wV6%C1uAk(bW(n~QV>%Ir=SUtyL>U=cle#}4q;4#<7z@qrMl=6$SF<@w3Sq_RFw(LR+kD&P6SyZh!WGVI`gvL!Yvn8 zmsbDE%2SfsddeR`0ueU1Ys=+Mq|hi0EGJ|89OzKA96K*ZZ7ZN?G2sf@7QdDv3s~iq zS*|bMWDh3EC<+x?7gf77RPAM#Shb*DFj32LV3f-7f(%M*tI%J|fNYs8f*?*Q;X5mE 
zh7&;Hl(-}pM+&VkyeQTPu!F**E;vOQO}kMd!-|?Ayu5HCWvA(`+wQV<&Z@Lpe?5&$ zG<6A1d*^Uqby|l&AVHQu*jO0GYTDN);?Lt?qJd@u+g=4anOS} zK9t&OGj7jw<;r`201TdZo$}jp&poWkPsEt$b$7E$Z>3!mT=CniI$TVuiC%EQ0}=mx zK5@K#*BxPi+f=IFu&U44?d~U=%;FOV8oO-2$|hD~?P%odER=-d!3hGCz<@dH@z4Km z*595xCUmoMKPOoUO@m`xtzHGI$Wi4>&=ZqDu<{Q9T82~n(_gY=l@=;JWis4KiQ7oh ziJx>YWcW#6LEeNy-tA5_yUP(t#K8vw#6=QhsNV%~m_uig5K~R6iL34u!T$`%cpsUU z^B|`ia~Y%`{BVvDork9f7H~nPq9Cj+sKdO8WGgE)7-}pc7Ob&oS|+hw89j0`7X=0( z1o2T8Lx>*8c%*4mkzoZ0@PJrw?hzOeN5OO$NZesBQ$;M`OPB|tAs+4|Jv0B@&8Fy~ zh44ckaAAmouJS4d9&jg@A<7F6DM**B4Nj8s-%G?7Gp(HwjVx@BdQgZmW7Pr!6?gz2 zFpx(J>;R7#;h`vj8BEQ9k}awZqRdMU{F9>q$ZqA<78n z9}l=iI4od`Ry@FG`mg{2I{3b!4z^N74W`c^;;w|AX^I^YN`(}}Ab;SZtLWfI1^nSF z!LiDsN`w_+JqfjuIAok_Ii-<&@|U0{3ooZFU)DZ@&ag;DrhJ)jWDIJ84(ha*U6PK?(xGdD5?LCK;YsQf4$#VRhnD{ zV+g%zDvoQfXjq^?uOvOgW;QE;4sYngPEhejI07-OXDSk_!)Y#)VW0SeME~GNIBwIhCmt|N_&UjB*TOO7;8s`1^(tCCsH=ZCMn7URDO=9yIfc}g zP|YO`wuZ)LI+DQ)JBR={DiDiUyy5{10tAGV%;zRgdMXWGl4B~yMMVo;?aQF*UnPEvrvut1>0AO7I%BNam3Mph-% z5&q!i^!MvgYJ^R;3Bmn+M}hrd0YHc&N-=#cv=J^;Syz~GR#cEfTjIR$ zdR8hGHBH09!H<4m(t%CcW=qa3L{CziZ2LyIR5Id4Tl*=c#ckF}*9jRnLu%%`qXa)) zo+qvlGvPf~3(4WLa4RbnHngght9WL`1a=yEchg!pDT|TQ|xoO!?^#Ddu|>ANrfAzD%~a1&AO+-gv|v7 zqC!(g(9G7&I54SuGolre(4U>HkiI?7qorfa15BG`~4)X)ZD zt|LH5y?Stj(y8V=ulLT1`SS1;=OahhO#*$RvyuWgHp(DqCHxfUULcQfKyZ0P@crZs zIo<&X0)gNXO)43L{*EC=7)w)?pu%BkuaC zl->vy)$vv$u$vOG5od#6lu79zXBnNz@0=;J7Gx1W&~W|zr4q$(6-~nV z*drpggCRi&6z@+4jcK)t~ECfPS7kRw2dlY4>rhT+iWrGEagrb=9(xiTc(Y~=**nbqg?Lqwytp~BC@zRA|jX4 zJH}2q^zUpm0tvFf3nWoULJuLC#JEWEE9v9(YOzYNVkMudLc$Fq5Xf*K?o$jgs}!%y zydtrNaS^?RCn>8f2WwLlYc>C94TSpb3nk*D95O{9a>dY4^8_RMf)Wfh1V!F138G*L z$bcM+Nfi-Lk>1E0!Ll=Fq8+&}5t|~DYA^P*Csc|;En~8ge1qe_>ox3U`y8>51YyHg zv+K%3gdQ)oe#C8FBjp4WBA&90((ovmGO^a98qw$*sZCX?MJ&2t2OkeJe<`&jCG9{Mh4*T~8}R8ihf2bdMh z^iMbeFchybF_Tg-psx;vO@*irDVQ%av%-`LR68NGdHPU6YEghkP7qJa{)QzC zF+2ec{w#&}+;gxr?I8c}a5uN(N7gHhl0`Z9!q*~l&IUpu$gncxC?<--YZQVLIKfcP z$oWo!D<97*BlJjPViw&IdLpp)Vumdf#wLA&@A$1mL)2+F#Y&xtKL3eF7$rZwZg(c7 zKhcl{mvYX0ZN=a$DH(;;J|wj?!Y{VM8JdAbr;{>jD@=0}N%_=6kMLbwuSs`A(?BH= z1@B&fLZtq&Ws315Opr^-iAOLE41;6`&7&~=Gp?#pK+9uIokfhyu1Z64Bg6t2vLHCI z#ozd?B7h7#`*a<>vq2~{Dd3SPaEjU{kvydbdYq6Xk*O)DG@#_LH^KCDoDJS=H1m`b z4TV!IET??j!$JSuRBC=xg(|=eJb_q?X%30Bn1-)gU-d}E(hp^$W-OE_d;+If;!&xl ziZbw51`?P6ge)d;N|gdVg-I*^vRD=4SR3*whlE=!$0E?wMQ|cGN(SCYsd9dE6X`8T zIdfqXa$BA(T)z@Dw~a{^4?%Xrp8A2|aPcVCWeQ=kqVP4CVgu=t^m(Ep_2N~Oq5>Mh zvo?k!xygBP;?tD%M+RbQ-}Gj9j7>r6#aA(_$GAG-H)kPjWZR#bcdG z#1s!$o1$jO^JGP7C-8&|&jolCqAHZM>z+|!g_ghQEe#LWW(hV~Epa7a>R@p~zuqW8 zGeit84`Kf(^0kVVp+b^rwR2p*1ZrBW`=5ZR7UAV5*C3)KGvnMTrAWy(683G)A~$44q38-;O~A z*ZSB^R#}!-&ov@s?PEW7A-FR)nkr7JghCQkT@EBzh3VUX^$GNn;apE4kjY+mf|-zO zH$*opNEb@rOmBS6O`^|PHw8!*@MPdlQJJkVs#8w~BP{$jIXN!@*Va{gm&q_Td7(m3 z4R4&#j~~qKYV%IpqzAFGqyq~?PSCP)Lu4oR=z2$Ddza}1lZsS$qN~35AOVxvgl&y( z)=mG`tBaadga|@}oE6AU%TD1}OO2A(7L(oriz_m-f?&6QM=w@UFMGpnn}|1F(C>JA zq5??7)VS{?lMrY?%U25TaVS@q&;^+U;hsQFY$57EmX|3)4GK-v5_wk+#Z+4=5{1(j zN>+Ga-9%Ydc+f&`Hg}WPNQOfkB5g2nP|%i#<&JPg1vayHfTM)VF00cBkHh>Bvf`3n z5Q~}6Wo#kBf^+C4B`9m*f@_yFfLXR)Ve=M07ntsIjJ3k89;0oOEM>MXh1r;WHKvC5 z^Fi@uXtTpTw#AhnHa|Jyj$xUPkqcu5xQWe0=*n$Lk(eKF;lg5r4}qCN{&d26RgnKc zMJQsSAN->ivv+wn=t0I4dVbYxnT_~*uccDX8d3R;*?4_>){WeEkK=FhR_tKw=rK}A zo<`urNfe9T|Y=c;M zDQtNk87QQ;OLKqBrD0$m~O9oC)iUkc!Q2xgS9tm6)BnT_4lAcjQmL}#8kA;ceGiz zi_91y3c?~h2k!Fs)nr*CR+x23c`@0U|5l^6ZCWKT;_yn>mtju_rK)C^<_kzzaG-XlOD}gZEC1`orS|n;UMk1&OvD0u#Y| zZP_F;&Nht`;$U5z49|LZwbjG|cC|G|gnl-SV3^1I6=r&zCGdNtfjmiA1_7N){Yoo0vnt?(QoU&b=_x>eh|bTb}k@xxT70e0H`wl|BC)qanej;3C8^vTt`# 
z_}RmE(HZ>+PcJI2oVQ=B9gRYwz$_(aX0V-fq8%j5z+5QC?}8klJP(~SdulvP;fohzu7mOb<{Q(_<8XZDxa$BVq6t~meF~e_h+Y)AGCLCO< zggGqW9Vl4>xp>SyoU)IjURh(ErDvQC9EBG{*G*vP3p5#luc|)_?dIHQP9c2IU#3*peD)x~Nzi&`WOsj<^ zRFY}BOi<)52?g|1% zI0WK%c;u3G#qRH%)u*h!w0u9B)*L<@U8zRj9_}+ExA`VZ;q~ri#&HQpE({uZB#P6~ zbx;{Ma+2^iY!Vj__k!YNCxD<6o*?J`{wVIfZ#MThwwK*Bd2>6|Ge$%H-XoN48ziY| zy{7(b>l{kz_Th)r>*EhSxP_!Q)x}KdBB*_g0pcAQGS3FSD_9|+!i1eFka_oSp~Q(4 zD_XpWF{8$f96Nga2r{I|inRm@DyOiVAPXle{CofTQX$NU6)cpSxzL|KiYi-H=*KUR z1(rhvRZv;8B~F|c%UP%;^Ci@hMX4Hf`A<--m?OJ-6`9qg%32nWrq+EnvLwRp<{uWKl3ff8TG_xusz$pjXSsQ-4#``M47NBRIFx+(>yDvctU;p zB$7SJPaiHpvGm0<`Shq!3xT}N{0_IGN#Mk_S9N>yAJmslee!G3xnfi@k-+d=PORvvt}4YvPTXRh%jQG|#Y^b%xPE%r}jt$noFhrGdL zSY#zq7+FLFWtfnMWOPE=*>KK?k8a1-g55L3n>7+iJ}@xxDW zE45UPH~I)=0WN`@WZ-Z8^aGx9acN~Adn*kE6_zuVv{FqL0Li9I&v*gEnn*@8R#wl6 zX(o~h-Gx$~{mECCp0`y7S#AEc2^L(CahB0w3(2_QLl;V_;XfK8B%($#?l@wLHa?>w zs3k&l9BPr3*5X5g{bS>d3;AVfXo${AYpuBf*jsY`8A;Jff|@s6Ik@P<&zfl7gcNrJ zcIo9pC6Nb{Z$lNete8WU(q3706UXj~!Wkffa%bXcM95juJ-q`447xNo2R*OPZDQXZCj8Ta2btbjKG^OZ}VHCQQ z*!CIsr_p-gzn|nu|)u2L^UDV#?D5(?(v8-!F zAnV;p{vAfR7I`^UN@MGmxMV%?(D*fR4;aUQtaV8%9eMmgto8t^JbZeK#cXqs7iH#M z0L<4+%!WIN;jI&~f>o@@5WIveh;|Y(UPWXis|S^kc58D9LtMfcVK96i^df`3=)zAe8!}!nN&_YwdkT-3SyY4gtBgC3=4Zu20F5MBOD_m5PtR( zJKGrM8M7NE$Z8ZJ+*vP>c>LF?Vx_RjD6BP&EEuFb^T$U)k})kRQrn7zB-uo0lNL

[GIT binary patch: base85-encoded literal payload for a binary file added by this commit. The encoded data is not human-readable and is omitted here; apply the original patch with `git apply` to reproduce the binary content.]
z*Ro)|8o#?X(QNawY1~jFrSr&>6{<+aht3nEr>7!*3tEhwQdh38JU!_=i7bePpVc0u z3f`i(CIDhUoxjG}>#xDyb2N{}UW+F5&Kg%U zfj&0qJh)Oul&>R+`Zi?g+Vz@ez0RUpt)EltF=ShHK8bKyagz=4rg04zsHUBUyjQO{ zrmx|$5-O=&ggDl^p^yw}F6_}sFMT?Wp^ee4v)LoG(85U8=xul7R%lYj>V^zR^(eOa zX4AXt-6d3OvH%wgK%?(y!t(7mK{T2FHwwcGZ8bXA&aRGds9zc8uX5y!6+U#WrUu^p z^U+6ZY*9n<+@r~WzUfBT+wM`km}dz)zcZ{N^(79#Y#A8>I{V31gEk=OnhGU z_PL38=w~kb1y6V2a@3<(mJ}i#q#TqW10`-^Aap$^KQMIRtn?KZ*7ze%Ni&?5dS*eU z{l-ZcB-n;5IFdG1MJzfaNmb4`m7Xa_V=Z|H7chW<9Xvo0;(Ov93z#>H>CtE9U=4fB zRg1ywWp=R1A3}_Dlf@wEG%m{j&h08w5`w@_I(u{C9Y<(5CmLoTMpO>P2BC9^gL5SO^aa!$*rn5 zEY7WQV}AOFANWv{1)dNC;_xLe0UA&yfio?FM3i5yq)fNn4?EJl&!iR!rH2#*nmr^U z-9DBQf^@4iN)eOa1nRDexUV6|LMGI*L?5P|GJ;8KC%@E4&+DLu4+Ovf0cygCXWW7S zi6~J>ff`gMZF39K<+*v6F%pJfP*&5}bIx#8bPt=^kG300Q^{5D%yT0hU5iunv~6aq%W{ zqL`M+;7}q+!Vw@F3d2MKQHbvGhd+`CA4+x0p+xl90B2^Rf6j%VD1t~xR%jB?eN#(k z6-{EjI4hN+l1vtSXQxs`P9jEt4|~u@1#D4)o8Ez3+$`*Ikqfbi0qdxMM2*ZI%h0bb z1ftHo+f+T|p5M}>F}LHaQ{l6ob2;Z-$Q-S=IAvC}SVferMM;9vnjov7m9}28EqgbE zl+7_GGP~I89rU69k5*tH0=DoNSqzq31uN@P=UtJ5VR9%+@&&SC4R5%Dh&ISJ3N84x56k7+vTs?~j)lx2#N+@6NMJd2;Oy8FVrAu=~3j|w2&VM}M z75oUq1NQI$N9m&vUWq4=7aV2lShB?@D(zZ?RH;MRlZcxQB1ACrI)y!#Tl~-mh&N(Y z>UtR=hj|W2g}CDCUs8e z3=)V4MBo8{IC8qeCuL2u$ZuCx5ocZ|o${P#%N;3*h$Td%??7g<=Mh&0yrB^HJq77FUuJQQD}C)It=#4*M}d=P@XU|| z`{B10h(#=L3Wx|8tBSha^tm@J98bs6B%G|UNm!P@Q#Y2G&ZJZY%-oN8zYo>_sRozDmSh+IP1icC|UcA=^+Ei#vRTA-_D z%_S2dX?Q*vF>v4sp%5wb4nMd5q(L%Otm%V z(%-W9mT6?%epUPDL0^{C0=_2}t#U>G6Q8*m#AJJ~QrZI1bG|5sqxQ6@;O&BR8W}gr z)w4~q9WIuIE4xgs&5Iacl329tS1r$|MPGw1v0qg=^wlTb!X9@?)63oPL|pT&|CA}n z^n*bD@J1lGg${J|WihpPq{^6za0g1YBM{>Kg1LsBus)vn%3ew+9YQwDLICM$^n?vA zyg>HIMmfyQ`W!GV2#;AfjE~gp?L-DG2Ey+qNzLTvPtf7~EFcg9p}Q1`f4)iX_+qpF zSPX{RCjuSNgsKQxGGq88H&=~Ep%^LTFLgJ zsV!Qr`z9mA7y|{TX*?h*5YVB^OvXr}FV#rQyL8FLl4aP?E~x$qwKC^DXwc#2%e@@J z@^}p5TuD1HPoWxO>t3P(wM~Q??FjwQCD_hclF$qHB4tp+v4{u?J;q4DiT$_*7rdb# z`eFST1rhJ1si1|LJ_R)9uOk9c!3?i#LM>RliO=H16eCDoC{Oa9M^0Wyk5-G(glC*I z?+`OM}>kdf@B4AJk!Kn^8M||R>4>@uw1+g<^#SjIj_Gsd>4CPT;Y+PE+ zB&X^uE=zxUgd1(ACi6rjJx02uOq!I$xVi)(R*28q(29mnBX)2rv}6JQHHjp!Z!9}2 z|H|<$zyg-2GBG)YX3A@-rfNOJa#Av)1AD?zd@&}ttKLpV=1vVhvN8-)@YWa%2~q8&Y|HxX0|2|_`g^E^DJ!(38_T*5&m zi&7BkKl*K1K=LK^Ar`uD!HUBxSjB{xvPb7?&(>2mtztb{)I#&4Op&nYM5GC#fC)4b zvD~E~g9zsc{>4tt0EZfgH=Vh^?Q-JIdL*H#sS#n*X<0WGu_RuW| zNp2@E$wy(N^k6d;i;qMcuO#}?y}$ww#{xLA013Q+DFlT?`x7dyu}=B4R$1;i-BF_= zRWb`zAqbV{E|bEb^d$NMTtpKgzH@H;5og|mUibkWx^zMR1ZoL=kT3s<`M~HZl5I`V zk|p}m(0ZZ=nQ}AAMub*{fM69S@a!O=$5!3-R-3TcwB?3;kyi!PSIxtZ!mLMjLc?H! 
zqgu^KisU=g(rY5H7EA>f`i@J6(}gfnDv5BeAc8ZZXjGlTrG_G5Lg!OCM^BlfY{roj za79qCH4jS&BmMJTRrdWbF{360hwL>{scR;h6U^)`SX)9pG;>t8YIKrN{Z5SZ8n%U2 z3t~M2K%deLgK}vtRy5v8Ow=V#5^OEXQzhE{BkkVs2!UIEr0@70l#bk!WxZBg>LR*r{C4-up6DbCJg zKj&1_&{6`;ioTUp^-^=BCR!M6G^QdNIH5!Zjz_t++Zd8Pz4s`NmpH&^*4Rk94aG!2Ag(2anyEQ(EY zj73z($MNQa4D|B%;^~1GS34;3BTzNZoT7yPrh;|tmm@K^sRSo7x9VPxw`_HF9v_8J zq{Mp9XBYjb%KWr5r^G=uZF5rgXuo7Pv?+T2ZyQBpd>zm6{I+9KW_Olv^AAg2)3&q{A1YRWrcLJ4}&_-`d_B^$N z8<%Zx-_YbVM01)L_$d^ey7n=_N#lj_z&^75H&YJ{|+*tbmw}=J&wfH zEK`Yq#y)UQsCIE~h3Qr&La8`{yFPh~3kYc^SO1n+Hj!eMDVTy)!eZsbDPcr}eltYx zP~(i^R}i!QVo-Y{8LU>gsdyDqwM=>cp|l_Pp&#@vWgv5+T(Ue+;|(cE0ZF+~XtpMF zG|eVLG83aDd5#}lIeld8WammoI2MBmcYFp{(ZrWFGVkQF2RH&QDsScYynfLi7MVmQs_#V^2etByLdcJUwlK_7GijD3-gWyePVsZxGs zVfFeA(IaGH!5gCW0*S+%o7(Q;I8z#BtQU-8QMh$oQ90%imuI?W^0?vpgHyqWYqpm8 zk|iS8t6lTD_q>>|L+pO?6;j=jY#}45sadl_EUVVzFxt|nu?ez+8k`-%plZP%0^zrD zRhz-gug}(XIuMUVbS!*qI9wXFB?f#n?w9Sto47_N=>v8P+4er%*M;s5(ztt)p>*E z-<=PWZjS$nD2lDyc9HE*Yi zGMjyy_t&w?HrIJ1MX1^_R2RjKd9F#KW`?rFb7MzAq~h-aIkG~1RdMg33$utXIvUkED6k+5P z9nwh>GbWza=}&VH#G6-8uOkR`Gc zSrq4cXFk3E;vHN}aG~qnEp;BEY3(1(4E@T%%=}?TLNH*Zs#srg{r-*hG;F|mNm~@$ zq{+abqMo=Rd{PkN(GOIci*+$fi|Z}EGfJx>M%Z`Pnk&>BLVg)+t0N$9b9}`&79sWr z30H%2^;lvG_wW8m^8WiA3o>in?{n3f1D~j+c{Oap8@ypzHG?M5fgi%*hnTb`u*5({ zxrM-NvUfre5|I7^eICu(zCHgjA)GBkj6i#WTK6Qq&#T-&;`azL;rKH3f0XnWjlK7H zgaG1Mg@K*^86-0>p~8g&1rB^?u);rw3>#Lwh%uwajT}3A{0K6n$dM#VDl`}|ALoI#fs0(t8v6DQ1afpV%$DKjU|a5Du}$mulb zOOsr?l8jjEU#*%oX-Z6K@TMS?F2jn=8dMO>vn+YK6zb30OO`LKdJP%zStl|G{~E^l zjxbM;oi4IeNU>+)3hyRQ9vt!+)WaE{k%?UKUBbc^D`0+>n9OF3g9$$rU6C+C(41@A zzKuIK?!`|ZSG@T$=0LSVlfvBWGr8U1|H=`~=}#ZOxK4ow(&w+|YOOE%_4{Yl!c~|~ z#Y)tTUZg?sW6QDE$@}yeEq>(|(RPEOmfm0QwDyc6E2 zY#Wlu7=<7*N(O9{l~`dyjdkLgYOsZgQG^o4C|IDTo{H+Epq_N4S~?Bn(p){w1f-Q& z`UD(*Tjr#XKK9LJU9Cs%Bw$TK>G#sEU-orusx>kx$P;9krQJ=l`V|peZ5fIGWCd&T zMIT&n*`}OAt~nc0YLpdP;<=he1RHFd{&QIcvzhDRu;@|bkfjwhR#=Mfl&X?PopQwC zs^~7vaKj6+Tja080a&F%0v5+ATKQRfER$LB!%uxmqLW>34+B`FRRY2UR0S}}C7!+x z+eRkKulAG^N;E}upTu-^Tc?(50VwOX1orvb%pEOA7@Ia&{(jHgmmI+awR7hV#GZ8>D6YA#SeWn#RA^I4(FDV zy+BM6DnH5DgP?_zpzLTYp_I!Y3)zrAVPr1Z`%zzR1(h}F331fYlWulYnLGjtm93M_ z0aph#K>p)(8Z5|UBx1l~+G~zz`(tKY<3~k#4MtImT8i4XwO}&jm%QvIA)(?biy`re zEAgaHW@rHYc$= z5lwwP;}c2*!!+UrR6N1e$uNXe^>4^$URT+9*-{a= zGDQeN5jq2k1x8xNft3|bZn_){|Iv?Lp(AHq^xn8`Ig!r{X0;O)>!Jq9I!&c0A_mND z={j0EknUB4byeYsGy^croOD#wjT&h-#5B!(B#Kf=+oANr$YC&S;4?q{EuQ0{TGWSj+_kSHd->AM|qPjPUAPykXjzHlF(tO+c{-hjAFXB~9WK$nGgpXO+TV@Hn^@K%hirTm>GhtR=@A z8LwlN=q>R?Nj_Wr>i;&A8p0O#nClv3jd{ej*}PSw3>tAB#|jb$-568H72b-XTbh9e zcU^?3;L=)i-92-8(TCZYc!$Q%oX#n6{CS!G1#lsqAPsR>f(S?87OBO%=tn7(GiP1Y z$0kLTPA$e*^{#@#=$3@bidr@cnfX@^Gk>JH8MBZHV|`3)b_CHSdW?rd3XwsDm_%AD zAc%R5Kwq|4A}OY3>)Hj#fwfe-20NEx0vqdad!@-d=5Lm|)toBo7m3^QM?c1#YaJq% zp4Q_ZoAf)s*K$iJE>XlgC;>mAB*qI{x(Wlu%@9Xlr8t!AVZ^ot#;?@dWZ)*S2Lmgk zZL(FP2TQhV=KPr!-%C-Uq0lYz&5R4*9N-o8GV87?k_Dt4e0)c}kKCIzF9QAYUw3oHyr1$RhSpTR_zXp$U5w z9k+>dY0(!=2z6NoZ8`)51)(Hc@ep3{1nVUpNyt8%BYqXJGh7lL zxE66Eu^6-zKainHyM%%2k|-inJCc$*A2^3KhF8kBJ9i}`rqg%bhiEcVK}@qE50XdK zw_;}jXgx7q7m-$kLJ<{&R_>yMJV=Qfp>x{-b}K^Q7g&kKM<`#Y zDQe?1Y2zR@=7@uW8p?Nvk@0-j7%&_XJIf?@;)6khSZ&$&a)MQS8N!Dj_;}kCR~ce+ z#~68&S6}~-GeqJ-@OK~a6bSUkh1J7riKA32VSqgX9LGW!l`~*>!aZiebti;SxN=h- zp(ny974^4k6G$dJ=Q7;*CoTthts#3EF=zwlR{|A7dDOEF~TBj6}ZMSj8XI#`boz$5}Dv6EQNLGbOjmbD- zssu$UrHF@kZPn#FzE>&4#1Vo)8k4A;l^7@b@;qvZSj1r+My3_D7>}$uR8?{lHNkW1 zlMq0GUi}CEGdsmez0xafax+RJX&X~Tjutaw!W*y&pw_Z7Sh$ey*$`0UG*44Ii|3bN zMQpcYhXcfzE%hRMx1A|ElY4kOX?2$ z5S%9^o_IZZvzpNM97JVs{G(Vw!BxX?5&8646``Q|K#RNCHNBZ`>*g|2Q8_pQj5PH{ z6%YHd5=@I!6MHh6b2n6+qfag30=#&*yl7xNLxm 
zlYpucEe0Be$B4Zbe#_*Z8bOng%Bx)Fj}592^9W_RH>Od>E>Bv2hl4F+>J|gW5UFP@ zs;U%OMwSi3H|K*%@T3%4mXWl2ldzdSDkB%g&<bb$};224sy6zAK?0qh~@$-60WqJs#g(Oi9H`^bcv zI^naEjiKzg_3Vd)o4t_pu^0Ct;|5c2+J5?&ls>4 z$Vv9%Ty4|iNhQCQb4-UC#1k&XhpXi!2-=0Md(TYNv~-FRdO;UfMCynP!79* z0S^FGCNV57Eq>O5H@m!+PSsy86RUqgoM-qBSkYhb`8OvO%t)se4<<} zw)lI|_N#NE>e8jQMJ+oF&+K@ zg+(2Te)|{f#oT3l!Exc(esEwy!3P*Hs$V>u;$t3^U032;5lD?6&XFexR~8bOly3pY zqYVTTsn1ruf~X@^63vLD}Rz0E?Sp#B>Q4W&oi(0@6<=9%^()=LNT(P!|g2TE1aRGJ`QCM<6EHIt@Nt%0 zyudwS$_xP)cP-gMq;qZYYDt6X8Zn}Q?k<$B)rk4yhW@o%t!%hF%>}$j0aTV;1RxF!bO#u)2YnC$;z1tzOzUt`EcyhX zivdtp{DrqgCP8k{@H}K|Iqo0fR^XP^5gWG1+SNW+Hm#e{>HMsU9zNLqzhUERobpD0 z%&@Hc;|otn>(0tm93YoRp;T1sqn^CZ3o3jt5aMv%eGmw;Y|pi(xABTkr}Z44WnhFW zD||h!rtH~DH-X?{t3C7LCjaMh2ZzD>k*^*9zuMT6%Sz2~Hnz&A@f)9$fM>gX^*c-+ z63FAjQ13)n{v=clGp-lvNAt-V;W1hXCstwed~gf5KmZRQ9!3EUATEp!tmgl!X+uFu zkFyuSJ27f3RI-V@PmL!~R}SIe4YQ>a7o+yzTxb=im+_66(jMDmwcpfUjC-hNj|ta+ z7mliZMxs%3B7r=-=pOf9nzP0$Zx6{iF|pzH?0>aT<$NFF8_M-QPqqJ8 z;-1v9M6!gfV$MI`>-BXj>z6GHHW$W_^tIm^A-BF0H1UwrcgdQpeMk5Yg_sov$-7N$ z+TK^<3_&53O&o>N;4da@Ujg0#5Z(m;)v9HoRt1C!6)t4h(BVUf|14CnP|jgFK^8RD zD)-N!M?vLkMVN@NT%djl0bvkSK~T$&|2!@{2s2?ujRs{Fd|BaN%aAWE`jk17Xi<;< zFdDp=@gPKnG7}=j7*grWg0=irs0GB!GpJ$3jwM^x>{+yF$-W~)roua^W7)dx#Lz8L zylP|O{nx2e+qRwFy$dVz-QTug3o9Or7@`$LZ@8A#S&Ds)hBF#08FOSGs1;sf(@KM;y=aK z3+oxj-bx58qspr9GWf)E@IeS81noGJgpyFRazv{zr&G()2S?tU>dPRglh4FfCmaKZ6O$a;t{waed?zlZ*J6)qKyV>5GkK5nutp1)Ur;e zpZL@+!|42U^;NXYyw6VmhvsTaq3-Gn3_mY>EmKNe1rrOe$^c_Crn=fX79z@Sh3hT+ zAf^7=Td3=;c%=+O6mlg}n z%13ys6V*MVO4X|<(v&hNtGD{}c43Coixi?_uS7^N!>SEdOMtt~&o5p38x}8H?OQ9^ zHPO12!K4y=*rAAdy_R8^W0o1h)jAwYV1zWhg@9Ywb(hR|DVn!Z7x)1*Pcu7ohCWD9TB2+jS#?~mMAv`%Fq)5mWRLKNf)~lgGFv64^73ujY{MIGyX$+B;acL6yQ#ZW?NS9rE->rD5 zJo|vfk}qZIB@a@Q9VUdE-Sj5tl+vM%j#FwjL1hW08b{C3#Ed@~T}lJVte!SCldzkq z?0&e!_f61Oz5)~O(kd;(2sCO!9qR0idRB&1Mt^NpYhhKVk*IFSi#f$8=!Vi0iAHLx z-Xn@@J>Bx_O+Y9ATHj=190h631 zF-d`6WVv@KCuPu~kU@@SUkC@*K9J<)AL%2=+hy2n;ho{Hh#9rn%Ef;(q09vFX`g21 zHo_~8QidKS+=tNdNTPHUfE^`S6b^;PT%4};SaXztY;w8-SuBeSTVl7&4n1Y-WyR9Q z+Q6voKA>f+#afGA_Lc>{QV}d~g-9NU8d=N{dKpDw3_`r- z$Q;vdwuY-(^k?f^(^}cXz1ArKsy!hM%ElaN(HseeSfqeam8hmHboV)=SOg*`h4SdK z4T~7tQin1JQgMe3v!Gx;P-JS^@;?&uEVQI8$?Z1xvcK)+{Q~VNsoc)dta3w;np8*I zP8#A`I8s)>2Pgv{cU6kq?`u-h%YqevG1J>fMa-kY^Oq#)*0V$;P;)NgYal zCy|13PRLm0oRcIK1!<*TI9XeT9r@*$zt{d;&XzRphqoM}we$FZqI{*M1N%#1S~SqI zT=IFh1?HV453*UVbR(aqI;wRy&`9HwA{or8&B+ihP&q1{C(hyBS_()*SP@mCT`1!Q zyI~o6)R0LG;4zkul}V22VIE#HkM;?D;!@&dpKQB*2RnSmE?~1{+Bs)m4wA}oJz%0k zAFWoe&#q-s-ZAyW{ESoAReOu6#P)zDZ89yMvV=Ej zGH9OmfEGJWe?Rl9w6M+NP9==|;SF)uq96L`M;aejce*2(OPj_24!962`-?9>m%Gd@ zd474+ULD4ORo9RGLf)A;v9pkPQwz91twOhro%GiXkx~t8fX2(Hk8(tFte1m+{NF94iRo@WJ$(L0saRr2>*d zVxUJF!F@7372J$6u@d^5jK4yoTVp67`Z_6Ws`n6w5%B|q;DZM^K<1K)ThcTQT#~hW z93k9@KnMpEnLw^G3D(Gq1Pm2N$%t^^2NX)WG<+zqNiAIexetlCsa|;t`3sgQ`wpbA zlKcCNV?mHKTMLnq!A2Cb^Jqg0K^OE=89HP{0JE9t;tj4CyBZP+oI64Vqm-Ph!KL{; zd2$MqDwI-GD}jPUq^hY(3Lvym8@BO_FifT|G!{cT5TsfUE=#e=7_7k?MtW)vo-v&Y z@CSeJ2n@)ghmhRLSS1D#wan#7_761lhM*WfW)PL7?E435Rc#oeej2!0fgscxQ9rM z^WX`cASE6N1n+6RKxsW8L@r)ryAkS#?2|oJF~^Jl?4w09*(MC>E66jZ+;a%PU^g@b4Mg)b-&?PVafr|I35@X}lKa28WU+E! 
zLkx(32Oxkz)DITgp`~b==4rF1DilHRhLm`_=|IZ`bhCXLBiINR@d1K3n!Ux$q@oC( z+7gSsq?@R_kc#;*r3?_a(VL7hG@$9F!SK4)^esfWjR?rf2#A1I7?B9j$;a>%hghR) z*@;>d1av@&w_7t4S;Rs7Nf+`Qlsp=PbI#%ayrNz+ExCx8iK)#8iN;?ur0Uoh^pGpx zJjaX?ERHHq!%~-jaLnYi1${V&3c$+AB$JmImrYVA%(xtG2rkm?tZfDQ5C zPSn&-h`LSpD#${cqS+)qUfU1LILaSUyjl?x+!C))e2;^Arufp(m*N>d-~(~kh2-?e ztw5LNGz%9C7Op^(`)LB+i8eOxddlgsH`BrKe)4s|gEHPaP_^Pz?6ur!75`_UuoQ)QIGKf-nn@@&`(Dv#2t8(RsR z^@*6jXtGkZ!o0E!P3=-H1o0v6Ot=ZOukXRDQjOUux zxci`OE7_aCs+9B`e?W)-k|(2`C*6U_M-o=$A=nV5M0cB6O2xE)xP=J72`GIA7*amH zY&;<{sz2(f!erR3%^Fp#05;?q&M+q(%+rT(Hnf0F6Vc8LLa@B`EkdIY*klOS>P<;S zv@khA`1}WU$q3T(f;xFy-a^^0oifa;mHfcl#g!R8xCM>;jF$L0jV0HOwaSObwO^?iiFZ^n4IWc#3?khT;0XS>zQo?2A>1 zWY9Pfx0~H`Od_@aU=^%@U_xFiK+2zaJ(>6DI@~-oOU;=F*n|5uih{rZJ*FXS12`DD zVlDFzFOFmjDhKT2Ugu<38fN9f`mFXVn|lgXyTTKsP+N)+w+CnieK<)A5ak*?u0Hl& zKpr;qrDcc;UrOm==Xi)Mc4kXcN z!@mGtWourYTty{mCJR)K=P~;`EmkV9bw+RgW)IU@3$PKg%Hwm60229!OF7&V1}riL zXnfWh7nYo$Kwx_N=ZMZFuK8bRof1PvL27M_Cx`$(5J!auW^p)Z5+UeS&06xH9kFdx5RHV)jtuJ|_!k>trrdPxR}(_6*8? zo-&!Y%f2+3?#p@$?2EF$9t07?F6+dWiN#)C#~$UTj%=5Pio*nrhwkj&Die&h?Rgqq zKN8O21rTAgEQCI5liG#TyF5YYgL42Co``LQOkjKxkgV2i>aK1yi6&sK8m(pjJBaCX{akS-bX-o_KcI|?QfD$>640(X& zJq{&>Tenei*{X;vuCCZ17jus?bE9rv7MaERe36g; z5U;O;FFSv9NRQPv87zD?-M{3iFE(mH7pK-JhX+7&ykHTE`d`NI8V1VoiL_jc#PCRO zb#4r8QJk`u#aqh#^3Fh^qt+-35J|791q=Z71Qdr+rxLHxE>xHC_0c>Rw{uo^bqK?9 zElaAsjMQG!E7L$B3^;3g##XNQbsY)i5*c>j4qFfvj48Jp`tr0NadT(CDD=SZb~m>R zFGGX{q-0Chv&{@_H*kw>4PQs|kr3sneGEr~cVn|_NV&@yTlPn9D@Tv_emf`?D^WAQwu>kX)oNTKA z0w4fWD+)ybg4lsp#mo#we^iIGw z6^C3kjQH^a34e$&Ts)DnKYNpJd6nz7Z+t1m7`X3HFtvA?g$zu;+>rorqzGm6{!?>F zmoh4c!+3%U5Lii*^ocwm*tsYAzO8AJ7RQf${ls%E8?5ZUWJHSbN_FfN(R`g7Zcu5^ z+_>H{q6E@cglBwktP>mNkrMo#ZP}0Y=%+mG;w{Kepi555WvzhRAPGVLAoFYk{slE^ zkdF#V*bSaWc+IE}66K%1uJJmbe%i5n`Nuf?^fHt4T~HC;W%USvc!0q|t#TFwJrMVA z;Xi}~6f(pRAz_6I3o9-JQbAG0jvhaP3@LIX$&O@Bl6m*<8JTAt-#vTx@tMn#HgDq0 zsdFdKo<4s94Jvdf(V|9=B2B7vDbuD-LB2cr?4nMVQ!9|^^lmHGlbt#}#Yi;*APW{2 z);b8tB143H*!m&BM^LSf3>y+u$}niulQOM(q#AZG;lhTQ?met{G2_OLA49&`cWPqE zU!Qe4S&>Xdtun(hELXv-1#jUP))FLW?OnGOZV|Xuk*?ixg7WJB73}j}r{`SA!w{W!Vu;XhNDFnZHF^sil{O zHQA&fVHQ(g_$~F%0~Nqxq&NL^Eu?WI1w*-l2XljQN#uZh^ZrQHG`nCA5#1o6j z?20?_R+CGVIcqO>g2dv@cH)3q!GyRR#7}sycAME%2O-*$M=gF?FU2?GoU_h1*W7VQ zS?O9-S^G%%qS zNL#XqcWAp(Ctg_afCa_`JzR@EbDj545$oNz-$-qhYq>b3`!?NVlay8j5Ji_(EMhh2 z9y*+OMM>PV4j$Q;f9rkq<|UcPx#*+wly2QN9s7>u1mlfUTCAZ)rEcXMsShWV8B|s| zr$6>6=~{c966(JFUcB*)e#M~8|8(+%1y6VZ#O|%%^twgXYAE}B|9Im~Ea()>-Smiu zsvG&oM@l{V>$A@#8UF|lHb@I8Vpe8wk1sz@Y1QQ~075{$zp7~sYA+Yj2b5|d9&R_lL9Ha&Y9Xl7M@LfFG}O=~U@vrz z%skSi8)7kwTI5xaIHNrK3=L(m;6(kN(2_5H%_p~!QUzStj|uU^Bem$B0il>0e(BFJ zGu$E{`{>6Sbuntb8d?Q#@i-t@00=1I*hQv@liP@9FLa8dKAiNC6tZq1fT+bC9r?#k zf-;mkBT!a2Hj+#V0tmP9f>uycjyg7wh)GKUfaMJ>%;~DGZr;+)uYfEK4aTIovf zFvjzyk``m^@h1im#6tX~k5>RnZQ`)zk#MdV+Z}BTnw8ArHn%i9mS#4^za~{A7SE%gS5Ne&&(t!L|F{KP zTU(J@MzKAsApo?Lb5Px4H@l?ESf?0d6Xn)IxQq1ePMC)kFx?`xci{tD^EV&p-qC>R z#g}&LYv21;z-1R3Nm}gk53?A^`%qyDQ z7DfDPVqT;IFP^{;D>VTLALN!`0xVd7|6nU!0P!c1EvG*#wJ?oq93ZLziIaJ`5>0BB z6R=`5BqdJZE%*SyuPN>pwOpKX|Hu`r8QVC@Qf5wk$fRTXvdAkbkt>JbV*BH@2gKrYHZz;n8pb=(>*{HL00%EVa20)xf#vh;I_; zKMUz00V7CR#aHTE;~IT;R`pHdNgr18s%u{*__+lgsUuL!txm>Wfezc z9gBflp!wL!-ZrW;dl)#+Xd>M0cfT) z+vkmMR|_WA)MWb#|EN9nxF3G;#?Jc8kXe(lDBee07{`;>h{G4LA{ww8`(jG>{ zupj;rV5N`^j+k5jp~*=oAW|$}0Yc!`BueG=g=JC7un@=qY6;#&U=PYi+|kfEIYVe@g5xdVVFE$NR*UW*~SCN#$5!W-X!85 zGGe@FCZggjM%h9PnMX+olNf{_#A1WU9xVdn%0tm52BO7P19sy&))*;Xl`TezkemlP zdc1@h&Wx#46{>P?ZgWGvnx>NK8k^o2|sWmKLCAI#$-eic-b#B@mI zR_2X4l8oFfTPaHwk>y#!CD&Ypi7=r4ZC2F8WnPNY6+zC| zsfk_!CJ=hr{6xn=z68D{-`~7tU;^X9Rn0G=js=!t?o}jKGUhsp9!MY{`7EZ4eBWh) 
z=17PdW+aa(ie?V}jU0yNYUadewx$uHPiL~GCuRvJY6)zPju%?yY_20;;zSJX=9zR! z|8VjqH>ybejm~iv=W=3+VKV1)ZjsjjC3I4!-{2T=R%dqJO#EqQcOH*!PN#Q@r^L{n zY>wx7o=L`BXH=OddbXbx78GT*r+b>8ZHCeV(q~HY9(EEF2j(YA0%v&s=YU$uLGdSn zBIu0tVS+Mf*5GG@Lg>xdXIP?Tddz2p0^@}$$8o+AMq=n}W=_T&=zpAqZ4g9bP3DI( z=IKO5VoGTJgbOseD0Zmm{Yf2pEl5_Vgx1`KS+GR{rD#!H*W+<#jpiNs^w^hq32|kO zS)XbO@)|lw5+y+~8lZBF`W6BWlHKTxH>EZPl|54`G zmZ>R?49#uOQ$Hxpb(S_3XuxMqZ*ih#c8C%n*x#2i2ld7qy?Db3tD8FX%Y@%uBE7w zD)$YXdZOxopi`OQg_xqKpZ)|9LTar>nXmPPa&pa8(P@;j&Z3g(QG`>j5-Xfps~mw@6d=oW+!2A^GiRxQc73!7HW|DiW=0n7XQt zG*GmTYP=HM_RTAEpwqIthoa^w`Aw+5-d@7~SimkvZIxrbOyc+islra7|7#MM!!C!d zxQ9h->y%I|uv#n)3hU*KVb+9*z0%e}T*Rx2?8qMB`SeWQ3@pq37wd2mk@hRhuAZcs zQr^_o8cj*F9xTtk;mO`$-*9X>er&fstI<+npxVTOzHGaws*he!%X%o(9-**mCX8QBS?8Epi0yLA+6sDA(JrtuUq@)nd)RWoum=#JXPT-9jM4 z3L?hl47xh$vIef;7NFGb5d4^{#GsRVXb)4Lu9V%b?s;Oabfn?y((csa=1eMS?dq;*My{4H-0lKz|L_X$@DlGQwrlYs z?^Pae@-lB$S{CG@Zu2s4fVr9DN^kM*n=yPw>|XEinkYS;t*3Tx^R6m%bQ7$OukMy_ z0BMM`K9K9CFR9H_yQ#{`EUNMFYy3u_dvIDH_)8qs#WLcr5yG$jHrhc%(m&i3WD$%3 zV=u{F51q!A10ydwC6g3UaPnGkYG7&xGq2`GtOql%F7a){ic$fKa8zB@Rc?`Q4r>b2 zSeBr0nYi!_!?13h?hWg%YTA&my|4~vXASQx5I3mT67dluaS|)>5;JiVJMj}kaTH7O z6jN~(r>_lLv03Wg5NC0B8g3WIE7F4T7?W`soADW=aT=@f{~EJ#8@urvQzsU~F=i?c z7tb+Z{-hk=abxz_&+ajXe(@h8Xj2I?mi}-d^P}$^G9t6%wK6h;8u24b@+4DoCI8VP zTksqjD;%@+#LZC$lmQV)85dpn@a`Ot`S& z!zKC5+{zR#;>VC9OP);GY}c*_#qp`2xU%QZphJru?a{C1%$vnICe6CF>({Ug`{m3R zsZxQnW9#0{ySHznrok%h+LSK=zQ200VzrOvkUH`prKQO;|L(wC;N5Z|fG5T%7<07mZXkhQL5>taq2m!Kr-zGAE%_2iY2BP+4kj4`zGvYZFTVf>JQleE7i{ps z2q&!Y!VEX;@WT*CEb+t?S8Vac7-y{U#vFI-@y8&CEb_=Cmu&LMD5tFQ$}G3+^2;#C zEc47X*KG66IOnYM&OG<*^UpvBE%eYt7j5*>NGGlI(o8q)^wUsBE%nq?S8esxSZA&E z)?9b(_17?mnkm?1Hz%A-W0$QqcjFwGlsN)l{PI&(3*WlTN8kMO)k<&u z^vP3y{q|4!t33DM>+~zZ75y9j`Q;M}{5!2TR4n@NXJjlw><_OH`SIuf(EIxL@BjY* z44?o9h^E!OgMbJONhipVzy^Bc8Od{?1my?430m-i7|fssH^{-LJOhFq%;3Ln7eWz2 z?}H^YVG38s!WN>ig&QOx3^n+{3eM1h=DT47bErcLzOaW9tRE2n3Aio;4sn1yJR$&- zsKf!jqZ1%(A_0XcMFhU%9X6~Y^UC26CTek7GRaOk1VKg%)Pf;d{2~~W@}=gC(E@^~ z1s5$~0W}6pHL@GgZpu*$LBNrV0~uoD-ZI?L1vFVjI5J4 zIr5AJ-tL*uOq4Wja;I9RM3|do2^kf_NJtvwlNAVOd)`?84*uBFAXhBN4-aBaGCbpe z>5Qg8YN8nn=|q@}G~+8(;zwc@gpv4kq%!|8Mvkg7mi#G49Rb=tf}j(i2pz~k6WXVx zUQ9e2}e3@5#!|vT2aSRmknq*d7Q@6(qLJZGiCATZx*nj6r=R ze+--5kI-=!oQRbR*x1Ww%Idm1g9Ipf5r>4-o@WY0=nS) z%ughM73pQe`rKa*q{96b@J5V@0T=DJpRzUMgpZuxXMHoHhh6bq(`ZCm%DBd6y7ET0 z3Qtx7S(RCx?SRFa5zPuB0#N!#ADaxv2h`F3qwHa9dm#B*^^S_p{bj8{&zVwrVHsRO zI2MIij)4=@XMmWZzNnDs1?jtUb4fB-}UD)0c(M!?s8m@b@6%q9SP)1Qg10ApD+ zRR^0oS4yTtCt?u@e&{0*`7D-A>&oBR(i$SujerFrFsoZUASPVoA^}7yVtw{ByRQW# zyuIzMW;?qh2!8gAYHEQe#KHox;71@*DOhkOPnbVmG+s5L*;+(^+;e{AgfSqF4JvR(LB>-$GCpZMM=K|0AfRL4W7+_350r?T^eXdOpT=oYC3TpnLRZybwBb-$J%>t(6?H?WMW_!Xm0UpD?o8w_w%Z} zB5aG3&DA$6In^=O^4sG?#gJ$=nTssrMmajn>PqW&#jU%5Zl!7??o{3k1R&vGB70?h~>)bM9VRWb|IF)yzsk9=}-0A3Evx7stOt*H=&n zEGr|h(QqohR&*9ib;J)Azz_X^3rdD`1;s$;CoRK=FY7l>eU&}FRZmS9bN7=^qJ?^@ zCt|E84z4F+oW?FJCUO6O4#~uRbo5;WQBDWdfN=5;rREVlwRz5>Q(`6%5~W&aq;|p8 zUlh@EfDl~~00?=4c!O0AvG5K4a9XbhR`rH?nCDum#t5AN1hEE#rGilQLxW+ZY&Ujj zK$v)D#BL}TT*(A-adaH^MJV?458{QwAJS0`^~WueAMXLNwEpbrd`ek`bkeR4Z5 z7!tm+egl>xKgDqGcOX7C>V=l%=D94R&7hxSWSSl8TWHdSRbc7E$gs3NO zcm_%Ymrod#j3b9a?^9*es3h!hKwan%y%c%oLR;`hM{~qQorDXxplld7juT;GAjnMM z6nm%#2sFoDl0+WLRf} zd)Q`o%5-%>NQ_W&j^Ctgxpro2r<45QJuH}9egGCi#yT@dlqa%42*pE5nO5(3TiD21 z+8Bzgv}UT5ZCzPDB6S>(mTm#&M-LZT5AXmsh(^xCKm1r^%|!t3rj~5!BR^D531NK+ z5m8-Kl6FZbO_zo!hlG7uJ`pJn$4&sJN1=k*cfB_Z2f6RB6NT+r1RB23!n@Z9N zG*%EnWu0;V=7B>goMSan!^Lhug)e4i5mC8`Tf}@DKV>qx=9K6Uw0#A(N(9Xc#ElrIfu zf&Wxu%jQ0Hd6F!;pwT&e{-6&xst@1r4d5^npQv;i;d-YP4i#_?muH?qN+X>hq9qDT 
z)kRyc8Ayu>r0ye|761t1@Ctjd2Q@)j4f=4R*b)TrXsY3PV;Ur`D}6VN4hjvA-}0zv=zold!8t!QFww~&jO68lw0Y6oGc zv|F1x5$twpmddF`8KRQen>Ti3IWm7Js-@`@OV7EJE+Kpv<*OL;M!{(}{J~ZPAzSiz zVrOM^D9RGuLYBr#K}}jRm(@qf`Zlm>Ex^fnqNZ4B)jihAm@A>6Cxfgkrlx~qETGDH zinyJe*Gba)6P6{OWcd>6S~A(FpIGNO`6v)%&}VF?tM}xCohcMuHmcA_C)o-z@fTnh z)va&CI>OUuyP9qS#z()jN}87ZI^N|IsvAZgYtJ+BW6-?a!`VyQ!YIXIE8S=xY*jCy>1|lI;Pz zLt#Yc^s?;>F8W!xSJ{+{%UKo+H%GO$6`--I%4x{AWQTeZou_Q$#2?2-pa;xUw{t)r z^jHhLEp~};pd@?~wN(n)Icw{>1R;K6I6edUS}pNM1uSALw@p8c66Lp+1cpTwuz;oJ zrson%GX=Yc_(zWPW;?4j-9@Mdp`u{ulyBse&jYw4F~W+5EK!y{Ija$B$vUKVP%m^& zB!t2TrM(VwO#eijz9LfJH={oFfK{9|b>f!TB53jzk3I%xU-iGUh+Pr?_bmD;P7~}9 zA7oAk1x0EMlDmmJ8T-NuGgH_iOn3ZC{?HHpKzu;l$6OO*C)Q;_1zYS#bzD0V;-qc+ zD|E6(PD)G=t5Z&qtC**%Flz?0Ll+1rIK|;~gqKrJeWsSuo2u={%7d39U~q538L{LQu?qv7Cs_ghpr5Q(w#8gM=UmC&iNsWk#E-IO^U7OPCsYEAs&}ir%$B_L zJP`?;wcewz7*l7z5~YeqaNx?sR&zy0**ay~9!~_hSnMq+$a(1h#Uh&oNUuDL;+neN zi_yumd=c?X{p_m8;?ECLEjjrQ{tyc>{7b3R%~FH0gy_;pOo#;rJjCisohM-8C`VEG z$c?4ZE?l<`DSYX}q)quT#lm-?XV3+gwTQ#HR}CM$iNE-hl=24S~H!ho-|9B7rKZ?t%5DsM5TFy zWvYlB($J%0PPb9z0@Y44jrmwmc0DSVTv$ELWM6lN8RkdA+H$lJ01fDXR>+aV0lV+F#RxTghb z5}C6X3iB_WG^>nTH|R_T`S^ku3q{`=-(|bSv%5{IYIh)(h7}FkQkXr1oRP`YQ9%JX zbkp9@^J1qhH86eEx2nR9RbU=n5F~qI+N5?D)loUduNVE0@-}CH`V(*E7t)d3_d>=I zJ~cx+u#~-9<#d!zrs08nwJywWUS@%aY!bD6qVIF!JyA)-7^fJ*;$*Df6`{UOv#jki zmPzcg5D}&om}+~CZLQV2Ks~HAm?xlip3bcirg2qbXNie)Fpc`y3o(sRQ{$xO(h7~r z4)MWC3FdcokgWw&DSi9uXLQdGA08KmY`?Wy%}T6=YOeUlIWiAQ6%-E^Iy!kSoVj zb45&bP~COOwgu@5G2Y`tSli^(wN4VD2Uf;#>RymLFa8meqgj3+b`>y$suNcZbE{WV zJ>3Yx3g}7zz=&60f7(>q7&%_%kJ7Dwi0em zuJc&v9AoHepU{Gap^A7`Ihs6Cz@BFA8v#$Z;|GAyhZXRB{BZ9IQOPoB#7C{~EK^Vd zkIt?G?4n+LzqPP+b?x6#eI2KV84E88B?+M!Z^fPo1IU%`@7aUB?K9WTZD`w#kbJdy zuEkeIvGPOKbH#^dd~gS%E1jtW9nEDrz9KvxuPltOGGagWu88S^k7KI!!9L7U{Q4`0Wov1*9nLCkzY|!)d>)OqqN1xth-U?A!BK=O9pg{TT;i?u?tE}Bxv~Bkv zt0QsvFo^&^IOz^YnT5v2sFt7kY3<$sgkXxiQE+GwpFeX0;|NQAG ziS|?zkUsWc(;q8Ig*Dcep6O)93S|6kLNaCLwb%5Ts){sVm#S)4oHjy|s)NGJPa_Qr z+LB8u4dNpsaVqdbEiMo0C@kcBwMjx_58@+R0tCT;4-A5HQ7MBK1215wy&OQ!F1t*B39Tyf;Y15qOr)> zc4nUbZD=e49`FPI(_MyZiatVZb=p^`hYC%+6!pxvYqQTzh`X|}>y#ly@f#~63?1Et z4=O8V7=v5*xP>pfXv0m^3J69UD4id8fTX(;daB)lzJsxF$zdYxpz&T_dveY7&6?1a zg3<~ox@;y3xiE)x_CY}I_NEm~0Re<>zZW08uHFQpgz(KBB3;+PHyrM%?+j8Y^4$x^ zb)m4yKEC;wn}W||nBC5bt4eLdsUt6QLzq8-BU=qyTLRJN8NI8UtP5IUeQqG+7Ru&)Q8KiO)nw|g^XeY;fr+8n3NLl7`6-5Q*RAQVs>e3NV=}v(>lL+>7C83mnM{E7#%mUGd6!0wM zfc^@g4uyy#$h`=H5P^-OY;zZ-{p3`tsh)B);+kajWp!J+R0XaEj#iwoH#(yoOHOw> z`)O#2qdF0)XeA`eWh87voLRjp@D?D#(T;u!+fKemlLvVOX&RY}Z|WD5xm<5i6v7W{ z7K#A0agI;-vPMq=&U~%V;MpEr+_W-9vIV7l}bShgEZNXZ}ym|WIYIbwTJ+x00AXPUTJE*L7ECrqam|w z>4lO1K~N`S;zr0Rt46UDiX1aTO0KnsAg%;wI?+R`A1S3UYLkfsbs4(g^hYcoUNa z4?_~jQIf{RF-U38DB1OxR~}>|7Wv~xEP0h}$&e7jSNoWcEq);CRtDU+nIoOD?%MLboQj_Rrpglxne}F ziKUPp(Ms8O?G`9CqLoU@DOuhYmqrBvk8gt_SC(`oOoo;1+sH^k<#G?Lcm17&q?z4} zltwrc^kt>shrhEa5kbVAOd6{@S@hz>Gbl4_EsZ;0ra*1i{Bb*gnKt&mplkLYlGV%QOl$i%5=4AlcMdU6A0uCGG-R=hCTxSvD%^ybI0XpL$LjVY zRc>^!JDjVbT(F^A>d&mLW8<_UMAxx<|Bxv%wqF0ZDH3NJt}NlLPI;1Pcj>{&>rk*NxNbH_R!~E5#-Xs zyx|Rl?wensfiNy`LEcjJwL1f!;SfnVOG3*;Snzg8jk4zDUt`?fC#Q^Ea#`1<tL+LAtsnq#Z19GwlE@Cxi<50k3a#&}56o$a^DJ&}(}x5;RgL9eWhB~<@hf@@x* z-0Br7cVXy~)|c_v-?f|hLu^iyz5;N1Hp^1P`rwD4?<+n8ITifOO&C(f$KjMEh$wHu zofp-&Gca=Z)dioPT@I&d&*nE>Y?`>Ri@l>L%Vf!W;b9g&*dw9swMFj#urOl&< zJ|KR_6BHrd&Fe%Z-o(4z50%*PkZCcgWSQ$*FQekugyI;4ZL`P{Ftrc}l2ejBTpE~B z$<`(UB(Vi70O4s^`Di6Q;o<%FKBF5#ruRZzJ>s!D;HZgzcs?=_r+NE7?F+Uw>b{}C zwA-jX+q(;N;RELQ2fLxaqM#4fqls`Rh)Kzk4eS$a6P8utAv&oE2Z9U-VkiG&x~_?| zjk=ZiI5m@Ni2ixE$KV=v%RUcez#zm7`0uKPi3%UFA=I%D`q&Bo`LLmZkTQZ3!a2;0uj`;o zo4^PYsENUVzW6+oN56 
zQi4O3YQ#xgAb0Dl8zjTm%LuLU5nEx8V+_S-6s?Fzp@7kgZ+UKFG$7dW_1sKTL!OG2)0kM5#wqkwAu^Pr ze961zu*>e?!r0^-gEI}j^gACElCtu}j+@Q#w2Y8}q)_c3 zqpC=v-Ri7U*`=p}iu$az8dM7GdkNp@vp{e{;*d)Kow7N4rJWd_3GEsO>^`S64Hx+; zMjTG&tk5292u|CUt?QtG@h#u5jb{m9A*HGR7mKIJzy-Akhl>azV8O@Wq|dy=PTE5bpHb87 z05Yj83SY!1E9F6u+nABGi32>Y$DmOHB+4^|RIAtz9=o5Sil@7nDt+JwIjx}{e7*Ka z8zaI;tjbf`8;iJEGZnoR?i`ANDf|rznl2z*UBoS)=GBbK9kk0M-=sviY#ftnx^{%9@+ntKP&6Wi-b->8>cFK`$dm zbnB|5=sT(yjrlmvnYG%VuvDCYpN=qwCoqONoe`exSd{v*{<cDxT5+F+Zu!@Te6W z4J}^G5%};|uzA6$*j8rswX5aZRs{-Nkyu}vqtr_Xf{@pxB-@(^Byw#CF7&Q${1bPi ziYHUS9060!KuxFQCi)1nzV%%62&uS(p7?lzib|W5xvxOIT!g&Hq$8)2MGO>)yv>N7 zCw;6LSzVfdoMh$2R~(Z6uEd2E=d?#h?GxGHo2d}A z1{qU4C#&GH=>J) zli>J#hyYZP;bA|1A%F;g4pu=2bV1^}lZxn(0bvdlSmb()w57-m_Tz?h zG2p?+ z5>5$Gl3fHg<5Es6{=gX?ndOyq7wz$4{Xip!fMXTKsA{GR`8WtQ?uT$7fIawzKMRg! z?cYzP7Y7}ie;|NLWuW9M?M>hM*cS@Xz<%(9R)~dvcmR^Vs71@$#DJGQ9>8C-l09gJ zS11ur*08>7C4W-smF_7u%@3sUphbaH%v7@}O3^SrsURty^c^~x{$t7Lo4eTGK0M<5 z;UPI*S(Ex@;mNk1xCJGcgp-hGZH!g_*&5b!Ypw>I6R}O2$`2yl>JZ~-?_;jDaXtbL zEsf3&jwH>Pk!Ojq61#bo4a>jKs!YSk-mIcQf%$7L0GSd|E)hnEq!dvD)v`zBYQxBg z*_sUh!cr{=d*;^j!JFz2L8GhwIbNo!X(m?R#0F+Bqe-gi3y`6lvKNzT%UqT+v{ulo1Ey?%yRn3@)UR$j%IL zmS+s`gIzdg`7Gg_MC3rj%gdGzBU+9U*&F>{2p`Go8w%x+1P|`>@7(UG3zezg+X@MC zpMiE@7PJl_2^%Gly14wG$bJiys#h_V7rp!FzaWs=c$&NBvbueUUM|Mo%LtQL!j|f* zIijb8)!;6ZW|R&8 zjQh|oB?@HK8Y=dRYnhFfdb_pljIZh(G-UE7&uvFRBR&@n{92n+ZIyWHjNTnX)J;CJgl+!GW*^nl| zAEbGfFXE>!Ds*T>slUjJNk{~eW{mK#UoyuL&-`WFx}nYhbKUJBPha9>rF8NY7TOLJ z(VX?%o;79J57fe+uaGNwau&$G+JUPaCAgMO*nwT%@D|kbqOI4;;X&aF7fSZa=9J^< zTaQozRpr?l`lawnKlQ`7pPc!MfN5!6qA36&QcCjmXFS>ou_}TH?9~2l%Ft~8r$NJS z=Ia?bYqgwCrt83Zmr4S43(z>Ae7==lkM@0k?=ka__XaTX$|VN#sgl34wjS5&V~LO6 zfkya-bEtrak2j~`LLM|ntD>UD;F`Q}=a_g!rO5OSV^CsOTc;p2`!#iwzl^_OHeEte znW~?fi@O9ncp)@F)5PF(tLaI21p;6PXV`(AKlkTB8J$L;jHsUJ3e`scbj|43;0|WV z;IiFit-A(Bsu#t`_#=7B5AXR$fG6~qZ+k2As7T8>Lg9i*FrY**iN@!bum{)7AuPcu z+}3S+)2_8h4-}>yieg7DHx%Fc)iNs={KF_b5ih(1`5GqiNeVh7AGNCgL_6JddkkTy z0B}Hbd2MpchYKI+4AQEFC*aTipnI!S_o|K8rL;~%WvYd2PX7DFa>~EaNd1(C`PUZ< zC0UY56`Zg6q^*dl@YgD=DvUC%wbV9A+~Lx)#EhD7T1E7-7NNgC|b z_25Z_E;()#$wwR?0=LErHAwevP?lvs`Yb2tpCAk6LV77^_vTsuk?-uHI&t6D8%S@vi$0rq9V7OF80}`P$TEC)vp1EqZ}0xS zX+cUQZ*@pJ_QOyI1=U8KTi!NU4eQY)MrqFlC)V_CNUaOcZNke zrk|N=x~X(G;*=&sZ92-)Qx?R@&{NW3$tkNmNn|CfikA79gI|SIPCrvB^^=xlA?Vzy z5Cz(6M+|9{XOlm+x-7FSuKB5tg<7O6M5PYa-+aw#`%`zVF2?G1fr{l>2plo`fRvOCK>HPt&A%2#VRQT1a(*!s8MYE_SXXHibk zjMQ3W0AT?^invzhb{U<0R7e&)VUbYkmh`hwA67t9MH^4MXo-wD2C~?RE8bImU0GZA zv|ZgqV<7!pbu!G0Yd#WHSxX&MCyM;l&M<(z9b`<4@+}%wcQZ^9ydBlL;6Nx2WKox~ zY4qs5oBRH|YBFjSm5sR}CZ8a<=p&_HfvF1ovJ3G^)IU*q6G$zYi?x;oxK!XxgdU-u zyJw~$W@3R4iTQbEOg^oi`gw*#Q^qSL3qAh-(DPdXjpFZzn7`WO4}U=9M)~uW_=bX& zTEs#h{Sd?xUS||^S-=~yh|c&HHMhI0rD(AGj8Wo-koi^QBTHkNyEGQH1F~>)jVqL? 
zzQ&cS+~-}Pib@MJl_#T6r2@uahdyEf1R#(G7w95b!%|W{<+x;rjXIm&R>qK}#Y-ja zi^%RggB^Q)E{9&!7E@*j75^{=F#Cax4Wm=N9)3|~I(h;bo?wAz+-!qA(US$jktDmU z4mt2@OjtUW6XHPTA!K+AT6U+Ne{^YuZG2?a_;nks{N*Z!vSHm^(k zkS2YFK&i>qL8zo4m}sYqU}2>M?=s5&ivSWW7J&&teq%yMa6n$g!;ngGNHYIXCOSDaw`ZBI za~4Ppn^sf9bcU=yUg$(E?s-T%{)Aw)2t);rC?G_ZXE&01+M=8mOD}ookRH*Sxc;^z z2Z4s4^t`C18W*F3yfY+SLs#9{h|zqtrf`C|43k`0q8xEVr0x+RD^*In4askc2^mNf z$5ON=X_K418!1rDbe^7+%ptS65UMJ4$&vlEg+NfmL4Z&ZXsS=9Izi%lcBeYtEJ<}Z z!ih95MwUVn&Sw+7Ur^KPrzBO(6Nu97r zIoP%gODEA}OZ|BAt=JA{Xb-bZ@_+n|CAPvG@|@ zTOWh3MLQDC&3s8x$f7KG1B@_B{w0}HYb1SRR>g_Xt4|J%Zo9}zTK167l_q(HF__Yn zyg5dF8d5vRO=J1 z4cbibN?XUw0^qOgLM|2X!$It96g`P}G>@T|S9*BC9+sE79{d}h4Xx@`Q}4A$3Nf#G zJ13EWS-FH03m-7x8a|wZ4-sIaz{u7U5R%5e*aS;c`Vokfp>feUnH{U6E$#Z&xLd1Y zSyPlU>wLNTuz2zR!viXffe4&fF+)+2rz2A1qdkKXPhf{P_{Trf%JxXZoi@Jn?JA0V zaA;>X-9a|`zdXOFetE7%Tp|#NKCs0xr;ZG|Nwd%pY3ON;aE5OI(JgwEw|KxoUh(UQ@DWK5XRJdy?UhfjThHmOoiQxX4t-3?D;@th z(h*iy_WXxhNfoIo%ZTzMG4fq_(PIT-!H>C!M36oIw?~tl7bF3hDCsRHssh1|R0bR< z^GZ70zZmh-D2tK&b`vJ&3K2pL%&qzw=S!;o^`L-b4|f0r0r*f&{Tij|V)u#CwES~L zTSO25UqGP0!>B+Y{{rC;=l~jagO5At7q=d#Xm2#5vin*D6;CM!G=covcC#8n_QFk^ zC*r68*Xw<;uP*&Ph`e`4$^p=WEDwXRu6^i}5AmbZ{Ugnj&(vcLoS3A9I5>&WCP^MV zkUj*n%=aVUClaR5_e2~70K`Ge5A&`o_MAFL4uPgS62In(-WgIp{u>{C<&0iFRPlaq z;KXHs2b`Tnh1k%J9gK7g+%JWndQI9_m>(Zhz_{6;_+dwc>Ny-p~Zta9YeGQR=8F1m|UC;%&D+S z>@e08Vvy= z;z#@i1DZyP6g+puk|v8gijan3GTdK_FyH_ATEh0%1wiSD%m{ z)C>fYPy(Vnimi=RD*j=}nMvmq*-c>1ZMYt3%wpJ8UjhA5C^c0$o&~5_;Y~~mMetWq zL{nUO|B6rmRDn4n&MhNXDHr>Q&_TdaHSPrP9Z5DGWSPWbnl%qH!A2RH9y|?+Oz6#i zz|k|D95D4F08-seq!3#GW2o^@ILb&+NC`k;#cgyR$?>C|Tw`{nmb;vw`V11;K_f!m z`Ip`pj8)Bu=I!!uSgSVIf4!hXUdSj@V-1WCWwEqvOP7e(+$x z@yJrrB4P*|Sv(k0AP+z2gFl>1P;8V|!p6yUB}-<7Afbd!KHJWSP?+TecR}SysEb&t z|0QUG#SsmURr=O#{N$q5L`C?U8aa;Nk%T`i4}C}?(=p{m;L@#$p9!K5ngjyTnaWTI z!UaL$VEhl*DIq+erF$?6pFK#tSYl{Kr+}Q%>$##s5XvEj#)4HBShZIyw&O^|rdDQ7 zs-Wh%HJL$FM4nUzk{}d=6$pk@&p-Hs_XGkus8XMyLl6Z5F2smFUYQbRg}M~vX%$6E z*hQZV<8?r%bT;U?Xj=>7SylSv(XkaR@K0p|-^W6dEaYMP7hl$$0#z zn`TNG^`e_>lt-i_lZwVDL11Sp0wgwRCN7}yh-qlZUWlQmdP-xMjA)4-_-J6ywT_^>iNRCmRi;OT3KVXk}UZQ&5 zolX=)d1}R}7{~w(+@yA(&rnKww1pP21&W4+)L8&Ogd@&1kEtAoJH>);8ce0a4%Ccl zXjSG=D5*|up{e3zN{-ihw8=Yd(qa08UW&#x>I6;&U2GiXNR+Gbu?L9&|H>gDh!z=+ zgc)nLEmg7b8KhOw2#VUaPhD~V6t855o5=E(4V8IN=Q;^%Va#X26 zE3^(IK7fHe;KNI#&BKOja`4z7-9@k{ti(R;TV~GzW*^__NNn;$c3$70IjJ#qNXQKw zs+4R+JzbZ{A;hdlU>dDT)EZFARQ6m|QCh$Q5C8#0z=|mA_Y7a#*#iJrfjr~`ST5?Q zhR5Gf+)Q$dY&ormbQsh&Zcj8t8OfVPoF>ay0DnFM5EzfD?q1k=|14zMoiTi4v+l4s$lI0vnTt z!4Afk&WsVsi#fBnTycI129|#5WpVvL(8?HQ&bRzSO60M zz&#u+Gl)PT(8I<2@5HI;2j^7a8byQH;UX__SF{PmI)!O15dTn;VBE$mMdY8ZBQoJU}ei|H2+P5CL43Yp@XzXaE2V?LiQO z2tWcbJ4bksAE{c*n`MbdQbY(B>w+$`9W+D47=tA`QD zkK7el#LFpmM_08-L%4(zL;&u#!Z%>R1nPvp97F>V0|3wiKOaOYjDRKp^{}a}P|H`b zU))Y#F z!kAfaZeG!tEpk)D;stJ^gs%BPD}aNg?Zj;yV+EB00K9`v3(E*}!Y&)G;4a#m8JuDx ztRdZ>W@@tRWb14<%V=BnreIBG?}*e6skAV~F}9wyjP`kiwr2Vmd}{Gkju#}mAur9$ zIB`=72MCHd6d&9I2Ef7|+%-=?pZBCeaQijUJ_IJ1Kq?$}A7+Pg197o1?8=}A5@zd4 zSsl?N?e8Edc28ndYhUR??is?#_w)ng^bdet_aLc@VFZh0WU{3~MXxH^KFwtakCQ@B z+4xXdT-K6cmR=K|9}Y)!yhZgs~oLlHnzEKRY??Yl&6-}F{%kl zjbd}F6=ryLZ}^;2%(uQx%g9GKq^$Fp(MYiPmauKxb|;3uIM@+~6|1>Gv*wwI!!6t% zP|!#%r~v?Q!#h+1B;SNU>IRkrC6ECn{9nbRO-cunor_I5Xut(D!5@@DJjB2p;DZf-fgekH zi3}dG@F=mQbibd-q|?2*shkPVw}(u2nisOsf7kwTJH>A^pNkPw*y!0V-N^`oF(?Eb z=?pmo$-8Tdnq|WQJcB&c|3NH>f(Qh`2ZRAV1U2p`I1<7)hbW>7_9w{4NnV1!1-wN&o<&_ z)9t{ce1giUnur?eH>rlJjjOhx>*%(sGQ`ir4?zr3L=b~|ts>#xY0fOdYT@fJz$6l^ zyvF=OFFW?g=7hA1j0h@F!oniytkcdr@vLmliY#L*G0jecu}bpL z>uW~0a1@L#$#$e{BpfTqOprFyBQj6xLNc+fzy7hHA5M6BFiO&<>@BROuq?19-F^eJ 
z!V4k74Xcc1#Rw!y151Q}3U+uv5nL|~R@h;QMXSV%j$MljG8I~fao=uo^8jfhbm zb^4Jdff{mDx+9C7E>pP>8;BA$b-K+WRjcH(C{tJM&B@DpGLcM(?p2dik-!A@A947I zpbsT@|9t@1a1l;e;Y<6p6-%@vJMB*w`7-EGLv2K>T5ESqY*HV8v{)d~dOR4>zQA4h zt@=7jxgvpDyv#|BeCq5cp#rPYXr=0@|eGHoI4+*~BpjgFy6Y#xJsNRaxq( zsrE3q1}Ux2WQ-d6>o4=5E$Fdn&Dt0uMm>&9v)Mu(Y_ft#C2Z@qlykXiwDjZXBTe}| z_({HVgSTCI-+j=*o`kkHt$j^G*P~r3cz_QA9Q% z8fP3kW4$4Tj9Y9g?$M*R+w->JMr8+$w7ft6HYB4lQq$iw0gv~n1)j_+raz5P~-$c`p{KX~hHJ6e@gq zu!(IXpWUQJz_T#RMKGh-`p%a_YefonG)h!K9KtdF{e*TqnhqS7h#f>0DLA4s|I1i} zM4R_q(IXMWB7%74puFuVBLuYB6VJj%h1vG@^Ej~1)lj#LDcgs8_K{OHF(SQ5k+f(J5$$;`L>MVDF*34KN4Oal@46;ZD3 zlMZwwSSYC!raZ7IQ^BA_aB@u!CNV!NtiUb!Q2|m~-~k4yrxziz%~$S|pZDPo6Vrki zu?0z5r(0%4uH~(d{SZEn!G%1ZImq!a(^|h2VtS6pwtr0XGaFr}hmMrMkJ9X#U6SB8 z2Zv1t?kqMrk)$|@BAkoNDj{I}*;TL-mR5Ffmk=o-V*011>h+VTMFq`G|Eohsv0f(Kz@BlyXV}uoWMWg^h5vzPnlis*CsyGtVv$Rt!s02GZz&fYeUwlkCTfO#nT%ePsha%#kVstp>K>aa7u6zb zxr|{MBRP~FpEwVt?^5XoiL$|@5Kdj{^3httxEAOl)|KKC8c34^ia6k>J zk<3iDmw!>rWd)&K$MCfu7W1qx-{zx%M8_uQj8Qe=LfR2sO}(!<;E6!#-nT+&C%BPs z%|=RAwIDGqow(;kC1PR_3z)}8JyZG$Oj}`>~Z4+%d+$(l9HQ$f4ThltiQ zsKw!qd;Al@`Uk$UmGKmfcNeGBj4SJW7=B}{5if*yJ8L{}mf@_H=BD-|XRecJdI?p@ z7{ri{QO)f9(GPuuOdr_Rr)Cg#~lT+ak|NEiUbE|o%P6U|1IOTVw>RbwWNPdBJPNY^wTg@AhGow z@y?m{Q7D_=swoph*(C}vQ*{hF1Oz)B9l6)t(Kvp9QdleZc+UCk4J6gt-buT+K~FTw zyvQnTy#+tC^stxu+u@pX(=V)a zip*z;3g_cw!l92^qk~a{;ZMej?DY@PC0ToC=W!=8>?A61=H5(RrJLxBTk`tMfa781NQvj>psDaYUl+X~ndo;Y!6?oLhzW6DoW+^(xm1tdd=mp}?uLEaK)z%L@?9VjXqeXJ*ES3t&{|;>;1R)&y!PSDuvn0<>a-w4bFu?j^ zbiOam{v`8r0c`T>8+0tvX_9daQL+$1eV&0ee!3nlTWaKb{u4f$HcQCK2UfQ&`rFCuo0 z4GYhQ1gYb;N)(617%Znn*i89UtAfg+wLTGD*dzmCMR_g*0@T4Fn+yF!|syDxT%U*pB+X0s`Mo#`w_@$s`MofC(yq8B8V$|0{aN z!la~eA?eH-F>#O>MJ2S(Sy&B!^v-O?5ikJb!e&rdp2Y&ZK_A%h(FT#)9A{=aui3z8?S5x%_{RBCMBAz&A=k1hqDJ+M zg^5s4$bzyitAm*mDBBEW1sSCr|L6!Y{y{G&GPJ(YBK1%53@Ib0a$%}cANT<)JCF)z zl4b~KGL7bwT+%FsjLg&s+S1ZkfNmFE;Y_+R)X<_C-@^eT(Jon&m+GQK_Abw=XR7;Z8(!YA{|9vNseF2pAygs%GWOA7Pl zuq`;=z!^jXH9ONRKn*UdurIlSQ`0N`Ur3^1(Ab_)K>JZ$n zvyJAFES#=C6Y>@{Q!EjXI+Msm&Jy$7Bni)e6Sx!GfMq95!u5ht)XMWh_47Ps#ExQ( z-ulQ|2I4R9tR{-+Hah}s|LzkbZt#`FQNYNrf?!Cd_)(OKYb=GVEc1ybXpAu!E+U)( zR6?(`hNk@DZ!IAdLy@%NG?Y95qo)v~u|9MnJf6a?_Vb|1vBwy$G%J*o zN;5MHlq2&_|I$-cwaPFkqah+@TPB52UGX7gZPh4B(QpZn?9_aykOQ?$N1;nMeyeCWvj6i6f^kdiXFy2Enb z3{lmvFa|Tr=20sTf`A?ZSOadgxGr2`r4J#?ke-@>gzNm zala()e9}9`4yX1NawE=P^G8H`L{;hNNQ#LcZcR}(Hw9%jFl&NS41!aFWOOmZFHDdf zPM4Q5a`;eGGq6%16xD^U=`#^>diaJ+iZGuVeimLN0j(TXmfi{ zhcc*)pNKd!R016=fFD!>b+>DFuH|Trj5oKKf<0#*)t549adZ$ARx(7UqG)=$rn!?yt7^j4DZ0;I_4$*n&fEP4m6x%9DCc}t1 z#cw;X$t3e=h^kSSTS#&brjR$eS~V*^Fv~Fo)tJw-T>d0&>q0PBw15O-Qo1G)N1h+=V~jFIJD(h}A1sX@Z1kt{Yxtp+;D!nme$P1a+A`p4Kpxqs3U zWx@oc^|^Bf_#*sSo2#_~M##D>r^(3i!b~tR{sfS+m#G?AOCQ-USHc4P0Ts?Q=+VoYZvCkCVp5JY&fapg4bz#_hVl@2!wYx~BwaaTykMV?c6qFi1pA>Mc4#F8EuPdSZMN|| zTO%FM$i(}B6BFKy^R$6tuY)CuYI-?0tuA0YwuOl-N+OJiRg6(vEGqSW*VUEl*d`=! 
zh6k%rF^|6$oWuN8MIAg!A)NgZRmtY*Ta`>7Dh!bQl7|eb-4YWi&rQ3b`ZS_yP!)k8 zhNv;V#>E@UVV8U9_~I|n;nti9KJVi(@-tXq6rg~odFHC-?)RMqw1aNEm5pY;4qabn z+c<{TOzyC>Og74=hM%c?nJxFQvHX1)dmWu6u`8e-hIu_TaT77i8;;DNs+2IEnuQL7 zL|r#h;^!K>xV-zcL!0WvPlq2^y@>p{&xL8p^W=Zy56Jt+i=gAA|9RYGjU0iXTzqU% zw{yZ?(|Gix0wL2Boi-KID|kt7kIKR}z3n{06Lk~QhAx_{LuoVB1rahK?ap#GB1nF^RIhM8jonnTgpSq*o9>Wl}?`1Dr0vXzvKCM#0mq+IYf;7ug8<) z#dw;!=q*EjSf-p~T0>i9=ciF#pj$&eSl-q${Qd+)v~TZ_{{vGox+a$(vyGgoiOHbv z`JyjX-60UTJ;gi49m1=G2&ky3ZMHCB<}sYt1sS%`CZ@MhLXmoCRfLAUPP`L^O3I01+kYFtfwFZ_eCy3!f zK@n?Ju)y%(zl0n+di)47q{xvZKW418u;V{LwKQ4~|MV{4@aPGsI0eF`<|$E0K`yhBJbDZx$&pZ$AR)oBH;I4e}`^sj0Ktz*?f z^@>%hNU==`R(;lyEMBWtnQkQr*5JmXY&~{*_iV4+#f%#}ehfLXcy;IpM<$Rx%No+MMHz(W>xp4RHdz+ScDJl9*7`l!1-65W(76p|5{Dx-87m`IYop~O9-vT4@3<4<6$lS z_%oqR6~I)PP`kZk-9{EDmtc$@Aw-5EK&eC#iYKCI9Zpz{w&PAMN*Cc~!9ADMcJ|3g zAYylw2NHlBp_SiYRR%`oU4Cun7E}yI*4=jog(TK}9;pRaQHaUMQJPG?31^%#R`yX` z6A=ZSgbBS!8=XF;SQ=>8H}{nk@UCqgMFf!ZA=5=y9@ zSW9PrQh3miK0zmxp4g3+*gIur3LaHx(MD!~9Jv}6V1WJO7p`}iwHBLCbuyr2iS5PJ zejw3GRY&AuI#ril(F$v|)?QoEmq$gq|Jj`%u_T&*6_&=2KDbbn(Lw#_(+_MGMJn5D zp!w#Xhp1gQYETm4MlH6&xf&8n5eZyTpFmQ?*>prYs%U{4g{hRk*n#zAzaG6KEMR91 zwlT$g1?Vw%(h{aEWKHc=mBq?x+!2>G(rCG6HZ0_H0_|gmtA_3y(c>OoNhlES#xAyE!mNEl{yO1kNxoDk4E&;8&gdX zf_D>D-o4s&V@?wE@?5qiM3z`v{-YIIvW7ffL3`tOmBmqgW!E!wIkhrKkV|_vT1H99 zH{Eq_&bg)*dstRb-i}1k&j$r<|Dm8;|3e~)?!u;OiGM=$Xl59pR?~Blz9_Kboj-+f z8l~_33CniLEQ!aJ7j_VS(3+mZ?#@`v$_ZPSs^P1cD6UGSx~Y2 zN!#UFfC=jrQ?vS>5XOh|Rek>YJEm=jV-5_xMg4?Ii4S!f1wyPXW=Isx2YWXhJOgAnZ? z7fz{#EBBkB>=;uO?*I;O_*)ARjVL*3;pIEUB8m2ZML!oxaf)&AUqOyD5;|Grf`K|9 zP+(MnryUAu9mHN}u7i*e|9PuIpb`WSjG>X9>?n#;j7q3P7##~#CLDpV6l!v!sJE?( zHcW9`9p!?t^qr4y_v7AHp5!sFNa==mL0Lk4XqGUQ>~iGU53i2MpGz8MN*mb=9qSjM z!))@9tkho|MfD&Z2}p=^s@l+g`pUrZ&^8&WukyMJhS6lqC+u z459*AQHx$QR2r;^ZeBDYlq}RD#&|}E@Q9I1X;mscDb=Nbf(y{-B%4wRjl4DzvAqJ% zoz28sBhwT(?VuB}Px|S1#A#If5EVf4drne2Lb0X{(JW%gCz+Z>&iXZ$Wn%%SVqYsK zuYL+CY4WZE@@r}BdY!!l(c5KRv~+5Gi8TR*5Fm8Ie>XHvkID$!kKEBk_qvSO zVA&YYdNz}$|BY^&GG>&*-O5TfWvGu0xAo0~)d~t#@Wz}U& zA8}t>1arLs*;IR~^sa#uG~KT5Bwq}E9%&rfqK$Bfq7{U0cB;k@40=jn`0|qoL!}5W z0D+kU@d6nraS-FuR;xBm$nlZ`!XT-Te?L+VknDoV1K-rY!-`pI|90Mh+OM+WT+5m^ zi#Z%4xjXc2CtRpCTJPw}Ec@v$BgaBW`nq-`Y;xRcbqr>xWF%mE1go7+6)BCHHBs-v zQ0xTgrydzsGh-W-W&8>R+~5R*0%7t>K4oDl`ZHiVVJ1$l`{u%mYhXZXWSGrUvBi!o zhM5fL|9PL4Ng`v`D{IR1lKTeV?RZ+1jEeGJ=u%~)gzS;6Wy+RCvbVfFde)7hS6407 z;EC)xB0loXE^(Hd+X*5ZS-j}gfP$`?u+`3CL(Ly|cOzG$3wH!HY>S6S6qXnlfq^cH zxGIIfuMo>Aj;1we17z563Mi(jq_33PGoH%I&8Js^uf6H{l|&+0J)&03L7Z$5y_`v= z7u(@j3c|vwCNV&w{j#|qz9%2eCL%twjIK$OuE8)wf&P1IqQ7eGM&)&M2)nV1EdYc( z8{C? 
z|8^|-V)iigCQF!ngfdRyMBoTdpNZf(rI>j~E6wB*!fh^eh*#>)41Ec0?u7A-Vr3!z zK*wu59_a1b&QYRTyUAzr5@V3T3l3!%QE0NAc2po7BR}LtjR==~=dDtu-t4jj;Z#gH3x8!V}*4{uy##U6Y- z#%JPiijxg8-!6^VQ2wdmmqb>?YU~qLyPl{bB;H5gtm-u~RTra7t|Bs+M5|R8lqP={$Pqn(BK;yqUTvcd3C@ zI5R>K(q^in5kZqz-6k>S*C@xMSD6AF8yLckNbb=^&I}QR7>(^mbL=h3j5i;?IfLH-R!w-PgIkl7`9(asC zL5*+Hemj#9D&-KyVS__Ri$;iv<>n@#^@*1jCQvw4Q#gvIq-C#%Sz@7une-}AMmMA7 ziq0`~twl)+2QypoYPXnn$YvQKVmx1jDZl6-TO?e6H$fHVV#jD>kkT2wctsTiayP+S zBJ>lRV~xZV6rdw)GQlKZg?R!25kSBWd^e7j*evEK5}w#5{&9LpK~(kk90V7IrdWEY zsDR#M6Zut?xc7jQbx!@LH#k<12KjKGp?n*Wi@umb77z&D;6P*6|B=Qwk;XJU2$L_% za~og6EecVG3G+MQ@=6NBYvXq#6Er6^=6++=XI@}}Pze%BXcWz27&uvvmEn6tbu2gt zfbq4HR5vO>nUA%nEK#|V=xl=R$$2y zEU`gpRC0JiE2G01>1HTyCr2bTGhCz@jZzTRPt_M`@WjL;n{iDG2AUWW2-Cr9wrC)LGAR$X|24a_BW?zqag;8;;SK-k z1)ZRC=UJ2EVTO7$L!swb&iQ09HJGi3WJ^&dl|g{B_n7sRSpD^#bCE+;;z&=ya9Adw z6zWWF0}=?tp0N25JtL7{DW9piQ9i>~XXX{XLy?Z*5En91BeY7lqpjG(8zHH# zi&HWr1s#Q2jH)>kSM!^~A)C}OY>amu*yXS=Q*Ib4E$rxJU-F?uHJ$hNj!rnQwUvrR zdz1^fEKK{3_GK9(hN-u&_4BhJ8ls9> zTlZCZi(+;81ex_{f$ADAVDt=6KnX0dKw!GLpw(dEw6lOQZ_imWQ>7%C zm07LglL33Hi(-nBYf0W#Lj{IkSOz@Yfj2XHyq$9+HVa6h)0O4HwUdD%$X9&*f_GGj zJs$D^1aJ%gunPnb0mh@ga`AYz8?n8J{~^Yau?a)KJ{K81xPvt`i6khy<@!1Q``c@jXFQg$w%^nJK}Y)4OWOQO1U($wsEb40v6V%ZmXX|pRFxLCMa_a|$z z+OyaiOxP)>E81k-Wl>h=M68H#PD^D@A)fMTE5mBV`*XJ*Q7AJ9!-hgIGc0-I*M~@2 zVf_;k5#YKwQ5t+e01u$YvXc|o_d~#zeK|sB+}5@|ksMDf82o}j_>vTgkjUj*j>;)4 zPno#-%CF{!m{j%b-!WZ5T@u0?8iRpA4~#L+m4ig)mnNd$v5nk+oVG zL&04kt4cw=ILM|%i-i>nl-Zv4@q6|1fe@OB$xsvU|hK*nG1K#icP6zTw0^O|RyJxjVE&P$bcK{~)+Zhek;f^%QjCWk>*ncA<`w@G5v+?`Bg)V0_dZA{lx z5Cl*Gw_wN^5CK@}4gJ6eBK{C?Rcy48h7+VAp)#bR8ajUqLCx0Q?D^idC8{k^DTrdI zKtKr)E*Elh+(Vp><8K$!MR4%$H5^C87dK!n;n@{7Kza6XR>_0L4F==_a5$;NW#Xt z?6wkVEln)%#jov5v?7Yz|CVWzenh?c6y!|A2v_8nB<0|~@CVXR?%g$aG{@^+<2aHb zj}3T&Ak}G$ngE{}k%vV~6Am^?iCkth3{8JX-Se{N$MwMAC)mN8Wz znpx}=|JSI@@ryarS#LO8-?R-E7p`Vgtk<(lPc!|l=l(*K6n?;rUh4l~-35`%Hvgka zg|!<=u|T5_Sxpdd<*}uiI(m!W8Q<@Qr)PL+^k!d^-UG)wHeOQ!*NU5%&i`smry zb@PEYt`h{_;$pY!s9t|~@5 z8~9ZMZ&);5@G%(Q(g6g7H~NSqKV>gN2RFWSWN+_mplvw#00S|mddqsECL3ugTHu2ThzojN^}_>U#Z zmm&qen@OhAyLTWfRIH%Wp-GDsa{826)aX&9NtG^T+7zOo3WCZFBuTVjEejRO=`06T zs~`(fA66ZRRcl!bwG5IRST1XYT7u+81*`L4SFIqs&J8x-pup&l@?`)nNYW4DK*s*2LPCV5w*@R}d z8Wa?_FGSpMv-%b86?0^PZqH)1Fg&fmyHIQCG%GOdbk{Q7_I}>=Nle<|#g8ZW|5zyU zhCnF`yvbH^)siPUf(QOjJ{iwNI~IHfIdcC_(0d4|l`JFgKm-$15V64ALrA&Z4g!og zhseqZLbQydO2Dkt5(q30eTxb%pTHUjL<_dc?lX|&N)fKZ7L;*DrTXhgz=j?>i9Pt3 zf-kqHM2{noO>#rvqX_RtGD#^2L#jo;uaIltsRL(=;-a4*CxX=Q# zg0M&=$gQ`!imMjof}4mfHoX8s!z%OC6CoY3JjgsFJNwa3g$4zZqa}wN>rl~TB6Kq- zl?*UShE%f&r$_hHbW=_p5+pmkA~a|(3Kxa%OOTi=)xi;`69}s_|4~cK|5vjzZYo&g zlCw*_nxmvsVDo(DNhTlCk-ydq#Zg(3aC!{Un?%aUvrCK0bl71vdS}>3C9;U7Niz+0 zTym*=uAops<#Ns*(Nr_cRRvVb)#Ank%df6}%kW-ZSuLv6asx)tNq-lzQJ|m(Vkuc> z2gQjcL{o}3q-O7WReq9iz}!^MR#3xag&ZDHqV3-y?Ikj z6FNFy)XgWrq{>RLnLRE@wTw$Pnl=Du9x&m9I0_V}WTD0h;fPb}%+YD@!_O$4ibiPL z)`SXKWQ72{vEZZAR(r8FQ&u-diXD8}w&r9GuIA*L+Ymjuz>*5(|FnRPv$t|o42kHq z6Tb|okH|L2Mw=)KvTDjJS7~ajOL8p8izm{og3c2nSmE{@Kgd$hlT>_l){{z8TOj_jf|TJtPTMsQ5Z7eE`a4Tbwap zWhgfjq4|hW)jEnx5azX?2x=$$>51opBB%rgP=f+fj)d6PK31J+D_{wWU2^xc69rCA z`Pv;?UZk|%^yXD&q16U$h!`7{rZERgSOJ@owau)=DUF(l|C1`15|I#%H9ovy6Uik* zg20Mv?Rr&PG$Ome%`b2Ni;4N>D<3>NSQIJyVU~vG(q92vzRW>t@1*nLj4|&liETmAr^oN{ORlpnk z08?{bI7m=>i+~BtmT8zcMMh05DGuC89wkzdoWRm2XhTTSSaQlx=2AgYnb7Po2_Fv; z2QXo3P|mLMvoCgtXTNa|N&JQmeZ*oQ&AFE@t;vvKl}~6^s|+rAwLm-G5n--;<?ZVa;l5qvpO;WL=lcru=f z9pv&5dpe>@G^-Aymecs!*ZU~7C@(RhRrzz0|62xR+xo5uZz=R7GC@KXfAr&wfyMDfZ+`{J<+MPc1Q2}uffL#qidVRyxA)OkuIMvxs z8E2JUL$S$LeKB&~Sa$cFe|;hBq=?hBNgGD39fU%Ir7Rt|EcE;cYMHh>=3B=W!KkGl 
z{CI`nA3$p3%BtbVlo_JkvUQjWA`tvoslK|H(;4s0<0jgv?YM(`*9VX-h)M_05OM4Q0}t>60ZLw|DfnmnHBDfZhrnjk7g^N55XkOw?F#Hh3&x{QptgWXssorAy& zmWqn|Iu*J&k;4NEAn<|@TtU1G63WOoE>nmdsff~93=HH5(Tf{EsDKFQ0RoB$)X_d? z>!$OX!av!XHe*88aXGv%k*>jJCKufzrP+`MtP+x85^CtD6dqn=bZ| zioGDAv_L^MT%+ab2<(eMf&XJD*V>6R4>|En5N>!W89#?gGGjO3kul<0^r4k z@EJAAIfcM3b}Wle@tc1@p@T5Q-msl{9LE3>GpiY(?|R0~`>yXXs(!;nE6lr{`VKZ3 z!~{AgXG0X1(2=WA2o$VDe=M1yy9%!;2N&c6^AZ`2sG(lmI%Dj^xmOdCCS%;l z^pQ69aIyd?NGF3aME?Q}ySm6^^Mf}FgzvZz3y6R?)1&O02@{MAsM#TCl*vx{w1Vi3 zUBCc7U^wmIzi8aQJIpRmfe}Eo4sVEsl_a5M&(=DxVq&%x4{_@J&w5DvD%ZA7|=E1+rm>dYYj9YksKrk{HtN_2uPUZqKzkHlX z5y2jkO;tQjwg2G*H8aPgXgq}Ysy}>8na~wa8j)T^GV+u#=ZTv3Jt~V%>3o0) zAj|7qE_8^6SZD=*h>Ti@fWAz_@>|bS;tAla&pZjd!~6^SybJkk%(G+&K?nzLkPEC3 zE4ZRig|Za?Oo*~$56du=OPt2>C^`ns&I<5{TET!@V2cH9P#>DjTCzaVi587CQ5#Yt z_q4^w`76Uyh;Yz{$txV$AySgsqlh?`{jj&p=*&yW!lfY1Kp;yDNKJjPQMABO9c9p? z>=H>#QG)?eDGepBD5FE#j#^UEF;@$qzO;b z2VIMRJxEY_S=2S#A5MI=hpHf?n9U&_RpeolKsY3DV+{(*4VTyfy9q- zT?%Eh2nzrNYyF2@E6N-Vp$Fgt9Sgvy;6&Pl%cqpkc14h0MGRaxqps{wS~A6irLO^E zvXB6(d2JxgSvKbIgMR(02l&{je1JXZhXnl)f~`bxMGA0@#EGTGr$h>tJkkmg8AyED z#Q#E3yF=ED#hyUTs4fx&^K>Q)2w7GU45LL3|4Tzz`Y@brA`zue7Wo=3eOh|s3=uQ5 zo~4)~xfs70+TkhMS|y3u^9MmlT96Q}Hf!0Xcvq{nA)0+#f22u~u#tuRtzVGPLIJti0rAQGwclVNn+JyA4e~YHDhaEZV5-1g%6*9c{n!95+ha0d9ovAv22D7q5fx@v2vn~cUuJ&2BhVe&Ad z8DU~2PAmoXCY8Y8KbK|7 zU|WWAV>X`B_Sz=MSl^hfSJBL33)o@=go^tCW40J$VAQm>a}6piRbQH8lK%-R=oKpY zWn`Z^;+n9&4YSzU`PtTWGe6MLH1QX8jDRq{$6bIT^w^PSGoXu*&~Xi_8kXc_!;)ql znoWk~4Klq7`xaD=+-cz)r=Zq+)d;NELQsA~6*=1vxumw6z^jpC_m$v7yHi`^Rsa=@XE-J0Ch5sSLN9ko}T;0s& z+sqB;`@PZ0})~PDq2{j^%M!BNVh@_A~b# z(;Bd@xmo~(kBvI+E$eoRfKZE~T3G7}P=u;gIqrGD^EenCYN!(YRK8y1&t|`=foRCF z=y}%ZC85T%1<=F>>0rL1$W+XNPy{c4R&`D4r6{!Xz~hwWX50yF;Lg<8^_X+wC_^D^ zm_1tO-MhRoYw8-Jg0tcDfV(B7aS;n} zD0ksLLb3jAFPL@jSg1~Y@CI*qfOC-5tpi3K1q3f1(fCFT3oo@t!DXSqpec`Y%+y)g zte*15Ys>%yGXgn%2*BpZOMXaDS1WNqMvh?`4>va$zW=i7DaUg;_bHR8-olceUt0`K zjMgs=bb-*Z?9_)Vx>Z8=t^0$GL}B z*c2&om-?^SKs|_7_1qKoRjk-vb&~XZ<@GGO(u{>%-}HVc2QQDaqTKbWkfAjHb>yI` zf+couZ`_U{C9Gv-#Mr{Dh=6mTB3t(&kp3A_+{aN@45kK&(;&=mzxRpGaQ7`mEME$s zEHbeTj=<%ya0qn1v;azw$jPLQ`$l7BS9Nm7cg%bB1R8DLUW%EbGqNP+D8UME7*nF; zL@)4<@z`q^m-r_?!;?n&O}luC8NXm}#~=O_&i}=VJLvO{ug*8`geNFkK}ePv!DE(B zc_trvo#Smk!W7f%SrGB$SlCOe0E<9y22QYv1$_XBg!O$ldafr=#tTdYNsG z^A!#THwavi!FKqFtIztbkNeNg?mAZqO7NAKPl!APUw;@;#td9qb8hs06<7n@XYC(oWfEkgV$bSTlHMvo#* zs&pySrZt}}tne(TON`|z^s@luNy1K@$UK|su!6>aJTbByd&@$s3jD^B6tohhg>rML zI?bzhFW~7ab$rv zZ*3G{WU6t*A7%+I=$=!!b)uY&81jf1RL}Gnq>w{is9tUjIT#UI74Z`bE^X<4$t8@nNQ@6n*23RSq5c&#taT<*Bg45?idXe682ms4xklƼ z^2Dt=brO-B708+#0ST5xk*xehR?;mM$hGXT=c1dgy6eJ*+hUEnw54W&#OH-nugWAR zp%RG`AZFrhn<#^2$|oOLe8hJZy9+biu)_}%HC%8f;;ZDBzSe?^LX=JW6noWD)Gv3p z@NpMjWzHwCRj^eIvCA*R9CKk`0lA}G?fF_kEqLbGCPn*BWzjAEK18y~qK$jmN|d%1 zoQ~$o9JSO_Q>~McjsKClq=yj=_l$7V!c;4FeB5%hME?i^$Swb9sc>rxQ8i)9RnuL! 
z-7?1sFG;^`DG^klLLJk-FNyu)KLnhX+EB$1IS~az4q8Ezr2KAOLr96pazkLNL?4Qm1_u=LP}!fImt7!y7{A~KN} zsv#MP!?6lvkdT}jqa>O4$Vw_QlbU2rSmfrz3E}8=3xgvj2{^b-qB51@k(||%)i4WA z%?AW6{h#66=XC4;jvP!qX&R?9lfK;~pPpdzQ6 z))Q8(pu0W(;SjVxy!Hd&ZBL>w(~>}BVK)||Aiv#Q0O(+0Q^J`g~SL20d3J*C;& zt~R&W`)N#gMF2lUAhs>xCd>w_+vB2eoW1?W(01Xg2Ow;*hD0uPt9uyqJpePcz`)u_ zXx;CEmni@<(?9&-4~GZ>1ES4pX~XMY@ZN#|4+xMqKw{p103x=P>f(F*>tCJVuNGnh zNk~XTve`cJzY1P3OvcrxR1x3~f#64z7`HZ{9F@TuMlQ*A(X3Q~L%#A67<1W_;SHl$ zwPe%ThnVZY(4Y@t6$#7xQfy=J>i04cDI#|_GCz*Rw7p0<@r{eDLl2Ux7AeUu0smy| z65>G8ovv|6M>lKaD`SsjWRXz?)c9hT)R4DUPF`w-jO8;QZ^i$(iU-yB5?{7c$})tWV(cwOsX zJH;$9#W3x7E3>A`8Q99MjgdHbwowO-CdQ_8kZz0HWn+7qpt6Wp5}Xri_Xye99ye_O z{j)Ph@+LK=o3F|3?%NP_nc%~jx`TY}VGXw3@7}jOb4SRHN{zVs<~PA>691>10DRyW zI(Wewz8e1S1l$gvIP6M|6H4Bj;u`-Vz&Gx3S|VHFA0PSlBu?^^XOFffM>)%37Zp^S zaOEuz^@&&+!Pd!q=BYmI|6ZMOoi{w^53@MXi!PA@8a?Sb51%c>3o(@(O=eLgK8RJ?dV;V`X$KsC#3ZE9Tn`O*#hI`-ru3KLQME^A@HFwdxP_pjG zu!yDhDTcXCgmcQCqAMj9!wNs>b@&%0TPNan2%saWcI6uu;yy{|_jyE`h{$Nomsfve zh`$rd3VU$72R}x$?h{Aan_XLs52b!%7LT~zy{*$k_+0tXA7_;ks3hOQP|EN9Uq&QZ z03OBm&EEln)HAJ91=fc2>5ROPi_Vw@;$>;>>U?0ZeALf=U>EObIAq=ixjf@~8{@OFyp?I{R z462O4NTDUtT0-$eCWZ$d&Wb;rijwh1W`N?d85>UA7Q_^y9yZ6R#7AJ1|?x?5&h$s)oA9ugjRNTV}CR@zaE3pLF7DI=wP z8a6iCE8avejt6PThY(()I6_tu8l3mVjFQC?JL()c(&IgbN1H63OROV4I^!|cVL$p~ zEB=%*u3#<>WJ2m%yx{~LD&#{#WJF5jL{el$Vp>oLr2j<@WHFHzLTcnk-iA^YWJZD{ zHkQL>$jv^MXpW`v1Cjx#n_QgbbO*r;v`QfBcqH31o~maBpgZRWJaxoN3cXc zWJhn9V|fInM-HWG$%R~4KpbP zrA(|$Xz+)UEah4Q@CjTR|`C1H0Asg5S8Q=MA)IO&u2Qy_{4aTpSiT+5VF zX*>zsmD*JW)YDCNDVjB@WBiERi0QK-X$zTBR|%?Tn?guH001HR1O*BJ6#y(W00#gc z11JLk2>$>p2pmYTpuvL(6DnNDu%W|;5F<*QNU@?oapL&!At278$B!UGiX2I@q{)*g zQ>t9avZc$HFk{M`SyJ4sjT$j(q{*|V&!0ep3LQ$csL`WHlN!89fX}Ut1UEueO0}xh zt5~yY-O9DAR&g8?-sI!8tl6_@)2dy|wk=7px5VC@>b9=kyLj{J-OCpu0RnO}_8m;P zu;Igq6R&I<_n=@tbQMdUOu4e<%VnJcc1%_<=Fgx*iylonBxI)pb#B}YxwPxouw%j=jjkPPF_B^|{@87_KAHDUrDaL|e1-2H>yt(t|&?^e&U9dIv=-9Js-#)wba^z$y zg#YDEzP$PKho^@(*!m{=_weJ(&)o4lLC@po>)+46|4j7=wYQ#s1QuxEffIQb*+t>y zhv0({Mpz(#2MH+Qg&1ato`MGbgWrN0hA85QvH|$di75VKA&D%u=ptx<{ZoNBDOPae zhcD)+O(kN9T=A=_%--cm8Q-iZa$I zC!vfsIwhLMp(x~>;yG&Rr8^?3k(4)TM1Yi-hAL`?f5J86ZN|}<=%}o=iXfdfuK$*k zs&lUD>aDoy_os)E!3t}xzy@2MueR1|tgy%?8{Mu45v%O8(EjFZLDNQS?X{#a+hL-= zV(aaSmaefHdX$yMZaZ6mjTM*XxA*Qipyqz0*l2WOPFP_a`ZC97ykwLB=!hfUiDDPqA##JL#`4KS(RjZ~sp!yMO=wF6DnQ z{{TFR!Q>Ud0;Xhu95bK-NrFKAE%1SFLg1th@(%Pt@PZ7v*8~m1z4&49gB^>RL81r3 z5{58=CQRYOh~+VepiqV28<_(m@;l|jQi`B5z)xTHoEbR zaE#;6)+k5y{puO#tD_xPMMpjQu!VjMq#(_vJsl3Rc)vR1A$_ODBQElGKYS$R${~na zNV0g8w16N4VS#d7(sr2?L^(WJi%n(_ke?jXQ=k({PZFdSrL<&1s+h_=5$7+?p`<7g zBFb`fa+Z;-Wt--Lo7cE9A+`7=FG(o~VdBbEQF2dc%27&9T0kMCY$igkDUe+{(^ZT? 
zmEjIE%1vsr7M}FwF%1GsZyJP~6`&-{P}z_^e$z#LlFI82WB<-x79^Vq=_Nb|GRlEg zvSaX@i1y4!21nkLM5QCkJ_}RIb^f!W%d7w_1yW8;y3?01GhamT$k04K^hKr19rk#V zOl`&_nHGH|I$c^2cv_&E?i^?-5wg94h%_O1?1UlhdD0Nk4wq`8i7z*K(3mh4nU-`X zK{}~RWu}Ox6_Bbze#(#vGSrYoHL9n?v(lL8l9aJ@YBD*hOG{GHtymR^M?Klhb;;iwHoro3bQ`FC~T84gV4}vIluaCmc$Y4tCbF%Hh&r zMUvXnvQw7oL`f`1SpiK(AOe2ygQqYMfm{6Jpde+ZY?WzS!^W1U4XtNiW1?H${uVYe z;z$iiLd~o?Hm=^iZA?RA%G+R|DSW6dMl5LoKm6mEnEeNLcY3UM&UYZ3Wr%OjOCk*p zw<3+bX@tMaQBxWgz94a|ZN#yQ*EZLh5wMXq^GXmI31U^=gq1}JoZ0R&m{dYC7)sje zQJ6Y5wwX-pg-xQ#T@awdoD>Lwt7!om0eGEs`lvSlhglcm@Wn7znQ}$xSZY>zsiI7# zB~Mga7K@~l5x@W-M(N7z64}42BJrduE7=$)g#X0)I?+PITVeu2Swky(a6~xS$t>U4 zw2tj0q9R+{3^z9d;XyG@BVd6Dbc?&NI_QaJt1p^LI?n$npZOe#={u9EekS2$Hko=_ zU;eTdzDllSDeRGJauP6Bb}Xa)K-~v$a>XuXRyk0#(QegN(49{33Tf$S|I~=s6Vb9p zPuo&cmhvCUwvlPqS!j!N5)c-^zyltDf%`TEy5fky19oA7UBK3~7UT7a2LkNvuFA_4 zv2(E@^yHAl(vs*11V8wJiz@Tlt*1@YmgO4~O=9~2pTfwPgE=FXjV zi^5HI^1MT|-jBTZ31k?CzV#7{tfG3=LjNA{x;e^SiwG3a@eJjATgwMQL;$Z0w-`aC z-Ob9{>X+q=t7tcFDRTmW42*CrF3@petTxq>v3<5-aXS%GV!#3faBU?g`)sV2!`5xi z>cnk@Sb5SZlOC<{wj45a2I`sUzk+3=RDDh=3z^j#mr2_%&z)T=8=W_$la_;@+(|dJ%eaRKB>~ag7@4@j|A+u! z9>8c?!y5=)FXbs4yPe2{G^vVsB@WQMnLSw>;MvVvkKPx%&A)ud44b%PMHU?(9$=WqDeN zA;NYNZ7UFsxo!6#XqWJ)G>V5dY$PGJELFlg!I53%qH_qT(dhkLQtT+7A} zfe;R{P*=#-RpOV5k76TQW>>B#Xek#}d{$+X^@uZgUAs_o58!iI#Yrau2n6sCbogHv zsB^ODUNN|9*9Q_@1&#cnTMAeb+>(#0av~V@Y_k_=X?I#%XN$Ke2n0|L5kQIg$9Vr0 zcziH|w75b9wMoTgWhXauImkgmM34KSX%zrg3>0I~27wl{RvIOY?&VQ<2XG$2d5{rg zlTvrJKwW@@sutKMrRy}AQj|dm6wlMwLxyEe(Ppxd?sqL1XF_OhTnK= zYxZ7(00^N4Yd2Y74uda42S204F@^Y(N>VXv`{a^v%;D5`8YJw;v2#Em* zNr56+4hP8x9p*5=WGoHTaRvbv1fXqjxhnH0mh%yo4iQSg6fk8;lv*a0n-mKcK$(pt zk`W|eFKL$|DQ4o}2Moz`5}7Y<2ABmAg5>}fH)kt7g_w!CJ!B9Otwe&2c@X|MKvN}R zs@F_!SYT*qcz{4|;wYEN#AvS7hNyHef7y%`@BmKXV}L+KrMM)V)g)3RhmO0p%yBVp!N`I*j%u{2a1=V9V#3Zv|9(Ujs?+6 z_=zyy372_QNyrH?%(+5e#|JJ7enJ8kw$_aDxqAjFimmVpcj#t43LkU!k=sOd>;{h` zij%KIcukk0^3tU9RFjM*5dPo~|IiQopbz@s4|rLnnYnw-NdR{c3$1VqUAi>PwKqY9 zKIU~!um4wjGD4>F}9Q-*66mum^iEnA#SLGC>>>;A3%$heLyQ zz}Kp5gCdHAf@=0K%>}Jq+MN~ZcK>j28_{hK@LV58k$xHy(Psg1u}PLyt|()H++>n? 
zBWFQ%Sq+zb+yZZ>6fny>()pvPAQZBZF_&O()#*L9W zE(oKmE2@+jX0a-Ph(tqLg?f^EBUJBDF>lypY508R`78P;ltcMH8hb!Bx~deJo5z(m zC;wtTE?ccJ_KFfaY|`eYeR?p;ITIZFGo%%Hua|sqbF*ad4;e|V8d$OFhO%h*vLn%^ zDk`wtYPB;HZ4+r+2T^TxBO~_+eM+dZk1AqHyF=`15eT}k23xgU`!X$65QQbS?`JnD z5|(gSfiSaz8yasd@m5k>sc|c}KnpTQ%WzVKw+|aP>-ArZ7r1rCRCpT{*%@$#tF>F3 zxEZs!?!{{Bm}BueGFf=Ck2{Xmgsm{~dudgvKoPSv1G-*o5UQ5BdXs4tAX1zrU^dcn zXLVhRRTQKpPyu_m_-LAiq_|*PVN%60zEn+~)Vz07ozGA#{J3u838I{5YOIS|Gyf}b z)20?d1u|FYyhtWOqIP?HqggQXo&A))l~lJs0lH#$qG%CB&qy$PC`#d5xV6$&%>=K1 zv{MZFRya7Ahr%lk3P^k>XEL3HIx)wH&D!?4&Zy!WdvliGPbQM(nE zhze^IPir%KC}FWxGAE*^v6$Gmv+EgXaz5;A)WS64E=7B`cbj`xAI{od~u) zNE|UR6~2?jGAGh+FKoYi6S;7BW0e?a0rxnVBGSZD0dWv$XO({ks0$|NG5s| zYXP{x5B^|Sjq^j$c2GmOl$XeKyTrgi40Q?oierov@S|J%2}FweF&8K)VE-G(|7%*z zh&T)NFp;@fhboPe8o?;xPD1y2pO_R0Xq}RmF?R?tsy2Y=kY?fQxo>lr|1@{kx-EQ0 zO@eYZ=FkJ9hcwI4Gi92>%gP02*`;%6Uhr zdl%ikC~b0)Hp}AW&j!r4-FVOltUINx@wGv7Wc|9k$nV5+&EQGS#$IQ6-)KHyiu^DG{#c(lol-Wx$b$rY5_=$~25p%Z0h&!}q z4bdPQXzYx(5luJc(_PtXd>u2*@%$2!lFcj@g-b1@4l%6<*o^=jGPzoTvv^D>bl7d9 zNZR9KTqY!=9530VeUP1p_x!@|hLVheWy1BP4GcVLFH7eR`GtvTW$ieL~{QrpB?*(bzdy+qF62J1q zas}Jo1uG;RwAS6uCgZZWjfY4*HX{wIC*sfEGehislIbbLigkvqt=C$OlYD2ve_h`i zBi{+cxrV&IVG~4rl|K312{K|x4kS;v#%1G8DFOz+n0&SWIF-EJj}Zl6y{s%XqaA3*OLhx(C6Lt;1C%Z9<*dDnU5oG%%RBQrG)#EqAgb8DRxDl7YZ<=t}Y<73#C#VuQ0Xhr5uKP%An9Vjt zooPB`Qn13G#R8My*M2PR13xj7 zK-t8kN;!9Z9*B=gw9Kt$l_;u1!4Wmrpw7wa@J$d!T{eUa%1kQ|2{z?TZY#PZTQC{T z@N{cA?36U2V!$Ts87!JYAsD6lp1u@#%6_<9C^m^|Bet$tPAf$F-PqWkPMa0CaAjSaCK7=ZL4`7w16A@7UE!aH}?JhPH4nMUXWAQWT50Ru6Ro4O$ z2t_$*co*>jzldA*4wPU4K(OB=KN4{POMUA6i-qXlu+KxS@VgV&}Ub# zH1pTGCiNYYuO3m&x0+E zPJ~%bpi+aiY9ahrPEduV6{hBtdGsjOlY+|qv#?Z<9|8{^5Wuht=|w(##ML@0u4v1O zTdca2_U7n_W|JM5~u<1@@{6ahqT={b5&GGJJ z95`qNpDZQUbSl-L>ZpReV{R$vsdv6)Iu+kJJmF~PHMLWg=(M}n-5GC1Ag~KiEkPK> z8Re-H_1Yt_K#krp&o|*fD$PI#AB0fB#RLS9Ix>jTYOR@^lE^Cytg@gp&0=dws>&+k z=)Cm=EG|HddW!HR{sgiLG|4nXhz|=Q;3I-v7`nx%cN&YaNap(b4ZVaKgi1CepM)~X zlAtrMlL{f*ZL6C&jPJIqj(Vyn%KjnmKret`Eh8i^Qc=o~l#U{{*xoB@r4lvOo_-^#4yPDctJ9EU&YPzk{UW>_hO38Zj!=LJJck3xH6h%tU1p zPs~TPyhxk}!s*A42LvJ@v?fQT70^O!#WmL=i(|ARC+VzHCjLzHia*u(ldUxVH2VrZ zjFh6J0%jv~QrC{i^v*kA>%8R-ee}T~7JkIZv&nxJv^HG>*EG~zb>D?oGyz2tjxGer zxCqspl=`kI)jFf9)7_N1?K6QV4e=dgsns{jbX~gYR3%k za?dZH=sekk>nZJeA;qcg@8&|ypb+PJ4>vvl2@Ai4&?11d&k_>vDoH=D5A|ySX$uf? zF~CvDiq6{{_$NC@N$icJZ?}5qudfxb$QFI{RIZ2(O)Jzu%k;8>{lLJfum}>GHa|2I z6``q&p=2sqKc`dif6jVf&)6{X)nc%OJt#r%B=@43n&#y$1`^PLhXRbt7DKrVH7zG} zli1yqf|(Iz>;EbdNDE~^SQNQnU@A`$SW~EjCc=46c-M+b1Oy^CqwvZ+33{OA?lmB? 
zea34lf(5Any|QnGns;Xc4HuE^^b5U6?2WP$ZbZW<&`|-4B01)QC|o zHNBoNW^F5e;aU5J8VTQ|XNBIAoArpk+^YkRb>Oz=sE1=2%7p z3xD3R88<3ratxDOV+2^n=DiVK>|qxh!DvZNqR~n7U}shwF@I zK9%zr<7^U?Uew!ULUTXW(I-opLW-u=*M~rOV-F1xoIbu`LYz%yd>uko?|^t46ozx1 z1{~5){$aY6=yN7*N(_38DVPSD)TM(tPm6%!PmQ25Og9s#Rk{-*FK$JhNDE8Q1Q7^= zR3J$FXayfo`5}WuO)8fuCqSJi6@Gcgqxj?qjAjZTN4@lbL@DN9j!9FmmenTbDcwv_ zvaG}nOIA`f35Rae)BM4Yr=P*Vt18pS3jF6EZ3zui!IIF;kb#j7bP76RQGr-+X(TSe zSO44^L`<`S#GZv2O>rPH*2;D^YywIvGC_hYu)t|&r6Maw;5U`1HEcfG!W19~K)YJ- z^*;-F)nz)GP!M_H36z1W=lBx{eON#rOML9MM!G=H>IAHX(Vj|}7c9{>*Sey?SK44A z9-~+ag;dFmYLYU(JT4A=Eg8{V+ERf+=Ej6$VHSGBvKD>;pH^rn9*LPjd_}pEnk1So2@7VP+B#%yH#XYQP&tfsDVOZ=9}9^hRqpCYF#Qi9 z?*M|dA|lG`@k~;w)n#j0R>=cfZ!njgm5pn>#Jb>1^AfOl6R~G#`#xne)`KUBnjB9AlNoZ^npl zR0@=fScfEiE&*tT*3p^6iQYB92J0X#&l1uLX*^p9uR<1t)y%-m_uWHc3#UcI6HQvZRU&L-TQ z2xVduUxI}%yn9w>NerU8O@hx{>3y^;9d03sX(=psW2&$%An$%$rr#f#@mg?f*7Ue& z)0Q>QN^{^{Ath;Gy<8NzOLOssd zu+;HuwFm?*N?taZv@PG%6UojU2pF-$yo>|-2udABY@3^uE_p*abgmr})B0L{3l}r0 zi?S@dMf|ZIs*}ZQ?qpIrlIw@p$cF;4BPfE6GK;smO(ql=u!n@^*At3xXe<&UZXO{t zFAccc9#0_+up`U65QSzE%^8}HH6fBFrf3u&)|6`YLI&^AHVny(DE}+Wv8UXH>Ww%2 z5Tqe)d6$wx6bPl=q-`ZVc|z1qK*msHCYR5MrUgEFW%}|$oj$#J5*&Sxkhk{70&ase z6(%JoZObMB?xZm2>{7GUB|EJ52Qg6fav(olzkCN0!#J8K4Jtvrk03vh5w&Ae6VIa@ zw2LwB>ns&<8=YX31oQ}iQMhnIB9F->6~Z$X6A4Gcpus~x$(Rtbx{Ce@gjhfa3-AwX z@w&i>y>GG!Edf9z+dh%0h@RL%n$SKBlt4?uJ{ZagX&MoClRX^KI}rR35e$iCakw8e zHhz*L7%QYZ_y%yO0By3C#W;@MaW#L_uD1!YnA$fvoqlrZmLg3LV zqClVlVLJx#KDVi-z<|TGL&Q2X#qn~OK;$y<$``{kJoym>|1SOuU1*gNE{k*Wj-|QO0{)uIhV<|B5zpbRdwq7w}^km^g|uf}71rmbIWp zYZSy;gbkh$Nwl1pop9cDv7z2fXL@cvmjCt zw0ftd3CD<#nKJsmISj~wOeU64j@v^>X-T-b^TVaE0J5k6KMUc{rc@??bk&7%6{6igfVoH!iju-<=ttdMn z`bXX)L&~8JWt2ubT+G|ViGoZBF#|U0f(xD;r2qTi&CJQl)$~b%!H@O!V#KUaLSt%orsl3+w8YRijTAY%f01Oz(Jjeo1@pRDD%ZQJ2x?mxf zYYGuR00g$|y0%e?E9}gSfRldF%#neS9o(gg;HL|zq)$ws`WP@Xyv}#Y$O4tmjNne= z&>YJ9&e7)a)H8L|J8QKmvkWe~P=jEI+^Y%2%q%D^GcuwG zJ&lPTM@@o1=6a5sQGM6|Wj>KY;D07!T$IQ# ztMFJ8ArX_Hk$(sTIT6j&L?)rwQp052ybUa|0M(zQpwi{bO)3p|996Tzxnwgqg$3Nl zbzBYOL!7z9xLv$hAQLZ2w*PygAdOH7>ToBx6A9F?D*8a(hRv?!s5c>*sl&9-7e$PJ zSQ}&QMcUQg)cLNRqDf18h9@Woe`rD8HK?l$-SzN3PHm8tSkRKFI&levU@In-#9KHr zh`>0blEg@OOboz4m(oE^#NA%n?G*ZCKzyki<`W3Vf~S{-8O?e?q*&Ho8eaIbig3_} za3D{^Fho*~5XT_igvi%ceGszyU(y+n)%3LGEnuIOli^H>`pLGU8;brMDRZG23Qj%d zX||IXUw`mB!az|<3R~wD66sWpN#ckUwR>GA=Ao9Hn4}^`Op~k`5FX`Y zzA@oLsK21fx(!pm$XU;giA+AwnC;VBb`Wt`2;G1%Hh1|4RTgV z;n3d*T<3^h4*#nB38bk7qP08>^n*`N_zI0#%QOOzZNabA~EHBc#m96-PTS78k=tPdn{ z3_2x{kxGQ%qzy;B38qOsIveAV_Uge-GK9J4U6U+ z!%e9Qq~xL?0CzPix$bKXG25K8X~H(`y;v}V!G#TOIs|+hUS{mlN$2*MPJ|!;)F2l? 
[GIT binary patch payload: base85-encoded binary file data added by this commit; not human-readable and omitted here.]
z6}ht7dOb@)`V!nd-u*VU^E0#?rEwAKBfT-PXwcC)Mx> zl)%m!l#S4^(*xGetV>r|bF;n>8bgt+4!YPNL0ycPz!?gdGYJH5=o$Rvs1Ihai=j%e z!bZ}<3CbK^Vilpu^<4|bJ|MO&M!R7_4Z+X7jI7HpfPtQjyRiC*)TToi>#+d*GoAHR z;$-z+6|I$}GT_%BAN`Wxj>^ihHIyBMnGm8~-~d!F&d#uDVkeH24GK8sy$}~$7{qbm zKLcQvI3a!T25jM`>z!jD8;%nHgh6I$+)t{s_uX1z)Zp@EW0;ez7eQZ8!bU}=V=BcC z3A7Ne;VszfW5Rk57JgEZ3k$9=WVd_IPhR97q~vs?ujL4$rxM!tWwqQr&^1Pw8&s$l zfe24NKPo%oS^hY!7w1zMp-m>?}BqY^OYl)SN2YY}wo=&_co5o^OSLZWausq5SLpt@BS3!+#a zdtE!h&Mj}nFUdRKb6yZ+#MyP;LsWS$4hz-$!i(KDBcHP(`Ug5X9yaw9X4DeUq(_;o644>>jF*q0ybL5Vj*%h^hP;xl$oDQa!?-h|G*L;THgyd)PnDlXi))dGhk zvxt(9K1Om*TW58ll{F&l-e|pTKmUFSew~Pto}Z%b@b-ES!d|oRyfN)yNf#AsHycX5 zP?xy!xm?6J@t{CUrSbM+qy+(M~a*p3%l zarP1l|4?x6ZbH~on<3|qf@A5JDoR0N52^41g7l6M^zxyCj-RVtfVJQ_{)qdoxXj8l zD*vG9N`$EY94C5)mBukrD~eb>U#Gbo#R!1jagLjP^eMEw#BEH78RppFQJAc~5ZQyV zpje)x5d>p+0_Xb9NoOa@oRZEa=Nn?&MxN?@O^yAw*%jG^2zUS|kaXJuKaA8tg6f;t zYS>GRjN&P z33g*p4AXU1Qo1+hBZye3Kq}~)FkUkT2`vk3L^|el*Pluciy}0!OGi=TcFVlc&|y0v z;o;}@_>zI)Nvw`@nx>cSfDCgTc>WPLQz9b+tsLzWA$c!b%sg~~5(hvy8VgVa3RG?( z(KMO=0f`?W@RRsVkLRBe69={^4}K7b>w17!L(`(fM7n!N3tVreaOJnL9yVHbwU{{X zh?fg_f@a@&iBZ#)5T%WH2?gVvUh#FAh!IOER&jWMKuEnfWa*rEY8PQ^&7hSnw~L_G zdc8?0agYn1$Vq-U2LgzI2mk~)GA=_6Kn&Y)mTVXbKO-&~G_&}WEVWmqEQhSe`)T>C zi~Q8;EFTz0viQ^NQ&v-j_>}AZ?;4<48?_upp=c z!GZ+^2`X34f`xMb7FJ-G&>+Q%7B6DVsBt65jvhaP3@LIX$&w&Dks;Hm)3bNaQl_jx z=2=IWWd3!kknpLabPU`tkFoPhUTO{{#vAS28F~f-iHf z4B4~7yLUy;qD`xIE!(yt`4G5O@YVwY7y$VrD2_mcfgc6cDp<=x;EQ7`KCCd2U^%P| z3ujyyShB^1pi1wU$5Bv2wZv9!1SQ4O zEf`qfRBu!9;}1Hq*z$^25D_=hKV?~jqKRPv*rJOsipB?Z*~zs~e)82NPDqZ$x6x}4 zA=D#J>4}6DL0I)z5ITWmRoG)NLK&r$QyTP6Yaj_|mV%wUc9DQKtz@8DNuB3Y1s7Ie zOCYuQX3$_5$W_5Y5!6!LS}HDQCzX5h*(Y^y>BtyiF9PfJ#VOtAUkbV0o!y}UnrCD8W^cAWpmGy2)vV}J9MOHomL@W$h z@IWj4pnK2*d;lDQz#{$QD{VcOoOIF=SFjf!X)Q!vMuWL_t;GfZ6&%uhJTmsN(qDtk zUrI=GWZJp}zAT%@N26TP%1=}f&dnBJAmYGRJ#Y)a_vLA7+ryqnRM>-$>u`0^qD3)A zj+QztYRHX*3vZSZ)UC*bW1cx$S$~wEPOYi!oNX9=`SNDB=>07Q;uO~)ei(q$p<)qm z{88I~4=bwXyK5>A;!rb$5u;sWs#ZTt5=2zb)?V^QF|EYCTX>{^Y54JctO?**4bzCeG3`<`}1D`f+t^ZBSOI)+X(1C zp}s%S^{ zxiE2o@J1joMZcrnF_6r=;xn?ioUa&ChBHi&x-{Z3L6A`~Ib$6f)hL(;_~9pWDiIN- z=Mn~S(TExpBq__3$D-H>9R^{XT0Z%}Qo=H9EF2|90*A#@ie-)lds!GSqB_0V=mF=* zi$(kqi#}wMjYBg?A}NBzPzteV35+E*`Grf9&`*B!)$x^LAtW@T8N-#i zPmBzK6Mihv2YnEeBK*)tARYjSp4^H~cGBhNdbvK1^n{zHyyiE(Sw#4-k}XEN7C{T@ z&_0!uU!p1`EzLPfQh~%V475=ni0O}b_M;yaz|}7B$)4~v6Id*($Ui;OL55cJp{3-a z<>Ym}n94M#P)g|rX=1&G(yNGD8i@`W_$?mNiX!EB00wxanC;Z5h@QLFs#Ihqe(F?^ zI4xmTyE-o2Mf6(a+$v13iOz|^3TWTc8$Z@~p7=DiUr36fP-YX>TJmIUwc)8(`?@N> z0#;Lm6z6iTcrKhS(o2oRCPVV3mBB=SlBTTx1qL3#tE+?%BXzarslL?9^x@>Ed)3oj z18Z6&nl`mmLJ&>v>Mk;r3UKFy5L$xC!@)d2vM`lTWgUW_;aLQXt-2ygK5M(N$Skp{ zU9Mt{%iQNmXFz;qb{XRY|z5Vxd5ZcGWKJ z6}o*|sM1g?-}^@QH031cgD9Fu>^jpX=@ey`RQFxNWRJWN@a--Hf?k5CcV-c4Z!UiV zTXanbzf{s4x!T4;4TG3JI29FxNPIg08w`PvbML&ql}X`Pprq3C0|rWj6>)fsA`uo1 zb3kcL4sJKZCJVB-{$^hxBiSukimgZgRQ9EAws5)-R@Zo^ETvL<=8(-bk;!kOkbqoue^?g&=~#IDhrH8swaZDQY~w{}|4RcSQ{U{JMZ zW}=zE*+n2>;T*0bLubj3p7dvk3uOm^>6R!~>B|&ZA(UZ-o%rG8k0Hwk{VC=NQw1nb zAkAX32#zc-GYcWtOzBO%^f?s1HLua)DJfSeZLqyCunVGRPm?K;{>oJoNj(;S?NBj= zc@PkuP-R5B8nHj&aP_pLu9@^2mbnf$xs792XmZ6$1@W(0hZ;pZix*U|VzzuXBi;&Z zCvOL-1*)F#;E&Lkp~pQHgQ^Gr>rRtJOs*_9!yBH}Sw?iBdNp9_687$7zZYE#iDbQ_ zsue&)Ah#3|gsQy&glvg~!<;1vO~Cy*mj`=P*LFC`N=x&buRB4-Hpt5aj60Vs_TodF zP%T)er;VR6Jr8+61OV|nodDtFPy`B*&mt<7piSXl;U&&@XY;P#{9rrxim-p2UNFww z${6-ZV`Al4b;5~4ABzlL5bK zp~DxSY25`Rb?X{G_rx)%CbU4%68X{^-0wMGNlVgx_;9KG^s2WzxIy1{0In=_$d)5y z{a`5zytsw8Vs+#Vv(fKOlKMoKYsGHEy7Sxb{*ncY-K$BMO_XDL&}I*}YKnzgxi4cq 
zK~h`@%A=5iG)Hl+(H-NJ~&)P?_C!#DWImLO)tl#aat$3Ii%(*KN^OeTALTAQRdkL+M6SSO7ZYMLF~Xl%^96RPtn}jE4IpoWQbG5lWVlqJm~|1p$EuC6~U2VO+}i^rD6iYV#!cq zGo~5P?F3dO$4#8b#JR;k@WHEu+*9mRPs|fPFobVGl0HRX6Isw`FyE;pAv7kfFcB4JR)R5ev_q9W33cRtKA}7BA-60XnP) zAOJ@HT#`fw9Y<;ch*csd&bcB4K7%3nrxJx^fP!6W zuqH<|24U1>R2n3^G2jp>-Fgtmtl(xvL=DGChA^Mbvo#;Q3FYrBqgArY#7j@CX7fgo9Po9o4)@Pv8mv zaWdE#w%KG>4nU>7s(&6|o=g z7;3Y1(@l=4Y-L1)^@dfvigub(1%N{=_=Pyo$)Uao<3TB-qAJ*t3QqxsHj&|GJOP4P zD(Q4W4+x_;yh;`wg{YzfE(C`7MQWccT&fal8ye_%a-PTlrmt?uGXPfwo)T+8m4!Tj zTaMN_s8I-tUkLIEoEGc1x}Sj_<*aC+Z@2?=7U4i;1lr*$pl+kJ)<$N=0+IY_W2Ebg z1RS{Ps{$Gt00Gr)HU)?1!zIibv}VL$5d>Yht3JgSLHsFv;OcPP>%LMfxV{$uJ<^h) z@BHUc`1#a+##~D()N`{d*>6+T8W9DncDj{)UEJr-jOtOhSP=Xiq zT_v8<8&HG0Naz)TP!$(1uV$$8IYJ!#9lD$ zEz~RS{Ha7x))+D^PAsCLvSG|ZZ40`DuKKn{?r=(Yfi3dRM}o^ zM|_};?1G`F-mdegE$>2a3b6>9iAq*#5Z7)5bsVa7NJ_mS1YI=mNRs69MsNA<4`i+g zS-j99c0}wB#9q{{p?(Zqob0CjWYd~&{=Uy%madQBY5%_5Dsu1p@&WnP0v}*N{o3!# z>Td%x*E2Doxir)Nj0F6Kpe_o8UGM?7dM;_zgm++~ z*+Nv$bdZIqAK(~q7FQ8!c_~d4(k`jtS;Pfgyvhqp2^S^baWSy}7PGO^6d#?k+W(?W z&!UCXh(kI~@fx@B9)A-K=aR34UJ6AY9~1IJEt$Po2)`C7zI7V;w}4HrWpAOkWL z#gHS91SUiBCM!%GK_E=5qUK1MCUbHrCsA_()!V|%fO=37d21Kxu_?d+%p*V}bn^&Q4YKp5sLYskb3#jWCf98L-ldQ@e?~)3^hTo%RavwL z9W)km^hj&U3sy89rO-jnXIf}8NxL*!fE6xkw3Nhit8~gczjRKUA1cPw-^{eh)O0`d zvQ86qiqg_i5Ht<%2@@A}Q!C7MAvGwckTpAXRj(jX+ESSCi}UEMReN=^D06&`h1n4C z?zHY#qqVSxaVO!KH2TU<{|DQq^<0}wFmp4Wb=F$LnLpFDPOl@pky&T)G@QM1U*~ih z+s5eh42n)M;jpq{SM|94#AIh2s4#XkD|II0G-PwOOX)Rbk5(LEc0rQ{Rd@DjpNd{T zP-qiTmrjX%qIPUQwFjGrI7rNGhzd%%Y-Y=LZ>!_~XUCgX%m}^hvcK+jMEiDf)3a(P z#Vv%~UEoSI?Y44Lx6+J7ZqQ9r;mFfecXtcfCE`l80wiVKHr809ccXV-T}T2mg>QTm z4n3VrSR-7gcYQMqTo3?_J-}`H20ttnciS|5>oaoOw@WX}F5Kof5WpQoKr85jH{8qt zmp7ux6mJ)}JbMo=sL?I#K_K8lENrGy#JB&P%R^cCg<~^M$jDXHiFLpVw^GM#*IDFd z%44JWjo0Yq%1J|%17DaCewR0i4K|cOkAj5qbDTJiL$aAH%Z|ee29(3HFpb-k-gYIq z0M*}-Bl(ppIUf&^_b7xmTF0`Cc(E)rfu9Bc>4ENacllSN3;dca0aQmuWQozJB#hj1 zo9lUWG`Yjj2$rx5p6l|S6FO;BhnyRoQKNavX?dYL`bS)cN5k@?Q@SA07nfIhraK2; zvlMIU}yji(Xc7dZ;5bFKIb!-%(Jz7pm{ErH#6d&EX&`5joq`qQiQza@%5J zx$yRyfbV*0&bmk7x=fw=u$MCSk_8}#vR-+WX)Ajnqe^yVxp6cavfK5v%Q83z+f2xH zsLbZQaeKM@N3sKj8VdV>oBK$&w1dnBp6a^Sv3k4DafEn48|utqZ+Q%kIc?vbw+pm)TgJ4NH8@$JfyGA)g-N$fn-g~vQ_dP>V&ByRW<`RBU zJK~xpKL+%q>fs=CQl~X#&JMGMy?^`1h}1LWK(%8vG}a44G#QDOR+25o1P;8#yAhV6a>T zLI28G5LB?-NscQOMr8RCW=xqgY1XuP6K77HJ9+l>`4ebRp)B7$dq?x&L!(2P3O!hB zAkCJ)dT}%VgxM1)=Kfq`Ba`zpQ!p_Wc`paJRQnLw>1S_|RHd>rRE8S8j6Y)2Uatemy91 z?12U4rF{3J?7iE`mp6YNeR_uo)z`!<`=n*sD%;n$e;=tQz>Lhr_d5W@^L+|V(B2x<_fqTVqs!;(UhPrk_LCE{BAPe1^%`VYn)ef$wfu@-z$LcIJ53zEquoqQ6?jVLV10)O*;K`?JwYic+Hg&7?AC3lq&W)uay0<93^oJrWIEuAsUo`)s$vbE8xti&WNJb={Ri?K-;kL0owqme^uhJZM2%5faweW}SVuzJnrC)>UY& zy%yWhe6oE= z#~RYdZB>5%8D^o%nVHTfIlDLJoOQ;jEZ-l|nnTO%tOXr!^g%~S3c+5R?F6|Fsg^+W(I*z4FKip{yxZe3 z?pUJM8}Pu>`!7YM8S+^0#1%(uITM$wnIeg|JZ}BU%`|d%n-TUwrPwjQ_%?~`a)X!fZ{gc^$e|z`gpC51R>A$~d z`|;oGpa1^-{~v$>6yN{}SU>|R415PvAi~PGKnDg)eGio21Swd-&)m*}864NmG}u9L zCD4N*v=;Y7SVA>%3{dEjp9xu*C=UK9g)Nk!rBFCS8yX9QH`L*t8n{Cr<|%?d6ygwx zSVSWp5s67u;u4wIL?@<6e@~Pmexf)=*OI{L_nbaiJa=1xPX68+w6s1ov=%RoC03rDV1quKa04x*$002(|H3I+$ z{{Slp97wRB!Gj1BDqP60p~Hs|BTAe|v7$tA1pZ0PICJXU$@3(i3jcnV94JmC0RlfW4(-Xbsne%Wqe`7hwW`&tShKbi zS(0Q0kVr#*C8<=a*|TWVs$I*rt=qS7TMFev;Hx-;1VoDS7`LzAzkmY^9!$6}RIayf z@h!*)C>pQ>J{2q;7@^}Q_G%BySDAy zv>U^2T>;}Q*tmlWA5Ofu@vBBl#&!)9DDUIYqf4Joy}Ij+&WVC0sr0wyJ3IOJ@Z-y${}gEj_3R6JTu#5g|Nj66m{3vYJ=al30w$>7f((YY-%0+o_zM{=bwNED(Iku7Ha6Bh$gD&qKr1` z=%bKED(R$@R%+>`m}aW!rkr-_>8GHED(a}DmTKy$sHUpws;su^>Z`EE|0?UOwAO0t zt+?i@>#n@^>g%t-1}p5a#1?DpvB)N??6S-@>+G}8Ml0>K09`<$ztmQ1?X}outL?Vj 
zcI)l8;D#&ixa10_SGnlsX;gaorRy%7z6k`;r^NQwShcI_H_lzP~98`8d*%Q?B{uoOkZ|=b(o!`snn%^UOP>r*2u) zU90YTWTvD3`s{`QPW$a>*^c|}y!Y<=@4yEy{P6Y~Sv2v;Glk>j$v3~#Vq3TF{PfgU zZ~gV;9gqF?FEP*k_bWvY{`lmVAC=_hr~l)Z>bK9f`|vB&3Fh(FPY@Yo*bo2mvg@xt z`u+=`0Mj?W0mcu1285sX66ihvGLU!WC`aVj_dwT`Ll79WfCL-jzzU}Bf*2G;1T7#4 z4_Xg|7~Eh5Kv)YCuBu)BiC_p-IFJjLLxT(HpbHtbt^kpch6Slb3;%0TLWSsXhlxU% zzJBwa2rlG?MMQ`Y1tP*6QV@xTG7oxmBb{;(k%Tx*2nc~lkSVS(idgg%zR;t^;&@RI zB&=cuNT?7SqA`fI@Q55^61z0!iDgL%i*P_l!dj5=hhi+@9t)zuj@(co3zUfI@J9ye z`LRwRL0Db{hMOB2QXv|=V;Ng`!GgfC0z`DqKLpW2gs@H`ouDNCPIt*Wh2&o)!-^Ao zazipQa*n=4Wh+@(0XT|qD97|j7!4vm$8l{Tu`J*~o?*#a!pUJQi_Erm!HpoZ||`c@S>? zkevo0rXGWNkVq~hYWRC5J>&GRY>wnHM|{Z`%PB;1*2Io)G-fEf$&p3Yu!J2kDQs}a zky5%Odi|SdoEqaWkT6l7G}-7trbtYj$PkRIBqAK?Sdw!5gQ3YhDmnj=%9OTpAr93_ zEMqED3_0dCjp>f=QgTi*nz5%cd1E(O$x5KMa2FBC4oW?G!2_wZq!Mf?K^KC+32t>I zQ>|)p>>lOPSEIqG18VF76^$4F3qEtwmj}1-ipZxUI5<$|R$a zp2m@`J>#yT9g|kyw-SSrwVQ$sViD!aP?V6+JaJ0j9gwy1%$+s?HVK>id(CBhghP_dP&O@3twH%BN!FbWa6xBcfTIaExTXfOg8@B?a%T4G^#6A&$B2veuK zN+1T;Ip~#NG_Ba)3!_PIrfmo};rPhPu)Flatp-yWfhs!?t~hS+ zjCTW8g+#QP5)Sf8LhEAy!IvdBq+?*aS=eG`F#o@Nc_eR}Y~5m7pe{#+qBjAt*eb6u zp}vGLmxWtn94|A>|Dfx4Pm*hK0}77Z#9zAC-R>dc z_|K?h>9C&!OqJL$0)qIjPzfqQqY458yZ?BAAeMsQA8DM{XZ+!I9sa6GTc8tOVDlN7 zD$b4&aVYm4%Bz91-_vUFv7a`i$t74sL7W;8E)!@D*@@*42UvtMlrgz!j;Jac^XC{; zc&9b1%7Q-$tb$I72&=pcp8c8LC7CvPBbQ{Q5;l%*4zbo7&UHkYlg;_XpwCMP+)D$! zu=KX}&`$zFQ)6H;jU?}#Q@(OQ->Ai!4xtDyUB!jFNX&g-)K>$GbyS+@!z^C%K!J!0)~DTVF4l`2oDf&0h4k4;A1I?Y3Jj!ic9*Y-M*woYh;(F<|#KWY9%}d{<)s#(j`MRxE}A z>;Z_n=p7}cK}?7XyOm~!1A1kbSG2e&B>`tcm}#cBi$HZs)YeOeM~!F12WWMDHS~WR zacbi5ScH^RsyJ52r&QW_MbyM09bt{vNF}Dz2|x%Fk{F2@*Gl!*5o|VA;v{Uo_h6jX zcAf}|c1D4Kzy~2$NGix&oF#PhHbZOk5Orl?`$#40hZCCBga~3r1xasg)=#iui+m>t zyO4sBS3}|ye4l2Hfd9yAEi!%?<`DOYks(PX^Rqy~$U(z~MkiTV?6g5;6-N)&TI6;? z=4b)vI1q$qRG%giyOmyN^JqQE5Cj4U3ly zDmab$CxG0d zBy3({ZzxA?MgJvMRfb{(sB#@Snvu9w=6MT#Fr0VfoX_bY5hYPLMV-=Bbf1@vg0+GJ z^;`s1k7@{>fbgEFwvsUEMD6GYe!zm{c#9J80Ji`~s+N26xgsC_9`lL4hlnWNi5ptjgqoPR{FG_|LQ4Sbz4(Zkkw{W9C2`yeUHbvuE2^DJw(P{c=L`PY2EVztb zQko3`r4Qhp1mO?<&=3948wAj#@+lFuKmdDS0s629=kQh!3M@$GHIG9{<<^7%MP$H~ zYaj|tDE|eRUuhBq_*nVX5B%V!`f#a+h!J%9rxoE}aD)-uRHuLnM&nd9(L_rSI#Ad~ zorTw^A3AJCcBW_w5&Y+!{GbnR`VHUE3VT2o1mK;VN)Zbx2){v6!3r#6WJXqMGxr!f zS+kf$ghFw3a8z`Wh;x*+xt_BM5l~8%2eSvgN&pyu0S{1R^5~=$VW5p6c*oi+8|8Uj zmNZk9I0-3KNmXqC=b$kf8ycCOXjWx*Xj|qwR;)Cw=t>c}$!emSD_uoclx3=9@Q+}m zak1fb*7~aD#gzwp5UG`r1xsiPONaN`uDSA?1QD;HIEN>rey>?fH8`yu`gTQWB_x(p zGyfD!Bto*Y`KZqFNEFCX2vLGI6HTbok)>2hf^$qX#&9{Svpm+Xx45k>!L!UVMh^mU zVpMM^V>+~yo|bvB5+-My>aRAzUQH{tQyVtVVzt@vn?hSgTD!89l&ZHSPPk_p@ekPW2YJ zUcof1L9LV>4aR2Ll z&s!C?h+oJX7sZDw*S2u5d!=#(UMl-CTSH0R%dt!dv-p{tI64)wJ4J~57iVZI4(77$ zn_h+Ld&2ZJ@-&porFXy;bJ5k5`}-8K3$zJb7({z3dUSW8$(09DoSsCyJ(HmxY?VwX zRKC=ENEj8iI9NCszi)AvvVtJM=3(n|QpYsBFkCgEVMiL*rSx^BIAN%xq-1Han!9p( zh6Helw?1&0Ojb9&KBIo~#IbP%v-v5usXLuSd`99)U1%Yl34z3`Vt;f5if`MJ77z>m zpjKV{HPLCnG?|J$j0D|^E0<0J1HiDP6m;CzyGO`Ei5<3 z|MTb!S5Du8Pf)OS+{)%Lk$8+9< z4(M=x)mzBZ32>wb#a5ZE=NlDUBTsW%0oo(VlG3m1TW+Kz2!Vjgd`vj-6m+-sIk(A zvM_NugL*VvGe4~pos={*#9YiJvSpmr58kkqpA$)0vu2CDH-me(J^#^di-(!5m&Phl zG?Nt0v?S7)^UDHVzx%nxkJ2Fw#vu*p&686!v0+E-Y9zQfZ1l_!Y}$fll6F9$(% zBUun!OU4>Yx2__Hl<0HAq|Tn>PNp<++DM&ncW}I0(Jr%@66VQT%@P*qk4%fXv0~QU zSY#IsI^t6%Yer=EOr$?IdsSUW8V9)&hIXFZ)u-CBLTsddJuCcS#9^m8(UUkRG)M3p z&-rC|OtIKNVWF;<)RY}j1Q(c&ip^!MUTaCJu5&pSCeOM1xjH!4J27vneaz0G(Gf9A z>m#dof;7z&*~oF5_Q$SQ5rM3&kN=6c$but()7Ul>Rr+Wo zNNccRHeh2#-Y#;{`kZan#?=@r+rxsl_T4sH8nFdI!lnycyNy{gr?B6&OggRAf85vk zO)O0-I`HJt{nOJG_NCD+L*Q$GGsdR8WZ-d&;HP8~3!W^Q-4fkBG3p1>jssC#D$%7k za9ygD?AjEb%X9DDN(KeuAui%ZT@Yey;tO0&r;2C*m1dm>PNO7J>bT=Z1l%5#dDFG8 
zO8zcljV?bpOA-D|Kj~0wNu3M!WNJNr77z%y074~=mQ0=y#uSPT=ia^1GS~9lD8o7= zEXkeTbz2OIKR4t#q>deyP6+lhE+&_V-p@IS86-JVBz>MEw<49!X#TWmd@Bdt41qXo3SMU1qRV-#YPatf@x$#vR za&syWq&5-hODmx<@cp8XM?VlI@9d33!?|flo#BgXTz=LAE=sERuCWJ%Ai5Cse9rnHi#RH6+G729qBiokw=QMjQo;=e5j zia-WlfO3JL53le5cW@VFA^R!+*O8y@C;!vamESr@Gv2ffKv>u50uJ%O8r#TjlW>;X;ND9X^B@QQ}036)j%G zm{H@#hR>dLBGYMLg?G=$gnahzA3>D~RlZ!wu$-U@g348Z*|I_{3q36evY9hxPL4%c z!o*3`D94`VED#iA;Xwrgfu!>Bqx0drlVk{p;;)y%zFpKIRpq8T#zTUR`r@;(sDTe?IB6uJI!_YG*w3#f3 z05!ny@~=e~3zV(4XLNJ1MjLOO>>a*N5@^7^jsxtaAT5Ll!kVhf5V{IZw5cY{c5=)i z+i*N9D4wbdYL^IpLXt#+#KC|7L3a7+0nuL4(JLy~>pdRat2vMLAeeR`ly0maSGk2j$Gnnq9 zky2C7n)I<$;l%G$S6{{P&X6W8=`NApBMQNbo?Ox(8KVZHY7(GZR4=;~h2=T;d|vkX@YS0sk-`AiQ)RgQTy7fVeR zsZ*edgBxTbcao5H1y3r4Rba#jRjRgVN;)}=8IOl4fUX34u1peld}Y2k2Er;4@B<&h zQvq=@Mnc(x3TDXggiNhdaYTepy)J330)eC;@~o;+5=oc#>7^643~XX|fPI0BO$&gf}XH0nzMYhg-Vg+=gm_bK>i$RO3=%xayJ#f~7!>RBTZOOW1!< z5u~xjtz4$+5XfoOC>k|TWI2hUe~A@EM$7Cs2mnz*1mYWeC}o72C%WfY6q_eiT=aOh zT8uSLC@jh?MSdBZG~Ft^=S9m$Ou|Kod~auAsis~RBh7?pNI55BD*+-9h;V3Tv*kF4 zKA@YhVo8Wo*jv&pPs=pSEC@m!Z7-ZE&`2SH5xozwu&Rbx0X#kH!XGwDNtij_2%oMg zqvbABj)caBRHh}!>&XKE@f@mDfDbA;#~wrz6`>YVB^vESIEavzknY>_?X=E*rL*2>(tsS_JzhE1ZSkfN7H3t(U>Niofh zRe}}h-PFe_X3DiuHM9&DJR;|60r@*l$uF?_3E+D zsbNvFnJB>_1$zaiP7?tE9nUUzF@VrkfincQoSeuDUTBJ4QH!V~|72_!ag0d-yJaGu zMMzP5bg(0GA1iv4dpdI&p*9ybzhnehz-k#_Sgbtj5`+(Kj$8ZB&kD}azHX&pWuznONdQU zlG9tgyFN62@?GD!y$sq3?MWyZW$}gFOx1~UdDG{+GJ=E7U*4M@PSz_D1xYWIM}L_T zGsG#ekiDwAos5tQJKL&nd6^#OE!l6@oir{;NI5U+*han}WaDenI_Gim@C z8VpRNM?<}uI}o@C(pYBp<&p}?N3_&oq8cTB*NRirEso3=)r1-zjTTmYTp4}p3Zv98iTsirs@k>4^S z@@w7QzTWaQ;G$*IyYa@d*2>a3;V_CY?2#MZH4$Fv& z=qWdJ;fa3Wha@Ycu+TM<0z9gknPfsgkmw4C7zi|RzrvfOuG+Zz;x6y7v~Zg#u$!#K z^FKkUIqwN5ovSJOQ?(6&8Lg zllW^1D)PG$1hFTRxr4yIPLiwVObn?V`7;EJ+mYOWz1 z5}YB7>LN6kd(5t)+3Lh`WpVO9;XU1pf?}u|-*rc*GH1QxjG5IgU&` zsdEjgi?9!>vH574A+gr*tEp6)D%v%Cj7i+W zixA>QL(D>fLyUew5@d@Eq1*{<0!0|wj!yKFB18xPNr`xoi3ju$Tlj_~tAJQ+%Ny~h z_E7}W5{q7nHj9u&Q=uZfunRH}%dL2-3EPu?%Ez;$Di6Fo#M(OMXgRSmr80awJW?Js ztCyW*MvqE4Y(yl!Ko6DciPPAj<&w(8bdfaiivLkT31gVd&-;oWp|H%P8WHOYX7Y;o z3$d#~iGKvmeH_UJi_KLmNNkF`YAL`Nq&86U&EVq8#ThYUa|p{IfTzeTbm-0El#k-b z4Wtk;l*ml^Jh^5WPus!572*q%#2xqm!3k@Z>U=bl;W+q=sh1-sb$Tg9nU>M|Np&kv zg*Z=_O3$B&fCz{MeF%rl;JZ_~AKUD}C9Fp&8Bjo(6!`QQ*Mk?Yu+6d<6?jQWuh2CE zJZlRq+TKX)MaH09G_1rBMMSbN!$ z@Yt4CE!gozJ5@ z)KZzGqFp3L;?kmUG7bX_xqaFX``V&F)^YS2f4N$!sD)TKvRV)mPU+hoSthDWS50!B zJoOKc+(nioif^^I6{1f6(_5UG$CYf0l8Dv5-CObtQ;w)V800y@J-+qGj8^3XaeCZ| zD8$*+*lO{?y5$Y|XYw8!X-CEsO0S8_aP~3nG(R z5QlE@gCWb#xME_c+RDn6y^QQk8$6NFc*I^h|aa& z4W0;np+@N$GS$6aXay6{n%#=1pT2;}#fg*Jm57;m0y2=L8QhI2Jr2n=3;79@!djs_ z0UXkWh~NaNT)fh&b6O5Y;up?4gnX};ctBj}2Yuj&hq{Hcnb7p)QG1(C1P0sOU|)%N zuygW)DD4f(#ogCfRR6VGFT6#G<4vhwyJ9PI&Ld9ZI(`Tl3^O%cAPYc;yOH7wxCK>` znJZ=p2w7r#yE5JFic(oH06Gap(BA7<;}d?17?qMep;XO{t>yS7US%XZ24#&{o?@wt zL7_E%=!fqFH{UY5O|{3;sT~|vWqxpmOW-pg#=zcX;H9uQG@%u~P>rfeqP9f}7Y^lP zeuzxNKU)+XL-C~FO`XMSWu3J*5`|^mp=3C<0DjPiK#0E64U4eE<&EfN+;~pS4M7g{ zK`d*s9|UJ)PGWtrD9}j`n#+}PJ2A^~+0-$)K+!$jK-z`0oI%;a-_W&j4p7o$v;yfR zIzFbYv>%e-V*hxi=oV8Fo$0(En@zxM2ng~Av&xh@J-HKgtvm@%i^$81&ct`t2t=yf z8&Qg0a$~Ao;~EBL`vFGXt>~RD!0m#zWF6$k5saPri9q0of1nnhfxTe1kk%*;T{|q4 zzLe7ovt3>aaa|3T)|ilMzdu|lrHndWB2-=lU7j{;8YB$Q>5ic0XeprtW2gXJ2!vR` z1q1UF*qh{xNy`PM=7PcuZVrSU)ar_;P0Xc|+&KtyS>X!7)u-_uO*ZVamh6gX5V+x& zf=;=J00~8?0IMivPH{w)01GjCBo-+q70%{75(EkyG;Yn2MJ8k%nbE^)$%&2+tBwel zWQkvdl>f>0ZH(ZTTIALW^$72v7VluxDTI^uOB19emDY%8Vyw_{ObWrCjK=jPS>}&P zrEM7zh+*9ikHBq+ST7pU;T}HV-&XGgxf1c*Rcti7>d>=5b)h>wU_uohm3)`9?KOgO z@4;pW;2bGkzFo&&+N5lx#r_SE0~rv+4-u=P+MDV1rtt5aEQhJtyecFzXy$+MLmv4n 
zl#;p`X_k^4L2RV%)aD38>ZKtTX*qGdFq)AXZ4nHX)JF}$(SRpU*6j-Sar#Rm0j>zb zmbF1>kvdMzrBaX7 z>Hi-`^VrU9c-g=Fs1+p7VBPR#UY+rf3l1oE-zc{ewypEuWfNH3*DO~K7U6O-n&2{q ztpfH{m=M>*_E=zs?lhNl2^u++OQXvy(3VOH)D~y5?#zu2# zeyWuTf9yY^?T||Ed}vh_x8sS?0x6!zg?KrYNljsy3OaF>o z7vwx+XhAhVs2js*_CaqldE2nvJ@Q3OINr22S?A@PU~!IhXoP2Zvalv}=+JQRzLu18 zBnNP`cUa?4|t|c z=RvLdzIO{E!iBq$o@w1U(dKXd@YX4pWU)60erN>`v3t%^n(}6q7%}|x8;+OYn{xmM z0_ca($a}Ux36a=Lzc>A**e$a$tGafpa`WomNPNZr4NpdhEzyTqAb@W`2mf4p`fq$H zMmzHX=5b&I3<4m79ROs$QN3U$BTD^;*FXK}7ePycwmm)S)|a)by7K6Z6WO2rtcu-Q z@P}96gFPq~Z`q|3+e|nmAcI8k7SKQWk8t*{$cx~Y{{9Du3IYYbdpEFx zg@1wq1qx>Y;lGIgDnvYJ@fn$n1|?eT=qMp#MUEp^vTW({WyOR4#9>JI z;1-_;ASJ%TSfOC2XD^3}H2G|z$qHl)%8bZ|Vg&+xZV?R`@tH}D7|AF_S@kQ}uwuuO zEo=5H+O%rdvTf^DY{7wo1l2+ps9!A$2|32}h%so>w_i(YlsHi^;{TyeBM73n3&YKi zJd<7(nmA?G3R#18Vk*!u&VK}MjRqU{bHsNU$zVmfdgY_xe|Ey0hLus$o)vxM(k)HXkVwOq54*QTLEUGn5uqM zQg600dL?|F!j_gq?)AEsL};2hkaq^H7Obz;TKm<30u5=woOzkr)UDTwD^ikZE&F13 z8hQC;li{j`)obAGIWBWqIVV$9Jl%`zq=g}NT$;*d`P^po8hr3a#kpERae30k?W-vw zJh4Te0;*PXWtMB?vhNO>X27r-9G|^_l_im*tcnF+n*V4b#$aEQuKQ2G8mUY%&Z!pH z5nXf5S!$EQ2Awm`9j{~~a|U6=#a|^}Ro-DnqxF|idk$-D#$56G9am!+re=8oh80&( zkK%e#)Eu$ao7OiI{dS!@cjPU@+zx5ni4{o<_rX$wL>RR{Elq$DtUbt+Qb(bd7)DiF z?QW`BpJZmFU3;DEQoOB9kgRQ0M3`kU`2=j@e48Gs+!lG;SlvUWF09C>%YIOIVk32u z62kl?OyOUi*_+>IHQp}Hf-Y6)&=r(Z%fcgl2=4Hmw^&hDkdyT}+7n63nq|(;{(YNt zc_gbk%BHaDk?4WGsqY@c(8toqyydZ~cBn9LBnkz$+tSYZ6M_ z))M%b$6}B=%zJ2-IGK5meHO%#_$(5fZ_y1;+{#v=_IAPV{mvvxVA?+>Q2{0F;1(j| zUs>dJHD4JIMv#jWf`(*1XxU33(reOutaqve3I=9r!yHO3*P_-0mQ)v5z6K-Fsu`k zytBrG2m%OqQ%E_0lD+x-?>q+l$|qma6aPZpt}|-`Ri3n+c z2x*w!&@-=IQtVS0Q33&eK?Y9fqaUQ%VFDL!oI2tb)wE=Vb~B#V7j4R23l-T|CGwV=9M}_;i!|I%1=(5F{gfz$8ET zF&qsU^fH{{rjG>mtT6>=BE?aNG13&sJ8h3ODM^w#hcY~vU@1wSi5Gl&(iH_Bw5Dds z7BG#O!74s+PCSZ}Z+5DZirQ311`JS#61WwEcylV+F_9UGniv@hu}y-4T2%p9K6F$d z5asCTC{ffs88WDTBPG^5D;1+8dH-r%;Bk#nXj;^{g5`As(Wg-LnG$aSM=yH8n=)aS zRl3>+Y4b4~MF5n`w(FU{0<1J{Y`)Y4Nu{hBC9@7doCu z52Ml zGrDy-nP2{MS^Q+)ZujIOb)SFKHW3B|Q(zE6&Soge=Rn-cyW~Y;oIugFB?AYmT&jb%h`-k)@|v{WZW&) zRm6sk-KCUw#^GuuPpuFab@z3xU%TWv))(xmDPPgxvzE1!<|4jZOMK^9ry)IsLJ6Vo z$6lBX4@V+isrih@=RVwMnVJN+_{^+Btv&nM8U1qc1vF;4R*;Huo^zp@9q=JhEW4dU zoF`P^2_&|Ae*ZmW@fiNSw3u({vuEYFy(O1xEO)%FU#|7}7M|vTMZ9bi4_~mO-YkIg zt$wt4l{gnqOi>~IGT8;=;DyxlgbdERVB9!8AzEljXzfN)K+AYQ zUGZJcSRvI~(P9B5P`hQKF0LSNeOqnen%KocC&r)-)tJQT;#-IvE_g#MB*b0_;YuWw zC>DnU;(|B@R&*TLhoE9hcoVcRh^^4w$;8CfoyoDJMrMphN+Dw=cHzzGQy2zXKxxnb z{{L3Pu_Idy$UAt!6Byhx?i`Ilhh7ASHw@$D^&ynFT`RqePp}hI_{Hg|Rl!8eJ0Tf_ zouC%tW8nSLZe5X39Yz!xnw4}U@ywn^YGN%I0~vUM1t8rM4A@BMo|7z(Z-fIs%p5mX zg%h?>PPRlV5`?>Sh+k;QrCi;SES+XB85Eu*_wgdcJdp=&(4h(1&mah+KqFJ`$Xpo1 zJ6M29dI3xlkh+LT&mc&*U}ajUlH~+N1inN=YK9Q$(fcsjp)}1L&5}oeO;)m_J3i%l zHAd?xQ?~pbd^z1-rq?e$!80hr6Xc2=9iU0Ak9rXarM-;342%)U1}rWmM4|@%DgT|W zpa}7GSIW57VKyR10!^#YMW9uhw;3k7on~1G9B%-@wD934F%&vkkyW;3OZ=gRxe;x= zqy zC|-@L_~y|#3Q(#iQPc;@jR{j-&S4mkn0Sa@J||PUBVaNbM_>_7iY7Md=UDKDiW$c& zdCGI;Pq1m{*z^#B;YQaur?NF&xNX;1`3|a)7J+8v{@q+)KunhP%B*#0q?yN8`Q5bC zUGSh8KxX98lnka7RzpVQ5P3l`(O_~Yh<#?sg_(?rW@U7~7a}Q-pjBoV2LCBp;1L7C zi+6xrt_>wsVV;YcMgb+Cdlsk?7K?E_#6y&pYNcF};hT~kCiDqO&h1QJ&50vrQJIQG zLhYN|B~f`E1l5$}R|H+L8INWN=YLG7SyTi=7)PtI)UvJCo1)%lo(CfRk_^rlRU#bq zA!M1tpH?tddv?re*x)w)TFYb!i&E-X1W0_Y43Bt&4i(6paY)jUX`%vNpM}Xj`P5%= z{uXY09eVY}tUg3J%pE z%{?nzcm`6alBogfSd@bw5C8^Lrbqxwrm$#!hz*P?9bK>kLQq75RsYJzaj3S+rowIA zirJ&Nib}i{W=+M#l}--0g2gV3h96K={uBfuqUB|fDy0&iQm~{m^dq=2D_y|UvB z#?ihunuBH|uMuF#9?W0R5403)RcHu{Xp$c&hx;f<)&Q8Oo&|WFsI&pW6F38$$r+e3 znT>8NKISWjlm}Nh6JBQB!Hi*og*XsE1Rw|>e8V1KKyPjn1Z4`yWNBHvWgoJP zYAH@cSimrN!#KsOS^;9v3N2sGE3t@++~Hn&UFg$dO*|Egcl7I5L@fabLO+me1b7|G 
zENIC%>}{0lutpf05C;(OY$=A5g9V7Ix~=7o3kcF}Rag-XBL8lkq)fi;t#97KO<({4 zJb(oN!UJ4hc|eFcdF@${2tiyAv+~jRj2jh2Zl)yQ?%tZ-ETQW?fxo=p4KeHkwo)JP|9f`a0p{8kRmZn zk2hsbyFRcD<43fzlYEdYn1mZxK_Bz_@6Wjm2h*=u(Eo%W{B1dq#NP5X)D0MZow#I0uMv^whQuwHt|0iq zG4%>oj9%`gTncxOASQ?R>I2$(gAcA(&&g;QC-ROdCjbe~&8UQ4p>eiR5&oF4lDtNZ zs;o<7hXpW3Z~jWf8L1FQqvd>FfDG`MaFrV(gxx9~*8r&^$8vr^9s%CTi9|BYO@uzd zW48JV#eqy_ro}FZ&kSLTcUTeM3RhT&?My`>e0*<0B-@q5hiM|&7t3-O@2(LeMgyDJ z$FeCYkr*$#siwSzyC9LxCSRsA|>c(#AImWwp80<^8LheBr(3*i4LRFe1c`+rG++aM)lB!tl677e^I6l$ zjLGZ`I@%Ke-5yC}^R4Msn_fn_D3)>zWJNR*8!s3liR2O4iR6TqP>`*#xhQb3sT#%t zCBhnDy~R&n^RHaAysb-`G_GsB$M&W~ehMf9PjFM8?{1loNpDdGV=u^bAMsq9ZKWqC z;nXm7b*5b=SL~5kKbno8DW0-~_r94mGXI!avqi9A^c;21HFJktKXMeO1m%Vnu&^A; zxbZvpb&&qGM}*k040g(a97)7SOl%OSEl#g6_HAW#H&NUbo*M>kkf|KxOVq&?TL3x? zh(aiIq$TB9sPG~h=U92P3TJFs+a;Zq)N8{wfs$QE$QBP%>SqQs_Fi|q=82VL6QkkO z|D_vbze*AbG)O!pIi+!$)j=(w>IS7BH@>Cq)kj8#9-`8jTTLjTbli8B&8Ug@#>(~_ zRbuviVpv2l^@8D->a{iGV{Th-;9o{-Si_ z2ys(4AIEV{2?r;R?~tIS2~+?n)B!)BH98RLPcKNYAmnIDF=tnTo(c$0+Mhyqxz~NSOt2VkrjZP@FmV zzA$Z#+bKsZQ{=f-Afk_h3)H=aLRlR~f3&mqIc(WBQT#bQmQUCm(a2OPowbz%I=nDL_ycrflp4%}53-Jc`}2LCl9%lZf*ZQSMuk)knnQUo#+i>TU&=KO1dV1+Sc8$mHU z1!DUELHDGhb6I?9pUG3vJ)YSx$`epRL5A;hzb{ctzy>e`+D?&G%2L-(f9SFWngZOQjdOZ9{1!)^#3SDp+w0AA86!i(@Zq~&Oi_^I_m3y?ho638vrDnQGLw%p z=&Uj-siG2kh#yl%HygB^F#xS}ebWI2(>AuJHQjDKeV->p0WG zit(|E4F7u3Aqp#E3PPS(G%hcOw$sqaBauu}$t9WWZZ-WZP!A)1yy1;18S^X4AQmaq z(#Df;>QBZSGb*u`a`sCwOU~@WXgh|Cdd8}q!YZwTo~{&I9!%kDvFm2M*Q&CM-)m7mFbEEVW zg~&gDa7pyW$PNSVur@IQvm#g#BvV$_`UEUj?&PGd6QjDklT%0E$x|m70i+8}k0Kzz zj|ebe;3qY*sUr{(`T6LgMiG+htEDO}$Fi9>4$)3-0g-feRoaA_jsm@~8roywN9|{*L20TDYnr>QUnc3spvg6bkY^Q189f zBEjq=>=}Wa9?LI<+BG=5TniK1?6c91>&b~gR&=D-{t*C7K(oKFJ&)BaSuKgk3az7! z-%+IF_(=NOUHLK=wk_zS5)G@csyZ6g)&7jY7Jc-A0B0g#0mX!bZL%N`efaT5Bq9rn zOT|0Go~mi7y)x=w1bd`$Y0awIlwYl=Eh{@xQ%{?&{9N{3_~D6vHOgT-;_YY0YIy>P zJTLzec&);L+EODlYhg$c_iiP+XOpnph`#XI!Wy&-uQM%L>6N?*gZI7hrUC>iAc7Zp z=n3MCd@}0KqoCUZ1c%t|ES_45?tIcUOOc9RntGOI1YL?u#qQ(pmzBdOwvhk6_I z;0Fz6AnvhEB{%^V8GsWm@{l+&1q)_@mm`6SCk&j)u%m|Tp5-ADtGIs$S3z=j=Sq*Yy|G@vA z#BzcX@^M6w)mlr0h(nQ`ya-PdJCz^^kefa1;T!!JKsNBP50{|nXSu1sFdEszuK0^} zW29wMT(`8Ttx+lYdSe^`#~>d?1yi+bk&7zXM`bRPnZ*N%;7$S{^_=8*=ds!m=dz}a z2&QoVP?%+6`90YEBzQSY%lJYwM29p;o2If#C}Z(}R_Nmn1HcJ-LSlhf=z|@;l4bRX zV@&0M#f&a84G(SCOEpRdh==^j?TBI{hxE%r%WCFDF`7|;{n3ynaSvcblFz+36q{yo zA-#^Gw@iBFn;|izMP4(>l9ZEuKFl50HigJlSx5c_)0!a3CuTOI z)wupMgFLk5gSKW-(kz4|zG7ux4iqs30cLzMJxjCTv>hW73_4p81_sK3fp5g30=H;q z&!QxV+|(qTXF-lrI|ZtOR#k($``1FD5=^ZAaxjtM*=xtzqOWf4thKf6ZQCLykDiHp zeT6GEL-Ne%$u(F^rENEJqDw4UC`cAsUru~ALd9H(B+0TXM4%QVPo{-P++1oR2UuA? 
z5C9Muuq;_r^0Nxi#8sE=Ohk2u9Od?9wpiMfLY*QV2dR-p4ta=Hn>PQQ?waLl48-k$ z5uD(S==Oynaf}TcNllfgq>-K>E@DvXsHuvtIMO4MUcr(lL9#|s56)9EIjr389z`@O zZqiPgHC_UU<1A4+NzlS^6Bnl|BA}Jgi;OuOJbDGIVxfxd4)UD-Zs)nr;Up~3dwJOud$65CD zr-uhDj8IrT!HHx@HXGc5l$yFZ(NyZ#XCM3&wSyRDOmK176+HjfXTOH?ES!98Rx|B0 zbm`eK0_7XFDx>koj3)9Ln`|MlwQ->o(Dtpaa875XANe(cFuiZ#UZ6P2uE^ ztye1OgxH(m7DR)S zR`zA%ZR@yLHz%mU)xUMfxI&ZFe=`ByTD5sHL_bB*M3x=wq>}3}Z$xUM7wOOP;ZuN&hqRE?R%2ofJ_NESP;f&(0l+8PcOD9RN zjh3=yeCQgziAMzO5k6DLYso&dqw%#Vso(d=`o)p7D=w7kNbrqb@3GezN>L(p+U%t_ zeX9hQl4^aKV=58kh$&JF0VersOm59}$%*Y_7v4*rk;-%5!nNk`-tZ{NqM!*aAEh)s zDDoX#eIbrFu?8H{S&0^DC9<`ob>6>G-g-uM@|lTtT*zWP{rS<)mS~nKO_8ySH$P%X z38z&hf3JPu$LSJ$6W^R}ffH?y4JZl=+zzDRDCgiK5Yq4hmYZ z##y*eIyR!~s4R>&>;%cM4EH7lJu28DiNv@JhXgA!Xi(R{t}9mUnsjalm#~k7uq1c{ z==z7Zf<_;7tT*s)Dw8j8Q7Ypl926gCY z5{-iMxX)4QLxT|FpERQuVrB=PAQ!#yB1NtoU9cSUqaxPIRj8@d3`;!9Ar<*T)&?T= zifbi9uGY|k7G0tM1;HDfK^O^aMVzo70kTM@ugs*)84n5s@g}dlF3Jk=+Agsm!A~m$ ziiMm=33lKCo&Y1MlI*q&A~>=XMNv`K%_ch158q-Pz4FA!;G<>@_o@;t-J(DEVi*MM z-x4UuNGk{DlB$?X8DYc<k zNUM{gZt5NqMY0W33iD1faT5&_7sn5FEJ7S^0Z{hIAG`?zgaO7T^Erhl>4;C?F7poA zF4q*MGsELEkBIMX?Jw*PIu(#0e6Xmnhvug6D3;G%WRnr`YXXa=MsRaw_!7$gaw)+E zYYw6}OJbcer6Oo-s0bheV!ZR8hw5|$h$1J2EiV8Uf2PglZ#@Xs6Aa8R-w`M3k(Ivbvb}aEIx<)g0fdDE1PzZnz zPJ$0GKnZB%Ly5FJ=8Zadl6{5_?Z~aw{((Zx@*TOOBqt1TY;Z8d(nP)zEI-jD`H(#Q zaME0JjGp4=P;{KqhY@X4A>tF`>a#xjb3C_WM$#fwGy{}c;Tygo5XNf{0ObKLrAP%8 zJoJv=#4%G;6FYP5ko1j1rPNxU2{a2$EWN^KR?;-t0#k1bMSL*Qk`6Z6v&TYf^rTSe z1QIucXcFO6PDkS~5hO-`qQj`77XD(PD@BM!8z_el2-F(@Vbe10SN+xh#t<4oV+iYFFOHQ=R1qVNXIVA% zdfZAU=JQD_k}#^b5}DZ4PPl zl49clD!^VH<^lgApl>aJ#u(4rgm!S}!cdPXF&s=Yi&af@%R<>AOA#Xh>BVXfV*u03 zVsvHbm{lz{_d7;ZW4RDa;|f#_QCz_@3tMNRst-&@51A0cZGST%=Wv!_)kZrZ5eQ%x z_YA5F&FpT9a^z7cRUrtS&3PTCi9lhH6W(Eg<(w6faBeLNoc2RG| zxr#S{*&+@b2%m0<4K=2t`pT0qHF^(Y5M7Wf&t&lm0tm4(GU2g)9df2}=X>*J_`+9R z0IyfM)qMXUvQ?dfbP(?!|;uiSVf9ll~YRS zTIG0_H)M#@)%>uK1=)EE1Q)y^98h9vWf_V$VjZXUQQbmVC+T=KWB^ZPe8K1;#?GJu zR+InLmuFg-YpqT+R@OHc2RQYtEe66EH0@`DQI!RFix)=q*ancS)Q_Gi2Ji5D_yia9 z!5dhqT4h049R(zEjXDY`7I)(m`VnkoujI_0&UsXi0(3ebu&n$qmIi$ znQs_qErw3mSe!MQSoJuw3~4J7wOQ90G=BzzKQn3tfpQ!PuExo|%3(K3(l5}NCEKDm z`EQD`Y|=dMdee7|r^=wqFFs*-LM-}%5J=ZT`F?FUKCEh^iI~AO)WGaGkHq+ju2SZ6(FEF^B@i4ZvChmf=eS)$T1)UMeS$l|DL;EbWS?72=*iKLqNd^Dp4K=N|2#;<_SG zGls`=se|dCZ@W%u80vIVw^doNqB{O;i7}pOq)I42_#t#Q!>dn?F~wYBvF7pK!8h*UMryccJwuP(RU+gJZ1MjcPl zT`2cwW*WKu!mBM?tJYCkhc3noJT2fuPUMm7T+HT3$0vLGt;yKC0c1{LGmRl+A#^jF zxy>hZda=0eZekl>a^!VJ+^lMRdho81)v`PiyFl*wA}(ViCZit!V|UTPrT$y9XB=S! z{Fv9PEwyLoi$UB(AKbN^K+*x9mWSD#zT?|t*`mP|a%FpV`)uK&uoTJ6# z+%j9fa|O~d!XH4V0^W`>d^Zj8`O7hzBT`YumBg@3&@Xl(x=~k5^~#JSTz$V=R(q_B zdtxhellyA-N|J>%Tg{T8+`P}IP7poO7kTHDqPkT@t%N(+)~3=2#M1wVW~$k4!X_BL ze>uI{TuB^s*b(epT)6POR4JVUwSgK`rw(mVq){+0ZjpDcea^wdPjf}Qy$8q03q9D+ zM$3KYk(&fV<-33}>%<}A?VP*QJ4**+{Mu2)%yc|8xTeg6Fj7b(T%BgF`+{`JcpzH? 
z*ug@A{@PT3eUj7rey1CDsK(xhXXFh=-$9#Vkd zBcR*oxBV)rTM~UfD|iC<_#WIdKA}bR;>$Sp`@C~4t|0~j2bf?gCq?9=T*{?wy&=E& z$Aa|l$^}zyBqoC&J`*uOUp_ql=DFh?EPxV1Jsou%I9nIh?`t%u3!%9?B3euQ_1?mf z-i_* zf}mQ0A~O6Z(L#lC7%fyp7sz9|f#oW0gh*0QMUEIPx_ti$GbTa{wJOxoc=4gkojiN` z{Auysy9z>sk|EQn;KPATE$B39)uUs{{5$GYV5g}% zfmTQw^(dH64q+OE>+|SPnM<`obqhGK;K76o8$OIUV%x-dzuH~;vv1DD3rh+bSy|xa zwiY~IPN|WlLdz$aGDcjkvq}q?C7*r`d*QpkFW-e_OWEjCytut`9f(mX@J@e&TQzRn zsZ^}Q*RlnS_bTmYTVER7D>FK6zW;_EU5h=o`0?b+n@^nkJi?+eUGIe4mnlkxA`_PM zsMK^ullM#K2koDFgIN%WH?gobnScrg_Rd;6eTV;>eijjT)LR3wm6T&$T}6<0$9eZo zcUMg{;%&?g)JceeX>}n+&Jg8QcnYQWpm6VXN1@g$Q-i-z`cj2}Sq9~O z6xEbpdQBcF<$a%Jh0=XAjam*xmvMV(yi;l>$dXOI z3^bGxNf{tTr2dR^1$j0Nb%SX23RA-x{t1z>z!96V&d7DF<8lf;92lZxIK#=rc3sOM zRN75T5Q*G17!{sS-;HLRJGf^CNts+)osTu#4LVt>^sny$A)TW|WWr&w!ehREuo8PfpdGEge zuFy?>^Zag2k|9Xdd=THHuc?qHbdyTY17DbX)C(rHUYSQ^TZ)dE-cU=IdN&aHqGQhC z=}0}D(CVza9xloWsa$va2!Ut!W0x;_egFR79X(Rn1XQIuo=FZOD%?qAB>P5LJ24!SahNAaaW>WZHh{|HFN ztm|JrE25_a!kT{=jUc7^&tj|>$j3nPClt9=f+DFaYjrVhYLgCI+W0v#g3v-}bearB zxsY@H#5T`JVeZ&QM~@-IY~NwjS^S8}T z7k(zQNla$XVbLVYZfuA(LbU`ts669ablA$bpi*_=J)k~w+gA?#Wr6;~>A zp7cyfA^WyKmMjHcH%Z(;@EM;+R`HL5dm=`v8N5XPgC-T3;6QcxsA@{=dmKy1Hc{lX zQCdZY84ag{RB5dq-9`UV%8}*k*waZWnx-nza7d5lI;JvOD$aN2`tP>4wMi_z0pO+{t+)6Bus|p>lHq~)r5#hM zY^-ImhcU&a*ufp$ROuX{?DjM`Qkif}XCX5F#%yr0-U{~uE^md(p%E6bnaN`%BCh1b zaSfdGeAc&y9;Bbp)K8f!r!<^cz#oAy9zs*YIKXj)kw3Pik?DCG9V?_^w|fXj*E7|+ zs7`ZD>?;3L7Ui(ib-8xmEAV`B6s(JM&qg*a^_froqqdfqBY+LC;!agWhB6N#_#~XY zru;ja+}0ns*qWjd0%WEL4khqn4PBu*TnY^}le=kA%m{TYrX*Qw*}=7hxnerbN`*yJ zd11~#m>WjN7o1YIDZvhn>U76O)rL%>oDZ6wn)1!WooR_qUo6^R_m9SV4TP=W?b9MQ zWT(P@bG-azR@0WW(%FH~R~{zAB~um57UGQuKll>dbX95Yh%Ll{mzVYqd{NP8t4eqIy1km9BR0)g&d}pWunkM|P04{KmBy^{C(1rGK zp$Grh^YV_l<$I&LdOEgjm`t=q6sKWxSI&^6{E21Ga*4*(^AJpD6w6-`yT~n6p@!^+ zcaBzzk{t~f4&#ZyWq-Nfo5}Nd`Pb)d(T6@HgxdYIc%jBt8gF@eS8L~u#x;pWEF=Q( z+7>mi1vIaH7z!`_np5)Xc^X1j2i9EhNs+~{pt!A`}gD%_1J>PNUJa6kAC#SA2UVjO=TOOGzUJXD5la+E23xnf~t$N z*11TSg42aCZ^b|#J?YP5sjOE`ZEbI`Ra+uO~h)p5u%O66lHv$Ovd z#=<{wgjP(FM}x9IR-=3qxD$yNFPmW{SacBmP%BNx5JQA)njvfci&kY+H*BvgYqt^z7WbQ4n-V?IP< zF&H%Y#e=|xR3gY@`6MfQm19cx8c8T1#HDZZQ+6tnC@P0OT~>tC(svn$g0{32$}xZz zgHg!ESiT`O^5lJr0zXzmMh?P-p;&$QbP}YYf%ai|m{CM^25%(M8LGih0Tcg&nWsFD zh%Q4{3(ufyN!L_0RcJPWe)94XZRimS1#lszCUxi%DWw*2M-&(Vdv$U?jA%_jAvaam z7f(Te7{^IqbUGckfDBlCVAMLAC1#?Cj%8?ugZ6xdQ*$sV2!_sF(VY3 zNGV_RmA?{`juuAMxRPPi9K^LbKXHxdu@u&#Y{r3-PIFElhi$<(l7u)Nq1Knm7jyCw zm`*8zwcrobhg)N4WTpaK^SD!4l7WsSQ2TaUayVG<1#CP9TuxUL4g@{&a#M_?P~;IG zYeX})A(qj!Dm@N8NZ(EkplC#I=>gnIdX}n#U<# z$q98bCm&5xUi_dBfglkh;SY!jF|VZ>;h+z__FFI3M95QW{z)msn2Wopm@s&Qh?7Y8 zLW358bBiM^=Gm4yVH*t^D_m5OU(`aF<6W#Lq@1&VXo*$sG$FJnVae!WMZr~SDL1C! 
zcK$acu|tk%k%4S_Hc2C<=sS17`hj~jste5iu>CQNQuH+vJa7u)6+gkzr=30BJCAm7y!Ogb1k zp%;?KBEo51zWFSO(nll0qa7wzu5+4P>T{FtFH@JM z)MrnXWPN=jCgn*Jf`y%QDtf>pIBd#-!5FRiVi14of(<2t^m3JhIv#1aK6hC&7gUkE z0*c>+e+Q8kbt$R4<4N5zOaFJ9E0R3j5@rRsWuU4Sm{U8-W{EpPuSij<_PR@B`h_A0 zE4k5DeR`(uVWIrc4?IJ#N9kfKCa_3@TlN%rC`$h+g3}*VLK;tZhvi8m^O1gZ=6US7 zauEqUl1PvYb6_n37b+r%;aaU)+Op*86POwqtmR<^ks-(Nb}4&Uj!LgmW1Q_VQ4IsK zLaR)FX?{{Ee@P2|CYm|Yrw`#EJ#+?n9ypH`(X04jlqX18wq6E`UO5ERmF8p9P_p+VP} zd(_o1bt13(LSQ>&sf@c;j=PQ%nku!XD*D(K(sv{w;W*Ih9-m@ts}g6or5cC!l{Awl z4pg>wgOYA(!1WqxWP`22TNatdmGDYS-ohtJCbPKv zq>U9iX#u_3(~8);cTTgo4na2!1HdBuKQYH+?RJKCn5-J1!j=29LlhDL(<_B~B(GQt zzSzGiHDq~{KS$#cH8Gw`E19kcx$dPUC3qgp=tH~WhbW?17YAYBO1;!XskTu!G9oQM zdqWi>Wp0QPG%}=N2Dn@^I;Me<;gkQRSi_*U)Tru`#%fGG5E_g#+FEbsZtRy#{lE_b z3y-aGe%6zIz=2yz!&9s}ZYJD2NIZ%Ni@FbCq5c4HMI2;BQV`NQt+zDBfV7(kC4mU> z9LjqT5tSn-buPyhX`OhYwCH37;bF(oyzTnMgd3`Q`AKnSOIy($?DO;Kuf~REy|pNJd=WhAicVH|IoD7%r`Pft6tc#N199N zyu_WVnhy#gF?k&vJrsdcw*slQnw5Ic&j4qx>z6QdR*C zhqDW&2+4HdB~#MZ!;m*0&*}dzgekQWqPk{y8Lc)S0F`f({5wdp5)5Tf)!Y$5rfX+g z9_I|uk)+bqgnH!cnq+a#LM@ZyNQofSE%1ZWedKATmO05=OP-}6564OP+?u~}cdbWt zxfza3-9uj3hMf7T7ket8=?$)8)ffTH!fcr6z>tHsJriMvx<)S;B$QHXR$5D56JeAp z5wZV()tI+Hj(2^-L%Z-6M7p#;UkRU(tYirsu2{3zH~Xn5(n`VEiGzKbO=dliv967Z z9Tv#f%Vk+-TLx*p?#2_Z55>H!K(3 z(S03%t&Mo6DUpY5zk%J0&6d(pT+(I`0u0`ISx zP~W3+%;6gvAZEHq6-5py8367(99=vnVR(x4Dn3Ve$BY<4(-tr8KN5q*7SwFVJC?0H zO)D9bcm!$SXvH@TTJU4C)5I(FIO6R}xN-j`hnkZ2qZC>A+*Z?iQnq2Ves?SN<`!vxVAn7s8Re$W1dPZ*ARPWH>c@ne8*@Ne#XDyRVSoD zIP2mS zSMZ^k((??Ikn%i}tQAel61*@F!{ow)Pi?4&IjVu~X<7)Xdh!mde59a1cHM`aX& z5DeHu5sB>-EUs>x4S$;nd_N&xG4$$n#(st^#ph!$AOjs})}`-;5NE3$5J|&besGj1vg5z_S)UcHk$UoMrw9OjsV$$)>H!;fL@h4&>Rp}?Q z9z+k}Vh10PD1X`y0;kqbpaWT?_+QGqL!E@j%(=~Jjtiyn1atEEJUFfsCs z+I1yImp&I7By-T~S+r@@u2tKq>^r1q$&y(Lm*F$FWIClpSk|n1U_c7+5Z_nJox>6{H;gtUEX!BV5IUosHna|+bFP=IvLEa^B6k}GSx(r zY@x?0(96CGD$MXR&O)26g1itK4#dp@<7%Mo5)yB> z3M`n2y^)0bumXPc@kbzv1PR9*lPo&#sU$Iat{jSj3U4L$fFR=`h=9sbC+aZLQMw&z z%+I{@(8Dr0jM}_$Jg}&Ar~)sp8_`TWXYBAG%kFeaz|0cr^De(cq!2L!53~@i#3Ebq z(9-@Q$e@81<1n*6S0c;M={D_%Qo#P%>&326OHsuDd%8H50BG(yI73^*aZ?j=aWcURgo zQ$ag}*RPEj+C+&6P=@T1Iv%CYqKFs=ubV#E4jv zHM^5Nh#V3MH@6gV=^uae@g}REWIZa^j-O(vHw(J;7R(qqiYQ5PfdHcDq_a*-z(KKP zYA(9=jVrOi?pjT;iqo1;VvP3;0~b8TFPn0x zaE@-;YNDBXR4%84_~D13B*Wncmz*VnIcDd83l6uVhh$GZK^79zq9?pCrYL`=^$GGZ z!{b}{rNLfZ@=__dD1~doS(yK45|k(mhUi&vQ_EdW zm9`W{QHm4E%2RMNC;0sAI8IWT7C|= zg-W5zUZ$5JylgLsnmV3c6jmrBb`L`;8O=dV)1mWS28oKKWfY+UKac&yDg=pDv4BHA zis(;)fj|e-Ft|sid1oqx%VTd!r;&!_%tuo(NZBS>LD*sNO2zY?AJNznpEwCv$-9r# z~&Q_H41vKY2{=r(?b7ut@em|IcTMjxH-CDEGeQ=EA`ll zO4KITr@CY&FKu)iJkl?ALcLW*xWv*j!HY{>5@4|`2$k&Ebt7n$pB9@EMuxoanbQ$u z$9gloKW45+fJEal@pM!t=_3{vNZsO;TfF^%M=oUr5nQ+?!NP=6wxU4`eab2tueFjc zgHp<#qN=95nYDWz8k<&eYt{dTcvYY?EXiQ^5EKb`AUI75d&*%)3;ry5;@R#3jWSn| znNY?XRE|(DxRQS8BOE>XXrqRc0+~lIA zu8L5Wl~i3*EGX}!NQYGNpx;T*NPmsMnF8A?)ATH`o%vsrSqz-PT1njw)GV7L3-<=7 zTJ#m9raNTOJ_&4VY>#J+hqWKpaH2ABR{*EA`F73{ct&AS8e5j`)%uPlzq!l` zL#}JzntV1!SJ861*HbiyTul*x10Jxh zczI;U6(pftU%X(+-VE2aI;pmW&TjvSine50+e8zQ+2J#H&9@c!rbI7Rz?jA&tX*wP znj-mx_WM$lxOlW4U39R^Iu1&bMN~Ao8b+x0QMJejW#BoJ(MIcs9wac_OzG!L9x zzI>KF0h*eiF@v%mldYGiDqa6?jCr=&hUcdXt)wg8nYw0uAsPg$(;zap|x-jf7y^o z`$FSW6Ai>WJ=+b*IG$>Py@e>0>bpX+v$;d#!GzG6_wo{OlMcs0LDf??wP>I=Nf{gQ z60Vq)D4DFwxH?dal|KJ0!q;fOzPX6*umG!BJ%{+WYH`1x$S(Y29u!NQDdfDSqQh*V z8wTv2(&!p9qQFM^I5i8HHG8Bp%a8}etqP<>l$)giN<%heC|uN*u^B}*Z-&7dl@@ho6r43QHyf<&7KL>gF(4Eu7M-^!r8h(1g@ zl{Q3`Sh5#H{NtN<6CxS$b9lE!L8rmF>U-~&F00LF?g zw+qFtYcIyaCGY?8xBo%FuTYmfd$n(48l)JQiwvBx;>TVLjId!Cab%@5v&Bm4IL3Ip zy7-rS!O7jD$mCNcr8%xZaSJW{$}!{0qZ~YCd=r+Dl5py=52-AifujIai^FlZ$U{2D 
zDVp~qi0c}OTOfd2xCIQrqkrhcniRleDHA0_LOJ<2pJXFQT*a@vin$WKvm6{%RLba? zt)@hen*2crHHlIXBW0IU+K zF_)%^&Eo$lCCMz!&R7)4Xgkn!yLfRI1~W&7)Vr?i#ZAhiaZC{lS&_1sPI^pF@}#-6 zJT%hTj(H@#&;d>Z@=mBYoTi)1@9Hs?#3P#c%Uf`!T^P)~>@NG`M|U%y`r94N^PPJv zmEl~EUx|%EY9Q08%K+^%u{=+Px|py*GqOC)uJ}TVlusd2jgJ%z04+zwOATr}4F4Q6 z?s-vco6WVnqA$6qDPyDGTu=suir=if=LAC2bCTr55w5s}2LK8mYdKNpavMLGXuu@CQC10O$N4&;+m7S;^!} zDx`2DnEOV}X{kvZNEsq1Jv0uIZ>;y zQZiJ9v5*~t$Ix+$j{{a)yS(Tk2!Fr@_$!)9WkGPFycnsH63xr^Q3p7tra-3is8r!8rwm__?Uwg%5uRK zT004&eA%oCkzs8>JR6Xj05)Nh#aUc4j`J7AD7Jo$FtABdr`9ND)DU z&{nV&F~6dsmAI+eh{2Q)2^*=>nuE?ulM?#FpXlnS9;^sHxCMf^g$Qs?4A_Hj2nU(u zG^!n2#2deFT8cFq#WKa)yUL$s`>xUb!*kc-8PW1%n;&Rj{#x77`@5W&W1Ge#BQ6xy=cWHkS=L8x_HAY_#)IUF~^ z5*&HP$DNZ?o!8{N17-RL&HV}SLW@q;I? zC4kE%fcCY3K$x7~7)&auM)+ykhINh{=|Q}V(lII6tc8sv4vL5Mf`tDaf@tNGxqq6k3~U8uix#%*3MdBF>{Q3bI(R3f~x>93By*2*)8#)WH(F;o&!DS!c{_;`5WR3fV_(#?h#l zTPvPR7s+C5xw}?-L-bK+Ft(7rrD$4$iEkx{a46^>5uAl(E+GHLOQtxSF7X4R&0IQu z>9h#wJW}P5n3-@ao%-b8#LW@X_2t^btcNv*L}F>d>5wk$&1TD+t`W7*Y_Md_6Oi+45MmAvO!#`wZXw6=(_2k^q7?#-wJq14Q-8kzrHlX zi#^=nQM4dL=28SIjAyoHm;G?XvnUk%n7cmpm*R!GOAb;YMVhr%>p@xR(fp!L z8gO#V*X5WJGiW6~H^4>?f{<4}Xy3;?i}*c9uTjF%iHVyrppHbkDDH}*X1LovZH|Gs zrk;!lnG}bPm$>U8u!g%QzENyxHmOivj(8T+ZZ(od?kNAtj^mDN0DA5EbjD`kiE?;= z2pI3K2pJxR?K~V7X8EWOG31w+ufHB{Rz)O5_)ze!>bld+k{zG4jtVi1#molT%@$?W z_3SS;YfgzEI8L^6xSJzlI&rBZo*sS9~k5-b~9#L6Up@TZZp1=nq) znN}9jZr|$e@W>?SD|42<(ot^LoTiN@-6<^p#FhWjh&e?pi}EfMlXL#gF~r^x(+`Jp=NNvCd_6; z*v^w}5U1Za2~pDh^H*md)vDO0Xg1MkE8W1MHs2@g!67BjH5WSYQpa;8Pmp?oIb$s6 zVqao)^-WwkJSl&TzJ?XVRza%Wb$5jwra@|0;Vj|=4@*vVl>YXtkz{KounBw9Q65|P z?CxvlY!L*Jq_FA({|XAkcSq*llsatP5RNJ{Ny@oSi(iRV(<~2Hx|VD)|Fxib-)5(1 zV20Nkf$Tjr&tQe|cS)BxkrhF2k-2H-V)Xx%;xK-*k{93<&N0L)j*Xg)kWp`or^Im7 zc=>KsW|3TM1bO)B>YSe_&QOp3j3!=Mi&y7yq{w$BYW4Om5dV~xB=oB=EsT<*vpV6(M)OnE)0=jN^t zb$f?Rug{G)({522XR^da(JuWug-+F`_0?XBNNw5yG8aV`(+ol}r#nRQQ@wDiVIbFi z_R&BY{oCrPjExrjIFq57Po4%~pRynH@q2kS} zcQ4<*dRu1I36o$ajwwMx6pAq;VVpY=pBx-hXkd#QGa4p37PH^Zo5 z6sH?qtg)cu$9F|CCX*ScZi>+rj;Gv^;JattpTmzYf4=CXs!^+6Eq88h)M7;@Rn<}! zBv#RHUd?4yejO#nl{fm}Qc+#D(O02`7y2dHGv!70OiN@WL&7)$){Vg_2q-a~Zh^ znM~SEWRRLDvDs#zL6zB&iQ;JnqeASNC!M1MrKe&;ov`GmsmCH)>VlrtHc^95hIH0( z`~72@r3d<^AWCJiwVzegzM8DK;|A3pdx$9~l81SsSrA7a=9r_0H4%FvVaC#G=}H>0 ztDc<|G4|d|Cas6-q~XFEFu4mejG?YYG3uTEv2+%tuSK^S?C-x6F%~S1leJ6m!#gXqRt4cVY16 zs8_9bZl^R)I-A#WM{R2WY#(m(xJfD5Iu@Z(x*iAhDF zlPEP>mt3u#KI-GEBa739Z^P|jNl%8h_e3>qhqKQJ-!Aw`*unm=NxZUNeDR6d&QpG| z!B#e+9r=Tfq`2tA&(tnIniO#VjeHWfYek)@YpL;Wy!k%9pFFQYQB#VFy5tXj^aE7bE7Hk$<0Y+B z>RC}khky`Azz@#-eKrjU|XXSr%#**fEqyWjBEcgc3>Ap!{eL zV{i*q)==k^jHTs|cAO-0pp+4TAY>tBvxo87q#3bcNU(0 ztq^tzxdj1~K)g^Y#3E4oP%I|}ONqjCFXA8o1_*#lkSZrfGJGOSP=!D$R1M zul&=XtElD)ieRj8%gZ90@oZ)rLT$?~IihQRF()6@n;ymS01Lz=RFitqNmJSw8Et2s zDS4@R%}Y|+*5!!&;KKtV;0HzG@nKDpP(E4fQ|WRgf>*Sxv#Juc+oBUx_fe*^fMARQ zqu7jY8cEI2q!SeWkA&&M>8d{3)^3*Z7Wa*dBHr>y$`A`_G5HW-*hwM+r?{$o<%2l? 
zBwGOm&olxL00d@p8k~F@h_M!CP1M5C4@z~6r@su4k+YW?&tB?VwWw>7lZ=^{61l}W z!4rr!V%DwRsZBT535`ekfI!5;0-Vz@qrf^7xVJYlFk%DWZJU@u*MRReoOX1Z7aixUqA8Z7o!NWYN1cxfFr@LVfQ|ue zfp8F~b4Q8*bn&9Yk?fg7uw}I!XL<{MDuoZ<*aIJKL0hC^k944gBINw{CNmm zlOPpVI``~u*Pbj?U_sBGPciMl}35wDqBO)*#P?R zb|0G}73B#@Y;0kV2H4>YKPV6#66ILLA`lN6IMM&}n+yJ}(UuwE-_T;6ms4`45+5l| z36NZhXuQ-6V&GwahcGkKbC<}>38W;*l%rw=yW532vllJ1r!!r?{DL>0DU#l0m$In`f5IxsHE1o2)bMTQ6jkcy2wv7B9o{NaPk6r~uFfL3Drr=+XiY-~-{w zyT!w=o^^b!K*S@R?zz-d90l;7xD&-9?#*7hHm%1)3Rx1Ml318KPmK z{lw20B1eS;UJau8kkqq{B3?Y-rjXA=gvzo!T~LGy@F~-MC}AkBo(uxTMkFG7BwlAY z;z#uu2Ao4@A=eqAg_&QfCTL{r3q={-+30w!mMB1v$BXF7_Vsir5g zmW<$CkznF8S%4oX#{+4YQepstScPEq(!;osVS*h0vYBQ9wPsG>S0JFnReGghIEHW* zC#&UFt_+4w?p)m|nob0XJW5|j_Csz~1f@xtYE;V$Hl+%=(IJ+SEndfQMrS3t=eFQi ze$1q?^$c{rXNn;ie(DcF;OFMCR*UpRkqn=bAXh60n?(#WVSXaZJ(1W#>cxdtnTEn4f3Al@{HKh8)tn#@QnY1LM8GSAL^8tVlL4O~V1R9g z1Tzu{RQ3)CB3xNnr-*JSo=sqL-ky-^WstUKI<+Q`0u5vKgjBR8NJ`&is+2qe!q+^- zMYRN6#pjd2(hDgmk_t-Q(FGZODVerfOpxgRKTx4(Iw@E23sFc#WJZhdiGz$br5SQh z88Q+m4Mt}C%PuNrJGl(@q^X*MP@*a-iUleZZlasQQ9It{6uusvK82lrOO^s?IXnOc zC{aSqS&>~9EU{$&)kx3%kaC=8qZ-eTx+*PpDHi<%N1V&y-KBuy1wGnhk#H)s?8XD2 zq(g+t1A~`Ufz)qfHG>VHcfg$tE}E?SyCZP9BN%!*hbb#y6|d_a)o7HOM@yA zAc*R3^%wt4D0`Y^71k2IO=}%hXuRHFqX{QsUK0Cxlev(pPo@i3v}KXV{Ea2~wUt6&s{ac-giJ54MtTC0c@%zHU&-I++m?v9-l$y$O>1c<{E z?Sj>*oA8+(S0SrtJOr`mo&J1d#)4799!uItE6&~^UK!@P>PXF6*oWM#Yw8O#5+lh( zK-f5h`+?-Mm7QGC(yA^gCS7EP`YdshOWLsO)+QhS3FpnYo_p9@jjdR>@?4h9CUqIk zSX|o)246W)?LVR^&atW1g6&XzEsqfD-V$cs_T&`?40Cye(Q0K@=tff%sDXNkN|d4S z`9YR>0e4j2yt*OGW(L2$39u|H;Pyq|j)v%hE*wrMt?C54)!AybW5^yY(h}C<7Nw*W z;}Qwh1Be4Te$x{G!KFr_@7`nop~`8V)Eq~c$eEPx(vWWQ{%opXocmWUyB2QH4ai$NN5O2)doqP1``%3Tx8%2?gt$2WwuDn+D zqG-etmOi;4|6U4$MD8x^%oFe~^^K+a@h!Z)(K=aSM!hZtJ4dV1u)9TvlFmt;WvkuI z*Zs;CL{wH=5@3Ofh3QlPAl#P;?~E6GSgrtQxfI6JMR3qy?hQN4vtqHM@`+*Q)+Mp- z1Gf+m>&rk%E}pXLRx~dEQ7&Fr#S@rIEg z5AqWc=zSHdA}2=u&66Ks03I@xU=~F>*0Srk)B5VHCp!m-^71t1)|gT;1G}DxaPY_S zj%*56mIev01x6VXzyt8F?q-%O=Y_P|Fev7+E&p;i->_|;z{t!TF+Xs$FUnnm30-Xh*LuVz$z}<$F zAXEY&6Gh9%6(cb4dUX8ob54g}S9vLJLGUHviVdrz%S3KU)2Ei~<4da?$RKR$RkTT$ zPaf-pP4o0sGjGiSG;aBc$MOY8)Itdc-Et511%d}RMEsc^QjkdZrUgaiNkAFthuTP8`*s$K z_I5jJ`DL31suG%nvQ89NgWza*?}i23LG@6|15`leDy(}yg8tbF~)IxjfLo+->89qR6@JMF2 zI48~9i5EGmJzbm{V~&W>4Pj%4e+7Fd_DMX5H=x5Y204+#Ns@zxReSkecd>;*34bG7 zAtJf|K>dS11ey|g#1X~93m)TE%Lk2!Y?$Nu``Rm+zXYndM`)jTQvkvPL5LrQjf}pw zp$lJA<1uihk4EQtq)+Rf^SRR%gfYO2#QsfFP|rS2Rk-v6;H|fuZ-pN)I$d{%nM?Yr zPb{SmZYPYFAY3?&FKJS&G!mZ7M8qFj>(YU9qjaw|`e-++j~1j8yZF6XrFzVn0YTwt zdYRp6azHnU?7FB=1Uev5Zj>Qff$*`fOayzo1sbh&3RI3<1##zhQ4`ZxOnbF6MFaqX zkVgPoL29@U3dkC}y*nPi2Dh1J7(e)@AUwgnwJOOz(y!NoyL&_}#Jg=U{HNTQs z#Oo*h66q8@$cSfGU!SOM1c^hS6>)r-$u;#=L^ORMIekw&#XG8-K&9wyMJ(V#Cp>`# zD1v1CCxsXS)n8|1yhGfFLqphCuD2FC7za}A{rEmd;0u1| z!y)3~)jU&nn+j#bB*^(c+yb$!YD546u(d|KOSGVrz31zG*urN2=!-tp z^g}F24{@-=GgV;yZp7=KOkf4m%Pi%%29l(8R8%mGA&3Vtlq`u9&9 zhJptfG88EAR)ks;D?}_;&O*6|8##9L_z`4Ckt0c#G$fX8xs~RdP`xsh6g+H zktnE^pa_5})XM17q*1tY>DIM-7jIs@d-?YD`!}ahszk|>Oj}Nng;@Uo(COpnC2A+4 z9j}IbCUh&uTMrig<3muaT#f`27bLJNG2o&HfvR@>8a7tRv1!+~eH(XfkwP)AE*#RW zAdPMV=^JK&jO5<}D~H59x#Q-H2MGWYB$}hRvjqW#RZTYPL+rZAmxq}(vw8LF*|&FZ z^4;aC<2woxv@xv(K~Iz~Zw^v0=seF1vg#NyJJ~3d+&|Q3BN~8%ZtE z0zXcL5VuxmJr>zym0i}P%O*3HvVSUgM%qkuO-UR+Fw)3RxAY740St&O)>dZCJr~_{ zVMQ;_mhN;hD<)U6(JhTWvjATo#ZA?=|DaVD;D7}lIJcm-`qv}$=(E$ME)(2^00u31 zfPist`|n-mq$QZ+jy?XkuUa==>CPrOidV>%4iv{*KlrHscrAYW6)me4%LN(doORxr zR;k)$^`A+9iY%sV+Y;m!0tnzEu_$vJv1E;do(*W8t-cy-X4yrUqkn>Yu{W7)te`sr z6cVnyf7z`%>$cr~o6)UrCdun(1l*HqnfQ1a1BuidQKU0dnj7xI4L|%nXMaXXv`uXi zr#-+CpPcf_UxkzFjv~AERmpvd+VapvAH6Q1HY9wcs8r`PQe%a-2~N^wpPhD^zBU;p 
z35hbDbx1wuC_I~9ubuefjVH-qsYfnJ^$TPDeWT!M%KZ51t*2b><&l@Kg49-imE<)R?V;&?VT0igd004uQBQx$LzejHLc$&%5s4hSUHxo!Lsv0uE=)|~6sc&N z(;0CjSVU9uim1gZei4kY`I;9&LPjISXN4t^Vi?)j#`TzRaX1sq__An5kPOd_dDNp{ z6lO<{_|Zc_#EbX#SV%*b&O$9I*BU$Yz`F?Wkdc&RPyRI)!7(yJtDEE|Ihn%s*)V9y z(O{z(*hy2KQh`;fkXA<7CP$(Yma)tbD64h*x)f*w7iOhWavaR zz0#M-ROTbo7))WxB{Cdb<}|5^HAqtEnR)77Gik@gYJSs=-lULkXsNLho)C~ZBP2N4 zS;cjllPorr8b>gdPM=^>Clq9fKHJ&PBl0tz*}UO|@MKKCl#@0s^yflR`Obg}^N-qe z4Jd6hKZahkq1=RLEj`DO_|OK07!~PAd14f8iUV>@tfxWm5>k@B6s9dv=tB#WEv2P& zgK^x8OJmwopQa9%*d&WM{O}+G2#XJCW9d(s+EkU)^Z^gTz%JT?fm=8gt65c(bbJJ; z3J3xNI6YZVwOUrQ>V`lOs8}Gp;TVDcsP!XNq0~~(T35T;#isfa1TL7u6n>QB0ZEOh zP61}myB@Z32QnKh!m$T}gyS1b@dm!S)HcH+7PE%S)<0?y3w`v10JgAzEt=A_mEDnA z9lLC1S?jhg{R16~$(CCl0EjZ}?I%D<4qX9r&DQ=lmJA^P;`sInj_ zS!)RD8X8I%#Hh59VwZMbl3Zs0oP7~zh*>PR*0EXV$Il)A zIK<&WCXk8j)FB6sD1!8ul8fAAWNA{70bMedZh}uIUwM#%nHH5(f~i~*%*QaXGM33a zm)ota%QXz}?l3s8PM-RBbTKl7(%0DQhjce zMFZVv42jQqs*BN_Q%hJ!Upk!seNih-GD1bCa?O|)HJa$`F^Y~F)sH^3N{*ajWg#@x zu~rY7S<>oxCQF`Qmi4Y(#nU#O`qS9t^|1e9t1*KJqPoV(v5DR6!BxuFic0pUM-*&l z^BO49DrncN9XXCxhS}l&qt^0reQk5kWJ&}jcep)N7+{CG({w!bx#|596j#N;!d{lU zA8O8;)QwZou6L?0D^Xl;%pnOU?|8aBa3JH;;0NbuyMojGZ)`2KoEBiPY#%Z68elNDFL z!n7}4tQSj^rXMBH?_|Z>C$sOLy0NHCeO3oai~KE6{KSvS>9*IX`mqfEIyTYd$ig6` z#VblgAgnK+?1ulIZsm-G??#0H-=b**;{5na0pVu>bB^X#gy(v0RPF=+KoDOZBL7@s zZ5S;B->jHg!q&`%Pz+E-rP)&5O?PM<#N6!D^>iIemU5GC@2yqmFkk;^M z4clW*p2R*pu@x^a+msIb045X%&=zlT;2OdY)1~&l5E!}74n;zqV(}Q8(JnTz<~q*R zoRJ#8PI)4(8naQ_rlz3gO&h;aO&Fq&yb&DBu{}yHTDqqjDMS*@(H*;P|AKJlvhN-1 zQ8$MFk=y326t9aJ?a{8tadeE)AFT@;MW-JJQos&U+7R;M6mm@jk|A~McJT3IATlCp z?97t!A~#Y!C{kU}kt0L0JaSJYQ!NxlUkS%L{uqb)GeW<- zve}^VZBP&}e@rmlh9nD9F>i&_7Sk~w6EY)HGADBZ9g;Hb(K0Xd9WzriJJU1k(IP*Y z^CCl2G$WETOA|Fya}=?o9aWPovBNH16DhG{A7%3|Pt!JA5jS%)6L*t0e-k)^Q#gl{ zIE&Lbj}tk0sWy`{BoC80V>11oQ#z;f2bYsN5i&8alOqXJJG;|6mCrwZ@jK zv9sjPQ$5$y?4pxB5z_A7b0Do#KI_vy?-M`sQ$P2UKgCQu`x6OC6F}uoKp7<<001HR z1O*BJ6#y*312zKz009612m}Bt1Oxyf`2+oJq5$ z&6^52awO=nr$(GWg9;r=w5ZXeNRujE%CxCcof>^Y-RQKc)vH*uYTe4UtJkkpp?)mb z6Rg>@Xw#})%eJlCw`8OKJXjX4-Me`6>fOt?FW98CSIG*RW&Do=tkJRL8V)>)y?~x82*m zg9{%{y!cbr#*-^w&b+zv=g^}|pH98H_3PNPYv0bjyZ7(l!;AkPPrkhQ^XSv7U(de1 z`}gqU%b!obzWw|7^XuQwzrX+g00t=FfCLt3;DHDxsNjMOHt67k5Jo8BgcMe2;e{Ax zsNsejcIe@UAciR7h$NP1;)y7xsN#w&w&>!EFvck3j5OA0ee260@lulM@a3JTDQl@GJ=4iMwDx-A zk+MTj`NK5Uu+;;2jx8Q~=?zrTZYwo#L zxrXk#Eds~xyDZv<9K7_FDDS-4)~j!aWieI(wEG5};cfF4EAYV;iWL>Y3}dJ;RSicx z;lsE|Z1I8$&wKI42NGN{#~=&XFJ`)G8}iBd)kQKH&!FtGeuCj^vO_+-Z1Z>Sf>iU( zJZrbJNb2@1^w2~XZS>JdC$03-OyBp;Gcz|WHFwXjJoVMyJsmaHTzBpDwn2X_wsK*Q zZMJbGpRKlXRkytM+r;Gy_uO>X?PS?@=Utn|diVdm_s4z*t{T9C7ycRHh9|E0;*2-$ z_~VePq;=$!_w`RF$5yU+X#YIp_U53wg?H$rmu`A+jio2R*j=0pl?U~6anLOb5R%w&uozZ`_`&}jl!b?@w z&UdMNp%V=##3(Z4l~9wVj!a*iM>`{S5{w7oB_a*t zOsgzXngU|e2!%xyZ6ED4G0L?!hM%EGyzb0H9YrX*)#u!tJy zn*8&oA4eHKCMJ_5E|iiu{D1)r5P$$96`?rBX8|+0kfj7=VoEo{k#fu>p$2)ULK^8% zo9;-2H_2ovSEosZEK?w5T}dX(A*MC~VxY+brUfuSP$y26nKAU8R8r^|s9pq)9rWs~ zco$6B9rJ~&^WsZWQc5-vbgcjD==CgMQZ@RHr`v2PU^G$-mv&RItD+%Argyzvf-jS4 z{pbltx!99%Qdp~#T}Kf)S%LpXm6)GZBlbShS$39hE`CjjRzZsU-#O2doMbb|trVyOEMXg4Q zDcz8~&jJs?rV}3++Xql~oBs$dd4G~mj?50Y8f_yEyPDqOJ}e~Qlq~pSYruXcn7#=X z$$cYmOTyL;5P*ol15`*=*8StI%-o4Vt11Y`V79@J`pC~Jvep!m7@!|TqV(X$M%|*M zfU6?_NFzX0K12WmND}cbDCiOUX+($u*-1)OY}J9(w<4p1@r-1gW$~r4gv12r@xUln zgZ$LTCds8Ai+WTf`C0$cypZ;-CEaXXCpn6T8floE6&8 zu9k@E#-|7}aKZwO)<-xd=*3l*8M=Bs@u)3A!A$OAO#39O>gq~>U5vP;HXii#dNS=k zKl#QuwzgyN#u!1rI$m;F7>$_<1TuJ`pZ{!PCs&EwB$}8b!z{_{YJmX@5Fi4xPTc@C zA_xqK!;g7w;-G)akxri2uwrbTdD-dh^?v(CVxek9lqWxc=5l_tOY50;9OZKIHn$U> zUA>*<@!tiQ!-D^a0K}9RU=%jF)YuL4dsTRChBTbe58v{kF@45vK1G 
zR_XmZtwtXpc_mKii_8uXCH)~^pB!L+({(Jn{oQEfY-(@@v*pPBN8RIc6;_u$W%436 zZY`X~h3dPB;DvW;qs;FNPb7Ku_IJ&o-r|1WBE(fjD<>07Wfl*Tb+HgcAOc|l#e1If z3$bBfZQ}UH&FDAGTtV3_imGE;M+$;(az$>Ar|$*J^+ct=|fydPOCwHIKW zHCO#sI~}D;y`^=+7hWxdJ^Ww+vEUC+w{Q}ne#pXp?Ux~G^AgKOKx1@bCTDvoluqCS zMlUB4sgoshm3bL)U=`3$5ddNluxBHfYTf5?1*B-TqY)Iee;G(5m@`wZwRw`ZTl$B4 zgI0Lt)K4REJtc(!($^D&SABN?4(E`4QdU|tgmfaPgSY@Kk>za?_k%PtTxvyx26S~s zS85>GYW(I$VAnhUK!Kwr2z;;$L?#sn7J%NM5B^Y6smFsfsDSfkfBk?9v7ir}Kn8~; zXz%|=hL3V(MaOkIAyzMkS~fO6b2t(y_-R(>XD!BL;m``L5_XBiJS+xe5Eu)AKz^sx zPLBwQjxv8dF??57Z5v^Urxj2w#zvP05!WMR_+}fd6M)z!C0IBYNQ65orel5-2*bx@ z+GcZT$4HJOizZ@*`gCTTr+5d}f|Dq0FKBocCxA~^jSi7IcNl|um`NU?a^)ao$p$`h zu{@n-W4qKu35bLs!ByB;BD!Z2^9OjFM0Y>IgA`g75$=C~UR2 z5r9B-P-sG9$czNGXNmP($RTKL^N%k=MY|UhWu`Ft7G8hUf&lkF|KyVIhK>v&2*>}n zR)VoQTnB}I;9smU72RiTCA4qbW|Jjpk}cvnLXm0&V~Oa*N$w*?>E@EHgMrmU0JXr5 zJ`p?}g#mtWBrb@185EQi<1rB7J}ab+N_i%hcpH{zKhuUpg20OD#&$*t60{Q~G*>e^ zfp`C)m8RH{TX>Y(wsQ+1EJsyu2jP!x=_8lNbKw|nqGgsIGo9jdRQxHCM(B{*2_bVtN9h<_@N-xa zF%{tmVAexchWDTA$&VA!Z-C$j0wq!@w^M+CljX1r)|GuU2z&}Carx;?wIG;V83FVd zivemO8`u!M<5353JxRAb$We3vmT_cdW5L5^Za8sA_HR2iJxE9Z4^VH17Dc=nZfLm> zg77M}<8Mxhl`YC4@^fkbl$))gd}HB67CBk_XqqDlpaXGa^x17c!3P)slrb|pQxS1E zXApFjI_IDyHdv+ExHVRJlkSzE^Rik7*B^Lj>S#Da78IxrX za0}rO2>S3U!G)(B!kuI=aCTNf+(JR)7tNx%5 z*vbzZ@tOhXtQf>)BB~JBXQ8#ArpT%6jVeBJb&t#dr;`mMNuiHs+v`FWZX zN&v}ut=j4j-|!8sum^c^Z)`ZGqPka(*FCdy3l{oWRH`asl}e$SG(o5kSsGOcdS@@F zrKpKplgOUk2pn8#HRtq9?uJB5$ICbJfSl@;fl=weTs*KSI~oeb7$L1eUwX0+d$goN3o5vy)3 zG>C_(hG@GIaXPq>m|BbHgF?eqLP)W{NS7X)6MO@%wiajC&}#L`rms zd3Q^o<|KB6bVfzAw8F!1n|rHoTe2govm{ZdBdRPHrH|s1ojem$twyoVH=GS=yGJX# zkqWzh`hJMXd$h~DmB&Ifx)4`(GfbJ3hgp56+fszrjadJdxl9_pI^(D%3KY*vExr3* z_2Yck%QMVmv45y8E3rPTvfK5MUIlW5(vlcg0t%!|2mfuATCP#I>yC7cp&o59QG z80#BK3?)`*cr_kzJUo?!J5^&;fxFYVq$rq1ljU;8Wve*x!m}cWA2Gwo^G7^;z&(Sv zt#NSN6c+;YUjH*PA)&dQJH1`Ez$!zs`}@Gi%a(q!n^7^r4>@7->bE^JMb{Xe$74_( zsCMTllwI7ygtU2YOsKbu!K*>OKuan2MOE^XxW)gcP)(zLqrre#mrEhaSpY<$iEF}o z`KbYS#Z}zDWo*V_Ge_-8Cv#|Y{^Yq1Ay!W%#ZxSVIUKJB`oq))KEY&7qre>A#kbx}&CYso?3!)d!xyPU5_F*;~GC-A05 z#rBGT)4LJWH8zyDWf77+I-Mo7&e9ANpw?5|cxIq06!AMNJ;6G|L||Sd8vXFjQ8l&} zi$j4`78$onQ&A4M5DvI7r1l9EQn|tLs#w5_6!n}{aFn-xGJWI38uH8ZCu^j!zV6~=Ny7@g6|$yxV`x~UTg=pe(mY&M)4*QDliDh(KhbwNq3l9DxRUWR+~ z3KQFG5b2kaeS)UVSJ(1m0l3f)J*`@EGexmvL;$tO4XtiOM8f*1Q)N}a+;q;#j4ixk zERg*)y5ibzG|Fk>FM?sWB6~(0J<(qCq6uMPsRbCQrEs2$))8?|!)AM`EffBTlEo5` z6+qKB1ld%L8?Mn>|8T94R*!6)I6MDVz4%&Jc6G4E{SYxN#{5Q{GCdO60!eWsH|b|J zjN~~`L)%;GC1%UkeRi$OxYIu^HuX$x`WDzxHqwJ&dkxcx85Kj-SSNkwMpF+ zQQq!1zG!FAM%QtB-8TWQe?(NQF?2xQ_!IqY;SLd9$2%`3$XVUnkFey|mx1CZg2D}P zbg9kGiL=bAhR(4=$Kk_rd~6aA$h&vON zV#P9!vocGR}ikmI$oi(Ew*hP{e6B5?Gobv-ZzrUqYU~j^)kS@&>K9RyB1rC6spA)1!RzJdL*g2? 
zt?v%u>mjr2Q(X}1N8yM&)-_h_u&$6yY~(XB?o?H=C#XUq?aSB^VAD^sAx-%j_^7l>fe^u=u6G=!f z5HWS=|9tVKK1`jZmczM<5fq5vp1TDZWm7JwLKk2z9IohY5Y4hjV1lp_Y7(JdG>bGj zzAnczL!{~r?6mxX%a%&whu8TmvNd7n631F{D12Ry=|*o5d7>ytLiCoL^$k(w>uN($ zgE>o&R)Mw?i5^S|xk?b#*cJi-nvyG z0v~Y?Dgwel((hu%jU7LRO!H^Z$dxT$#+=#c)2Ni0MrHW(=hcvM!Pb)aH7rJiY%iJ( zgw}1=S{eUO^Of7zF+r^YwQg*xC~e$?b2TFb%r@}@Kzs-g2rRkNaq88rU+4IaDt7MO zy?;;I)N_SroyI5pNe1Z4+-p^~#i+Z&*0O;tRG3ZhZP&O3b5|xvrTJar=7c~w4^MWAZQ0)4>QBam?X`NvV3F5NU)WkH%VA*mFCbXjSqJ#os>hWu1L zGGrRHKQ#quwxIjWauY9D_d@fswfKt_OD61^!>^onv!d}ea3_Yf&dem{3%AR*%IH4)BEZOxRxD@^ z!JKIx-Ui> zzqE!!tt;ceN^2`u0e4k1kp1-9FOyf(vzA>Nt)*TJA|Ut{J_su(a+g78-a4zYRJ(cU zFQ1Y#(m9Wc?2i64?)JOH!Vj;lD$y*(!I7$sk&GN1%TI!YnK#7#||%yNlFIBk3ui0+$sk_9;YyIs~H5KozO-jOK?E zY+tR?RROkSq;;)gqQ-VawRkm6WDeU_&m71UY#rrqCOnQ-TmzS-{cVW}(HG--1BjNy z1zAB9*bzzCm(Ib_jsX)!b&#Qtp5k>M+9{+k79s!!FhD2*xUL{MykjakLO|$kQI)U!5@%%QGb4V8dw>k& z(UOLjiJ*m-9@Goq-r)r=)aD_5B+t)`VmHmiYd{s*m=zUriwNim1AG4f2QC)ZA6Rzt zOd{M?t-LwTDv3r+3lR}Axpm9;v4xkgvyqM{!Ww~KFfC211rXqaol9ayKoVIM4Bf?$ zTLhvX{qTdn1d&i~lGC9^ik?FyS|UhM5}l|-V)t+<5O+qgkgge@JTZose_DW%7Er`= z!g5ElgwKi%^24GKWdU#a12F0PhdL*!Kvaonr$6n`8`IWBp|LY{uGtSq+oC#pqAM$8 zxsN-~@V(o`T?vXct& z0|shwiw7i+4;bid5Ftv`>EyjB~j5Hy0+Y zcDyl*pjwJp|M(WW(iq z-@Nd&ajHddWowpt{eyC&{OiVCg;egEtS;ijnI)Zi$>x0RxCCO*KPXv5i~@KlAXW;) zQoP3&yV%AZQK!!YTvVfq6L!2sh=+H3wD4Vx4-&a+ViNxYh+QxV$@FXwS>9m`ynZ-@ zemdAdiNg{25+HIYlNgA~^ROD;eN~z3T%?DVI><4K5qL2qnk>1k}jW(YRzYrOpMDwM3whM&i*^KvlgmyO~6ygP1`4 z>uFMVWWQ+(O3ykiDvc&MlN}k0ciV!aqDAgCO@@wKhIbaewnr=pqJ&$n$dC$dR#NVz z8xyhHU#N^uuBGl0K^!^BXG7cCA+gG6ZP~2^3k1jx;j2ee>zg)%ztS=Q8hduEG0k#SeZEgrfAZ2SO1fp#(^+NyWv&fxLxJj0UJ_rPt`* z^hV#z)QGmCGVQPfaMepG$T_>-cx!8YoyKl;q)*#)hP;Khw@ER5`lmH-@PqDn7ep+) zv4;rU^)ycR4}Po`AH+BBg{>8&NH;1MZKwb3;4~k5Q3ie$fK$HIR$;s!4`NK?2uNz( zW;ps1;`w|xSfFcBT?IxN0b#LZlo;v9++9enLq?M~D=qTArw1oEq5tshZIHCpa~0^h zoZyqEOSeg59^+dZQai>8Dl(0~61kash*%T1>a(lzaf`Be0ND_PbQ1@HIEOtL2xuyw z_Mo+A2)4lLpajYwbPx+%kiL@>Jo&piHVHmx13<`|lV>rf;_E*E6A#M^KyZ1D4Jw&? 
zGPak9kXhTF+$f7v!4nL4z*cFTmY55;ix&bZl_*1vr)UfHV;(Vzg)8I-!!toPJC_x- zHZLTKp^3Z@2?{Wr!LnMI2zoqNW1;`C0G|LkjUEh%l3TbfWI)s~7v5ka7a|Z`n?MWm zG9DZsPRb1&Q8e8m#AU;ryF!Op2!zM^rIX93+|aZ&JVPG4M1o5v=F*AVK(AOUpYR!q zR>?iw`7!W8~8F4_3+_Uco#L6J6U~IF9 z;)#xGFq3&IifJODAiP8CJlU(hIP?$W8J^S<1VA94S!|Er*byqjh<5swf(Zki(es$t9s6#c5oPju@Z0(4<*AzbP`m z>R~#)YexOh69Y6Gcd8mhIxzp7dO70ry8p02dQ7gS;EaYC#+q;~U?MlRgQ8~SiD#TX z-nk_G@QS4JAgwUJglsQOgNm@wk3||O+33Z~DH#+iN2-H9H#5dL@`#c2$mWWxw=s%~ zV8E7qutUqoonVbt*$-1f9S|8p1*9zW!k;P0sm^!@*O0x5C<_8fN6`5SuMn{VL!DAn zvFe&55D^csTgsh+8yVyYl>p1l`=z_$C3-W#n8={J0y4pQ5zlKhC8EW^dY%`ljlNhF zZ}Xt$sf9p@jq8(0JH#C>vc!i18te&8gzFE0!^^xRCoSU3oscR#iA2RIzpJFD#>_l9 zyvj4d$OQZz6r&TpgUSD|jLZ*YlUgJf5BnSoSd?(U4@Jx<(%c-*IVT&-DO7904B3{W zJUlw-NYz}c?i8BWj0i!>$JgMGk`o`SD#{0|7&`=!Q|vsyoC`v9qSF);y6OvgD~o{u zovrW(e&`2p97=41iXl0mcljI%_3DG@XQYTv#y=Ir$j=#C<;uIyNlcM z&8q9c^~#I=z?JoBQcpxtncFff{ho1ofJ^m*Tlk-{aVyb#jT*HHP;;B#gVUZ;vm^jQ zK)t`^6a?ijGQ?9=BB1083D(@U89_#Gv(h_zusp=WKjkGAH8!(+FudAIT+s^#5uImf zFGte|*N6pCy3_~og9i~jX#vu!NXJ?nP?~TGKHVuEffRf63lt^QR!tzyp;L#L3_YF1 zHnAAzGfQA?2yQ${rRi0uF*f(3KH^wM1B5+0El|ajn34;GKWo2{s6A!TH2yQZj_S;t z&<-`bEFNJX_Nh+)x+GTx>YO9J({+tca=V}gyi&H~$41jGK_Z;ZY9jbtkb@=4$b;DE z{6KU$(+5!$5sX%r)s%-N(!EiuF7efsm_g#n^M24C0$kbyUQMmAU^rCvg4Su+zNV;-!-1 z50k^RJQR@skoDDVJE2h#oo|)YI@Jvx!Q8UAgL7~W&y^G)ZC&Y9Gtvc#9`%g>dsxd& zHs<*k{!RdQ2Fm2{t{LL`9tu@r;9S#8;c0F|G)vVo%y z`w0XY?FT=DUN3pr^bHXfLEX>*6-N`_Ni93(36+&grk>C|ZQ&VZ9g!N&K3K5*V;hXp@&6+6fAdmYLK1bG&hw3E<2{D~YrB+tRVr zmUh(9pb#PRh-J2M%%fc<5KH3GsVG-wweI<|TNkh`Wzi&<@Z{E)T z$5_&tyo;R1Xqi|T(WqzVEV;oTE)3A;X+06npb~B?oG{s6$_SI< zE09wqUOf)6xe&{chD$VQxo4ivjfLo+LgC zmk7vIpu=Pk-i?x8#Y+!Y%8j6j*>EmZI3|yyPT>N%ipi)bIiqQvF6#aRy@YT+ zia6o8Fo{}lj;otLmWTO1$;tFsfKxiE1Q0d5KX&m!z zUyf(pJ0;4klCfqkpXTn+ji507i0GP(BW0_eCLo+V=lUkN!p@2I70mo5GSdhP3@8Zb zNQ~UW&jWvJ7LR05aq-7E;Fs;|gnpmm&ReyDn*wj*=Yi}L?YA%Ha2NA%oj_dfDm;_Y zkxT-Ne_&^Q8F55CZ&agl0~fQi;&GxZ2QMH4V~BCanDJni=N9)>aS~ep$(9eRpxvA) z@=!&q6un~{&hjCLZfL|qjF`o{i4lLmmkHS)d`56rd$R^=<3Jx8Rhx5tgPmRRf}gw& zm;vG#SD%K6%(|IoAjR~y*=^;19#>Hip@ks3rgQJC${(aj^b!kxP<4IqhmmNB_SNk< zso(20k7YJ=I^tWMIHdC>gJ9$5TqjDQz*)_TOX>Xd7%ZCsM~bneXu_q@toXxgAd^9N#?iwt%5sB0}2b@YpBsD~ixkQkr;@C%2rfcNScHDwpv zZd32?1xeo%@`0Ih8#j&?%+Y=ybuIke7Yf)*i}WQ*$v821j+o$qdw3`gi~o!T8WAb$ zfL%j}M=LW3NQ$6l(-yV@Z8+A*3g$PHXL{G_C}&b$PJ-DJ{*#ikjnyf|RT6U^rV?oO z-++>yA>Is~2+49>4| zZ)+4ybkC>rly@e0)#8_uk#~54pg;#jDZdo~iuI|)**;JPdTGrYBZ*%rg}#gfj+8b( z<;~g=)5*FKv2gF`nWy0*7K0wjSN;tqJn*@Qf^>Tt28e$GD-i5NMp1=Y7SPoaRL%my zi0{aB`d2~Vz;~UVRnXY!UBr;@o`I}DhD=9_AtMf4>2jrohW+&Q69f?>&XzBC^6cp| z;z^1eS7Q8W^eED#FL8pjc(UWWk|t3qt!niu)~r^GR(KZl9apVl$C52;_AJ`8YS*%D z>-Mcnh2<2+d{~Yk*MT`*;tcuhnV^Oc6WUyr0b zi=|nrCTJnE!;VjY+(n*Ppx2Kf4LT;68uH(Xogi1%xqRpI3UdQ%*wIvr3E88ckxDu#rIo@JCyYG#hZ%c@ zW!GM6vyCR|PKT-3lbu-E=vJP4c9~(N;)rw0pCDDll8z%03Z#Zm zh_s~y_<=zMw_s39+?9DZB!;JIE9`@+{dG&bx3G?uojK(w-@I)Demm~D1Kx2}?3E*k zHwj5TAY!xe*HVmFFSwYAsA8_H>lP4D%PkK`Yc|U^_tm`H3lnq;0bmn^eOZrV6dy{s zL909a?YsZ}PPIt;4?3~v!%txURGtqOa@rDz_@5wmAMVI3fP$-3^`!NH3PgY(;Q++S z!h#Z}+-pHgDi-Qo_llK)k5=V-Txfd6pRK{KgeE+p-zZcLf9Ruyu@FRN&Lp{|0PuOk z$x#6VxGaFJWO|?@z#Re+hzMYSd$bTmv5q_Yl6hnFWN3Bc8EPsKqUE zQCTpw050_54O}?nCVPp|wj#zm2IYuQ30oEcU19-o^kWYXco!c4LB+6eQu0CG^H4b+f{Q>%s{&?3p7KsoHvu+iMQ*eq zvclt%2Snfw`mhB7Y|)SZ3OvArVd)la{t>kQ97a6u0pBwS0KN)P5q3FQ(YC5p!Adf- zna+gE1=SNnLD-KV7OL8EMnbjhL1-~FoFUQ3hK_#t;T5)!9|ji5ve`w%bic`0#H5y& zTPT4H;Zr2tMkJK@sEIt!{3k#IY8I|Zb3*pB!0+g!M#)j`Ti}Y2k=S-2QWA}14*3TL zNEU%w6jMKkVpBmR>KmuvWl>cl*(9DJzK)Vbc6aMhjzlWZnbNeTScwT31JMu6bW(<} zLguj|gu}}XZE6DHgC7DzfPc`cmJ^kTP@u9Pl=RCzB>Uby&oZKhY0G%TvME@@D%RD6 
zghu7i>4bV}z*`FcEJ+N)lS!XiRL=;+qJFr=@9wD1GvQ@Lx3Qdn(pS9Bunv6f%9_je z0y8%42(V*)EMzMgw1j$+Aa;3=mtK^VHSQ*+sH>CFbTS^&A@uE}nayU>4fgD!9uZAWz_00DLpy1ioT zMFzU0Ym6kbGrs2*IP+K=`S%!7`Lnm?O^900t3|rl#A*67AWzEsr{Lg;JE!EV!NM~T zxtWckYqjq(-{N29Y%^;s>Wan&6kzp=wIMc1FM~_?yK8>QYT0sUfqUaKK@wGWI60f( zASRF!wbd>EWWWeVE=I!)NA5kq8qfJA7}5c8FT#)Y9fj1YP#bIMxcS7axUv;OK*|?5 zkpb#*l~*)8U1Y5x%UEr6^chheaK2foSeh_mkgl!`WITp!r=pda-^Jw1ekyVmOQV;R zm2{SX=@t*lQaExnJ&}jnA zj+AhV1K(nA7<^ou+Y<0-6Af5`Y)WT-3vz|w9Bph%_xm`~3nKoqS z8n@6xU|5_$1KDazfAvp*z7qQ?p`Hc`$|PX>Z-65l=0*E-ZLLP;G8?5yx%Bzaou^M* zM_hm~76{M45>tdxNq%QbV&p}Mer(5k(5-+Kd@lhkuaE1EPdXstRcJvoa)B=*0$3uK$n^%%1cYUK)JM%Mu%j=g)ffaeX=1aGZ*$Z z6e0Ko;t#O3&U{s;v{<6aO`|nTkTh?J3gvgrY56SF6UBSnzj^v^FKjPhvDMcs<|oxCP~~hEwqW2wrdo z(0G{&HWTf+1PrZ&#tmNKJ=pr4AaZ$>4n|>T9f`J(fiMt&Fi65Lv`__*3?IaalD);U z-Jqnjk=50Q0?H3U{m#ccOdV<#oncTrDc%2pUWsjtjtm9!P!%8QlCfwY80HQLhC~Nm zp*6+O7Pg!8S%-*VULQWd$86Vuj9)N>C_`~=tmH++9y$E2B#Z(HChI-^0 zbQpsw&7yTRA|MhVE&jx^RRxrQ3{}KN1=^hWc_K5RpJt?6n0%o5O&xH-P-Zj-#~fcL zDq~U*5A~(kC>lv8uwhCOMBK2V`Rs=B!Qz!@9w8x~KsXI2<)I*e5)Ib>ORl(te0)~Y z6k?sR1fG2zY_!wt6rnawQY1>EGs<2FLYOqtg$hBAnyE{2^j<-}1VO!+VVKC?HD5{G z!Va87Ioi@W?uHH0pr7m==?Gv^l!G@wL@oToMQz2wNm)rKNj?spI?fl~0g}%Y#E>Op zMOG4K?3@<4h9HIY#30)mAo0sMyTdStfjy7%WIWO(`$k-VK#`qV_`q*s>Ra#~QZ=wfO9145xgAjE=a?ws81BzWQtY37r4w8_Pg z$agMicuJv55?&_-7gJt_g-Rn`4H)m?rzh5oHO>$KE#Gd$QDIgJo*m}Z5Jo=)0xksC zU+SKcZJ0imoa_M7UofF)O6Y_(Q-_dQPub%8{e*Pp+HFezCTD%IjQ+Te|UO2@V=|yo8OiMVz2We9+|~I^vJQPhi+0oft=v8tF@pS^oH^Vfto4 z?jdzrX@guIXh{k?W+}HBjvyFA4|3bCd>+Fs;UUYbJT)CP=ZXxnoGQ%Yi=V|%$XCK2y2l9 zRvzM{f>rKKVOlBVW_4z-#e}NbXOn6pcUj3ES`Ml9hSC(^r?#jz{r6i7|!`4~aM?Q0QG2MeJAwYZl_+o5G1h&P9&(E4GP? z&W04kN^2IC0}zmb7a%Li-p3wBEhAY5ikcp&&IW=x(rqwK0L8|I5#-S}*x$+Cnlj_T z3PjU91k~Q3lDaGyUhS1IDhwHe(+-!>R!Kw^;1V&SkJ9LJeu>na%UbOuYox~5s@KFS zWu*Nq(B_y;AS=D5ZD!Rg!+n!a-6ikr=@ja_2LL5Jhkhwj?MJoc^;{%%DEV?V}^Lj;08 z#DWE2>t}szQVK#C5$NZQuIzeR^e$`GBo5taMX3H__8!xjqC~^oDEKni;sTejUWO&& z6x2>Gs1*tdCMEeO`#|~9Uq-TtVE_%`2 za3CYa2=FK7iJpw%SJfS>^`s_>AyasTcML>$`S7`kZ)RNTQA`*$>4OogEnhlk(Vp(n zevu%wko;WY>RO(JqOc$Y0y@Mo3-fVX?43$1aZ+gBYA}Vr6fkJc{)1pYwiS-3*F%}ep@*CG~~2L{?=MC3BBE{+l;? 
zxnWf->`S!QSwZofJ}+Caqn?5PtTBcwEmm(0Yu{R0ONVZ2&E!hbLi1z6F~?b~U)qK4 zwKG&Ik1&s|9(n;_Sinh?GgnpxpE8FB3qmU9nlYEEb-MH9Y?-!rZIT3|FKTNgXAkRL z=NJR@)h%wt>R81ZXrrNJx*4;QvGT7@Sb=te7f7@!2cnN+gk8vK`hKBlSR74ybn<~T zAsOC<45PVe9}#}jqH=_>R4+?Mb0-lg&*Gs>m)UTE)^gl*Vr2DRb`nol0HNYWF^^oL z#ve~P=ZFLgWBqh-_%iqWtQI|;^!n?%gUw3+n=ifUiB1Nfn#9f~SNIT&A z+6?=>+prwUmyV&#a@cuCVVF7OV4U?)c1@i^WQh*=rc}ovzAls62M_6K3|CG(|At^A z(WLzmHKMJTV)D*-$1pb5KMyzrCnR4oWI~d%M?DYkp}1cxC~i8uSIl6HPt=;!*c+rL1S&vO~r<1w~TvYcW0=0vzwQmYMnlLj*rHvpvc+`RttyJ zC$sNQ_*II^_oszgeWyiqI}{n;1&q3+i{rCZ??mNTGM0~(k6A@{M{Yz^VS@i>XKU7T zPwH_g2$s^@>L$ppq;@(s+mA~)PNRij(|5_$LLjtcbbRK@lIyJc>YaaC=^@2&SThOE z4{nxk7b+=lV)^2FO0X`EjUqHNfSf4Vc-vQ^+- zOhj>Xa@(m5`mg#Jec+jkw6|G^0|xj3U+iN(()X>W1xV}q2T2qC2+>_=dYuuhnMM_{ z2j!!US)Zf-`nl61rQ9=b`3LQ=cAD>mA3)b^&8tyVAGOA}nzIInmy4hPfg-R&Ab7}h zYO75~=*6u&80sZuld-&0%H>ksKh>8ADNdrFg|(ZFAAn7~q@YL4j%NDuSq!H@h3|#;SSu*mgSbCI8c6G*)93t^kA2E)c|BS-UpBEVSCB z&ycooXnD(T>(*^VLo@^Z$}&NpI~WVS&TC_AuA0iDW5icEK)p#=ij=LFuvUQ0V7$UN z5Ws2aMRow$u<%H@p8Y^k#jSkMq^mndSimK)g9QXa9RGUJIx<#|y(eappS64p|C&h} zPJSQ%%(Nn{4o#`MV1OW;!yaHj%A{`s2ck)6N4Fzkt_wWYr@pHQH4rpI@L=)R<1<`| zkex$5@9*j3tG!P+7>J+y(5yD+>*Rjsk-OM^On`n)z!(w6#_F?*PrANf3%G~~LOAHd zfXXR#_OtU_pcb5S9y6AzUDR`uqthXOLpWf(S8Ict(aJ8H*o5h7>uHWJ!}JQKnS6l4VPm zFJZ=%Ig@5hn>Rncd-hJ`vwpxy0pmZMPg!>4sgX>li%XZbGecmj1E1LAAZr&Y%XV<=+dw1{O!PC^ayyAGw z#VOXZP;P193bh1@MqfH>c$e2#tG>FnK5PuT_>f2(K5)~Gu4g)_tTp?1OKrZD5L!(+ zlrTak7Jp)iWueUkD{G^w?y^tB5lJl3#1m0eNxzDIBM82UvO3De_(R#twX4gconku9uHHIC$byF|@J64_CZez@w+dS=$1%w) z)66r`q{yd|R;Np6N|12);cQs>ehqXu-l#=Y z+;Pb*7e#14T63g#$P=nn(l!e%UKfvKaXwp1?X#+#(h{_$TDmNWvZ3hzBumlkB6Ug1 z0w9JkEAFER4!Pox)G#t-0>H zT=nRj+F@mfPN}YrD{7f)wOu93sg-r6xoi^ky?RNV$7bmrAmW45?ZFm9uApa+o-*T- z%F^lP5RLJ|ZmL_edb!q|1k~%xG0)sY^*V#o(&F$|vE7e6PmjhtDf%0<(B(d}@*Puq z7Rk^JLa^QXYA(+LO>uF1y@L-Uk8p*NChTyM9!_pYb}^DlQRdhG!teIhK7>;9>#@%s zGJ>b3xVrvmKZ!Dfczx4*rsf3wHu7$E)ak>P=U>~ zCU-VL%iNfQ9GrcRX;wR&#g4@^}qxeEDy2@wx5%MWH0=nfE7qlhZqKpY*10v z@QkCt<1q&#@>?FaR^yYPU99+2Q@CxgiYk;Z-8ryn4E<` zDpE>GHAF}bzgVoKfv9`~ISU&hh@CU8l9e3X%o@#CvYcJejTSMFEk`#>*9b5^bdseI zS@yg4z>=1JED<2*C%?G(%8&+0N?>MnKOYLv0{#Gi3aVfRO6JCrFoD?MG)cv2G4MFb z3nk=^Q<#G6>nGqEVNe#A%T~^lp5+1|NC?I@bhZ&+CVbqwKD5gUSiqN`+2_43B}{!5 z^L`^*Tr$DM%xS)fM*Z-HKhVe?e~F|PWw^jLRA7Y({Oos&e2J3=N0Uz`N1izYWlQh2 zQsuDgbpw$W|XNfoLNG1)6~2eQ$#u3 z(XNyuQI>S70{qZNKm5Tt`;|lu0N6)0J^%&{BTFR6jz$&SW|HBgmr zf>Ki9JDVaopAMF=WD;s4*BG;-?(?bPv}s7ZXuY&`PMB^LB8T3V#>T48q4TpE1+}^o ziIB{z`iMo}#!3=<001BVn8paEV1_y}v6%&F7F-;g7rS7Rjj!9OffT38b!O5jC?qFB z99UT8E|-Cg(d-K6BsQta44n)$Ny^I8Ni{`{M)Sg3Sujh@U*eRy->I8tW15kP8uK%P zaEONJnA(x7_8$BA2QxmPfRR?RmLX}@PFFSm744oUogEn?k{*f{#Sw-eXvtMw!C1Gv zGMB=Itu0LWs7C2}Mn-d7QXj9IpxWVuGIIO~Z{7hXm>sFVtnq3@m?Ed~3bb|Ic^!Ju zk|6n(N3xBQNH|PufmrY(z8e+EYVFb53J^mC3Qz-=fJYLP9?&M`Twq>4c|~qjF~L)j zpz?A!;hYuNz!f%g$goSQchPIc1bO9{CwgJq_6Ck1F>$i~;l9wl7%8M#Geg&l7(hPs zEz1mTx|V7zGW;c5&xtPCTD;5E(&HXZ*#|5_U<6VOFv^g9%T97jMFH2fiprCxq82wr zX(g?}IVJ;yW2(Gkotf6sI>^3(F*fVLG zRf^`3q7g@;&>FUmRCOhOs7J3uYs#dPESFS#K+|oFG)gmB<+&z$pSk)Xk{o#I7X1UR z$EA0eYMpO=KLt*^9!c2jRV9|#4#iXvb5m*f>tQ0Bs0S5j;COPvySxQN!&xUGnHdx6 zTq&&a>9m_Wqnp*fgVM$6ga{UpjXH4CvU7e*sM%<^8s7EFIE5#^5BeqELdvn$Da^t5 z9dx0KWKPdj8A0**sg+R1cFyW?d0Dfv3{O>$!$jrBZ30Kb>?%PX2_&ToV(1;ageZZT z6ENF0ER8~X*B3PFnToR_z;?7WYp6e!v7&pYZ=~LEp!g%K!aRF1v0f86mhWO9GX2K9Zu3Y-eolY3NOa+2 zFdwaU9VEX;K3w2|3WVSSAGiaQhxlJ8%4F4i?>p?qd}9%VaWK46pjcsl*ckGgT;Vm_yS<@}#2}B5C`N)pFw< z)qq4k3?T&&!3eNm5ja5QXp17mggE3%re>{%9xVA%ZE}3Yi{=L|l7?UIZSfLsQmh8( zt}g^D%=KJFGdKmKL`i1Z2d{!5>y^JxI_^noT5VN=cYbAPhqw zc8Ftgh1xQ%hW(_ufU~JL~ss+4)W|zv-shE0?xxMslQs#w%$-aK!aX3tee{B 
zDBOemx=$wR%-}YLQ4X(q`osu@CeU_}qWZ=HisS(-pe$C)7gT@I&jThNWcs zAsn=isu-yf`3EZh>TE4MN3l{+hCt&PJ!}yv?mdi?COyfokgp;U=-Mul4()J8oT-k^r!Qow(-uS|OF}H!i5b7h5@T7JE#R94-f)V3F$HWw)=;F}=0^u9@Ap$BO0-Ep)n1LSf0UvZh z1cG1$48aWWKuh{zC?dpps4{5)%wDjv-Lz~W7so3z!U9!AxEgF2r>~}%%P!OM7}qGn z+++Oc#TXpUH>4@_(j_%5LN{Z@Cvp%QouZ^{s3j)_1P9Uu*#%q(GYGv5(pIlAI)wlT z;0_+b8zl1qFajUkE*}u$9#SC-w15(Zf-fKfDR$%~S)(f7uz`%vEXpjsZj88|M;IMQ z#As8D@+M3ol8h*FH!o8BI?S0UD>Ntp2*?9`V2C^I)4w#R7J%R~TrXatv#I1#@tO-9 z2lLhc1_{Na$B&E*I5q!4?)jAubVr?{>o5E=AKJ%~#d3XOo1I-UUtJf#n% zbL{kPQI>CTo?q-6BzQvCA6+(b9IWil3MkZcD#exgOWqZT57Hu@kQ_uw3E0Y_Vo zOEyEuEZ_+&z!;44dpLBN{u*#m1}yW`BAQ&IbkyrFYZJnn$w_`fag251dZkjc14b?28}^_T^dTJo zZs8U}PZ@_S7My`^1oeAH3D$H@A+bhlVoF~caw(by`Vez1FvcPnaFh_Q11E@8<&~b^ z%w?vAWQJlHkwQjnrB>4m4EHoMXi9dRMb{`shJf=T?g({iY&d$uhmv%iR3$m60$j{P zS%E_*lwc1W69b_20sf=qJj1<&Odr@`kG2(gdNDusb6@reH#7AX5%XIR)L&p$O`=bo z63>}k&|3PW2R!-LL zZB%ELT15VAnL$>1XOboQ%q)4u*NbNIVM39_j*yqdp=LIyvJ454&cxZ&TMunOe`xD zQzWJoJwj!&c*!-ead3D-WE*i2!N zc)vA*DCasHL|`2_>@?6d#<{B3|fZSc+~U;K+mt z2!6t`8=d7;p^@G;MpU%aAYv~Q)34P+408pMg-r-A&KDZTL>z9xe^~--98)y&tlN|# z?8s>~4{9~1SYu?;2(sQsE>?M8C5_S={~}jg4P>wVOXA^6SVb~ zY84#`Spji4>8=7TiA{3ZBPCY&5!El3{mV946ktciRcm3$z6!JdM3#98LwL6cneQe7 zv8D(c*h=|B0f*;6M1oiNYGM&2X%dvnJ}{Nlc~mlTV#e!H0~k8!Z>wCZGv zSCAbhCBSCjN*N79ZIHO{dXhpA{sJP*O=qNtoJ?)AoEUR4$ayj%pvsjgM1{&$EnWC_ zpGA3^&v`ebcb!Q(M-q;YWAG!4c2jC&UyUKf9yCrU!*VN7NeGXhxz(3d<|dq0B3t5= zcsYC4V>5tEA7V&}e?@S1nOppBv+CAcq-Ua^LB!$8n%PLx2$9lBE0~x0- zjfjHVXryIUo2W!uu5-Ath*|0O82pwFFI1>}t22%U>~aDPPHMp+p7|*J1&;%|MzHyk z`x9&GGd8zQy;8_E1m}sXXkEGbqbnjVDwqR7do6mFw80xVOq;0eu=PHJGd?&z%7GI8 zX{Lu$e7tmNcH4`O+a;W2sl%g(2lAlhb2K{TXetuz`gIv>aKZKJ z$jq8M{L0zFv}3v=ig*yF5L($9n2p(0-$hbK71r-OP;=jm{r4O9PZI&h&Q7oxaq>w8{( zvlzR;(-*SIoJH`H@ai4iTK>?vD8PA3UAQ~Vbwd$!QVB4=;CcQVsY$wMc+wj;8PD#I zCoj%XB40lPaL+V!2~3oFcwD@l_>`F<>Lh^Kp%`j2(3NzWY3XLTXH zOCkb>c{YL)aF>c=ONjNPQvIslsU<;DIPUqy z$z9(kcA3x~wsHqVCwGEC_`dJM`|mUBmyB+OM5ujy31Shd>hY;yq^r8$fzlz`n6IzX zTS8~jnnUMDSv$X&^s;dSW`^p{+W_KUg@FVA86=1>p~8g=%L%d|sGLHB6cGv%r104@f)pzv zJjgJl$dM#VnmmazrOJiR$aGr&nXseCS`f><>qI7Jg^nN@9?ZFMVa1$ef)=FMQm4$M z6`u9v`0r)TcmIrbdiHW)r>hCCX01Tf<<_ejw_^QDGHgzsZ2#RN0QW&GeFiOHP?8fZ z%)Na3`uz(yu;9Uj3mZN>(&I!ywFD+)OtPGXS}|)0QXKN>QNj*uPNazQT|&tc5w7J4 zH8tXrm{+_0+tMgZnJ1GL-AP+$RIsu?8ni9DBk-jKbE2NCyX95Q1 z&_$*B##3krbw(LN8nuT1nP%_>IA4VW-PGGj?>I-HNb2DxAx9hC=2KPQA!wp<6)|Pp zK+IJ@m18dba}#r&Z0MkLIH|>BSe;}SQdZST2UK|fl;a0ndj#?TT<7$GK?I5TXknFB zUWsLvSZ-)vb0iw3-&3_TI1+yd(NqpE+uyC(4;(QN%IVqM{#3^`mGu-UQu_V)e-BbR9j1Xi0;H7Xe)QoQlgX zN=5+4a*O&2Ypk-)+NXjFVThQ2AXYGDNoEp68g9FOw&!MTJ`@qL5FU9^vICJ8=&;7N z7p;xVx?eFr`=XQYTBtOS? zcm^oZMQ~bs8Ecw_cv!V~RtR&S=8CLgSSJ^&@Ry$@rB`+*o~t5^FBTM(yO72UR=wTQ z`yrm8rsQcw5BTxT1AUy@$^#Yz638t=hm~Q(W}l5V#8@#*9T{x@_0VMhXiQSCqv>j` zN6YS2va%p4R`Q-aNyb;rFzF;NNx8`dD1X;bXFNFzZL zHz$=#o!;pGqQ2cCShh;$R-o!!-dNB_*AQ9I>Zd-wop$fO|6XBi3}F>6XSZJzsND(S zefefD<6KgK7C?j%qt7#26Hki%*s;kdHEt5(y15Mfd*CykRO=~m`=v~VPA4u=#HOz~ z`;orr6pSu9mDT7?Wo0i-p^hca=~C$alDS$upkSw(-2);pJ&X}ee!#1s1uxj1iv5Xq zBx8($YDTxmaEw^enwCl26t`qS>szBiR+`$<6206>B`kDJw9aQ39GY)L>RaFHtmh(; z7-UD}`U(8T=BS@EMV9R0gJ!`9{4ey zp&}Wzx4+8K4?#_Ho8}%@5u7xIi7xt4yR?H9DHabfIKmu&q9_RHTRIx0Ipe@V+(F@wLI_njc4T}iLJtS zv|FxbE3;Gyx4`q8b;WO#c#0g+cJ)L#itdWLX-ZJk;vB&c6Psl4LW5pm0j~u!mgLl? 
zKmU2lT&{#dyWAx~)cBuZR_-9ts$*#$gc`~G(jawe=A7gRvl$8#dh@)MKueN8D_s-+ zpd(2iZF)Gw%u!OMNWo4^HzTB!y3gDwem1Xq2S&=%#z2U|)%Xti&X!TR|nNVGkQC;XQRE1!YW6+vrpphA67X z%uYNP6u0Ra=!I>O7RV+G5g@v3HDU#;wFD#9Yt@Laf*}e>yv5Of_*G7_#FI%6qOSY} zhlhJY3y)YkGn3ecFJZB#Nd}vj!XkFL%(akF!9*E@o++|_sncT_rI60dB^+`87K*wH(ixhHS^x zqcyX+5-O3;AnfaBa~}-hT3*Z{ZM1-3N+uCDYNlkh2!}qxVKM{nO;~*!h{sH8;FEl` zFg4WXb4iO6jUW!3J>hSLekwj3yN#{~`O5o>lGlU~c){ycmrh?LL}%7E!c1;5#Pn9a ziz$z($Er!3tm>Pqy0A{@;733H;lbd-l2|BYj6^!3QN^VgCDAF|Jj?22mhk9~-!-Cc z@|z-X5>daKOpc82IAE0$Sz=fev|!5xb~fu}^qLAeX90)jt#T4Hj_+k-CF(Dyd+N=Q4?VKk zXav0|F`c1(Y7lt3dfCj@iX~Sw%xVsVCX>0YN=ht}=}82`Z+am?PGw?e;ioMb?qdy) z=~ZcavpbJ&HmivVwa8hNhj-4g5%=ZV<3I`|WH6CCwIa%A`&3!q15$#cc4*xG(A^9_ zTdPyDHfnWLx+uXmNiL$<16epts_X|sz|G6*jhiAV6Ze`aoNmKJ+?$te_*2ylogVFs zkw3FFjWXBL{=Voq?@HPe0r+cS!t%vxez+)E``zz2+T~2A8O$I5mg!h6AJ(+ZZbJ&AD;DS*(cx87SQRW!A9#S4GLSK<4Ka1agmN>(51p3(5dPo|fq-#I0FUf)t? zXBKsVv1<4?d_6&I*7tv+gE+m0Gpkolw^1&_@;Vp7L+LX{3idtq!C?CWR-kb>$~I_Z zQ-A!2gR_!L+UFOHcX7;vLY^TK5n*B00&e}_ag>!N(}qV<^(`z^Q5*;#*e4)9*j$iC zBwj@l=*2!>B7T<>ew%@F$#xofrZAo}A~I4*@8?ZR#1JIMr8y^U+yMhaY5wB7Ycj*#u5d zL2G1~6%2MW<`NWX#X~wlFY!eXokL@Kr+=sxR0#I}TXvXpn5c)h=sRX7PnvjbL@0zO z;SdFg8~uO|%@PqZVN4ShZBG$Z-o=Zs;&}{riSC6WjpkP{h+pNGXS-5{w`C*jCpyi6 z6~nf4-{C&PB^%^G4hz*y2o`VM1Vadii}M&UD~A|X$PvpU5Ypr;ljaa$bBq;Hh|f}l z79}!Kr8yN8jSc62UV?V>=O8P%Z-YdJ$U#9k1|HVIXE~)mBm!H{h!AEmNZg_$6_5*r zun#_^ih*W}373C2NRKgjd%lB@xv`J>u@Z$?Ac0T;=n2q^w$DC_e7Y&6yv_!3Vq(gj^W26<2pDT!>w#*(jS zc#Xn?G6|PO2Rue8bdVN|dDM%>jngL* z?#C1`h>>t*Z(r<`)XqNJSNT3BjfC*@cZ1I&%_993!(O@eXm#rCiG`U_j z*%5?e0j4EAOvMlS&=2JnCl$q6AE8uEIEn933qY_DX(ndEr;DqYTq#E}z-O5}kuAPq zb7L4o_a<8c25>Z{loOF5WY7dJKm~`gU<$z;_qT)c*p@Wn9M<8Q@2Mq=X(fx38+P)V z4WW|-A`#)hfDv&ZzKI_cmQ}+U9|-mT5aXkG^0^S)Vn9;(9k%h2KS3_^g)}N6TS=rY z{#Htsau%HF9Sd;@&G?^o z!iW?VSieb>0ZE|pA!XW@hp8o8W3e6!*%dxwj`1fHVW?jl!C#o;6dCGUrN|-OiK028 z3{62rH|2fkd72AHWH*-=Ef=F}+8*{epL29kH%T}frjrX1H)K+7R~4Yrmyf>l5P=X2 zfnaU>2Tip>q6zt&4?0&M(IR4K6cuTeWmqJ!k)7`(8(Yeys~D0df($m&nBwAFK_oti zb2R5UhtmW#Y^tgl)FHCzOdetXVM3~9n848T`(>+vGpY8o9V9dRg~dv|QBYOaMr zY$O9m3=&6z^Qzp3N5MI({%H~o=q9zwr)XpW-k=ZYFsOv_l+D$YLF1r3;z`soCt=7g zA9$h0ah(@hAAo`uh?1!us56S93;;Pm#=oARGYR*t{&jyL*+eU9cv5t#=W4PrfutQ2 zL1!`<#^xbUxm!K85p9GR$Y?k7K2kwkwJV;>Pi z1;L;ailz$lBhU(PYR4Clxg*Xn{|aUbs?u?ykW#fx10$vLv2D4AFM6_RyNWVavP0+} z!6ID;il_z|f-x&m{t+k`wno&|jI@-pS9MkV_!m%Cu_UFf)d*{9k$dh#IUKRH3!9ab zx)!`4DUNYtg4tk1@doCgo^jMfU)wg?iHafHw${@qRiw75yDgt;5)_w0y$KPemnYoE zM-<3;m1Yr6)wi`&s53VhrPpl3g}5WJBlO~i!r@=}_-rPEYf6!!lnbq38akxkS7hsGpOT8IE57YP6Mm^#6G3VmQ;C&>Om~tSdTVJxh;AhHS%uqmLEB^y zs)dOwtlub|$$KHmyBt({|Dk-QFdL#eDVh=Xdm;5wSVQEo0JmiShdM}>nrJF-0Pr4?g5>=H4AC>bnYHl?6`bDpNU9pJ~2yCGv^ zM6fUGDtIi#iTrQ)+QAV~z60Swjqwq&8nAS05U=ZP(pbMrseF`FUyO#pR4FsuV!YzV zQLA>qZoCtkq%ePE|HtN8B(2rCMVB0%{xM6*NcPYeZKz=$l%LdKQMTwE)K4JLOn@*F(#xI5ureaH~e&1qAZ3RT8*gtv7ur`O}0u2v%y8!Zw09mR_yorp74jfSgSS8yyET3u3m z{b*4kchRw0*}0YIa-pMAE%qFpegxBMop?1}VbKMBic!A*U||BmF~O)|ilImKjM))0 zCLTj^*j338Sl0&%rxJp{Og7BPIXsO0G<_Bzkgd^@4c(TK%OJfkU43hz+u3WO9ZF#@ z9o1Qy#GMI^+VI)i<=f9fT^Aml)E$$2hgm4NEz)SCF*QvS_94wijk6M@Wz@qwt?Hr6 z1$JiguH{TN(^V`H()F?+LT#ir86QlL%(8Y#sV15( z|0{&kE445etqs&Op~VLwQ63R2)P;X7-h-#T&IkKZ35r(rPF_V#CsU zs+x_Bl*mv(GgGVVD1+{A8yiWFsWXrye@njUxY+5l`stT8w--ffj=bB)tkk>$W{n{; zrtQf91<*yP({EnJj65vBT}<{)(8I#x(8lUm#BOIUC=$6ZM9!RmPF0>m){=cAR4WoO z74Seiu3vGTbDW`jvfShu5|Bx^k~8q(-dy9p&odqBUPr~6^|lTu5FR5L-YxI*u1&oH z8MuH8rzR|-cAp0o*s>klJz?Xe?$d}`@iD{ehjFO*tq=ml*g#_JmD|ALOjVNN|Cy2! 
zU)(t%pS2-b0eeL<#~eRnO{1ps3`H>H9MszyZ%Oj*X7W;n@+v+|onF2eA>7AU5ajhM z>j~~7uO+NKHyb!xo5o(0%AH=ecJnzk%^o#W{X7`ktJG!9*TR}t> z;@>k=p^-FYBPbE=j{mCS=tSBL$5SQcbzDhCv6}5Fg9dJ~#@3$A4iIF>|5PAY(BMIY z2^B76*wEoah!G`Dq*&47MT{9AmJ?*5mO+jl3Dy!>;onJp`UsvpS@0i6LFEdu0Q43$EPT_iy0A zg%5Ao5pBVgO1(){%v{j%7bgJ*j##Vq22Z(=(D@>8mzC@PD1P-$KI(BG1nq< zXtD!)yX>k?IvK4lz7%_Gv(+RdEi%VeYt6+1Ei3Ao+*qsaHi7&~@HW*1^zla^gA`Jt zNAcymZ~BNterr@PADqFD=I7ek{t3k_C$ipsrFjJD!}Eu zvVcFbt_(9Fk~-4PNy<7DPR%*T#3)1=161gp9Sb6ZHVGrVk&KfZJE^mON|OyT4IAQ6 z(722O6GhPg!|KF=I5ZT+fmoZZM~5_BaWfoI!!b3)Q1x?G|5|IcwW2z$5-TPtc@^`d ze)Q?*sv@PlPFKo4!pS5kt#V@H7@kn3yQnrrd`XX_)ZGxzLxC6v@G%vHH#)5H_nW}gx&q@2vH*1)i;Wfx?M z%ACz3_Uv*uD6m>02~L9Agvcqg*3vV$qD-rp=RxCj`Js9j?5W|T_(j;H3L{>P(uyvf zmnp683XR4RKcjBJvHNn6#<^IFG0>heY&F5%T%-DL|G)#+2ubC971>*y@%yJAesHNN zB<~XU7N|F`OK-~hwCn0ip#W z5o1EJ=ly*2(I>pu$Cul5+J1&LNhWXf@n@tryUnvF$SdFJ%`g$|Cr<;@^lp|uh5!vc z+3AELx+O3CMeArhDc8)-h7gkAf-FL*-L!yo7ssRqfd$FUq44IA9NlJkjq;j{l2<&| zm}WtdF%{W1#5w5|EI@1Pc zo)i;*WDy;28d;L~=ep}u?oAwt7EnGy!G)-WPvCi?LdtZ;^9%)cT+xzEdUBQvmZbvq zGsy(G1UlTkg-~}xAciUyy@Q-ah4-2e3lEj3#6(PltEryb;AOTPuCXO+Gg0!)=MsX1 za66tNjoqME6pVGoLJJ~f8(sNI;6%}VNefqh-qMe7aK~9G>Ec_85)`i3NsP6y8A~{r zMnSO=kWS1N9J$31qeaeGNTW#};lmSt@IxkVg3_jxJNA>g|kRl#A zHA5n;9&t^5YgNPy3Q@GCH6kJD7F(tBx-jjmB)k+Up*1ODLw6m;T7V2JO^8b?*DVXauS;_3 zBNv$iUCy$EM#$C8A%hB`g{D(QMO9v-NOQNv@K8g(g=$o%N>#VytVabkjBP@R5C*A< zc`9UQE9J=BR2CPxBTj3v@W)*DJ=d4wW!ZF1V#Uct^E~OzBjS+>{;JwfYuW2@~R`g<+6~!nvpP9@IK{PSVWNd05CgR?v z-Dhm4Ph`6KbLQYGIfKlM$0Friu=g_$L1a)zZa|BlQaqc|p!o+vV**`_%HCF-m;4O%?z4Q5UJQ{8Mfi6%$uN_%Qt zG+M6oy)!|N=SF^@djn1pK*%wjCi*{7>k4t`HaJYC(~>R{r$Hw4ET0`Ns~;Og!Knc` zYH=IqxFz^gf&puc#^>OJDU*$B-Xz73EgHDZFkxh2wtfeqp}{cxpcE3W$7}w0s@?pp zFjX9(b8b@??{9J0)JHg$GuYR8%PH;iXq~D;xC4?mu4)e5Tqx1$=!j~k%ctqN zG@SyRL1GA1%N*3n9E2k&2811UbF@$Ey=fs3^n116X^3Fki+Fk@5Hl!LVVKgGyF-bM zP+_nJyF0Uy7M;1i5lps~fQ)71s!%c?%qWQFSq%#TzSJS7ze7JDv=#L`v5L|X$O@(f zv^R6Zu8!~q&pVdYE0?F3BVOqrVhX1EC?HI0!M#JPg7c{L;X9Cmt`sw|c6qZB?5Ii>X0G&i#UV` zqL4ZB{|iD%B#t5Mm0t-EJSv=A;T)9VhjW82e~1NgyRITTh{5s*wmONNNIk@%kB|#8 zo@q6P7&7h

    TT*5M!u3Pal3Itnp4NsOTkjJP8DwbDX95kt1q7>wO83?u5T*h0DG zqYZk)AqYzi60D0j+Z}SEMoNi|1G%*~Q$v!%mq|RwAaSe7(~{7`nS-bkCftb>(>$U` zj=?&PFNvwJ!=GcJC}V26pu#JNo17-HGWf_69a{+Jc*oi~wQ9nzof@_q*)t$q#$jBD zRKh`Z+8w!bp@iYM?GusctC13Vl^C-|Vnai16e3h9Jp5oHFd2;=fioYXNN7SL5wuBk z|J=#8P%dL>t5tL^LYpqVnkA#CuKU3*3%D`->yqc-5>vFVmEpt!Tq7-vLi38nNKBgP z05zyQh?L-mx0)Z3xU9(nj(sYi64FV?d&7&wNQ`+f6)C6I`oJD?t-7!zx`{rKq?;Z| z4GRIpt28V!JBr=0jj#wIcT_5~LX-t*#PC^*&tOZQtV~?#G&95_c5y$$F^*!n45ZA( zaOf^56uB=#IQ(m_FKN2S^b!I*t|TiQtt*Iyw8!O;sRZ0zyu@q zOdWv?I0yAJUE<2Gye9T3vI(pVs?5rBET}Mxy11Mi*YF#+>9S^|kPCgxSDB=RSU7;u zNU|s>d=i>ydq%Fp4-~{sniR7hLYN;CEB)#la;(wtjG`NDu7)TyvJe>`|82#S@CSY1 zht{b8bm+XCtHwL(qW&2p0{ja~sM33cQZ;JDlT0gPnhqqfE13~cRCJ#si%3@_9yc{q zhvP>1(!gSz7_&)60n0|PDG+Q_n7i}P8WApp2$9gZ)niPi4#d+xwa(q?nnbiq;!>rJ zE7YjTP($rjQ=Gss{LBFH2Tx4JTvR&N8Mc&xu75xW?@HHA(TXW!nHt+MO;NfX8&wF( z$CUI^^g*{N^Un$h7SH<#nm96Eq^;}nA?HNLwKFWqD54t5JC^X(-5M==5tYp-)}H#k z2RW5H6BtYxP9tg&Z~QF{;nN01o)Rj_6KvQS2{v!-)=I=5b5at0|L_LQ;*R=YF?uwT zC<8cBn2O5rVbG6<914%_fDYJDDCO0 zhD;{w6UwL)oS`)%E-A{H$&%G!AGCCuFR{SI1+W<{Agl?nnR?iqUGxy&2XI zfzwve!KApew5U72tvc5a3-95*~|MfIaWeag3%Fnwz)5Khw z@Q3bdPgP04gYC+zoGdI61X+o!*le=HT3E6D8!2^9)Y*@Zz*<+G%}korilxkM6-J2t zT^XezW78T89X!1~Heu!15ZPdk-3+t!$ktGeMu|3VY*FzrB@ey1V_ZE`@i+}WzO@w|GX)9c?tDaN$bFzj=hZ_~GHE|1?dix{{$AXhCxq_MEzTGYjy-E+|Qt z`|`1GA~n~Q$RLiHS}b5#*4he^X~$wbx>4zbfaaSe#xbo^W`^i%W{W-ROJXi&A1tD> z+Zqyv%-JxNlignH&9fJd-aO?%mK(niMQg0iw(+EhQB96+21wg!4;GssdK*mS2<0PI zI?CG_ECvEHu#Y6u%r)sIEAgf$-a?(z=Y>UQcZN6P$XcluXjfh-FUZ2Ze$yga*{p<6 zUUk7d{a$QMBoKaM9Qi%!*g+Bw;ajG-FmbrJ2I;xJ3X_Y`x~7YirZ)ynZ9YTN%bYS< zHW~52T$|7(eh|e@t3LbyqbuxY%2G3$|9J->@E@^2I&iksp1YH?0Ubc}S3l9aimsD0 zis~Gi-vN4uClGDsR;12>2wmRgG&UQT#f?FU+hgTm4lX+bK~Arl8i(s;k3JFDw#g3+ z?#ZM^-K}QXmSN^jtI)knw2(($xng7q6ZWMeo9ZQ1QKQIuR8WKB%z1+GCO3oKFBap6 zKwz#xe%iDI;4`XdCJPHm1CSq=sd4~<@xIayH;7sD7_3bl#&v#$s@trL{!*d`VKL#0f&&J4tbFg>a} z4&7r!r5RYyo828JNCHIVkQp7%|K4XINndXw2}S4xgjDn1cCi2b@4omC{??S+u2tx% zTfdoYgkaY0>v9PPp^z5oABxdTcke*CRko(|{fvo8U({KmiCSk4gP6a!LWh39FdwRSNl#=>h5oQJn9C|0npr%`z<;pG4{tNCb~Mvuy`^5gsIU$DH1UBFGlCm@w{~ z&=3*cs1UraSjQvyS0ZJWw%rqZ$FJ1&3Lw{Wv&}>E-Js&A7+eX1HLr`h;$*)M0>LZ$ zOP4_|dE7;GO3b-b6i_O*m_T|7GigYU00J+lU8+YAQ8)Ar!I8HmAwhvebT)9xWFFOe z(k;(OcyV2y*m0Cv=_-op+2O|`}c3X!% zBvfC;@SCI1S$RZYJJ)Wy4~~cQI+>v(q?^)~w}`uddFD6^GAP}eAAPsnYKDN&w;kSj zfyA(_Xoz4|9V~i;|7PRMPjhOGL<8$agb{thG{(_yqmeJ~8vgM15()s#4l4BZsvX7p zU5LnHbD~BgfT$pFAi)ad1X-|9uAsnL77*4N_;2DrGGqi!?ANb?KS2l&7NjV0qC}Au zMv^?4k|E2ME?>4hD5%`TgBEI)OIV>6!iOqX!lVdu=u3(VYyEQ=(V)k22bU6!Ds?K= zs#dRJefP}bvydi-zUyRh?8OT2DwM784CuhEWXU``i`8XVv12=}70S}BMY$*i@^vYc z>(Gf^(ZVgL^{~{De8tM0D|s^I%9bxT<{h2j9p}O!W(S@ z9VpmD^7Td3f_TxVl3Eo~C6YyUwWpqkAA%Snh#)m&TtuKnbr3(p*+g7VUj3zCdDk7s zSp^C$_!2}Ou~eK#5!EP^Q2vBBP;R@ihFo&XkwhS6dcow-OcU{R-D|c9qMK@k2`S@T zP?m>OKl%hB2qj+dD59EcdT3x`Fy~%vQ#nZpl$Tdgv>Q{YqT`uE zp?TV^w^P~0n65-UR^g!kbj8)3GXAOOhIsjBAfWDYb*`6Tk%gCn3pRQvLliYC?n`dj zHxY)4Xum&OIg~&CaF$R+;q>$>as)}cpluq1Jd#o`g_;vopM6BJ zQ?kO!kT+$%EGtrRD%qs4%QaUS&jd~9qLs?VDu@@9B;;Mv3Wtm{Rzk(5R6qQ5G?Ppb z|6`qXe{ee85`X5Z#Gil6F|^pc^v(z9fJqV*pr9o~$(O(ihAWj@cgh=?Z(Ajpp-A{f zn6==8cPmqkpTYbRX#;hpTOhGm8#CM`jXZJA3!cmnirW@sQ{*gH5G0r=QYq`_mlumD zbA(c>GkiZ9Nh40u#d=dM%}Ti$iT{G>|>VCRF6~DA%dAs>Y z^JZ)-i77+lR?Z5NITsy=Cj^v<#|om5-F<9if)L4dxK%8+F)4MwTM%<{#gw9n|EhMi zkW!UYQ!6;Zt!tJd3A7x73x5F5YH5-m4MQZBx%sXom@*eizGjvT0x^iJGmD$vwy%`* zD`7tgm{b~t5_}z|FTYvggXZP08e%bv5+mT@h?6*%Z0;dTV~SpWWs(7=aEK7O7PmZz zK%T6uAUqOX>5jFK3Dyxwsaw(!hqXZtDo14j|_NQ_?M>U_Q#V#1R`AmrLQ#V1#oatqDaVBKCYB;LT90mVXhc0B%TG6 zmfWQ!@#iAk#1UvssQ@liGeU|XrDV2}6{@(V$zvVPm(x+%OIrB3q}-?}|FSvJM{vZt 
zQD*Ru+8YT(bn*{lcC07I@f=GCHN-!X#33i~2hC8#%X_9Lhb-CQhPIcde2op9`fS_T zHq@t|)yI8IL)V6;Vm>HQgg*UDml9jT8Ytp3qw<56%QW{zmens1;mBBMadLgma>K8OwCyE8soES>@n~msa(&E~$`9fOH+y)ETSL(Fh_7@!ZP@NT7GN z?knhef{DVU(W_zweJCWCoO8quMzX*2N}Xw6v+fU7|YmFc*-h~S%+ld912KUCQ~UjWRX3p|BljRbyM2d<|Iq~ zXeP%AQ~+vcXNQ$eb!sQZq^{GBMzYpZp-RQkv!`BbI}{Fm&$Vs)8iKsK7vAoP zPhj~ICs&zC_^7hC3;hR&sA3mJt|qNtGKqhlYuoF3ilauNq(NX7G}=yfDc*dK;ZSv1 z4Gu+Pht%D~W=Ir;4NzILbcM$HT5Os_7+h{>H9ol6R2v)kyPKVh!S-n-0RWZtq zR3{dy9uT_{X2@NROB4v1a$T+ktGfW@;ew{EJ(}8Hax)c_RvL6FV}b5!_(n_G7^tES z;@7)mJmDK(=5P=+(g-~RUX<-7R}41J#1JZ##@W=sCzJ{_|5al$G43SDq}}mxFKA@; ziPnJf{YPSTl3L3#gs1r>R_SbV*{vu-vN)D7gakIw0^McB8dk7C%!alQi`AkREvt?H zhFgcaby_5GA8oYCUrM=h&5F*W+I7=OgkQnpF|svBn4HvT{N2;y4Bm(%eorAif@W-Y+Wul zl^O1{Nwzi>C%X1QMN|p056vLC@;cj=(edXDSEWr$>_>+L^P7)66GbwP*5O_w%xZ!V z;{4+l1mJ@o4jDS{EVntJ#d3cUNJ*LC1V_tKb0gnf|Lzr16W2-!s`cJrx#23ON~4!L%m`oQSe@}z#Vhl`LPq)2 z%k+p0?|r`t{zDuD7`4!Ea`Ohyc(fp2Xd{0N8dsN=;a|xOow1@om61dzE9W)f&RX?^ z#yKs4Ru3sL?mm+TP*GHA)VV_IVT&xx#0&mp$G2{GmM)Ozuz?*`08X+3VkTFc;ki_b z^Y?ojjv(UT10Te}6hB~~nL2GJYtT!@p_10oo-r9sRJv7pk`_H6xpj2lsP4Pp_=Z(} z6j{LTdbO;}k!5d}5jW1z6hC{#yDo`Y`tr&5{|S{OF4j2ei;vQpa+AGRG1Zn3g`T5N zrMz)Gm9FDGbfj*E4-uHHvwpojFy@5d5G`FWA=@)c!^q!4(fM4qbw_vK&wj?oF7BHr zCn??~U1_1-zLS1&vm_T@1z#1_-7ERt*MSV+L=VFS9}%tC{6XOTD9s4z9Wt@pNBr1_ z2p;Tl^5ZFa)0nln@QX@^lZ{U5?mP1z*_9+!0Kz@f*M(i1!Gg z@f8PDxR)1A1+5qun?N6@Wyu+)n|aBK|GbGqN<0LS_=CJXK+=?o9(B#qp^BBn5kxuN z8P1?GnZ(ryN%(bMWh9&+go8e81gKa5C43={@eSoMV4a1MpsZDbbkEu};Yzp-KuMu; zfRZh7)d3|AC6e!CB{xnp^5@I-!J_e8xG> zX_v9AA;GCh8p=^l0b^n*oe`z?NMRb$9uZcBU9=rm+)bUi zqTIEGK@Ok;4x~;>7;SYCX;|a^fE#G6k;C*NVTEM}mSvRP8vzU>MTmn1)LR5#z#iyB zAD9#26=Aqh%((qXut*23!Q3@MRpxyoMEu`#+{D#fOU_{>B`ReA-Cx*c%9Sk!J)(_= z@yStESpb@2TcM(4G?ZNlgj8bU`rIQIdKhLdW+m<7j$I5fy-Gv++h2N18pc;J0o-a@ zmLLe-S~4C&U;qYy13Hw+|I8IfXwYQFd=5mU1_dsJNCeL=0_L>XpsRHw;E@VW7S>^o zVQO9%z7XHI7-4^q#g%OeKIRK*+DU&LXG?62@a2Unl4fa6NMmwmUge|&GDL9j1O7;iT?gh{p`VZjtN-VA2t)Tzaij7%p< zXp)+ahX;Fz&qhg6tja*BCwh(4YoT&(9_JY2A#g(}i0*CbRWrl?*m z8R!&*Kk&mw&9>cG)7!+$qjg_iHy>xRVd1hE@5MS6LX2C z?agO=)@eq;97H~abx;Qfe#>c8pq5n-QS{s`FdhNip%}#iI!Fly@BxoBU-TVbXAp#b z%9CfT6N+vla#GGP<djqBNYQ5U>Mji_l?jBAKBC6Zso;2^=I|Vf<|1WP03V2?WzhzzETe;RR#lwp z7|m07j7coOQRcMF3$}_N=Gs!c6H)c!B8CQ!Yzul;s}^~uCDte>_LHhm=yU;$bWKKJ zG%QQhpMtzt|8IEVp*;u`5^TW^&H+i5eu|s83JwX?NO>{ofn7^NM1UWxgq7OcT$E)P zD#)04_>I}<=xgTa6JK6gI4(s_C<0=7?2V;kXMqKKKw*7+A9wr>*j4OY$W5}s zr}%7#z{tmUzM{0QQpX-G*6h^K%3nF~prmGv&hF2(JgH`skRUAMS_*_6qH7K|(BWl} zA(_X$p5ID9nncK8zk(A=DrS_O3CV_SUU96_t|PqAhi_%ZrOZ+~Q+C$F zGDcYRXf(oDdt7DWYSGDXTgnd3OOa{YT7}^)1(gP!xki9NNMn>VUs-MkYoH4MI2q8U zTO1i}|ILB!e({{OTuqu81L@+};{K|iTyE>lne06*V$>dGRISx2E7n5P))va=rW)UH zt@2jV2K?w8t{;jW6W zUePDWSt0eR3+C3<-f)sV&IP6fWaj$XuUW|VaPQ?|9EEuA0Do-Km@gJR>PgN8^Bqq) z0K)7h;$+lfiCmLtfoRk~SvHB*k5z}#4d8y2$Uy)BC44Xk>(#+h@JlddiS0+V$tPJU z>|HqU12>2QQ|;9D?|X!1S!}E;`b~2bpbux!sALo*A}&$TmM$t*Wi(E@MGkyy#~R_( z|H@d9U<#AXyh^!!7I`97Es#O8psf~bRGazeI`-D{6aeopp(R z#l%ejfg)t`nr#IR9b8#e2)u+Zu;z=1p@(lUawBgN6btX<_{JnhYn-^r2G_FkR?O)g znYd=4WYBDhj4~RRa^!&CPvitmz?|oxGOk^sK%tB@KeKJQ#iF=rdd}!``PQHpQay%UqFGlEdrg0UQuIO$e)3dR%s814~+G#8RuJcEyb2NkULZ4-h zHJ`>f;f!z#Pou<9#7Szv7At@79Mdsk(Db57kw~{hK3;DBv56{0@MUCi6yGBz85(~r z(N!?NV9Z1+X-+5<+m7$ zUS}2aFs@LI+>c7e-^9f&-!&%Nv>Kk;Lc{MD5hl(g$-DCFa%ksu+zbnM-n;RwXBFmR zZyn#@vH?GjD0zwkdQxVf9GW7aUD-$PorJ><1VM|o7KimRePy?jF}z9%|7G(`S^6x` zgdf#m&V-80YcwBS)AeoJ-STj4QaiGZO0{}mN<60`XR@?{U@+WG*H5626+1UHzZ!RO zF-oCs`<*J%P0n9<%qbgLs36u(;Ec-XtURHPzSZt{JDk_m^*+hff2W5kB?w?&#Q}qE z>)bMsUfZDX8&r-ifAcD|K+r`juV=+<8g2gs&Of zIg>tlE+UhGfI9~$QMXEuv|#Vc?~wMgc2X+p1!=1|=~hhrjCD>{x2fK($IR+w_hmMJ 
zmPBv%{>aE08XbJWTiFG<%3)L=SssR&9fHVsWrQMj+j!Y%Zkqy#|3ner$oZ~{~NBD#} z6QAdiXN6Z!I#<~T`T&cJ|9D6*`l4ou`GS{~5wmK|NYc1gaWqHbbSR-uP%780M`fo@ zOsH{YdZ;5U+%;i=6jw^`GhNVlhZs1q?P;45Go-9VjOmxBr^GGev##4+7{=f((yyhr zM*P-saWdRBV^fB{I?cw@y{S;H9KsZYGxpa;9OW7Wj1HkR)PC#_V(@>#vJc7qJ> zHD1NX#pAV`B*4J=p}IR(3K9WbIKT&c3B?ZoWRqyTH*y2~{|b}VMaObyp!>UB$4-Dd zh8KKZAuDscDL)0}ewR&I5j(aMTXPMs?^?x~CzM}MiYIn$@8S5zW2If1wdB0IpmxTG zTfn}4_C!KOXV7ZJpRCPj;mgH*rNFsyPQ$ySAyD z{I#((vzx?LhZZJUab$DpoZp51od$sG@Vl#>xg$v5J6x)h*5Yw1;46b;5 z+xMyzne#pgbN&f1%U$z7BqMeVM2uq88&pN@LxoU6aNvE z$grFs3l?e#{8!FG1wrKy*0LaIph=DdS6YlHa3ji;8%esXc+n)uT7qhkjM;Hu#fL+Q z7BzYlX;P(2nKpGQ@EIAYoeomGcrdC|GM%zMd-n`%*a}|Xy@P1=<;ZTj2L!o*|TZOeutOLS;qvyYV|8MuF!$UD2(of&*-56z1lg@1K?3=#AAT}3uS9`1tFMC5Vic*Cj{ zr;bd6k;*EqEX+E$hLbL#tgf@}9mN*Y|IkFitlKI>4Y%yju?W+wC@R465@@fbZbS3R zJoVhuFu7(Vh_#wPqAx%T%Hfg5nEvUfAC*XAOv!;tt7*KH+(XHNKms|`H9kH46x6lu zT#>&r`_k$<+{lazHZMs%jm+dY0)UO8V%VdrHk}FLmm1UB%TWG_@r_&qO|C+#U{#KRPA*dT{N@u8Rfys=9fc1 z8LQb(aVD8cXoq%A@h+|MX2`F<#~z$;ryzSQ(3t|1$w81rVohN#Bdbi~ZxzbO;u`@3 z!i#4GM<1R~ zal|Q5uo|CE8qlrLp38Yhg`-$;;6p*b5*yI-L0M2PfHV-%$S$JKz~U3}(Jd zU6y7K(~?LWKO(RI2+dq4{~OPN%5rx_UolI-p2rs6i4TD;5(ghJK!68WjWEV4i1HdX zImYxO5HZn7NLs_JlOQQFp~J{$G@>c4jU*=tRAI=QYZ*0ge62_ z5}QZ}nMI(FJq~o5nG{iSGMevc1PI`Z;v9hmGhUg6e16*@K}d$jv#pOW7s6YNglCo7 zF${Mc6Uj;TNXQDj3t&@0QliRt410hHNKtw=`d`a8{nYWP5a4a=Mn$Hj+ z2olO@t&~4I9I|Yr{~txfGLyNy(p1Pbz&vWDk#x$JC9OlM7#3w^etT0e=Orc&F%Upa z(hLWg2`d%V#eb8`;{neFl^W_2oK9IF2C66!fAr%Vd=P{mFaQX{#j=YJVH!sw6OOr@ z@>?lfBrY379*uPbNBUt6C1Mv&gC3}uaq^`L^HRjr@zOWlWQriwmmG(-4mIFe-~+u8 zhzPVoNYji!V|uq6xd>1oz9FEnXa%=}zLX&g+6bUL#sYKZpBO_88 zg?w&M*IEgEruD%u<`uIBqNr78I20V->43*XNLTfCK6r`sC7XOeS_4rFf51l{;9yje zBGA^0&g)d9IwDp31=h?Ow5)th*hxBa4t=!3fwcf-Vn?XdQ6?oP+ZxGhF4q!$CZtCH z?CTJJ`dja|idXsaO1{eXj2QmUUTy;9LvBMdO9s!h7A=u#S&NHj-S=7iVAe>}17CK< zDLj1atgg5ewC_qaxCR*sARe#>0$>0gn*;>4hyuakb{8W_BTs28491pp5FwFuRMm`n zU=!CS|0?e#Ncmt0)?dafz5pZNHwo!m`ObyD-pJm4`Oy!E+2R$VscnB{bz0~Ai@?q? 
z@QGhKMFI#{AiIbF0Uqqm+Y!KiAt_4N4p!Kq^i)S@WC&7W@~w+tZjBmKB$L@}o4h5_ ze8+?i4sF&D^768-7*z{nrnp)i!&bkNnzA^AB8dCB)yPJ^ZJQZg)&&Z8lNe~{Wv9qr zHb(2ZNI9&ec*0!E_=kBH@yFztC0#Ew8r6ppVA-f*S|sxP|kaEiXxnIR}C@*>;F^BNwqgPYCLO$?ylT>~(PcSzbKs@;5(FpW#06zHZzF}wXq;A#NY2lupIt5cDwRWa%Q^`SHFlqCq-`^$gOUBG|7enR zc+a~{XrhI=Tg;ZyM;=G+b0@q*k|~u(L?96Q$l#R0;X0E%St2G8yde{`FQR{bY*1g| z)CD3_;y2|+)n4y~75qiDCk5QJe92}o41DVk1(NY)UFj-76|dO4*oC?CRyMMWfM?4B z!7d^J&e4y|DE9$7GW_$HauoxPNwn7w)g}{PfxQOCP`-#qdJ5c-sDIw7B1{4uGGmQA z$mH}70*NADbcpRpWOY`D_eyI1JgY6t;)9;eNwVnt8U!U;!PoZ7wg~SZp2#W2sw|vo z10zsR_DbpmLp3~YCPJsD{72^A$2DTX8#=8%ph^ad5Sfr{`B;xCTFlmt|1FSUCnX>U z1UKX7*kz3FpcUj}E^6TcY)dH)u>N{T3`xo*j1XLeuP~--r@C%=s&6SW2q>tnY0Qug z0jHwM&?tT*Onz^fR<9vordU|x+6avb|E_gh;{p6&jJnVbttm0C0ywOY59=_Bz9!== z@3MAqFbc55DA5x~Yi)ecDE?v(&&LV*0ynr`VlA`5bU5gGf2n|u-OW+(#{4NJP>UWkr0EWi)WPbDyb(3+zk zPH>tqU>9uh61RhPWaJr>afKWQVF=Ld41+!z5gm)LnAYwn^l%J||IH68@Bojk8m}>x zEa3buq8op$TEKDoEMUSWkKI&kDduMAPB9)6=mtGV2P-cOzf2(`k`hyF=?L$La4W3x zaUmQF z6fH1*&hd3pu@{LhYXspWH6j9DA*A@jAN0W+A^-y@g+6d_App!EjIOsz#K3$A{DLxM zAR`daVS+5ODI@|3%@QuxBH8GVy$-IKkTNiu=q~SZnhcEqVo@U=APY;SD*2%nFyH~S zG6DdBD@TjrzH-1yDm+q;F5|LrSVPRh&27NYGCQ-z^3Yz?|72taX)j@LdQJl1UScC2 zU>hrdQJ6yz5K}8V4K&~GDVWhSho?M9f(_ecE^`w&*|KM1k+5K=em{5wutotWfGgT8uoK|H3$y@fqm_JR&1T2OwaT9 zvlBMlWn5LYRL3S=mlYXRCahS`D!o`;;=nmh~34Ftio`kU($0aKrPw7D`oUbu466 zXIQ0e-F8~wma-CtWFz3}l0*@vV#G8srE=qD?f+_29*Q#yVr&aHPyW_)gECc<5Fu<* znTqpm1Q%c7YVi0{BnE9J>fxLst|i;0w4KUjGM2sn^M`|cJ#sjbFwP@ML4E|xIB zHX#T%j0Xdd134&VGW?3p8QaTj=9Pot#egmEe(vB75HdYNLXPc%B_m)LsznDQ_+|Jw zK|%Qs2bhpg7-*QSl;bi^UWJSws$5>kEo}D@=_T;8r=w!w4zQ6Ysx&$GTe)8D$Sz_~7saENfsiwF?2_#h5PmN+lLXYTm@s0woCG2l6A$|0@dqp|E$OzN{o3`LJ?1DOv3paq(vJD6uA)xT8aFI=>dmBZ(3xx=~< zzl!RU?)il&Wl$~;qsh6WYg(KMI=%p`z0C7^8&sOynXc`?;0ayw_VJhf-8)?H>*p&6Wn zoZ#_?+Z(|{9B22JboM7(g|8pmndz zMwlW9TeX{9xfDAiy<>lu+sfM9$OZJo+tb#5_pk(>;c5ztH?sbllAc zolMoc#7~7QNMf$rCdN!67G8W%#89RA93*VP8+cB&^)$^{q>BmN(}&d1D|57r0W zWeg9hR;eQYdrNx)JPS8p?vB_|f;SzP^T+6vTm;i3`5q-=e*p*`A} z)k_>1f;S=@`2Qg<1mOt|b-kduFT-QPj9p%8f!U|YdkGxf*PY<$P?89=VK$=I=EP7= z{2}o-Yr?}vyfN!;1Sph?Nd&&&H-1E;dwSz`HG~=+o}qg}!cET~{?z5(_g!k+DR zn_WbzIiBWKxq3%c?E11Afq)Vi+o1RIDafO+U!ln}EF8Q+Eh~UJ38YE{J;`bw>LKuD zrJIvO_VTVu$d6*e;6uL@yZUwkK4Kvbdt!aL!(U98w~aZ}qaHw~y=$e_BoxPT$iNGp zU`=29e(Xsgy#88n0UfMH9NNm2HdrtQ9@LAO?E@5mjPz+xLSYbRi0mH3GjgqdYq9GiremI01N9y^#g!8E&DRZU^%lLL8nY zg*3g~?>Cr4pY(&@`cka5>9Qvdq8shW0=^w|@LhC6M6v4wcY>K8XngC?y!Xk`hlHQ| zrJZEleIelE&+@Aoyay$Km_Y;riz9gQXAHQMxoGDWDu7bmd5F<*I_>K&jXA#9=uu#jPI06wmPL$}!Lb+NUZ$0RUa-_?b zFk}9kIgzHrng3$y+{v@2&!0ep3LQ$csL`WHlPX=xw5ijlP@_tnO4a4Nch4?Lv zzyDZ)1jfR}kCR1PVb9VR9up*stNkh!ZPb z%($`R$B-jSPMWn-r-oYtyR=ZDoS;Dc`~~9IuM?RHXp5e8$>)K|x3}EJtZSimEk3sd z$ySUL=CjL43r`+Sytwh>$dfBy&b(>Xtfw=-tR<-WNehB->1!wG$z|a_D@JDx=ye4? 
zCHuzATR}Ho0=s#GhiDpe^ZWSo>)+46zyJS3S@zd_*r~Ud1-KMrK_}1jCX{&gjr36j zf_Oy0F6Ox;S6tKq(%M||X=YV~5Typ-iTjy|;)*P`=;DhL{YGL14oXzZZsl;MoBtxu zV6|3I;&q}ET-ABR$BhO`Bv*S6@PPpVD>Vk#P%6S`Wn@)espXbjcF9;}GtTGGjVx8i zmqV2emF7;kEm;9v)Qz;HAb>>R$A^y9=;fY#_UY%JfLa~joDt3WKu9Wq7@~-QcIxS;poS`{R03vJ=tyWi^CM5tt*X;Ukyh}LIBxFMksz1W zlIdepGIr{yzy>Squ*4pwDxsXlWYM5KwF(qmIrb$EAMJ@GmuI=QRw1vyN~LG9;D#&i zxa8_ok&vbW#q3Vvx!Th$w;=f@A30K2OOBU%_`tT@CVSYoY|32?y}XRPtY8$Y*{fW1m&+IT|EN+|(==q7Ol51?GJl-+VH^UO5Y z{HMoCp#@lBBOe7*wdOl4`czvkLEWur>rRxKX{_<=zL*iEK1QfIR*_~3-EeO1*F z{q37pZ_};RtQg)2Wqe%*UijshXRh(N>8iV^M2s&57Xe^Xm?XK1Yp(k0th1=7yEnzG zZ0JngB91s2r{4PRy!XzWjGY6es;U+Vsk^|k3zqD$zBlju^N(3hBmdkwoweHZ)DL`c z_NtK#{rBL9@6^G_3a{$aHb2KbQOt|){`>HoO?&L?KXh)7FC8B)@bUNW|Nn-0T~Wky zk@@HkT=fB<0vE_YrSLCt^>fJf{_()rb&L$aX`lu-$ibK-P&zW%**{E!K8`7Bepn-e z);h?-7KZO|E6m@{Xmk~~Wz01Ai^vBnP(c^^@Q1zIAx}j3HpD?Ng1}mn2USS57y|K$ zPy|;GLny+)7%+LFlHNp&XcQqv@rz(A78hk$#hIw;fZ{rr6P=U6FuL)LqVgiL%J`4s zH3*5}!XX^@$Vb9WQ95)aUMK#BMT6O}kBCfU&He}xn$YndRsR#t44Z?-q_j(sn9L;2 z@{|M{l!z%IDrZScVlnb<*n?#!lS4$r{N|9g z45lyv#YqS@GL%Q0o-&D3N#|T-jaA{?FsDh)XetU_F0n~32jt0a;;|}aBV`UDBh6}> z^PEiqCM&~9#o;uQDJO&`I_F7G{HSt{-xOEzZ^q=BpTnNbEE=AS%JipDETt>;S5BB_u9!egD*qhESWzH3il^g(s!MLE)T(|_ zMMix{1*ICUnUK$_VD)FIkeSPvosFyDnh8?F%GPRzbDKA%s7M1Q(6-vOnx3e#J$ATVN<0{CLG4V*eqCTYLk)wXwj@_RHV~ALeJ}h=o4- z!G~4^q872>2Rh_sx62Z=KdW`Hi05gyTHwMT6_9PwP-dQ4QKW|0oNhFgIN}=D)5USp zh;I0{3(AsCybtlBjG+RZv*gsqNJi&^@A3c)-~|Khr7V!O+C~fJ*C|D|4D6E3pm(!aeclpibk}X88%e85lc8df}O^C?}9scMN&VY{Ev>pN% zxP0W!Tf|@0JZs;+G75uheq|K{P3fq?%g(p^@lPV#Xh#ESocZacn=4J~P~mO8e3&v+ z(!3>Dj+%_6j;GO4>s!1A)+a5vYgYUE*#Gg^)PQ|!Tw77p zo*v?0$cpT0Ysq6eyIQb=rOioywJBFLwzbDi}=uyNT^BbqoCAK_MYV)Mrr};+t3tcTl{8 z2l0+G=7R#XDB`}&v5a@S0-RRL#y!q*kAnJRBrUngpUr5(oV?{YuZrdN%&Jwb+)E4B zt5*!4}nsgwQflHs{gj41bVh5!BMaL@Zw>iVD$uf6RWue#m`9~GMNH02socu3t& z_{K-(+gHXrm%=V~$6G#W)0O<>*&g`Idp@xa2fDuh{Q1(GlHHRw4Bt)9dR_NKK<>8t z=r6t$v$y{9%|uJ=WxvkQQ{?5hXMG_Nr};gh-uJ;zzEkvse9+%l`Os&xW{R(+=STng z#z7zTtB?KeBPRFW^S=0{JABOSo@OK;zxt_i{+VTk5@5d9`sdG-?FY*kPXI#1bVL9C zQ~h#C;u%XaHvU(DjOTBX1rb^#Fo%+0{8B3h*nj!;EelvKIbs{wAsf+>7!??Sz7}~R zLRI8KCMt6)=0Sjd(SaWLcK@rVg5rW5)nOYI5DtA~D~F*@EBJPdhC^rvER=GA<)9C- zP=oDpI#O{+tCxc~mwqYKgTdk^HzEk$r5hrVBaR^;IH-gH_ZyZ$B20)e8`w1>Q5))k zf^4KUsbX$g7;q#sKSF{T_V*$`NQRw3gb>&ioWz4`cy)`mbQr{T8sjDqGbS3CA@qhY z?YD;vR(C(~OC%MCFmi&jp&MoRGF0(}ir8g|s1p+BG@fx2lvsxks3}viiJ6FABIpx1 zaTU$yCoS<07f6V6VTe#ubo6Q2&tFC3E;1gJ^?TwknJ$jShBjJ5fdFvQSO~g*8Yf4iSdnXn9%4 zO`jxqqY{Qxcp#P7j+gh1$y8RgjJZ1yFrTs>3u=hih&}D z+wzN_=t>P4e7Sgyeo~QQD2fkZhOT#!IX8^>1d*PSjeQY?4{?W;Cz2j#idJ!C=U9fm zScpdGl2)gW=HhoD$uack7W@zhFj$W*iIdmIMH+b}V?&1FFob-ejzwvdQVA-8n1Lvf zfd*odQ|VwjsXAeJC^1MmTRC!*9si`Lm-E+`f?1gN(Updom|IepirJWaf_RP@nUXn~!-SZWd71q2j%|mT zoQWUeC_!>{kexZ2J5iPt!IN4^nxko&S|^jL`I@j9o3c5Zv{{?Bd7HSIo3+U_wy2xF zNfkK5n7=ukQ$aqd=_iCqoE?{xdy<^WNopr?FXeEPZzGz|nVW|KW@&ky+_@CdLMd3e zSKT?DK2a~txFqZtmE^gel>#C~LWo&uo$MK#?|Gifg<>E%pZX~gXc>bJAzvE?X8Spy zBOx!>p&j1P4=6z$3`w8^IyO0ypb&u)^68+jxs(~w5(@;G7V20nAs3XApB&da!r7sQ zWhN;p7b~1Gj>kzZmT973rJV^zovQ?+>q(=a`9i>FqYUPpg~OUW`dA#gI|3CD001HR z1O*BJ6#y&%01g1L11SRl2>$>p2pmYTpuvL(6DnNDu%W|;5F<*QNU@^Dix@L%+~`o8 zIF2AgiX2I@q{)*gQ>t9avZc$HFk{M`NwcO)aeQ(TxMj1a&!0ep3LQ$csL`WHlPX5;%w!;v#<-pskP=g%z%OT8=*Gw9Q( zQ>$LhI)5kvH+o#>+3MTFi~k=_zC1>qJ8|9uZ@#_z_wchLm+q8({QLOx>t_vB`r(}epOSA!st5ut<^hA85QBtGQSQ~rtc4u~bT z=;Dha;sxW3G}dV2jX3706h}kw_+~7_yMJi{WFWR_`WN?2C#B|)7;c;=gM#;H-7YYz0xGjiss=bj6F=_QnW z1}f;FfBre>p@_z5rJ7hSn&_jDX1S=JfJSQRrAbcOXr-8T>gkV~YD&guZ@8vj~n1*F1i>#YS&Do~oZ_UfyCyspX-ufG;+tZKn>nvtQ$HtVcsse&}? 
zv(#40*t8o7Ywfn&cI)l8;D#&ixN(I!Cb{UQo7gjC;HmDr@b+bHy!6&<@4fiutM9&u zYRm7x00(8|iUAjFa7|XS>+Zn}H@wn2ohTttntPickJ=UAg4uaK?H|v zGQcAb^bQ%Foa{2c{t5)K#4p!u^UXNttn`a@IesTJc;S?XR9A9*;QgKjfiE<71&a%= zY~`ej^psthJEu3`jU!}%+@1>(ck89sj#5s_Wo{X9R)5Vn=L~rrxb_r1(Cfj={x`E z%<}hiQ`g^Nn$K1X>fMy#)8UOiq+!h zlmDf!Z-4us@v=9+fgq4^227g>izcjEEd+vMqF~{qV;HJ9MT1{^9S3dlza6@6gxJ&G z)&G2_Hxmv}Aj3n5(hAb5iL6LK=aM1Qj58J9$fzhiG~Vk(_!9FqBys>e2>j$nMB1e= zg$;4xL&6lnc1dw-7wivVj*>+(LXL|qf#3R|$HB-g5Fv{rViFBfLL;UGf)NYa8=vNs z%qiz7{R_(f3^=#{bx@CCJD=X<*1O0R(s6u4qYw-7M}$nSA@f?~*a)MXMh*pmw2Ryz zU!phjolkmXd>snWCQ5-&@`wfrqZU7SN>zRjaRO0KP+*ym$bk-+D#>Knh8VcTA&DO10qBK{2vKn_6`uj8k9WrunsdjxeVQZKy^9c|+qtZl`C%+BGpSN_#3q zn1f^%r#^Yry3S;QW&<7kI0w0|f)jWxU8z_xvR9{UbDEwU;5AQ5M)@gJpW!S=p>`EG zP6gJkn0?9FICs5__LZk~^yLoAITAhAA_5sis0zuLL%11KgzNhU)<$NfrT;oLAz^}G zW{ruhi?lX`|I;NBWfxpDa+I_ofnPZyfLd_+GJAo1fCc0(lABr~-x33YjmYHO(AONA02>9V^aPyj#V7k&(m2**$o2yC< zT-Qx83L}7PDc}kSwt2esygn?ab~$3y|3v@;5$NYXbK-}SiUV>O*hK_nhv3_)*T1tX zh@$`}Rj;zxrj$ysLqhjCLEr)x{^$oTbQ#Ldg;u@5gq)Qz(pNxO0RID!^oThAcs(SG z!*5<%;6HXekbmI0x_z#cFeMz4-d=fWsnW7W`6`Gz3Bnt(=!ZVS0l1$I6U~jAC80$E z#&Qr8oSlr^F8Cmb2!xo)05<31Rux-Tt0|Xjo(OplO*K_D%D0{LP%Q!x2wa2%9mKWt zrq6t1!H(p5ONI*cbon+5EGhy7D8~bi_%;+vZqJhauOKFCHqN4Vhl;-TtZ5Ck$%b1Y zWBlI}o)EUKelECh^JrT~i6!q|WNr^&u0YXe-D=yJdPam2i5VqgTZ7I~2()Dp^(tj$ zopM&^Cb!N)_2Pd6SX1O}0~G}Mp80CYE|(9| z-gd{WYmh6YxKh@1FxJoqz^MirBGK|W(h@y7h!n4@pbh606Gn(iE2eB2FQ z?2Y*C$IT^WwwYc5g0}oi>7`wo@C+zFZZf#MhTY^P@=q2XHMKN;D0Xs1-UbMPMjN|=U%Z-1jdiwO6U2Q2)c>k$kMjz|!u$qFlEHa({Gp zD^?M5lVl#|C?mFQdzM2HunR?&ZSsS2?}uy(sRDXG=a!dG3jrI&=7gdFJ zg;Q8KYKCstlV_DBcwqHcA!J`o2ohf76a$5Z82=c9QB@A!pbzM-geijmM0~K$^EXG$^ZCHIt z7&j%@b&gVFgvfzkCOh?%d6j1;6V!|ygMTlI%&nSBSC0oYRXY<2s z6e*CQqlO>>IMJ3(bpm-;f?EH;2fHAKXa7Tbny6DDq;XoLhwcYaGF4z4*)eSrF+7xo zjWRr4xQdIiQ!z$8EIDJBs1aeqjS+x-B6%f(Pr`GACo_Wi`6wH-WfxidSBvrapCINlf+teDDthFo=W+jAwamLFsn(=$LeafI=s8 zi58bL7B~`y3$YLg0>n$LwwM~#l~@x2S@tK@cNZ)fV1?OmtI2cYrVvwM83b^U2%&q}$!1bVRE}tF*lc3MU(!oqg1 z7CcFYHBK>k)LESv0zS&cLmtWLFrSgdZ{Un==IytK%D$#Oo9Hu@&sD#nT zr#o|s3-OHw`eI#}r4}f87za&s;;1^BWPC+ifWiltI+FG&I`XN1X}1uc>NfzQh8an! 
z6VsMGrJdasU$E(Zm7;-D7H;Om&Up;5E74lwqVr*gR<6kT(~`U*P#b7ZtU8X(xt1iS^)&mtrZ$>E=F;$R!pLa z4uPN#dms+))vg;8LD#t}eN{aL)U1s9K7WQ$4?3#mh+vQEQkFWc{@{TX&=35Oi~S%F z0fl#Tx|p2mt1L)7{{Qp~TFqa!>Xo1@c$r1xnN zKQ^to=z)@^5Bks#{~!*xFoEmotWOAZzv?*zfTp$J6!4>WWaBnc^h6)|RxedXj%9&^ zYOgxPq1LLEbuzLDQGL^@vi{%^udoNt=}#b5vpYAOHQNvbunU0mrfO3=IiyE#L{q04 zh9|_Lqcm_2kuq09pJz$4cFL~OW&wbpwdirP%w?C-$gl2*pi;QCKeiL{#&cxTL(hk{ zg;TNgg?tB9LS`3&5&5j2YoCV)wI0R?exPJIgjTGRm`=O7u6u=FBZey`g*pREpJb)E zJ5i?eSx3uon*WP-4wNj;il87neQ3qJSA#am61LErseE`=tfRL`!%WgRVfb^DQo3|A z`bA@hyGVP1jXJ#KRjk3YQ=p?a{sOkvD{k})cpv+kL(6ar1;Av3tk)JgBFBVIN)f23 ze;xX(4NOS0mAH7@oIN2m?D{chHoN$nJQ(b@tHltVWx)ZwuQRKv9{RxfmyrmY!X_NS zLi2*s_lGd-Mnnl&^cqB)Cd5{Vnjed#BaF0~8pJUXqqHkD_PeZ}BYn9`Al%z=O(u%7xfnS$JMu{Jk>NS+XaYWOhUb%n-EI$vnJq z)%1)ZjLferEt=dji!6regIw3-vs>el@Y+}~MTlfam6@6?o4T=tRfsYObH#ehDq(KR ztTEMG%}NYTj)%wc2d`Wy$$&>f+GAb;M#xbtreiw6;Cov^Je;^JD+{5!7;{IwBF|zp zKv~4erQ;zf8nT>NJh1f8Wr>gg{m-vl&Cgf8u}ZVBJQJaO5E}_H=vZ&PY|3+oL@ru6 zdH>9uZWUugODr|^H#s~k?Cesc{60vhQfdskKaCR8OfnDB%^1UEH|NY<97cA`TQfSK z<9wgx8O!gC!&?p26pTbZ4LxEsTpA_3NF-F2R!J4*ImHiS7O)C^T*a!Hy8~<0r zNh=6}fDZdi#x<47JC$_QQx5&m4^VfpAl(yVBw^KjT)=!4L4!!VZ7^#hT>iq8!tyof z&<`jp({F7%Y!r{@_}WJMI2ETyedWA|O-m$XUms-OVZlaivM|}r-yD6uTNjMIY~04g zE^1k%orioyR3{87Z_o`B@3zYTI9dlD6ecczjkz(s1(Y$vl;IEv#ZAyx1w;D_b-yLZ zS_EQA^x`gIRA(b{O4xNM1kN91#ZparQ2AjL)J}t!L?QF ztCKPj;q&HFFe_RzK`J-SBKiFwZN$_X)3oR<;R`jGyj>H~`kB#o6N7yi^=q!G1bj5-tWRX7a5;e$= z7=7@yC+G!1=5P%2@xp=z%PS2ElxkP+O3l=f%!KgUU70jVS>F=DEqTr>DrS6u8`0sH ze1a8k@qFIHC|~Rm)1QI8zuHD$JI~Bd39vrl&|U|NN6K)0{uO~Qd@78|1#5W%5~X9p zMrRGv;Au7pS#(pGtzBCwA%aLW+wk+C;t^Y0LZHqQgpmMF(y7Y{)BiWM{ zU^Dbx0WHdbQ3qcXI=C?9hT`}TxXvf|cJAH1=agJ{VaxC3&7Vh~9xCXnp&zO??LM|~ zhY2-S=qS*_{(&NmhYci_vG4S&;oYRT&(%a(eszVKkWg%3Uyvu;E6 zJ_IpDsHltX!lf`1F+~+uOz|rDA|iu~XF7S2Ar~VO$U>FqBgiA$3bIY0e_CrzB%%z2 zY{=%;O0F;BPO3;kr=%+iyM=B^Ogk30cih*2 zESJ`TKx!?lkNj%a!~_4i?JtO&oUmS)L{=)_2(%+Wt#Tr8RAHNMR@tsjZ|1pYpLT|r zOoiYpRoMR&y4W>_RNXH?k0A`ytlsXNF8}An5adxx)D$WCPvFwqup-awG%tVv!k8~v zO;1?}lWmlbT?D}(C!psByKhN@7tb3quWpnb@bmv2r_<8B&#_H4>rG40@=h~7m_Sxma(=ESAd@k3K zX`9yqFV@D@YR7u3z&e8b(1(CPEQqBa`ER;$djGeiPV@dZK!v3SDco8J-Wt}rdl`pt ziON`7Vnv{Pv5HFWqKJMhVm@|NF#kBM@d6nV(I^Xiqkgf71wZ!$lL@& z8d~da!ehvc?k2#PED9sb;Za27qAK6;2ywV0jQawkq$f!PSKR4KEkgJR?N~q`;NU|b z?w}YJ+GKP!1Y@Swmc1~R@l2qBPu?nkI;-$dZ{#zMphWgPkCjL;N36`jbY&Tl8Agdy z(FlZ;^8h~x!XK}A0KoY04-eHuht|WP86~L{-BloUHd@;xHL0Ghd~Rr}X`@akb4QJw z%Ww60mAU*>zGvmJl8>oNc*ujt5BlXkf{4Ia{Ll&u;9^6|b7YLhXN(X?6KHvJA-+l1udaD0 zAc5&cm|ZZnQd-GmOIzwjGHTVUQgIV8 zNrRLbqV$Qbgb4Z;q#UV*k1UIe6;PA7PgqWLBSM8rt-2;90sx`{o7e~6M%g2>gCl9?%>!ihw@ zM&(XKQixg*l*o+*^&ye!Yj4o<0D&cNfs?%yW4#laXBZW@$E^t(WyIChWtAiz>|;v= z)jQdYi%56O4eTCTQJ`!EGpR+5bBj_U3mW$^+2xUJFH#YkD&!W3)ea!WLbIPl54mO% z?#Rp++57faCZoCG^-Pmrnu3#qhM6vY(&91LT$d&30@-%gSX`pG2r~q}q;pz{*6$Ds zT(y&ML--&N4BR3DyU@@^$8%5rN>YZbYjKQW(y3%F#7*|)2}SRfUqw*au(=1ornNFQU5%M6UNE;9v!)_L z1^9~-Z&onD_nk&a8rMk=$}xgmlChp;7jp&AXkBokgy!j=-y1 zfq_`~15#Hd&lh)HI+2Na-2Gg#QOHE#978v8`YF_`@`BKe?i4ybDmHA(2d-HyNt4Mz zN!2K-FaHY=;1#WSfH(S~uU#-3$LDt2Nd{_37(<+&>>ddZJ5^RnUNDa*v!=ZEI&Yma zL@{>m7=0!<*Zn+$5(NpOCdU`lB0IznekcbnFz|poyrKdGFw%2;ZSkI6#D(D{x=B+j zZWyImAz?kbPk?Qp-fXOQo~Cm(`&5e)KNSNr$CCuN`J8e^?p(&a_Q3`Vh;k61BAruo z(n&|6(CAw3_{4GlIF4~Pdwk%AW1LQf+>nnfZloAEY+@0DJU%7$w6p;YX^W|nYoCm_ z{~&-x1VI2lH2v-fL!dY>GV#xQRe&yL5cz%Iji!RLq3ZE44BITygj@-4=Waf zZ2!;d6x$@_h0pAW)M9Ldwhjh%fkZO0=eMPo`j?eL#uwWi`eo#JR|J~b)o;p&eTlcc zmZU+4z#L#Pix8gP$##X1jaIFh=&lCxjwtnT+C?A!Dqrc&2hctOUi&yTs|sO)sq332 z=_`sEaUlaqHv?3r{0cCMN|_4+KRFY***J@?slb9zzOq0p&P#~v_`le)x2UmzqwoT& z(=3`ZAG#71Dmjvs0fgky6L7MT7>SPwbHH}njx?&M9c(NCQk&Fk3U{j@rGW{xxQ`6< 
zvBE>F+KZA~Xq4qc5I~54-SdwUY!G8Gjbb|-IT<(?43$7Du-=$C8_XdEq><+XLjM*? z2+TkTzH38ti#lhKC68N(m?H@wnG>*N z13|zu;imf&INb>-VZuS{n?vt-!-0~+PRysF`?Z>avN(~6l8GX+kh1@v8QR;00D*=7 zJH)ZkkX^V9!jZCO`#lvrEf~ThqTsZGYOlIEiq)we?i0mQo5W=#x`A7?v^usuO5U$e@ao89KA(^0K4zy4WAegQ@q9G~)i)*}#nj4ig`-iJooiTC7 zE_nw_%tsm`#i|V&7jv?W^L;@TS59F=H=J{UARA&9Z)y_1+1vhc$8`X9?k7m!>+9m$=d`iHO@jep$1m)xC$zzn8q zNlhBU0t62Pkux*2%Brae$rC>$bO?hy2>+-6LaaS+2n2URM1i26Z>T+sWG`6}jQVp6 zU~$S90-Qj~k-Ne}(I7oFW5ck-pn7~v$Sfn&tGHjYNJboza*?wqYpsu2m75%iUCguC zy1fW+!gq3%gXo7nSR~tsD#DCQt1||xJ5fnL2XA004@-7=yiorvg*MYsN1HhiZ7zbU6JYz+AK@bfgl93{;e-MY{%aK4a zrRh)zWAFk>;1gzx$+@u&bclss3ZHl!CsqQb8+9@p)KN37A^-j4iD>gck$6XrFbEzi zijVZSuZR#i+r7;+Ai`jMo?&%Nvb2dur5M>ji6Q}v-Si1NeF%$D83${&9#NHxAXJtZ zj+BU)bgWg1xK-U6A24!O+SpP^TvP)zR?ZNL&)HOB^%)v#GKfjjZ&|`&q#7n%5@;1t z|2U3S)C=d75+5tMq)Z7oWeHM23M};-fV+sZ+gBP2lK;RGrJqvJb32H3-He2Q4|yFR z7ZIzb;8EaPi16b#3|xxb=nsRXoE3Fd{CLZ7O;{j{4!mI#lA#ScBN;xy8dD|FQ)))y z6V83~#NK36kd2$=T#RUQQ{=c)I| zn#CGGyUMT)G|RA!&786q>WC8!2MYSZizOe+NZR8%iK5BAJwk}o;>x*AnBjy9zi^#t z6qJ+$Ee+HzO{vbpyHl$CIG2T5u2o9cVO;;DNB{P$i#Tyry&a&|=m%Jn)r{3AqR5`Q z=v?4>!_X*QlE|J0oE&eROw&a!_UcGFTS8lLh%Eh;#(7*zWYF5>O`ZZ;YYmCD;NGBW zUg-#4a^Vw;ARvM82Y*AmHE!3~26lLfg571IZi zt-(z@qB_cQ*~u0rQ40tJTzD-GYl?tEp#RWKE(De&j?D=-9@t|IV$op}HJ%Bz8Djeb zP4Tmh4Aj$jYY;{3y$-UinBc={?VBK-4C%)_z$+H`FpANE}54dtPde}IY!p$;F1Gl#)xHpT1nHuVW#jk!XRgg zMw5${Kn`NoYV+HYb|SzG6hc!%-n2DNMO83t=|#Q@gq|FjE}U|~4s`HIJSj1j9$GIU zl^5xl2*FP;s*$Ww7Fxr``_>D2qYxa}~P?C>t z4UM1%Y<#kskNDJtS}SOkjQ{H@i-4;W+08#5R3gLhU(gsOn2ux2=9@t9hGgMv8Mc;V zEWjO-Zc%QCq#&zEI&Hfym5tNEzgA0oF(zL zpyhp6VKQm8X4_v8ga=5ZKzOwZ2$#kz%N_tgyFcOpg6r=WUFCG z0mKSzPVX0zzIvh4OZ=~|Fs2^5*d6Oj-H+S2D}EXxCf z7E-h+>JTzPxSuEVzyA!v38kXQO8MyOA10vD$cha&;(ECxhFQZMX7VPF z>udIDsGY8Lyu*mKUkkt-KoFAnqb1FyN)8G|Bg_>+z=b{#h$jUEK=6YIu(C8CKxB<% zH?f|j@FcIaa2TR7irWl?{?)K5>OUVK*N)VQf!4Rasaw@Q=oq|0+l695O?{s8peV1P z!9n}8U3m!vTR1FY3wAVzo(9ia>by!)N$QyZ%i(4z@J@9eHs{j*SXuWO+Gbm!8f}oC zWC|w7OWc*g@&ozF@}wl>AzxRmxW_K%APe9Zs^L0>HSlQ1$7%nXlgLDeZi=LcaQ~rq zeVWgw6DT){rvGjicecJvm`wPY_|yxgvtFj07<`#X*^t3HtW$ERVb9vrIiSAN9bB3q z{CM_mVwPzxs0)9IuNEf$8oHffJ^P-!&As@$iijH#*?K+ai6>>A*9n8{5!j2Wxf+iO zP(CS?dPbM`lBf50njuo>@M%w}nc0ubp*{7&@lW~enWy<~0VV&P7BqhC!-Rbo15l7F6DQi@SC2wciWJ24(PpVgGOGH1hz z5d~VU>2>8mGGak4)M_$nOpRnJP;Dx5AjhwA=fV`aQ6OE1LMu}3>JV?Zm0@{2H{Pt3Re ziNo;V0iW%mq1t|K_H@PAx2LNlIJRwQ?`KE13~LZ#SIQ+MmVopbNZoveWhBsA7Qu$! 
zfe}hLA%ztJm>^<;V299Uj&Y|SQ%v1Lz%AnNQNb=4sF&Yf`XMJ&T%G*(Qbk^PQHzTf zcBB@8523YEdjN(+5N>yUq$84qQADI~cnzdba7juzC6!gSRGnhAfR>OTm+_=!W&h!I z=Z`-lj%cDE7Hr9)Q&^^?QCJu;mlc0eMp@TCOPpbT=x~e$;oJ*&n3U%odu=PPW8-EB9gi%dQRZ51t zR%%3WZOmS|Y)B0*8ep^q4QyJusr{JkR&z!K=d=YClrhOAo6M<(3Hek`WdB{>lu$qX z_!FjOSosJsx*qBo*|i%BJaSEu`NK{-8d~5d$hi`#08T)$zwy!uQA(^{46b$2zbBuB ztzuY5Uvc+T6m9@`Z7wrj$}7rGCE(2n54s6{e%nS>vMC4qc0J`tMOQM^M*auqDB87bya!& z{rmraNJ__@3eYMjol{$z*4HMX;KW0<2n0F^Xfr&q=V!X{9cs9K}6X3cMq_>Uc9g4_FIRc>%2X5$RAn{r4PFFm@^k{u1;n&ghp%1`u z5p}pr%&l}I!U&;|i*{Mq3Kil#aqd%lav3J@kCp1v*c_- z1{yW(WQz)clWj~h5LPP0TYoy9_^NnGF8QPalH}LjEICF6eGy+2nkE^S#Fj4_b3hs- z*(j&8uugRHmF7Grxfs(W+3}Er3%nk0tRf>|$ueh`VaS3OxSFc*kA}-(m`KXmlY4Rr zG&pZhH7&_nBqrWH4qHg;jBo`RDI|A50n zQR?O&!KtWmj+aZ6m=uqm?BZcE0uoH6P+Wb~UsoV1u8AV1Nx~CU(gc!HjWV^Vnew3k zXLmmgVTp5~(FhP6snlpO$9Eoc=~{wAAemP1qawKu9?OB1_ALL?D}V|mPxCXbJ~r!8 z-z4Uv;OH9p1!b$oVdYYtD%imyNjnpis?Wlx#iQlT7|P5frfy|Q@rCo9oov@(*_zWS z`p!6}#HqCw8J7obYHQ#UPHx7c!jDAhqJ(`dY^S5vhQ$yyVMK_Q&K9_iv~(*3`-f27 z_?O*~#ImkUA!yxtMmZHXxti*uqFDMMQ|jnrz*CG`?6VxHado@hj4gP>`>1!`^e>u( z3@?%QpxVZ8b`7x=TWEXRk{pySiR>)r|DDPaGvIpZ1&cU%kcmk*xd19_n` zk+6&`t~&ZIbbO=Top#n+#*`6`BP`@0)902Cd7&s7l-*~r>^-z05<))g9ow3?tPh*a zicOM8p*EL;)*8_+x)cG-h{MXuIqz1EH_QXm=T5zH)m#=87UTUcZ*8$Lk@Ku)=j5wC zto5uNsTX4l_B5Mr;UkUMTazeu_G*0=7Xb*cFFr(I7bo(IaAlk{y-MgQ-;L6U2yh~~ zUEgrxV6GhW$(nNA~DKbh{5db0ERu&%*0-Fqb z3ur_wH^wtQ9gkK37k-OOb$)aZNy(e4&V`PB*vw&iYl;AXP)f2NfQx>hquHnd$hv8FrK&y?i7H?sI5REcRHLkqcdqfJGd()c?3*ORAJ+bozdi125@cZQ#2Y#@D1Q(s)5lY3VPAW6#?-fvfCB;+-#E%1 z_8^X0V4%#QM~=km(bugR#0$F;&9ctZpULcbN=^eyv1)cfW zRJ@z}b)W}=h1(4jZV6H>8zh( zIL-_>-{{@N-+ljt+-==tF*)MR83rMOpx zXq}2N4|$}AWb_xM2?7q9RksWq4 zhfx(y^IRSsZbmuqSFTWo|9zccOb!ht2=bMPJ`RlcRSHyiUzFIQK%S)gwTtX1;H{Yu zlzh`BIfqhl)(bKZ*F42$(4aV2pDxnlv2a>oPz6~mp+e%HIFdwg7{ur$#8fCBNvC0QWfRHD!P#Myj^#YLH;VKU{&TncfG6TV$0 zZW0z~avO^z(j*W_W;*0xa2FE-UPUU;a^2-?@<*K9Cez$aScPMvp~X~f6T>9ZHRfjh zM4omIhQg5n7w`cWkijHuS3(3QCJx4}#7`9cA9rp_6iL?8;Kfzm<)2hsQI;5Rl%>*{ zo5F>dp)uuv*;#gW=c8buOVp=<$^;Km4SD<~Rfq)_bVMVa)O(&p8{Up%W{R-woPemM ziLIHyB<7Xjg==PL?D^!b{bz`=)>;JFf@VzG92Sr$+C;=?jY3T#wUGQ|8-!MXFo*?D z+E7~%kCpXhk$nFaYL3lC@W^jLk#pv#cSH$#I+`)s4_~N~r)d&7Ra}MQsL7NHjTQ=) zP1S|sro&w6d9si&5WsZV$AmhKCKV=0McaV5tIaO~`DGR~0C*`iYnDDs5R0NY!b%lElS4 zL#t5Y7>fUz*kzNM0?atdBePzPlj`5AJq0w8)T+i^Dw1gRNW`GNplhCz#0ko>a0G1B zPQte9qd-!K%wiQO&@xi&qG5zsG+QD(Ln07BEAT_J*<8L*=b4J?SB2`M2t+k4X<606 z6957CtO$iNA>_c1tP(g^5gpBzyzB*jl!Y2U|TWO-fzq^KVvngV*ldnt zxGqb=?)$J;d)e)mF5po5qv{05Guoj-v^Y3 zJ%(;KbT$RRxV2<;qO#Uw5INrmFa4z6l-m7lw}g_K5&>mMtICJ_FVQ@V&am38V1fIH{}FC{zH8J_G`o{O=5lYn|zhMf9tr zPNxjLB(AbWniT{N3-PA#OcDRY2JxEJmFeTMA|pyM#PaAcRgFgM0KG;9&fG6bWU&j? 
zFoh*3cyvcj@bA((ni)IT&JEid=kSxT$8j|3cZ5uo(((6t)ICM&LVDQ>#_ocyAJ+^F zJ~=FH1Yy=%aT{atb@j1@$S2s=8|6v{B{fqxe&>RHlW}fx8H1}(a%qHW&|3Ik5Rx$Sh(Dvj8yErZ2+!>%`VoG9ZO|;=_|7o)D43$>0bg%M6 zKX}8sSgS;1AVU;$eqP{KK!|R@MKNn-N&8RAdU zJ@8;Rhh#aYF6UThH7~y98HI}9F2ECE3v)q zE5)YwUV)o$Ob}ih$ZAG78^7mhFmEaY?SHQpgN=qMQp&$<6}1q!Ic?AMuyR=|xTC^o zm2e@tD1sL}!3mP{7eX(Emsn+Ri-yxUnYv~i|Atv~jDwJc{@DM}D3kach3`mdVKT>b zuEvRJ%Q1RA0U11j?C|*DjD_*$4p8LUj<ShV-m-B^E12&cDyHKwhblbvH=m0J+Z{~zUACGtb=t(v6E!)52V$BN z_Crk1yaM5^i*}P^Bpc<6b#s+jcOgVC`fF*2(L@h|+iRbDpkv2o zL-0#Eup3X1x?u2 zLi-L$`$OOn$EhmyEccfeJDMhtLzGRf`wamBffEPcQF7Yc1cL8fw6zDhr!N}Bl3cmJ z1onoE6QyX}CDgy@QKuWr-DE_{DLTKGi&UaS5qra>6A&*M>A7Wlxw?2|^$Q0w7(yun$L|B&w*x+HPZO(HReC5twhCYE-<`LC_d+w|v-myT2<0+8iD~ zm^%Uye00BijAjqiX%VcDs-Mx%(bx+FS$tt`;tD-!#%oyy?O_>tDR`Il$elJrsLwPq zA&4&-*XxkLmW0dn3z;+ot(d86VZ6Zu>)cpCELi^`LY&~bqP^U7&dgaf@+w->zeN6q zWZsj+eMy__vAnF)*^!_I(--xa!~WkVzCTk^2er;wEqP0Jl0tWUcODH^nKFb6_mOQo z0r4IZ^}{+XE<*}-(g&}DVfQx{wd*Iichl+&NiJ;cGgoCl?vo2>gv!987w?P2O0E@; zyJY28DzIpTF7D+pxbs+5heLt>#RPOvTJT9nw0+=Y9fqCLyE78Owy}I(@v_Ox@WrEP{}2kY*Ikq z#8VKratwp0%Bpg6k*vtn%TY)SoeWb@rW;+Di?-pbAon#y?{+%dh(8t8OQYXkv8CNhz(=QllUg54@=ue5h04ki)aa z3lY84)KgWWgwx%OIWjdPU-i?(hE(g*yFo96ZBL7ML@dA?jl`8uoT_VTrq0x>puV40 zTJzLrp^er_@es08APc~HFiQv@QtO~My_^qBY00HjB$l9~>b#Xo3YNTZ5mL!k{e(3R zw#FVKaXkQQiWlBtJNoa+`wahDw6a2HLELmR}DtXQ?GsIWWjrmo{Ut>3-Y~nisE~qhuPr_&=8QJ|<=%I;5%BpIy z#mzy520nUNQPbM8=&1u7aUmqR%Vc>o>3$ttS}y` zxUX&c$5v*8q+0L2`KDGZr5EZH!OyscQDeRl*ER2WS@O%Wu_**syVK^?vfhw@%1LdA z*5wuE0Vg{6^0g5Pgs!@lPE@1(coGfo$`g-Wc5)f~J4-AjyiV@Il{F*KS9!H|JbZ%ZdM5G~u%gl)0zTjVj10eWK&-E5bE`U0J+=FW-Dq2|F6N78AM( z`}npTPX71LuOB2sX~KGVn?^dcM9JybyzB{3A<_#H_0UF{V6jDe7U)M^vZ6cH4M`!) z%AEZ$n86H1DM7ngn)~R-JXz^QgC*2a&_?F9pBU*&2z*L8cJY+59q)4l3SCJ;xVEos zL=d>Z*zG20kjhywNhUPn5nm&ou23l$WROmm@X`F%NaqW2P9XHlblC^C8!vPJZX6-WJG3pWA08yG@ zo0RbUI24@WaZ%mT3>G!U$%gQO01-IQ9`=BN2n-~kD=Cje#uudO7_@qQTHGwzmXL-D zsT@FX20Q<{7aWQjM3qI%=u9sN zmOPdDye@q;DSU{)EijPTKnMaB7@%op1ZAX=^u()^La1ZYQX1kh?<@V#M<9HN5UgfQ zj*t3jVR4(=`W+UR&08Pfc9*QZIWDsELQ7&nVp)Iep$`^_1vtdub z1VSI)I4~}{xwa7Rti)BeGQ?aGlJw9PSBBvWS4x?>FD8;BR8!3=ZEO}CdxWLC;KMF3 z7S`$R1xfX!(3rAYB%1l8B7*hnKX4Ha4aXunA673of#ecDUzyR3c9@+-`eLlac25VV@Ln* zn%B~@vE~|r&Iy61PGT--Q?I);Vm_6705n;}BHxk0+ zEva-x8CAmz8C=`K*u~U`D_7Z#Gu>Soo+qWQIc2*}SVDvNh;$4l2v_d8VW?nfCr56KP*Ja*VO@0mv}S6dI0j0Br=^E|^C7BEIt_6RPl%ZAes zmz%4>-B_PPiVRP%02#a>c0YyFR%kgh(ElELJaPpul|G8^obsQT^ zdYyEnmzr5^X`?8DQxv<(KJu{-&BG=|g(n!FS9wa(U_a3q(L3?G6wVaQ;|yTZS?vLCJIH--~=>g#HlRA>&y@RJP>%w?oRLx!~k$9Zt4F5XYv1jul*PTgc#(B z@Gn|`MpEj=SJtennkX->D2JBJ8=~WA%7Fp^zzXC+0XOe0lB59%2m3mZlDG>!#t$VJ zrC_S*dQ9Wh?rtOG<>NlE3x6jB8^ZUv3~ZR9I!+J^btCa`XHdcgguv@Cf-3+|Wd_s3 zFKT1~<76|^B@2VXD*OQ#0s*}Sf)oHi1+t(B`Aac4O$nC}0&mTL=BrTDP6}<}f>7}I zV8t)6%?mj(cEHf^j!wT|OesDtzVyN{FlHgjP9*htdLBduT@iV#$4Y>@_{NvA*v0eYN%RSh{^Y)_ama!AL z(P4tGVzlpaz|JTdt|gMBAn?K}Mi9WXf;b2xt5Qu&W>B2uN!U8fZ z5Xj2{vH<}0p&Cqp42%&Ek-~b~Wi6giS+-GBATfwUOkudq5rd8rPs8)Nu_L496RiZe z{%jPR=Uk40x2UcH7Hbr9;%EqB9Z3=y*+Z39NM7vGv9jeI-Ef3J0>gX|9m+ui001BU zVH!qY4iZu(7ox4KQK7s|oWk)5NhTxzD({d)(Y&rFII`J9tIs_0D;)+TJ*D8*PZZ5E z+Y|!T{4ZbnaWIbVo_GkLVA3odk2!W}S%|35?C~f%MIZn4p~c9f4|#*!`hgJdLHd|M z1PXy36cUVxM4I?)DvA@1YXTSMjBXJr%8n}h^1PG=rDE|Rd@&Yq!5;vi0*LY-h(QD( zfiY{7CKO1!95Sf9@|&uuGMx}7G-=apN(&{^GP}+hL9;xW>cR|uR4i14!W0`K zG=R>&RuF*d#UNsHBWCmNz;8n&f+m-RGW6x;o({~e!Y`fdbRwgcdWawJ;1mFW9_|4r zU_k^h!5*Y<@KnRC+z&bIDm+P}F(A%Y^sgb6MRfo6Cp;Btnf}c@U37wE1I)al;2NSV zhYvN|6LXaCmaZm9fP?;ClZ5=C)`UYPjg3_Ht3%;pVE~jRbQA5UGRnRp7JLxO=przM z0>jWjDCxl-h!P=EVFWY*LuG8|){d?ou)@BHpezhA^lu7%u9{XtdYX_cW#mQibPQ#~ 
z_e3+~&ayGlGG(?*nbJco8KSE|Wha9aA^1Tbh;%%74~qH|bnHYCpy9*IITe2EJ|ak}1-U_ka2`Exj3qt5a! z?@D6Zs&HJ%l_`Fd$sRRwYIJGfN-F<^k{jNk8XzGb450-Szz{T{6Q*H8OC`dLR!jIy zI{}PMb9G=Hqh-mJ)75Li#?dXpmLxO-E7K$I?4}ZV#y;V<52wQHRF7#2P9WwCZ+n9feL)3MAO+?CAILxk zHlYs42$~vKOKi(ly>~V3N&o*g@mTJf)6in4yEo(C{>^XY{3Xj zKpyDf15$w$Obte^x1%Vt11kzD`7oSZZd7Inb0IP#{?md)IDGeShV{59jk#RWpP{X9K=3G2PkoKgK_42p^ zE;=hF718Pe1R^l)fd(kS7&f354nYJe-~$xE3|g7|zUEA~>v8K;h|bqfZn@crC5-wl zR#qpP|Dt3z^3WJ|o(Nfk)VY`)riO`geG|eTyy1pDt^E=MMidq972*91ocqZ_H9@JniHUJ$ip#!u)RpyMKb5|)- zOL74wgfa1#w^KUt22&A(p;p3YzV{g~gP>UHqA|K$`dBw?16#zb;$W8rn_1x$0|&(g zLb&y-s1-bDDo6hVRo=D(W4A267KWyG7pK*RM_a>MEC2)c-~mGI0YG^spl^o+fdZf* z4X6PfHsG;=1)%-Nt0DJRZDD6IkD$7&$ptr7%t6dtppV3<7B4?wM{Y3XPL zVFO}8A6Nk$ir0H(HJxCXJh%6&!*43NvM11EHQ;ylVbfYo1suVZ|tZE?hvw0|UZ}!}K8>KKua? zNGzlR9U9diMrKALJTM;a*3kP_FX+_pMaJJ7Xx`Q&_-rBaSo<70VR9>Dc^q0~yIPJE zO4L&jyugkgygU~4r4JS2Kx)X*`fwCgastD^br>Xy#cG6%e;UfsvG}#yLLe}}7BIjM z^dS~}n#?~3%wo;Su;mFVZ2IF&0@;RbBXZDT zn_~YcJ)5EA#Z=|%W~I@6m;Lg`ub;q>^S50QZx;h>G@zR$Pee_l*{fC%R%zb@L=Xso)xUuN zT0tMsVcvyvwx={4V&M{=AS|vuV_fI6bCtC3uEh&}0*^zNd$)il0(Q=b!Wup**GuCh z{^|?8-d@aa)Vh>#CI-D^2c7Nh;A8bDwQ!PaP0pz zgc4#N5O9G2{PF?(fZq4Fi6%x6{1Wa5Uv7q8%1ZR;m(ADfeCc-viVseEl?5_p5Jdf1 zg|EKz5oEs;LIrPQ4{gmf-%0F|&;8^>_p*K74Eb7uy5xjq`uG9|<35*8%uozoeA@gq zc0vi<8Rkozi;&5qq{34Tzr(;QsG~DY%{$4c_O2)i>1TqVc*+ zWR^Yt#yvm3fsmv6s_dr1Dq7^^Tzd8mn7X+0`BR$Nu5eDMIX7k^Ka);A0D=se3IzZC zBZx4e!U_frE_{ga*|UW2Iz8(|=Hf(z4H+JU2r{I|kt8V!BqIz%EpdDF_1phNK%B>q zBL!KgWg(V6eYh-CDA&-T!;%UsRuH(6=*)vBGd>LZ@7c+B1%ndAh!iTqiUJ8fvr4f7 z)~;9I1?773s@SGvsFsB4@ZHHyO6%Iai#M;{y?p!n{R=p-;K79N))Iu!WN zu-0J?k$qUW`;FW=8t(Fu}_l4XP#ycOhAjn-$i!%k|wwx2=a3 z{(W(3gut3Q zJ9RSa8R2ZUykU1nkI{Yo-~9Xg{|{h*0uD$Ze>f>b*=mg`bl_qZP}cukITlz~)q1^E zXV5$Q*%sG^A>~&gLFIW^fokO-me6YtX~-dk@(o0rZ-Alpl!XQ2m7jMN@bLhTG3BKg zPTuGfi)4u1N7O$?Wp`IYXgO7*Ls)sIRd@rPw-$`gz^9OCWx?lSL!wD^RF+=ar`1?( z-NH+JiY$5*Vw6@vHCvQxkyvPXQGNB1Tsk_| z9BT@?=oOS4g_NjLHkRaGjF`?D(x3|QK>#0o1^V1h{a99uRx*YZ6+uka6_l0l8EOz# z8^tyjq`r+M7MBI>N0+fHRv=Nc-o4k6aGrLi=9;8li*2^rZp;5IUUe>HQ=XI(bZ4M8 zQ5I-9k`me)uR?7Ws;JEFDzB|%Fx8Q6j7Em;rK47KrbwaL*IQk?jr3`^Bgw>SOwstARlZ(`pLv_52Q5uXX51o+@s31SCp4jqFjN@ns&LIV-;8t4 zgndgXiV&rHQ%?Fk_{0|Qw3c}ja#@?NGr4H8bx-9+_BokpZ-$Dd`E^@=ld z7PiiGK8RB|`takAgiC=1(nejsm0_@CgT}E{<%w5Yc*Xj5vV2LMwX#C)S=6kR6v+)U zni8=Uu-TGNPI=|m-U*Rpm_sDtgNzY*lR%BB_H?h>VHp1(hhzIy_CuYEd)L-6C#=-W z9>Q&Tf2!3INIz?rcw}sG+Zws3Ckxa#RD08_cUoX^rJl(~5nu82_8yL&X1N}f){7S5 zbrxIJ=1zY3=ARGP=8i3>80Vhz?$be(Ji$Itv~>k{b^1Q!-mQ6cm>07Zu3E5 z@!Z9|rtE8hG_0WwE2lmN`K$s7ddN-A6$o$mgCPC-$pMAcqAJaeSp$*C3`LS5|0PL* zMA@H+q&PvTEm3gm;%=n z^uR?!62V@3k5p@7%DT+`fX~R@t-BLLi;pt^yqa0u>EmzAX1=K`>@SQH1<{RDsQ+vxZo{~1m z!KgS7UNABXM^0EWB~1i{XR#Ms;>98-H7oxiTjFC=EV57}p#`7eY^h6M3X*ZArkn`r zp=HbknSYckow34*F;>JRi!rTRu?Z+eQ2I&$*^Ychwcw~g(m+A*V{QH*-FXyx%9T3Q zN`q@293zF1zZK*n)Du$-^_Zb&hHy)$0}uD2$JJAM^&~8Xsa@~-&6|)jB$%_L02|V$ zbXmp=<2x#(sxnlP5D-AU{9s))v@Qb4GIve=PbF(H)mK%9JQbBIUAOvK4!$uXBkRgx z8IqKYQY9_T#0o-G>r9XG?}TLOBli{>(TCCpue|MTOFKC(k%UXHA(~7r+$q4gL91Q3 zJD@g+GFe(-DU?sOrX6wXTZLf`m#+W%2pdIo%D8UxA7kN7m8MkM5Q)HQkii(q<&{Gp zo74aoXpnv}5qEWYr|mH-tK2J|RYEJ;W9G}Wq7ISl`DTW6ul>g7Zh7&H%;OuO}nddoy64S>EPm8-Rv_=o9N(^umwFA_atuO@&%-_twh&fwG}FUAOHc# zK!67*!rSmJxyfgzC~ixVJPC|-*Vz9^XRxsNBM9#mU85M0s5o4jhC>fzOVdZhB~HiIG~`KDc%O#w zLE5)?hd2lj0hR-O?m)K@U>|8qygR0xhk^NDl}@34ZyKj^N2N`tSR299J(hLVn4}pa z2ueUspS0TO3!Q}GTKkGTVO?%QNIPde)+f&l7q(bNYHX5N1ZLTsBkDZDBMa#AATSY! 
zycduAYllt2t|P8Uc*c=%wJT;`E6mYwI$m?i-N)w6ePJTYy0V=MGHMbn6jw{HCOQ-AsX+2O~^wg`IQ~cq90%(af#!89+gr21ryTdaSs47{}62-CoTSG zghFN^hcQ7arzb4uHtbd-Q5YCDS9l2*FgD?BZZ>LQQ-Ofdg*J47w4``j0Z~v`6(1;F z5;Zqg!E~GjXD`D=$g_gEc0vUgL#y{uX4e$$1rpyAFgtd27*YW+K>!#K0T1ADyHE?! zrhjL5gpX)Bw-C|m@Iej$lOArKv? zSbJ!SPhtOgY1mEf)`ngs7ENOts|9*x!YtF{6eopoGgx0Aa~2!1V-&Qt~fB}2p4G*w%kmF_DXp+FOMH5LUk7N)`=yJX{jx9oN!#9rsb{FY4b zv1__gEn3Nof+U%J!8b$UjJWU(eeeKh*$O~t0n$NHG8C7$c_Yn+U;p?Ubr~58k}xXg zlERZ|ewm3onM5+u5jS}>Gx0$Oc9@I#Pp61-LARLvF_OHgMce3Doq2h&<|RNjXdWYn zL!lkNvQcyNYN5$JTghy^;Y2y7l!(bO7x@4~q7Q-a0DB;t761sq0*kmQpa2qv;Hdwb zJM%*&k|&=coa9(Q9oST)2%X6pCOUV~GK~FjYH|e-ZE?=LDcfIiO2Q z7;|Y7Z^;*ZcX&3Ua=b?{#aS2bX3Anrt|uK0`XG>aSQpEmx&b|ZAz^ZyHB+$P^obf*~%c> zDu(E|fapSUPyu-ya(wM1h|Y;)f2pn(rjE7KVwh^+qe6f+Q6f3oY5n*0PHBq&(dlmnp2?MSbMjCMA zPU3PRoAi39Xd#sGw66G*uPCuzS15Agu0&?DwUTR?Ql~wZa8M$iNVFCMduHm1Kgh@% z_4TqdB0F}mhk&rMIk|yT>$t#CqQ#dv8B1K^2ua5JG&dQpU)wW2F_>jEs>->l;9|LD z+I$d}VX9{>jj5(05w|&Wph^{%dP+4bT6~K3K_YVy24@}jqFY|4CHrQYh-WZ|t9r+F zXbzg37#F#Z>%65Rt%A`fcw)Ky!XKI&vK@Gn5Q4U4`?k)x9;Ap+Xx1OH#JV8rp*F)6 zZELvqq8$Xcl#hA09JrzcIY*zzC2)40$f#BpD}lv}nt{rxQo{ch&uhRlYpI2St>;83 z%eRgYLXM;wC_8a3>lV7jNto326Mlt-rVFiiL9@gsP_64p30yNHG9UoMlu5^h8$+iq zTrqRIyKt6l_4}vHGK~H^zPO>Yv6Dvw9E##~z(cGpePX%5=NIpIQ}v>`$3?-7kyEEh zt}5^Q3;+G!mx`jK!Q-oKm_#1R4W2?Fuw(B)Y+oJq7!@bC68--d{84+sLcxtOy z>YoOx5fn8$BY=@GnGN+PlmFL?i}2#8oxqaAj9Y|-0}c-+Ui zjGbIKxx{72gNL>{S%70p$TsH@Q#C}Ffs>zNF$Mt{-FqK`oP5ZNxteTpoSYv=+f}h~ zxS^~mrp#+?S&(f!Z0?20r}s=y>WcZ8A$+74#QK=YH;f3Y%l;Q#K>1??cuUGl5`Y;J zBU`R$%cTio75zXlCo;BD%v^;T%+{9}+?rT{VZ6EAZ#itR5f!xslPG!HUN-BmLt7?& znxpqhP2EbNPL)=8AwK)Xd9Yf~llrQ?Eq-Sg;CfsT5>Ez!NtmP%$h^S`jZh3-+qe_Hedo~r5xc0nB_@1?X1!(Igqd1l z%X6C5cItF33b0@8o&S&)Ft}TCRkU$>+7dLc+N|1r6wj`0-f|KsS;~Ovo!%TG)C-~8 z2wl`m@yx%?m$>t}B+MpO=c>tlfC>!}SmFN`x&kIDx-qy)7I{l3yo-Y2A)k*!O-~Is zJD8Z^z0#^ZT0MN;8{Q_ucZtZk!6D8efc+5tz&4v?DGkA#m@z^6l;8R-826lxCuoGB z_T8%!GUdtOUPEi%ta0w;y9eIY_p(}FL03pIyeR!BcLZxaniol4*0nj&91e-!7}IHL z&8{O@!i?9O9BBgs=PNf&t?oaW&rN&rpRJ?X5k? zY|sJC8cb$RPKG32N-kl`;&I*=a_#?fS?*gFg5&=SmR)h4z-R)zmms z>Qb#VN}EKuK`csLm4yYK74lV`JclFJM8kGEND=Ew!BwHli)S{#PffUW6&p{Y;mw|k zlb$nU%k9W04rxg7Cy*jf z%5Rmo35Vweq4MR7_>PnM1fc^=zyvH6k)Mb9;3yXTi(3I-u(1F7YI@%m8shn=rFzxZ z`amwL;i#u>NwE(Q70UlrupnsQKZFP?EM(Zw;X{ZKB~GO1uoe(X)S36<+mJ@FRW zE*Vdvipsl*cEYWxvU+UjE$EEn@xhO%60SQO8B}sfCYxN6Dc76?Od;N=^k_22+_TKT zff~xsf`YnSs6WgAWJti=q=ZW`Fbfh)O*rdvuq2w2oRZ3kaN14J-@?+V!+%2L?WU|s z>?9!vd*iSvovgzRH#6fRQn+I(ajB>8!s2n$5r5>7A(TcWayr}e#4b)$Q&n|Ui6Tr7 zPrqO_60rZo7J9QPGVOb)oPrWOQ@=(5mXt zzp%pF z6?kBR3zna?i-88vci{S`I{PFo8s}7)*&H!r1GN1UZ(Ukxg5R?(>4G z`aS=lflitxw#tHD@6OwroX zuo9%f;^9tA2$YexeuWsy1a2;dDH#dvvK_QsrG*aho2d$Aq@w&JhndOY`TkUt^Z}=R zP+`~ql5@$q(TU}_$Re-G2|_^NuAABN2R2F&V^g#qIqIe63KMQKw(bUA5D2m(*%ij5DZY8>bS8z&T47! 
zxft@+0!O)FMp$t}+OXe>5A**StIz918b7rE=DLSz=3rrd0N=Zz3#*;i`43K|v_Yi|nOkup6WkN9j z2);3KsUo}*s63B3%!3J&YzDOo3{_V_kW7t`SKhqAaMd8jEht68KaFWE#gEJ2c50J}#zD?*flNYo-jZFR zrVR5JGQWezgaH$$V(|)vM)FPcWrjsRA)iwk$GEa^w2`b4;!3xPmcJGAl5JsU1y3{^ zwB*DmQ$ro7*asDsVal!hJRc|5IaR<0_Amyzof#2ikf0{yOJQ>=R1-29Tn19G{mNJ# zGpW!QS$49Z!U-Ow>M25kvMQ6B)A&@E(IauRInvZ-lyLT0ksz_H2Mr}mb0Sy&s6gqi zGP;yI`4rX6x~EQ?C2k*^Cy|XzP&5|Xn5__FFVcqU~y0a@)?rhKfVk;p9#WC9q=4I>I@W>?Y}2`Pl_)H4Uft&YMlQ)K`k$LT>?o zsYd2)hriloFx|fRkmN$xfys?98zq>gTKt1oMjdKYyGy|a`=zqfbx3ERi&@9=k-^4< zs!ZZ4Aw4BzwRlpIcMw-7_(4v-Q&CBMQ!-bJCNsZEq6%E`>NDNKkDEezDL*~y+eyJT z#8XCGGPDc z*^gwAPfgq?jjdzjtxm3eH!cp3=PNrPw@R|FniouuT+Sp%GI7}A4z?N#7KeI;dTZvi z$+T>dFwt{NU`m@~HjJkXAND(y(adKQs%C>GM!A%2Dv1}uN-%@)j3D+fr!&W|#zM!2 z-MO)=6PL^(wl6C^E;JRVGS`p>nR&K84Np2%l&q|eMd$2nZM?Z5CMv`x(IFf4>fC8` zn@7|E%?~TNW#R;-aKoAT=V4X29DLVW&Dp)Fe+(;Prjl6JBBI!T!rM-CgR{jHNt$g_ z!XhUlTRKX%@j}+iJ}WDl+S3CIw6Qa-LnTU60Ax+5*1PJe=(gPd2u*j(3AFCUv|HD^ z+wfu(a_X3*ZPduCF0VcO>P4F}ZL%hFdBI(gc&8EGmO5^5c>)lW+W5jF9fVcUGGoBhB@*XeutAys9d!lB9nTj#^kxARvb%0^ zv(RwmL{`Y^rP4H=42SKTTzgC*Bq5lP4xcEuG}=7_+zY#7C72tVF`GO%b1p2?wKm$2 z+;g>B6F&s<6<_fO3vdP?n6=7kGm4OmGlDb4@{a-xx3FWhb3>2PGCGR*HHS;T-ikl( z=&|;j4UB7=pRt_zBaod?4NRgJC(;?v+Z25flG$pDP_a7ccrB+pKq;(?1pK|;V>1vV zjPGLzfjYj%s0s|Ej>E}5Vp6;VsfAeJ2c0v#fRn1yz?BobxwlERr@}z7v%K;cx0-4! zZ|X5?si?Y|q()(j$f*vM=q=qEm$XWzKuJF!q@I@lP@s((krMe6CL0$dTn(aektTu; zW@5Q148>w0ta{QlQuMhoj4(K(qs)+xz{5h5ax&C&A?Z6q3-AYA;5jz4EHa8b2>J^Q zD!P6eJO;5t0;wuItB4GFA7TqY6p^GsTrET785Jpv*=UYMM3*Qe8LIG&6)`OmX_^z^ zlc~YHiQt}fA{!iRLs4u;xVVg2gGaslx1&qN#ZVi#i6DbhKK}R}@xwi)>!pnlh{UUy z;KRN}i!kns!>4GV4^pb06G0}yLl-nh!c#`i@f$$IlSE0o`cn#XDl6jnDru9yZ3K;0 zt3PIJy}3dkAVQqfv5u9@iP=Fv;5sP@p`_~nu`hS*$t$DBE@QV5Vn7-rqYYydNl>InuF0L;K|_C7!!*gRKE*Lz=XGt$a5v^n@ z#A$>nY^kP}U=1gVC{WZJb~F^x>$7UYI8!MMQ?V0tJe6Mxz-^T~9>yN1BK zbt6hv6E$AZAm9rqf7(Y2I5&U{uNL&0S|mt)z=byfF%`Qo%@|172&2mIHxS&lF`UTH zT#^@z$k^x(myt2_d5ZOUifST6N-M<32^Dx*M2Fx+vb2koESs4L3nvm604k?YDT~1* z%uE}Va@&sgNkR9i3GyqP_*qQG+^kRkd(2^pOcu%({16j(1R>!;h=)P+pK z#pOx73g`!1s0DxUDG|ZSh`_b2{Iag3!-iOlnS;%IlTCwB%;O?Ps`{p&Q8{tTD!Ak` z66uK};U3dljl0~7FwxvUr)_DxpDYr$#f5A zaLk492Y(Per&v!O4Jd-#Mkwyblg6FYQUX$Rjn__{E9vH-lR~FP%-i2-8%#qqD?1OIp$SBBiTAQw|LmZ4oe~ zIW}cH$-9sxEs+XF`VgsD%PF!i-)OJO8#~{amP>L8ZyGj1Wz?H;RH57*e+U^^)F~c) zC4cOde%#8T__EUjL;I8=E;2&|Im6dnm<}1Z-?$8cfJ6GMh&D0MUgOR?#7~0R4y{30 z7j01}?5H$Vj!-$DLkXQHOgp<&)@3!f@xec&=oD)lv_6#)Oo<`zR6)&nsqQJIUS&ds z16RQk*O(j1zwn2C2vS!6J4Ip1*PJ^n1NA9gVI#tmOqsbr$~zz^<%m;_*A8J-g}5&I z+{#z{h|1&$=B!l~Bvm-6O}C1uONyVJkR2>~ExPPbm|3YLe6Wc4M21Z$c7(sG>oY-_ z8Lj%HsEC}o1i4ro$GGa-l<3yqf;yNDESW`5S?LFT=!c>3QF-*NQ{#<#^TK~1Sh2Lt z!lYFf!k~Y$S2Y4g>B-D6S;&9|iGc)@1KYLKW2y-48j`%oGDRX83ESPw&I6P21V3aLmW7C{%?Vk-=JsA~;Cc)_X~r51}E+{uzz1o=0jv{`>B zj7(LYqx+o$u@8#>*_bS0qsmRjd8I{stpG)Mf_IowGQD0mf?CaBO$Pc8HUT%Sg$wE! 
zQ`c!-*JU`l`b)UeF^>p}d2vbVv^e5;Ufs=;Y13G%7(*+06ue>!cOqV7l@U#u*qC4q z@#!a~>D5b0j?JUq&9YvCk~46#*<57Lf|4_j_#o-It~m2T`E)#UYeS>mqR=FWlqk(u zVYkg72>G1HI17p7iixU~S6sbSDp9cQe4UwipM?#!b^1%dF&qt5ju2UjKiffHjS;oY z4&;1d71UoD?G)YUv(d3Qic+W7sv*J@VZ+2Kzv8?@Lg8@L*_rh{HQ27N$> z2jWs023m$2j1Q6kl>OS(Tf@#;!q#oKpZO?NuUstKOq$F zDYH8+VVcazBjaPhqQ}PlV_5tRNu|_==m*6W7N9J^PbLUbt6JM3*js~74(nW|En4-x zS5*U5HCngpI$}+&GYR4ceF)Nl;D|CoS!#!xCp zGA7JV0b5@#GZ6%1>SUA>0U+_|&6DaLkH{R_%hQ!@9gAC3YDT4NM%QfCjB@>Ehv)}> z2n1#SsZ_wJXBuu3$2h*nk}h40IjCO$JQj)&25g2>YF#KnK$Q z{JTc}rFaW!)TOf)vT7#@O=GZt_Z8w?QZC7}2n4xW(9XG6)G%@u?RsN0gAyfIo(S|i zYuE2*iyAsz%&zUhJl>C7#|-eQ@zc)~UZJHN|E+@v>xr(a*tnf@c8G0hJis z;baAI;+Ftrf4MFRoa)IQ4{-x=g5GMkJLvOMON=8Wm7v&3!eH45rDJut?D(b#){-WD zYFf>qCRCBW{LZ>&HTc0iXMI_e3nl0+@u|`kQIkybM9^};g;)p#`+)I$gNfw-x(qjw zpn4uyf2O3zl%6?rN?J2UR*|5otc7<_gx72(<-#(~ei$Z~usG9ixG5VFWRf!OB9b0j z7o(OwJQ2XrE8kQJh>qw03f`Ign{NW7^R2+Y80MJ3a5xtnHJdAUQo9$s>IBAPQVuEM(Mn6A~p$R}4CjI@E9aKHs}ZWBnI#|a{A&C=J)juZd*cF3NHq+>Uoy)cLx zL;D?V11UUS;>Ki37ZOLMl!A3Fk0I>QB&!3|A*JZTOLPMw&MNn{_y?j#YOG1)&+Zj-7avp@M*b2pX8 z=9tr2n6TMNm3f&1%F)1`ejutaA@^O`%%8{4HD7l%`E~<=@8yDF#6tQ{r$B{()w7|P z6^GA#r*cH0$$=&&Io~U^ETFSEZUgr})Cv0w(FuGZJ6*1hZ<dxIkN$gd~)j2NJ-3dk5_oDCyJmk(C|6ZO>mKAKPD(Y;(d z_HoKrlP7ors=hLwD{rMQeJTaBRHJ{j>AP?)j|1N0%fows_$N?cg@OhT8tg=7;j;=0 z7ZN1XY2ZJF5)&4D_OHUbhwuJ9^H}lVGmB)NHN@!f;lfUxP&T9}Q>MY02x+R+SRv%e z3Ichi%t&VEMuR-x`5dUQLZ^bAkRt7ga4OZL1Wi7CS#>Mdu3o=_4J&pm*|KKOqD@Pd z;z70s%Lx)x?yaB-apm3|Sn!`deSG!7#Vg2y1-yF+>LqNU+(x*C2@9(AxK;(pwe(uW zxs@&E!!wJj4K4a$$4<58R*ihHTw%%vC8PG*xdLO_c56ld-uN$Y?9pQh8Z7O5IC0TD zpH6k#wc|skSx0hYyAY|}?hIIjl-e%^tg&t@L!pPAx0#?OZY!K2_S6BnVb(x14!j_a}3BH)zM*8V@ zQH)C#_n(q2;kdSzwE$=;n&4rspD@h!Q%OrgAN2mxMdQCeU1w#ibi>2rkIkKk&g? z>{)u|6(v{Nky$5yGBvebNaby0(L^ADDQA2dX~)n@0Jc=pm^8ZjBAa{dN1sJkf@Ggj zL+Ml>d6^9>?@0tPM0i~a@{{kGaz^fK1qNNZTjF?;f=+lpG|3K)CH=D+$*G#z; zS&)Vs{%R_{3mVy~!kV2s@LBK7=vAn-UTQE|a2+fvhKX@Vpn5bR>5`Tv^PG^oP=;kv zw|}x*uR{WQDUh{aI>euLM2S}>%228%}tTANe{3;}>Sg+{W$BJ59_}g45 zq;G1X8m?ni{OCg|LPPQ`*j%9#$s$b(**W>ZCRzS!RoYSg=SN&u=kvY8TKkcJJ(*c2 zNI|tk)J`b5v?hxogRGvlNx?^BO7Yr$Uv(3~ccqL-mc?dB$f@4F_unrFVQh5w-R8$W zbzEvy6nae6iHMhp$LDF<~ya; z$tQNw$jbb#-p8er}}I%0I#|2A6=F&?kgL;aU%OqNSI-hn(Z8->}lv$Uv^Ig3>#aQ8JjHu;nnEKcTA7Ht8R%1hl4N zt*Km!QVv|`gC9d=(D^L$BTkviJT67Z%6O%~Kq1AWaUo_@Z4#*p3Tv!U)lf=Xc94e} zZZ5nDOcg0w90qn#u!a)<6a=qH71(v{V?YHBvuFp^5ITijQ9-0jVB!=KW~H>$xoKMn z#yXO$X)5btYqlmND3G|MC96$YNZBGNPf9kp0(FqDM6_17a88MiLaSO=8AV@0Y^4w# zOkwV7lTLVnxCG>@A}s}771C6!Y?0MKTvh=yGWMmlbWKd1i4$Q}PPm0?)^@V?71s6i zV@?AOd|Zev-M*_i?o6thzS5GHST46ElpI9_Sjyq&ax40+Y z4+HU28*yt)XPVs1mBSwvLJ=%SIz_r}3O5_&=%|1YQT&7oq}@Ht<0_R?-3Zq$Dn%el z3zD~3i0W5;Tk8V|%iV&Pqt!OVKN>QrGO&AWXDYL`UIosI~f01Tt zB*bBGig%y>UbUMMepY8-=o#_k_9=n9DLrMwmfYG|%7XTu_{1o^L;tEoJZ>>Xdh|Jt z&G?oYEmg*fkit>^3Iq$ zNG;n*m}y`pCrYBsm2^>+DM$egm8NRvNN&Z0ok13qMlvtv=xnt9T=t+?ZEd%Sk2hFz z3lq_?T@hRAmY}kwqRq=uz7@vCwV8^8E*ej08mUE$G^wXOb>ktsG`&lX3yhxGC6v zGdxaZ@v!xhg3o@ioi|#BAinI#rqe%@<2G&xbEW3)fR?Dm)2+Jj2hwC;Lw^vVjfYy$awKMi zCZLS}8FDt~OlLYX0(Z>lr|-HIeDG#y)djra;;jG5hhF)OQX8>Jdpsc!n+K1_8lXA-J*zmmQ=)?={+8AZ<8 zpxO5wNU-ggS5OJnaM_Xqo-s#MA)D+%*_hSdQ8NUvp>%v4EW%Rf|Lf zkDs6oO^iqPDPRnyg@hzeQAh?ZPzn@5j*DqtODWS`0M2K$pW#r#l`skv@tXzUQq2Sl zr+E?nc~9x_AVqjosMW=gv{#e)3Wgj|2W^iG!btONTG+K+1pbuST}9nR*qQX223iOI zuytI@1WJcd#j9Nv$wkR$H5*S@iuGXOXAVIhVFPV~iLh)*j~%%X`KehkTC zIY{K)(nlQxB}Uh2G-4ZFici=@v&9<}(wt>=&tLVQtB@CMR2TdVAaT${Sy9H~pbl>t zVu9p~_JKtn37Abh4ZoDh{-6+Pp~;yc$sl$hb$o{0S&o1NoV`R}l*|^`k>Ci;hr;dM zzTku(He)SvBg;KZK_sGW;6*u1RBt$07g^mA{tx;ckFBs55Tpcz)D6E;1g}JtbLk(Z zRbE_)qT8^F6uJp(cp`|XSBx}Tl2t}600BpIV~haB966zNwB5GwVxMSG999MY2TtTf 
zBA6685LRsC-R(tHpkbbfAt>e@AVFAZg_ZUdMFlowOk!hP zRD}gO>5-O6WJ!V(LuB9Z5g+l<;+|-v)D)BjEzg29&lvs;A34Rab%z~dV;|<;R|=-m z1rBfY!x^a$*hNZ%)Iy*IQR<_n-4FjU9r&DL*0Gh#AWor- zPv-rDG1!>q4Ca6oTWIJa(JTn;)EZG?q-X@nN^~3y3R^}763FPG`Osefi>OI}=*3%l z)vz$cNw%h7{(~9i2x=f%q*c^;j9bgFB5KUfQrd+DP=bv~-Akp^h!uuFpo0bULwS~! z?afp?MV9xJhz3rfS;30c4b$ih(-SDdJvOInF3*pF#RUbX8G;Q|r42wK=S-5BZidUR zF-34ljvWE#^c`oQeVO$f;s>f=?DeNiLTCBC&knv`XI@IF43ScX*J}(05CFk`-iRO= zgFETmM)*S@WQzQGqMub_xeVS=6XaOS&Xj@#dw< zo@Gpm`k5b{R*|Tc)mvC5r#XgPz>KCiS_M$T6RhX!*+nc|NP79%p*Cuw7K=u$=c(-I zQerFISd^*c5?=&@V6>vHrp2oQD|pNZxU3Dlyh?{v)=xcLvUOlbc11(1>Sc)#{TxT_ zh0wlY37F6mA0}s04Wg~0YuB|OI{Zyvtt%~&mtHWc?NMI;L9mSa1O_E|U}tz6E~H+y zI_e5};u6sq}zqyi3WFv2s;)xs8s9cd~V0tlbw(+E=OstjYPR$Q)H zSbglPQ{YrQW!8=jXYCz_eL!1(xP`0PtPJkk;fUx{z7&vP&UlTd?RDp+sfK0zO68Eu zKM2C#z{OQcQI58)n`o`!iHKfYEnz?w*gB#X+D4J09{~vhC6ECSoNLvhg&y&!&gNdb zP-MJvX^i{_N$_UfDebFHE#MhRO8CxxD6F9>C#n+e_fefXegj5ulG?G;w2s_9&6p$(%^_5 zSDeUJKt_>qTvH&1F?kLU+Q>dWA?e;O-Xh5EMwk5EBxNoR`F`W@HpVCV?wbOEF-+b0 z+9I0*Xn>;J!flmNl$27Xs#wJ3USjXBN$%uQXob$pzDSaLa8?Q{@O$)KNK&w%Ro8l& zPhd?2soVvriHfe=+gJE%4pW!7ek#MASHp~v(ScOSz!3mPrh)|TuY?*GD$`@wX`yyS zdlKM}@NhwJ&Up&upDO7LkH$@{YVfv4Cy~ehnDNI@;SbLuXs!ka+nv@3FUxLrFy;DI znPzWNpbm|oj2s{C3zD(GZIo!{tq$KQO3jbTR3+>E-#rRsDAMRLZ5LAh+xk_f$cz-E zrs$}lPl+l9ipUtpP#0@vF`Ss}hA|dnDPAmGo+1oxA)^IzK;)yYX~!W3=JxEl1S4rt zTs!H^fNC7JD&uE5n3GnISOCS@QEzW?%~7~8ErXa5kyN}v)F?Ay`mP&SloA%dl;{u; zPR@q@YDf{i8{B3s$&N-lUg9ea4(F*x0b78L6_I#~tnflHc{0Yz@{PETF*ReV<>GQ0 zdxw^RDk2YKY?h&F#qDoN1YgEeGT%}EG=>H5Idhr;Vhi#Y_X_l&)!@~jE$8u-TYz(P zBGc88l8z)RXM#pQG@eI2%B!ezXJ*|hr!wxwAE(TPJTpd5LxvZ4L6MybDu;|d@3XCp z8ba}dq|Wk3pNvI=;#h)<84e3f%4e6AnW^^L@ja^YyaZp&Yj71(jP9gnoR=aaoPz`dNHb;P5Sxfbkev@aY z+*1W|Tk8cJ3UX(Ek`|la{6(7osvMAXmEW@RHFx2L-zr@K!LvX_RARPm;GFFW6*SOd zk&Yef%pI$V{vbmUcV$NrxQ>$VDh>~`t!@vruc2`+Yw%}C+)1R4X@4?ZzMw?sakbr% zx^e{z1I?d61is|<`P8;~OO`5&v}6DFPga-DfmF>lmZhp<6|K)FZpz|}^y!KNd1bdf8pi*fxJ{?{OjZhO0KrG_1Co(7Jah&qVjS3b?240ra-uY z18MIv;1R8;fOW%$79i`n!nBfB(8acIL4*B9MB;IqTR(8P14sHdSP- zqwMfZSTX#73fN}2RJMBjT9g%!vc2zBn}2)%;(B^dn~!bP6BL<)Z@X_;UzVn~^?_iq z19~2f3k-fFX^~6+cr|c_2(;-tD&n32p$ix>H?1 zv)hPhYx%H6UHJs6JhknH<`c`gu%nxdm1xVLdHgV$Qin7Hn@FQrN=OWKc8dH0p0EG| z1z0`auMy8i5}(y;+&)%N^6h5e;m2|O_q1T>iohWm0LL_YV^K^8+BZFX0*zidx44M~ zQo3&%33l?bROUFhy~UVMjv7mD3oU%3J z^d*vbzJqfA&y9s$nb&N@`api-%!#Z>FR) z#_Ug-1^?<-EVDLcV4Uxi#huXUznh?_R!$>d$ouB)%p}i}KQ3ZN({lghNC;z53_s34 zLI8w?aTqBsaCam6>C2i?BtcM0Q%?nA^bZD?#OtxAbHY`{$s7{PkJ%e_7^I}1s;`k5% zB(faI*Gci7d`p>a%bKZaUaTFAAd*0`$tCt=8ci$6QW|cegB04xf{>>3%_PVw z3~f6HFGR_r1%(`vNX!^R3%u}-^3JOxk*q*6_SCaVO6Vd&Y_G)ZYYs0a>$~c`{HC1$ zYNoPMVrDZdR$F~iA}dXLugMDL z3Jk2h@>8qI-+b%U*SP5W4yg$9+DpKyyc1JNs>a)sDJ8`4a~0ccwPmOlIxju2 z#)ZPQjw(wlf@q|U9Q=ni5d*crk3}6^4aG=V4ECsRQxr|3ASJS>DD{F153=D<@~BzM z9LnvveouUK+lVF3Y`h$$HR&(Am{hAjC?RSL*pS;3cG$#tGd3{#lBH@{UboW!NY{&} zjmkSMC%zeH?)+77C!O?7%A#{I-RU%&3`G>i3iycNv_S)n=+0jgHfh5?_Z9e`q#@Fp z-)2`*&tOlxm02qfO;zb4W6sW*?Y3DqEX@_CjtklRkVT0xhL8nWWPE4E627|R>y1`2 zOukC5g#?@1*@_n@d+?qZ^Up4~Ex#PAdK1Knvv8%-P;;M7%c+8PXM(tN3@pGPf=0!) 
[GIT binary patch payload (base85-encoded binary file data) — not human-readable; content unchanged from the original patch and omitted here.]
zGeNU*`%owGzV^ug>~XDbU-yeO|!dn?Jtc74ik$@wQjHek}%Or zNqa_mkPk;UbyusC80%{d4RtdY%T=C~`qVO490|9QYgWM0D;RGNHhgU@y|YQ=?ar1z0?;Qu{>bp)>PF}A@Z1tHBO}G_$ZWH z_0{BJZoYua3$TOta;si@!ZByagvt>*T@^K%6}J#CQ`IU-a}fru4<1WrQ}#7um5v@O zvmBlPD)dhKc9V+sPhq1F1UpV!|Er$_)TY*Sc;bhIUI?4`!!W;4V{ujtFNz%TXN3eo zWfSRTqtlAswPhjlVMDYl1wv7&kF#DZNiM=%b9QRM(HTw1OGAfq=3?deZkqBE&7M_R zWA}22n+V)c}q9q>GWMg${ zz3!Rv?4Y(nvEFuZ`BkV|6&sHfNSRM@_m*O%wa60a9h9dW;|;m&gITF3d@!O*2x1Bu z_jJF9oKAxtl_Vb>l@>SDT`_A+a|vSMRwWbxD%nuPw&U6i$F)Ey#!z>7)h)*=gQ6Zx zXv5OAjQ2m#6;Q=;c4^nnMimj`Mk$T!r3jZvZ%zFOS9pmxd@Vz>(AK|PwCSULr)c;xwB{l zK_8xt_xA5kwXS#TY=dPMgn4*W!EQaOLwvl}3C|{FhgL`p11pNg6a67;AxTU5ZY0*L zhp8BP!R|B^=5}gOHf2L_MHGG|cwyyL3~N}WSm-?`RZAwve^$7P-MDTtB5JT}I+_A# z3DO{T;Q{=heq~qg+ITV%kBbLr)#y{co_OLWZ-aHCLCo!q9hvUvFn$&)jFUKMKIB7o zp@HQDt+Yh~|M)`iwAsADe$of~VJSV*hBNpfIGL4^xpGd=I4&0_|3Mu8Fd#jg$YwCWi!nFTcqo_WmoG(D zJ|4phEC4%BadSA#n%(qoY)zGud7S~2n})?AN|=Q!zz<@%irR+{_V|=319r8UWeFCA z<%4_ILJ>aJDL#jW-_p@KSe@Cqq2ZP(&@X(jQr$G-7BHY_P6L{42@qE6gdz1rOh%A# zW=veTo&VBlswkMaF%Uk?I1_om96F};(aIk7m8s+bf(a`kh5#O5?^MfT8+D>hrDW)L zc!p}oJoD=Qp&9hS8=x%}uUSY&Po}Bb(c%$@5Ew51!B7Ch#H2uH@ z|C(>wS|%5flF!*X>@7{b^=IF~A4Zwk5Ye2E=SQaE${_o$E0;2}6zTYydVPdiLxW}_ zV2?uwJ-#6pycAs&d)!WNmn}^{0Wd(f^C^vq8)w+xs?u)rD`wGJw1+wry%c%amE`#P zm{a?1D_J7|p%%~stivG?0s$RtDOKg@27j<2#}>EUj+-UyZJMo*h=!kBdb46yq>(!r zYdE#xrj|oZ>N4i9&E^(*no3{+Zel?n`k=cx+w`0Zw+(7c%$qx-r;)$=>iUQu9;+7E z6OG^ZTQV-bPcuDDkNfO9(8^-6pDCOFSNkMx!4JTQ0Q4acVnHPFxq`eK9%=R%=eL

    K;lVZwHS z4NV zULh86L3g-%$&JkfHv_&47G*QsjEFJ910lYic(}WYQ0UanPbFBE5*|;H3Bk?IJx#}c zn4&9!0HQfP9&4}yThQSd%psVeCswkj_|V&{kNn|n;_HG7JhIiA(v3XG;n8^z8N?^I z;|$lqb@m@#{1$HERHCNEE?!-HMHi z&PPKb>3q9F9DHP!q9*g1381x;{o&H(*K?FQW4E5HxQH7&i$1JA`#}(JAr_j*M!B6@ zrajy;JiXsL<4!5eTv=FT0~T_W0H-k{*E>J3qtZ=T87rn*QdD)!mX`PkTbIs$e92^qeV z;@QZHnB?URsTa%PTEgA`R*36yJzV5I>(6xDZ8bf88*9DH&KG(3+9F%Qy;YtWJ!G!! z4ex}*w2^S>^z!{-L!R;7W{IYr7@M1YtIh9Sb^lCR!nR&yfNP0y{;r;TBeoLJ?09^K zIkGbfRbNuvEVqqmD!hkBXoQO@0Uu`HUi2H6!vlYF6gKr#Kj(P_yRK84uMt995c10w zelM^mGru;cby7KB;&;b5Jidv zmBYwT<1>R1C05M;_^^UNfhcR*yoocX&Ye7a`uqtrsL-KAEygTop;pb96$A-N`LDvi zT2E_PI9W8S)~!S}ZZt^dS<8seo`vO#mTOF^@1jOkN;R$Ah7aGFmDp+J+=3bje(X56 zZ^w@U^FFich0M#GeGA^ji!!Fbca-;9zKl7u=FOacIxI*PE5@UOlp1v#*Q)5wtXso; z=yLMRumn2+c8zrA$+}_U+ue zdq?}Z>2K?AuUeOy+WhzR;QIuK&Rja`(sJDNbLw@01+koj6p$9QXb^M+YNk<3im6rCN`wuRBVY89AzfV&MH%2fM^;8; zmtKD9uCcO5kRHK_@ zRWJ~IF%6vCLnrZwY`$5YD55`sgyYtD%6>he|T!`C&J zm$&}D3^Rb?iR!Yr71;YKXeN1vW}nl>yxoZgUrA?-Ha91nn9P=TvC0kw>5n(+1~-AoX80kf6@}%3Dj=jC*-4RIQWZqdXGH_0QUy*<1a#bT$E}-bWeTN{nm^CYnO6j} zWVYKhtz=hQ1kc+VzDwWS^v4Au^_C$0By?zZTjM42eG7*Bao8I3EZ{;ZM(Z2y zHc~ozXGuERIYbTK#k*^>YAW`GG@(>aA!z2}xmc#B8w(3mJAzaIz*j6{110n8PciEWJ8YL|I ziExB?YhUr+XFCfxr9@p**qCTX!tO1|HiVN&??lC*`p~8@C+r)n#uOfZ#KIp2WZsF) zgOqR>1c?XKQK!a&!4Gl-dqY8v1TE;2S7ob$jH1&OV+h7DlBqYvVvGyp*Pf^UiHJvH z3<`w)h7i5{ZZ;J0UKWk_v}DyHCj8I`5u;+OWmPRIu?WQE5K=h^f)HI3j0)$fSi0yS z&~vt-5|s>g9kF;2l8!`~B-;o|P^xN!`chbF%+$gJ!UdI~ETvQmcaWEqg@(P;U=Mxx zkgFu+A$>$gKl*_br|1qngz85iaN!ToNK1M~6wv2f)3Hs04TQI>on5$RLz&zqiZaxa z+wAg@R*Dmh#A4*EcBDd9c1|j5ROdL~N4jiMX-*w6+#?e=DU$@rA$^R6K4Ot3cV@3N z{xGIDDrTCUInj}DOV|_d=te8u)0%?`r(q(JAxUP8oA4Z4qpjKe`^!_vsk zRf_aXBvVM7_=i!-c=1ySV<)rHqz`?dqmWp!S~I0`m4Af80^#t7RmMa$j#2NEc$_0d zyScp*dhSV^Y8$~cb-kxf@^^rF=1RXBvng?MDk^MZL=k#3fqwNw>KdM&3}#WMk}`mx zd76K6Du`I{V~I*lo$pgjgJ%CfXiT-V}o=!*mHP3Qud+ zQbxre{5WK;suE){o~SYgL){JMIY%c6j=DTcZ*}S9ge3aaHJTrLBu+;# zoZalOx4HFQ_kIN|#p(6^gr}uYr+efzRdp&;_+TTVhhGkl^@;3F89ccSN&_;Mb>kC} z=mss|d_tah%8cB1y9nWL`3E`>%;e-H)ywbU&gIDetZ{95_(|8+;>HzHy);@EvsEX# z*NYq>t-HJBRa$wUgs#3(qO89syVB;$mTO&+CFj&~VZs4v;Vh{JwIf0gaf1rjoF-Fw zYRyu%b&g9Ap8MfrM*PDeJ6^}i)Gm;k&nyu_s@X)oc6!l#*S`*WfWt54?J>K&;l-31 zwb|G3F_u(dJ}KKo2tQJ>&D?3jAz3%;^y=O6*_dWafdez9)MxcJ<7&RyUKxAu4#**I zx`$-V-ffG=$qq3|VaND5R^t>=kyl!FGsx!uB8T^R%oSA^23;5=7*zLZRstALqHm9J z8<&$QIk$i&13Zlvej$h|<3|-_2Tv8i5B%UlPJw4&LSoA|a~0usFGnRYha5B`Zkv{S zKg3|g<#1Ndd0KEoV^er%kM)0&x zR6$6w@OMw)Q?F5fpqO|KcwuAag`ie_50^vT)?%9_fi_2Z9cVL!^L)K%isRTJB>0M; z0vbV;Li5uuFL)_vb2g+!AAnIT05gaFbxow`E-~SXX4n+B;0LEQU};r^&k|TaxQy9_ zVcuv>U*mv(VSN|~fmVW9P;z9!0ykOpgbr9w;6sidS&n9yic*znvW6_TGD;FcEOj&| z*#S%M*mi8@Nc99jb7O|FvPWuy0enCJx4?Jj;b1_xGe~htg5pL>CPY4AXEVbSa_X{_|6_j> zI44_)G30X+=~NE4PyrFJ3m8BEfY5^G0!S(3WJFa|O>vYqM~RKZSo-K~54dd>*NBYO zd{B5Bgdr7Wp*n^|6SU!sUD-mU1)6#%Wnqbv3W=C#7$)LIT}FamY*%|UGeuQ*Ba(*9B4ZJm(#Lp~2!#y7aa2+;31Lyxw`$xHLLv&LjZ}il zmtC0#fDG!QX1Q-6r%8RNVl&kum|6e6g118Ia^>A zpjXI32~Pl@U6iONCyhjFo>@YrjaX6snt+rkM@);UtM<243%R*^wk88(e!>)>3R}D< zW1QucxS4sn=oawNde0HMwuQJPWLgGxOnktfevngFLAU6LoXrwnVpAHS+<09V2c5F19GEGO ztQN#rR2I}0!JWz!QcJ}nY0K>_G0pb8({csDg zdZJ`S6+`k5eG~`_)3?&5sV66y*9k?8m&b+&S?9Jp*N8p4bEy`xi(~VB($>69tjKGu zgoG7i)*FgQ18~Gs5vkXmTr#+ARGncV3>Xkpd>{^wb_;)5JN&vla9lO}dtnXYzoW?i zsGWv;*Lkppm&69kz#Z3ONL-O?icy8e#A_^(i#)aNI5V62s4%9x=ykypD4Ks*t`MxY zb_5s#>SUmtg2>gAVc9Uc`NlcbTcoEu6Nj`OcEcnp<>XQfxTu%V)*20?v= zHo|gPvcfEaCYr4%bZ7O2y_4jzxI%}dBC)OX$9QofeBcKa0Lp+OuX-7l1h{2{qM*pw z#u_Y_PW7|f1}WYaw7IN+SyFj=e7KQCq4kla;Z!!k`$3gbjrjbLPuvk1GmU4YR#iO5 zK3KkWr#3t7i!<|-GO`k0q0kL22oC_tB)KpsR{>bIuM88;-Q|s5bI62y&IEh^)iyWM 
zSyCfZ7ex%)(^pxvV^z`{4bvo-k}ama-e#mQsH^5nz6A_4v{wQ9kPrK?wFPZ+;=s^; z@WOnsPO71s1qH_o)3wMuwAqu+DD=?^`52p&h0+8tnb?qz6ir=+Mw{Q!y{Xx3fBUFJ4`rr*hg3%~c$C?F^9-Y-w7+qBTgjx9i(k26~Kbx#L z!nDwh-EMbH&s*K+dEI4tIM*g7=Fut}?9=<$KA-(E*)1C@82?X3Al{y~ZEL;a2p^&uz5W*fG_YuvKY$#;nyF ziqO==;fi{xhf3tHX5=Q+%F?1U_r1X{DdCyye7aUPF_dc#QQrGd4JJSaR$g~}9+dyE zWZtmbqPoV-rQoQU!;IbH*V(_1O28h(u+4fM6{zEGp3BjiA--e(+~yOl`+euJ{w+b7 z%-uuDFwx9l`^<)Z=;iI&(=Y;a;OHNVR`@FE|FlGYZgq#_YS9Pgv<&M!jJTH#>a8y0 zyPTewvZ1Im&jU-&od?{qzCI(IY|7=p9!Vtl0bsly9v=<>_kh>TAOeLz508F`R0CgH zHp3G7?ED^=M>gcC&e9wd?N+zd=&ZCDdc@3o5mc4wApMCiF7E5o>}S2t?7k=E-P%z> z3?cvt_5M1LZj6rR4Y=UtQQ7Yrhw0NRSB~eJpe~t>>RbXYdyEIIagyVdwaYy|$PeGy z0YC8~Wasbn#6BOf-rhQXQvu~o5BKmHupk1MU=MixvC0(xJyBu9mQJCaE@lPK*ud?Y zq)w@#HR?aVjSwFvJn+Ud)rK$nd}I3R4d}RX#8!Ko18wx6CFKC{`gvavm|y~_K>L%dCnFDE-f;4= z4E?`9;E&pjhI`0nUc9ZI=D5Wi*uKWSJQFhw5dQ@Kb|O>oSp|d%6)t4hu%WUN01>!jwD&qfg}oEBv+-;)i~pJ$v8L z@O%0SC0C4Sm^xL6CPHX0;R-bHKm-$1FsGf$TJS*#g_7-`qPBU41rg-2#{@{$p@$xO zq>9kAsw^m{f^tm!ryqS{aW2IZ;~S`_|4fqYI+4=43qI}y3XdV{emt)~gN7_`Eh7O# z>@mXXYe>M%O1iJ5y=p`&z{m(3aZ4_{^zuvWB61K+G9R3fLI8YGfruE$z~>Ab))Dc` zs$xV@kce0eCoHUh^s7h!XLKz`zi=GOuIc?yOVtFpREITrij zv(ZHdy)s$xJOU5ULz5+SJ?#X$(K`|;6_VOY`$F$j_gadzNA^5I^`}U)gmzYT+jaNd z1_>JXU3#ml^@1jRk%yij&dA3GFzA_+PBEb?jw^rk@dm64pz}5&fMNM%E!Y~ARj z{jR%;*E@1E*)ruZJCQSj?7qgxO~|p7v;8N%n&#W=KFb_U=-hg9))~ThC06ycd3P52 zqs1sZgBg18!ABPnf-u1lfhCN{re>$wDWVf$tRP{1$|*+>3tYY{=;4?Rlf7*>inJgZ zUrzg3zT6U$WJw>U4`oXon-5csrld&P|C<)eblPPveFBIllz3}s#v4~nwt@r!`#8rd zPqpW=O~a>x5D~g3l`J5sCOD}ppjxy#!~Q2lftU-#0?F;hIJQ3tkCsT=k+sM?-L=y_ zQ;<2PaXsAO^HgNx3Cqtu0r|eUd7mn?E;4Kxgs2t^dcXh!#i6wPd+-ZK2na<4`Pu@? z!&l$Ts@OJZpM3U7XrFuNk%#o3#SB&}JpTzK5L}35Z9b!2vIs~n?Nm*7?cvdofM=w+ z6)J+{k)8y@<+0^aPgCM*nUPomDu+lWEzdIu*@y;_6g_|diFgMe9w3(W#ZZQ7+X;Qj zR}Pv`0tgP9VGmyeG#@tVIb1my|N58{3nEf5cO;RU_fANY>d{bJ9m|&K@Zz!udaOzh z6B&P|Xr7a~XGtf~QL{uy9{q&Jgi>6c1yslfh`11ab_!x09VeUfc_A>63mXsZ_(wp7 z=4yTVhl>2+3{H44GsBx$kQ(?zx4G|j4D3{Z7$rMEt!+@6{Ec4r6UMnD##0@{l+0RU zB=MXvF*sx5P%N-LKnz731lR@o0(nb#kp*n#qsUok!X94w5tqXxrb>RPwf>cZ5))|& z+P1SSD?Za=mR#b)+M~d=K}vYOlnatPStN=X(Mff>WyL|yi= zO8RJBaO0-NJmw!wx~5QrYi6MccpvAP@}%K<*^}5dpVq`Iii8q~JY5)z=Ah655g>p# zOlMJq1{E~^gdIS=Rg;_8DMbYpYEy+%m0pf$D3YV<6fY7eX987G5S!gr|1r~xUPNL| zG}$)SBeD^?XFTnh3uFfO9!^2>gQrT!LlY7z0rf$6N;HME8IH4 z$+apewn{qXczS86|IEO&gF0GiPxh9^&^ikue%RhT<%kP^2xN{bZ zoKX*z5cARGveD&k%_7p8=Wa=pSTz(R%S+L;g>|Fdgo`F!63%B>uqX1x2q;BL9uWG+ zn!xpKN%4!<@Jx@SXDbJBD}Vu>0@kPKOO9Zh`yS4ASHj%6Q+1_l+5Tb8mJ{Z1M86wh z@q$*GWaQWysTJHIrPrEb?cEgz=tV&x2DCB;TuJ}(*4VPMZgzW*&!!Y#yO6NHE!AK( zbyp4*CIk=>D6TjPX@Ol}-~p2;vQ|6H;q&p5b_r3Pb~k)wFw>MGzsaAVFk<4Z?ufK% zCKrphdZe*t{}Yowft)zm`&uQo^-?tB&yC+IF5%)ggOUNVVD{rAR#LPUDwMJp4td=8+-4g+;vWwg_r%m0+@7A_d!W(g4%cAN)M-tW<-5qD$ z+hV-9_nUZL?T~S|$33f&Qg5X*d;UV^-}SdK*~!o1V9H<9m1}VG8p@&xfPq$cLt!{{ zAz=5=3IaSpMK`9|sm(1#o`hFSRvFCKBsWlbom<|WsiBcY3m7YBG@7I>Pk4uPm#STh zRAhZj zq+h@<(%*#-ecOE3w)5V4J(^K`;~nLlUQ0baG|sb((^I3gsI%`|nXSOU>^sf}~noqZgZ`t1`aO zGdgQpi*H&yJFB%lVkR*aL6C zg};h`)?+GOCYm_F+ljps8@SpD6SkV8Q}AgRM z(xvL6BOQzpyR$BLIgMF^!!D7Izwzd;kn+Jz=7_-vB;7bSkoNw;eQ%gcvB?6Pl`+ z!ByKl2Y48+(T7$L zw{Zl#@>s}u1Rr%gAy9M2=UB&)q`#^8LwXA`>hZVK55PI@Ci^y2Jdt6RgBJ zvbY*qla zB#s#m=BqWfkjSC3i!6+pzC=nN<3hhA1HqIfk5tKMv7)lH%v_=g>Z*lxe3gL1!LMu> z-XuwyI33_zHOAP((HP5sJgTcyOEy}hoAkpGQ#?%Cj!2=gt8_xn|C@-{DhtoF8k?C& zX4L&z&K-fP9uo zlpC!wL|n_sv`j)qT*PGj$sJjg%mB*Y6d!h~E9%KDf~bjAVw=};2}wE-~(N_Z-n9(A1MM3(5pzWaO1ccjO7?8k$uw-1#)w;(eJ z6|?K?O(Nx@^72lcOhP9$NC&A1o9GKFEs%@TvGCKEDsm&uG7TyM z&5DzZkjDg4p!?AU6%$ZZPzd$I^u(kHtxzQ$lEsp<0lJuIi_~Z%*?YP z)_aXp zVN5L!?Io^@RFT!#wfzkM8<5T5RjyM}6pf^t)Vtlmyk^bXoC!f!v?XN)+)HIppI9~- 
z#ZER7OnTf{&GvPJrva+NiLnVP@pLg?Amca=?NTU_!&&zLpb0}VkNMG^FY zs*hAzsYP8ta*HS#R@^ZmA-x^pBR(bES;7NaW#rKKs$J48Gp$S7%Qc*RU2wUyD zN-S2{U7wPa%+}r15DVDpO-HMPXQU{~H^iOF&iS&a*9u)8dcuj1s*p+so-* zh6t6;bI36RO}&-Y1tQkH-Ck&|zoH6Xf?88t{8#_|qXv-(=S8$ZG~4XdS8Tmcc2nibg2T2E`7-*(l>%?(-iK{Us5Mq+|T|i z*oHk_+agj4O**-8Kwc}+;We4FkT?!T5CMadyb6^X(L}t(*dwMeh*93ZjbS8KT?^~K z6;4TmUD(EX{$ zW6f3THQ%yhFdj-2THVaiisKk4F6q=k{|=&Snbkv0HhZhL?W{+iF+>XlRHCfM!6;P3 zm{0h%Lix~PzFjFsZQ3X4*k4T`QT`SjYvg{?O6cXE$%JLgG`eQgH}ch82%cjguA(8N zWbW*aXv4U_q2C6EUYKcRXw0jnbxoHtPhhoKvy3=&wPp5stSl~|72alCj7Jnk)69rs zY<@|Fbi!$o(DBvbZlchZY%iir-^yLBPTn}dAc$~Ypf393X7<7X5fDF9)DCsp2xjIF z%;$3E=L$2>3$u#Xb<+{G9u4l_gf5LcJuL#Nx5T{DLv&~D1m=l-XJKy1V$NV=&cwGR ztw*^`sKR4oc3-9Syt}1HKW1i&|E%Z(!6QQslsjR=MXm~H{ zSe|8SO7_{al;=&(DmZqipkry}C1<&E=`4wijF{0??#mL6QBXO)qBd#(W@x^xFycg3 zeih`i&Ky!J&57%#jc(^|#TZHkY4BWQG8kzq`Ws{JkGdk|_?^5D_QKXYpkCFIeAH@+ z>Lp{1Yt3w!0Ose_+3CP#*1oF=IHl~$9utXm4@x#Izy6QC{>-Qddi&Wz(Ug6suZ zr-*iv&1hnP7T5RuV!n;nWCrb*fUGkmt%%UJvLTWZqD##@iFe90qW0oZxTap z)WOTBbBXdW?ag#no6TJZ%wX4kZLTU$`HXFTeqP=}VRe-Vc0Fa>c2vBr=y7PQaXU9sKM^mU@twylt~EhzOrk{({RA!&%IZ+I%&G~4GGRZ*9& z54ia3=GEJo80#I^E2stWoL1e-Msn^7lF9ZiUsV_0cFW_$ZZv&X>-OtKB9wQ3}AcV6qv`I%cKTD0%_3?Rq z#vZ%yP-kYg{_thT)aSEto-QjPyHS4B<(TzDMRZS7mD4rWR9{Z=f#5G78wUv>1a8>H zLVal;|LU{YL}yc4ha>aSrICz%_8847r{ac5>!FFXsPc4E^A zdANqEWjl10=Vr@R>#G;_Zc1*k*>u}wO@Toi4J&W9%qds11S)I z26zG>jXykwi)LF{k7uV6B~(P#7^SL*UVO#}#!W%=ic-GE#r9^AQ?z$+ zfFSeiAHfRG$aE@rutGsH1}jt`;}9aniWVD1SCWND zm}1$3g%1yGD6nA6fj}L8R9ROe(u*S}zvIMi;=86+k;+uO)Mm}n5MA@c?D6%#h$9Qb z|9v@B;IrPFe*+ILysp}=wcHL*9`@xw3tZFwLt9Ib^6J*F%f0)|wA8W&acA!+6zBH8 zL&?_GY`EY%iQF4YWDMD_;>opbN5-Gt;eDG1`wGWgP&j=S8BEGVlMs8pQN>(*=jG*4 zNC~}G6M^wDSX&hcskGBlTP;>Er6?VBX0gRqi`If0 zZf?5u8s4w*g_&ihmtth4ol!E|6nsCGRhf^UZa8N}M^<(!n7n?5>Oap|G7mHR{PP*5 zPFWh!YBxnBDMd{kY1_n`0?h8Xye2EI#~*`SXh&-?idHPKR5WPFE3^Dmm`g1T7E*wr zhH-82a`vf{s7g7iO^88OuYEsT)*et=MHG}ku>#jwzyd!r3Ojcu+1+6H|0SJtNUkad z)J|QFbR?4Rj>TN9&bFMk+H1>v-&*DTgN{D@;0Td(Y~!sqQh1@ARm}*Mc5AX9f*LBR ze$M;k;q);}A7PYDc3Flvg)}UzRnM9+LN{?*0fJQf$#AB=%``0K6rG+>O)~W)-e<~g zJa*rC(_Xvno-#CDbkH$s*6qKG%W1iJdWVo>8M}&MyM&X?vt#gLIB)S)-k0Tn8yYlQ zl@axZdFBKG#vk|-kx6yTMwuO9QUK+{HY6>1kjQ+sen10aW){0 z4}==xl0|9)K8pm*ehR}>h{AV10^%@-JYr5QEbu>ml%t2+(jgIhl&NIh@LJGP(^7U) zEbm3BgV1x5kDO<#>tTsiTg%}4lJ>mhgb5_|frusaVToELktJd5*Osz&F|WmmY6{Zc z3}ZK;+ia(Zdfa2vBxM>Na-}O?;Ugizc*LcYig*N>Q}-akC+<}yXN!YiWnHY_ABdwG@V z`7)R%+KLp3r^zF&FG7fn8MK;5L8460h1H`8d|2hB`)sXi{{taUn_wc4NdD_D7?BAl z!uCsOd8wP(tOzXqSj))BDUtOPoE(K|&wDZnBCEkCKf?(^XGThJ75oq)&AGgrH86^$ z0w#TIBewmRDRQ#x8bchT&Tq=|H=WtYXLh#AOl{Cj{t3($C#J!(-Aaz5tduTax6*#X zG^R40sa;$*LK;d=m&_u}!X^m8a=D9V7Q^JHl6N!^dDAjg>Q_cJ3Q`!7^rq*$SxAZX zC8gBHUSGS7cWzRkiZP{aDP_%VXlmB8qBX6CL(=TdSs|UC(1Bm{nM0vRFD!{qpVZ8% zMETRT6mlu4a~)3S;^&Z10+gjx-5yO&SCMP}Co*kn|CVGFq!|h+MXZh#+p*N@+0TO3 zo?BrSE}yj**3|Q-84apJ?~2fn5Ol9Us~Cs`W3PxY^(L?j=lW{-QkTU@Ycz3X3oD4l z*`RShb6kj$jHJkfNj0?6oi26vm#*LX3W2z(qfiUSGo~r@F&Jc?*k)V33`z60OXbbx zp!hx0a5tbCQXg^mWn4}&Zzu?*o<}g`)x=KKBbBT!feUQlmN5s3dQnLB$P+#);`N}{ z^Vt`rcR|R}*01S}T$L=^-nP;Zx!r4OE-R{3*AN9KoV?5&9oZ8F#ius|lSyagJE;TX zILA7!mf1j>*VuR)RR(H z7L`3pyWkGjKaT8jPWz+OF(*s1IoS<}fqSd%WVX$k-ZZBdJig(X+0o{maarjD=P1=R z$v}f#CYj~u^PV>|L&i+j1mYen=MB*{Nin01*2&0VXglf(QEKN&>ZJ4ryPaM(vzgt@ zDl74Y2pP6e?uuszno>SZ#vRcth)rsCZFNU`AFQfMn1td@*N%P2ihT`HUa3aEPu-27 zb9t-DlDOIW4&H})_^o|YYi9Hds5j-O|3QOKlTr-T_H4P$6mRp@sI)zrD|d3@$|wch zY>QE0Lf7uR9`A+m-Yfj34PbkFq?iC-IgdwXoVKvsTEf#f$`?Y~@BGJRV|3tv9d4&r zUpV5}^jfQqrY{PK6p{D?o_8LSV&g7kRkWfSe~oq%oGrLQG2gnhwgQnx36SfMn)%5; z%8_L`>~Z~-_s%KVz-apXq4lw_f57?>F1cpC50TP7e^)Q3qb_n$-;ZA$`?_B#iCLBY 
z=xfF9_&<5aD&r$|@j2~F;{v66zi4S2k_FP+@;uI-v0?#RBI z@Nm6l26tN8Xs%0-b^8Mpo2e#kTLdt&A< zsbmR7J1-pckyjRB&{4h6hFqM2;xowG3WZwdAUu_HwdBhLV zSq}{=*I4xg5Wdpu9FQx4+2WnxS-}sEAWF%Q;B|P%bo`rN$cT!>-xHe1-|(R5Ac+>% zN2Wp0U6zhpq%2md|c_C=^ zn;`TLTTF)_;94KmMz$OjK?sqz6yg`&MOdldP6=EF%2gtzp>mnp(Bz@f*j?$J-$c;_ zAu`>sY0@;+;CL<4L<~ghkeVu5jRSIuvVd15h8Fy^g%8@ubg%^w)Z1KKA^rJGiVTVn z@uDSSlM^yt!Oc-D5+agy*Yn{^DCP$d)?HR@$xdtt>|oSzg(1H+MBq_~N*LLaA)bqA z9+3rD&V`BtHlt_34_e$uJ~juV009vEO9d!nB;rVo6a*L6BX%sDF{X*;bp$7#p(p0w zclCvZ2_0XhqhAFJ|Gi9zZD65$CZ_%7*Zq2k%}`}V?%+Skv-EI79c4a9c<(sII0N) zvERyY-)fLn>>XD{j@3^bo3eqzr+T%#$bghb(H=@Wqz#)JK3*X^NxgrsB`$ZR0p zVF}xw@RYLs4YZ*UP^1}O=FdH*g_1~Nb5wvo@IycNj;DRyPbT9ajY@5($Pk&Pm2H~_ zuE`eJ-b3aY|9MHI__*RL)<-GoiK2bYt`UV2-j4K0NqM15!rjIAfy7B7lTC<7QS41T zjZ$Om7GOfuZt4$A`d|-z-IK|sY}O@u;AR!VXNoAIdH&-rV&1VRkU{O4Jo(u*ie7al zCuX{#texU(K+>YsQZc4Y(B#}J?w6^2V@}w{oUM*SGReSzC5C1wXB$D_ zI1xBb7F5|H%SfMPsEN)bj)nDB45buw!Y8q)6nVztfvOSYI55kY(GAu3umR6%6_eoavd+v{;nNo;PO5mvk6{Qt18M z(nl8RP?03jP>_KXMn{ny=4=$ziD-~I7lW+TKV6Y?-D#PA9U`Wu1=t8K_ya|tMTp&{ zT?$c*P9BRSqI`}jPT^;WiJ7v{(IsUgg(b<9Lg<4^M#b1%MqrN^DQL8$+f%BGWiHN; z5?ihK#UI|L(o94Jj;VJJ-I7M)u^`iUc!#In>a?t7KDwm^@Pj_AYO8Kcn4jiTJE!Y5b2=uWz+ z7OusLpe)FwPlYOp5=NNM>|#&>Q_%F8Me2!EW@}`EM5UIRqe3dNLW#S4#6|^7$5PL9 z7O815#H1+AG{%#HpdZA{oV?rxvh}P8mTFAiO)cPp${JH3eG7B!tc#XK58-V`=w+O) ztrF2=h&4s9?H{;x9&3@|uqo}qo{QL!Vg^kNOQ@j3Zk*2K2cJzCl?Y6Kyx3%wml@ho z&b47_2nERk#?{Ubh1{D;sNCNg5=EFOjif~n=>u%yh@IjdK-#Lj=BBJZV$R;C|INy* z>jFt{VjfRCX4XE?6iE}SHRZ5k)#6Tw`RyHQDJsszRpq9N#YWsSf#{;Kiv-H!0af6b ztsmgvD6fu=rD~{2CL8fu2WsZyYWmP?+6}(?3Pb>}jP5QO+8>OJOr64S+c?s;E@At) z<6#+}Qof6@5^S;J73YahYd}+%M6M%2Eq{`zWK19bVU3#1-^&# zN+fI@hTKK(+DX=&WYA9#8nWmUg?cLGmT8le*EQ)3z&&(Z;f zSrQ3{Y>Ckf*Fe#kM=~;|u}Eigy>bV zJu#|^mbS#tR|73FOw6bRgW9rcM zXFwY)9-T;irq^ex^mx}qqm&ZH3^bDJh}t?urN;_pSvNC*~nBO8PP=gAfiL~F#;&RDHo zB`z;hab?EG86&7H_Lh*n26Hip7lX^pb#NxVGjWo!Q7!~~;RI@=H60!^nZ-{9I;Q*1 zi$!1c#|HEKrfD%xHAK(mLT_Q6E;PKR@K?~NF-IrJYBfKFwQNqW#jTF0UZVsJt*RWx z9Bt*=;W1-U=saU*0n6gpvUErbYBTNi7+VoNPt&QaQF1C$|8D`UCQTYBiy{Xe_6_Tf zLCYmVGahB%W+59ST>-C3#-tSHCR6(z$bM)r1*~jB*8AX_e^&BX-}bq*+OrzTr%|94 zdvXjjrdzAWi1Cy;va?NPl6nZL3@XlpQLK#d6;9{$kBrD++1l3P*QLSoahKRppJ*ZT z$w!2pVml z|A|mIlZH37ky_&qror{t&CmR3BLj=qZYvIWD$f2L-*fs`O+S*>b+>{8^FgC2K{!WP zVK#X>x8f+H-X8LFTiPQMWJI4umFvxas#QY9PW33w|NZbc(Sk}?TP-((v$`nWxxf)) z+W2O+^%2T9G>YJV7oIogTzQW#1%Wk>1}nJ$VjGW~*$$>2J9%}WXYQyeKDvgxHuXTZ zh%zr?rZ+Q&JN6)JVRN*Fr#~#n5;Z<;1tuPlo}Bfkkxi;MFkY26sXUZw_p4vTxWya^ zbMXs~tF^`4#6R&liOBV7hax?@xIz`W<{VWU*5awhGMC^_h-`;XtNNq!CWHTRik52r zA~Kf0+*AMVLsRz@(quoZDz?xlsXw}1pYfhpUeW0P4&omvuu{=h~rm@)0J$2qBHP`q&P; z724EHPb6-qG6Og9lg(m2SDei2pPH~27B@>~#tO7!yyzUXa-XRVH}$uNI!}5y&p_g6 z&M8Fr1G*QaS=@*T*5SodS6BIUG@kF$Y##F}PE2z%+3EYAJG*97OpMjyY7_q0YQ53_ zw~x=`_hb`3l@F3cPZfUgl3re*1MJ6d`NR7b-(RphJ$>lb43u*Ch&1N zR%`j@PNG}*aI>y>bqS^Z$x-_5J)qLN|IzjlCsTH+hEfo!BYXaWFZag#PKmPnmeZe%9tYe9>FYPQ*sFVo zQ|$kS|2#%kCZd$9lQt;oeKP?>r+)+gy(4q*9l>V@J0%>rkW2-Foeow=Sm7WUf&U;z ze8!Pt#%J%|J;Ru>LQ0h@S+;cf5@t-95(yftNU#D#lQjRGjF^&OL4+)EF5FnK6UI&y zKXOc&@#jpbE`ugrNwES&t0BpZoca}PSg~WtmNk18ZQ8N}Q`V}*_FpXvZQrVN3pQv4 zxwTdXEmuKMIk|sT;w&d9@Y%e3{|g7Lh1ddLo^1uGwR{pS;MX@RZ8$a*c1Y&-WWPQZyp_lz6)u*#zA zi800ELyWMX{wXIi1f#3qFv-}PkirTr#H%`uj8hP*qhxdKHRv9M2(+p)$_phEyQ)b! 
zg>qv{B-~OA&ZpnByD%);x|&R<5V`B`q@4ylsYLxieCnsAHXN-bjec^ns~d5glFBNr zY|O5@B+_f3xwxB8s=OGK|B$u6Dk~7k0MjxIOxFHE49&$XAnZ!C;Jdm1i5m6C zys*QFNSu-rvb~4KQ);}37@|nZPIcXt*Bsq*3of|^0#?bp!aR__$=(bsKsxnv(7<2$ zQ?P=_9&?s0HG3T^K3uo`)-nZ2JESL4Up* zZ}s9+c}vOWa<5vKsfzE~2b*oyaA?iaZ!mZNb8F7T1<#wNls2UtbC~kV*Y3=yg=)oe zg?=t7AFcWnUbdo^G0{Lv)AvuTohlkQ={W6F$J9R*d*MQJ`f)^w6|P92j!FgI^P1*n zwN@u#%^dpZ#gfx4x#$Ai*q989`N5STL>VVE7l&EEGT}t5d&M#55GCo|&mC^imU1+1 zNDp-vHg}z7|IN49J_~d*q+wm#E>|Po^vp^@<6rxD1iZ4DEl8ef6nIQiyPgm#G~Thw zbQ-dd=eZArAspPWEKtHT!G~lfBV4kcl^DfsFIg`mnPhPCAHjgHCwz+@`wA$+%pt5# z$)lKWJXF7WWkqDR@4?JG!#`wXd z{bW<~a!(I4sJ!iT#alj<;~WRnlM-4FaBUhPgt!MJy{JTojEkI|kd>gBp$}Rx#97`v zh9^4i@HtLwBXgD*C^AG$Xzub2?|hRs*Lcxrk@BMKtY{cClP0@#28SH7Ml4a+b21-+1m9 zF{Q<-AhlXbrOa4OgW}CG)jFZ(l$oaTMJRnSlp$sQk}{D^i+qb*U$_2w(7S!Apy6`e z*9JDW(lu>eCpvx#&{~Mo?~R_qjG{8hNP5U zxEvphB7&NdDAl7;?GFBOpsP+4qRK*0SfeS;HFi#_AH8Ns)xy4m z|6KE}?m5yE874LUIV@=MJg5IaXEcx?=~kO^2(NX=Fky{b&wbY=LVO9d1q8*$i>(&b5?>UO37u+-;7vb36NvLyLxlBLwk zI(i0CU>&umf{$l3uB>Kf#MMlf(UBxXyNwVV?5E%mb%E#!X0d^5A2jW=kuvc^6|<>_T8EBML^R>reG zppp%U_mwU0^krX$@UDQG8Z2Ss#boA+(RXug?&$KYT)9$Hh)6jqk=I;lz0J%52=Yso z@t7^}a*J^D5u|pztV2i|3cHrhSWZnAXi>1S zu?T2}JfQ^Fv7Q7|au6YCo^ZO>N3F)t*;0ZoX{#FBE*@6QB11yGlrPL}{~fb_07Fr& ze%YF%dRw+vEjmbFW}Ijg(n7XlXG7l^sgxAab^7Zj;iQ^>n`*I30LC8uth<~_+Z?lB zGo2B7)4q~i+I%C+si5z&h_+XeT1O^LrY zIF(|^wP&vCgI74a3yR!h;Bpu*Pj2HC;D!j%>$bA zgwT2KQ@8h#gYHtt?S|+)7750ujr4pmU4qq?HYlH-&j=3n@(NSR|4M_z#Yx5uiO$CR z=wl_3$9Ng;K;a&Pj^&PDvkrCDF=OWiL zzz6tJk+N>mOdW|}*V#3P|0k#~H>9L{a-nO;Di5M3`QLGP?#Vws)$`>rWyjAjwHF_K z=tHZ6p`KSmoY4^av;3B2v6>?ddX&hymbPx{Hc4R~%+Mao-MGldYNRU!D?e%?ImpAY zmO=rQ=lL3s0EY+KBCh>pE1!fSVSwaS(j)nzPx|r?1*dPr#!gG(!f(zCf-2zs#Ot&) z?Yqz}1#__Vh@;Q=PPQ~cR)Fv5!0EuyrnT@a0g34$lm^&(|A-_=!kzwQ={)b7xPt#w zDCV9g>hLE7m+b`IWL}5@SDs4U?oJ6?10&Li@pLfvL`*L)2+%%uefC%u5w-kq)UyBg}C6@Cyi$ zLivE~rwH*7d8-qBOA;OLjp}SE4yfa5Lh%If%?xql4vWdyY73Q6D^d+#pkzyih!>U6 zAQ}Sqa8U=<&&#$%A`-*wcJO=bu$Xd)dpyh>>v5<+|3VWZ%?cetND6N1f-0{d%mSHB zT-b>OS)}*O+aiz(?Xmno=Jcv1^~CP% zdXb{GM;+yF%s|p59}bMdQ4+a{C!~k1CvO@+<9DNcwjfnqN>ET+bBCXN#yiX#=aL$sbiC#;77}EjMbM&^7kKE%; zxDwpz@CH3iED`5CAB{q34z?5x>sAMF-g5w#%sU3L2wjvB1+FdqvKgU@s;qGNG>In( zF=Li;Ia?2miehaDOf)zSZuBci(sD$-|IR;HZ!??p4Vy_GLvBh_@7pFrd_V}NSaV4~ zk1;ti(qL+)QWQ63(?p9e6N`qusH)mjf@^GQO;0Hy{}LuI@E-}Y@+5Ipm@`POqjV2GHmV)F&g6J!wkNdPZD=(@e`zRc|s+fiwi+0x~(L z@Q|dqilR4_727nkkGgXpp2-gH{{!x(lpbyGD&1B0(6U*Xa}t{hwxZQqTh+f(m7eDG zGQ+el`_n%MlQ=ukTkjNHZB19H)o!Rt;4*2N4l+eEl3x*ZOSUsX*fG=Y^|S6U)J${5 z*63eD_VYaQ(tOb(2G#_#&+u@yOmnj@YwA>Y5XB_$&z@1=t`jEX6Jx3K*@#S*60vT2 z4T)a1GIu0mVfMm$)fETgFb<>JQj$D}m9wlCJMnL3uLjnx0%7HIvF_3o1B^x~@L>V$ z@qmv`dof_^RtY0TPhV9D^U(`!GoaE)l6FdJXVO;EtZ>B^wKgnD{UgiDfk7a5G)1#s z$&Aei2L}mPahH`LXH?sa|EFeeByGX)Ow*<#3^g~C4lT4OZix~Sxm6djae|c8bfzu( zKJYV2ZZfm=_jGM@uNFhOg!RR*rG2{IC>HIpf(LKGR<-4q8(Y%djyzRzbA4I&U0B9o{}?hM>sqT zBF++~cs>|W?Dt?7|8IKPW(tY+2h}%c4cB`?_zZn_N^pwUAdiRBs)jf%atR`4-sCZ| zHr(`Yh6v(XJZp=6#;v}vDsc{pE`@`>RCY0P%yHVggA{YvNVB(HT4qYdyL^$71;+BZ-?8f=s>S#PomlkR5~?4HolE#ct{NiV@N%o=}tV*D)GoGD@i2K$LSmv{-)y2s(L}3+!rs zI5(dHa7sxu{PqA<%g6faXQk{nf2{FJX-09)6cf2M?PZ5g2?{U5BCh#_af3J3*^QG~ zjuxX@R`4y1|96ngn3AVd1c5*i+?it_lJ8>epHQMHqBbacoqw@slFcp-QZa9vnmW6|T zu?RMQBiKe53gj0!?XCIz%Ca{rVYs1K50k zzzYFn zZTF)4|L{eu%|$4XrL|&5Oqj5RNV_sC;}{1HLG)Q5gmO$(nIm*Q7L)4NwiZl z88Jt>Xml~0eUh0W6h~WEIBT=K5flr-c^WN(P-#WIdDkd-?Ys$FRWvO^%#ymy>uVFl zAKI=mD%rog`<6uqysNl@VXCA`l&JZ~AaRG^%6b}!1G%9uqAgFTXIWxew~@e`HriFX z5&WB*B`D_Pad{PHB6V^H;{7Dm+)P|G;hT5uWjqx7h~pGfta&Y2WKu9V3mZ>wi9Ev( z|1@+^GbeR>ey5ej&6+-_6w}B{#Q@yg6oemQ0SG8T9#Q(qmD#};Sertz$MP(~{Y?lL 
zoR!nNy-t_XD38ebwl!n*IgT##u&yG6CbGZWeU0~Cu`C@)(!x}-lKaSfYM~;siqA=u zB#irwVVWjHmA}rsofr^)S^76$x4j&kqmSE_71PlNSz=tJ0$o2`|<{|Sq1 zau>O-!ap5%H{3pt9Lejd+R%AjRe~f?Ex$K+(?l(NY;b0(r9oJhjyTj_{b(lFyp1IDg4I0v^7Gof2$}ImZKTX^ zHJ4cr__>v$zTH{#%WOfoj7lM6F5coVfVncbEvp}%na-FX!n12FxAmp^^<~~n-%CX) z7VyFUZaZE@kXVA%KRE{u|LQ#NV!peV-+W4FG->ZYxD3|k6(9sz5G1IA!Gj1BDqP60 zp~Hs|AO4$Yu$G{5|0rDSw1D<*g^T|#ZnQuc@V~njOm39Rm?y=JwJ6p#X%K9= z#2G7C^xL?z=g*)+KaJ`$Yv-s$4Zi#56RSjzUYB|mP1EDe3RBl^ z_Up_#Df*lZ_L*tg(4$LlTvM~%qk&oKJHiEL z9CaHChv0}L9#j)|9py!kb|i_XB6vVha@0U3{=?T_l$Ge<|6?v{gqcD8^fO;cnbn1! zjz}h{>UqmuE=ywQ;sZ#iey4qsL z5whMXTUd91Rxl8GLpthFNq&y>-I2tuH(8PI=}B&6Es_lJ%P_}W<9cD8D%+Q^{mbQ9 zAVTR}fjRkBELa74s-IG1?r3Jh`T?cua<{5F>%2_MoNje`-IbI{?+J9>i8!8|5kcD} zd#q)7o@=tl-p)*tMj1_J_1ttrY%fAoKI{-z#&IchoD4hX>&}1&+*^f63yX6`6fS<# za%w?0p;tWDJuKD}SI6~TAtP&D$_JT85y%&Hm$ls#d3UUl3xnj3bu7E*vgNebo+NJp z)63_x$&NoLNzWOLdN-rhQdSUAPK?!IToF4U93OMeU_7H8=U&5l57|z+i+j1pyCuA~QBYp+#;sJWO2@ zR4wK0!-#4JQl3$$i^!)%-^^$2CM2&_W)YG<@yHgH=)d4Bk}docV=225MVpAFMnLf- zLicl!;OSFgcoB@(Oi~^}m4rSZ!%Pe@HlA1FC^H&+pV~k*EekxMDaE{~Os!TzfBv(H zPUOl2t@$$lgfg3kQ>f#nX~7K^r7CGLVgD<)$DuOL)HA%~i`GQM@D1Bd4-m#dM(&wpYh1rDIWK$4Y=#jSh-6<`16|A%sDNhut206s9qVm;CW!dIZ zC$bzhru9j2)7Tv~5-7AhLlNMCVap`OC-@u$Fb8oISm|QRy}c-HhYhW{49Zi)@s$io zK}diisaECLwV}?tYl2p*R3@p$u-wdy;L;@8xKNgT=+xMaDgasVU}TqcnHa$`vk|Yx zb4fkC)^nFzr1JFkx|egUUfYty>k0HtUhIu*?XxPNjdxR^A}APba@4_Qgeu-l&x2T1 zTkLjlvg7d!Uut2J$X+Hp$U+fbFaO&f#nx#(m%-}B?8{(4ZZN*?-BoesC8yN*Y_9Nw z(rKl~T}6H|o2NZ*L#mUL0MZ6jj}!5A(bk<_?a(~J1rJG2=h^Y0w6eF058Mv3v0pa$ z$D=JvU)_>gcR>uQSr7`d&T;2sCLta%S;Ryo9xWq4Qg;m&=))J3m zm$^L=rrzuh_=!Qa=MGmmC-ZEzYNa?(c}2U6tQuUM5xXx#m-4TOUC_f-*3`k8rQmMv zHKDV%HQ`!hhr`Kidtgl7_^=H!Dtl$3UMTRIADTcMZfJqj^xY=En|i*(c9kcpVd6>L z=MfGnxbJIfNd|S{X?pLvUD<5}I@aOaZSdM?o*?t>Z?^okE;S7@kR16~a%D+2dt$!k zwcGsd+K%X;sY_?3tpBOnc+HtyPYH6*YB-I%?PS$)4W)Z|+g79`JOGtSGu6O4&QeYt zZhX^s`)ZCxKi@J`YciRWC(6oYNpNQRJtTfl7w)K+nkwf!s!ljGU?ILK^#YGKKPM!U z5u_rO*gE&)B+e*v_fW>0&hhvTzT79|Es!zyOXdT-sQC5HWX^8JMvWxFY(ExRzn=Q; zr*ieI*LXnhtjV!X@g-KQe1uuO^z|am;zUQ9O&^qXCgy5EKX!iRX}nB#cU|*nmuBQL zRil?O(qatWmw=+d8~Cz*#zSq2<~U;kL8699RMC4YF;*BvYoHW?23Kkuw{S)kg7#Kn zqNa5`;$tGWdH)@ABwH7N+wmjx;Sc@b52cqZf*?@}xPyl$b@jJit!HhmL4v6BZ5POa z&clMMrGrUSM!`aaT4qs}=W<-w;nKHH6hSO8?Vw?>0Df$UUOPS*Le-RY-W1f*$$@gOGtTDH0^IgFeq^ zhK^Vva)(&fV;0ZkfpwEwt)YX3!%!@^cu?4Ne3*1y0&5pFi2J8p{l|Y@r;4hFX>EgG zGKgc3IExJhN^+KTc7=GKxMbh9i&oNe@E3mz5{!%jK*R_zpmcjW~AF z2#C1_Po_tJErN*K*pG)7iT&X~JXd$^*Hcm?Aj6k)PRCcs(~csjhf)KOy=9MB7)yk> zf-p!auNZo<7>NG(k)96GE^B8p)!1erS(J#AD>o^K9q4*K<&tdFi`7Sd--lD5B9k?#7K2xc7&&gN zh-}<9jb@gQ%SI3dIFCkomQm=Gyyjs+V`mUTlB&l{1)_=En2BIHO8yjBTH%mIxs^^= zkN=$Ik#(pUUU)WCSbAugn2=~VBRCg!|UEijV?gb%g=~ftrGZ6WIQdpDpl9bt&8ixishaY(mj`>eqv@7nk%EnS^$N zptmr%!I(0|GTF#_T`7<9b(r`RU_^ow(-NE3Su3B`f6I|@lzCv2S&&MZgkaf3w8@Lm zmR8?EegD{;qxY3uSP_N!E!Jlc#^9dUd7m+9IG34si+2?1m3X=tafHK#OBsM2xSxd9 zDbr|``lOyjr)(k^H}-j#hscOAr(CazpBh?%bk=~C*&DFZn=R)vKm(rcby_0ooB!RZ znd|tF3Mz>SV~Iz^ayz-4g&21l33J`zEtRsNK3XcCIA^xmhQF7h1S+6%NT7*>pubpZ z!ugBFI2SazlL^zA-!z&Zc$$8xFrzn=@~N7`^rK=5r3>pVQ83#_}7nQ(}P-?c38@#F({VEhHpe9nRqu zb@daN)Nq(&q<+URwW?>m>U+Oep0qY9O!|Mm15weFt}_`R@a3+H5_&qhNqcIF$~sJC zIhKN|2o5T(06U(;IFPJSAbN6sdq+WR2tgOv6srPXMwhTkG_FnBfw@U*d}Xepbf@at zs;=6i`Q%FfdaUf}b;jy(gy|Pw$dN3gqXGM}gocM4*m_w3P|5Ki2rCl{q_I=!P^q@7 z&M~YLMUw?{oXBZLn_5u!wy^@?f`O@T#(5CgB9z$Jr!af9sKK+26ECEeWV$g{fOkm> zw6hJXn-;69&$%>Oi)&~RIiSh5_xGY} zp%ZF?8{!sCC+V>&igXxtw4Eqn>L#y}DYfNv?bgvRBn46nX<**oscoWrmyy!bXx5|N2 zQDnTrKcq#mzQ>90Ig&BAuE6O{u{$>#L$CRHx5?_AfD#@}`={u8!2h?urd6W4VvY)o zEr@|4vmQz*v#%Lw$jAqMU@npOR47mIx!T(2NK)z9GL`75Sg~AP$ 
zv3wRHFidP_A-@P>nB(KYKIu7=wpkw|J9%2d2;78I*^LziAvKI7?u)jn_C(anwhN)b zsF$ZA>$LdFW!}MNwgkR2>=FFcSwuX>xcWE~)WStri6%;*LA%7&+f-@c#_xg_JoCU1 z^B%4QV=ow?B!L-s@kZp~#m^~_3E{hBoT|xt!4YaIh6hY>wUQNFceOii+#_U)19^XT zb*Sb)qJvT%V;LwnTj(Sx*kTdMRk!*2wud~%My#Yki^gzPc$uik!jp+OtYrWarKYL8 zLp!-^q*)?MN8Xzeg<;CpVVEm&xoInrt9;1k_PX&`p#Ko;y6biy{&CAO3CFsez-zo{ zmBmZL{2k>&Bm*oca3sjhoX$@yxRt7KnlevCgU#QHO~+;{-Ge=HOlC*NtzG^} zG%)$t6&Klc`Pxux)p_VJad#9tp~_k<8Rp|BuN2m%xkr@oUz}^q=5yAlz1$ni%ZX|r zt&N+RJzMZ4*G%l%4UN-pl|jgd)4pQ3(;b!4MU-#D`zM~|e-kxhCapZX_-sC{4OfU^W+RDNgtbzBE)laF5 zIqpc=&7bs@hDG}y9G+cZyfNlYZ&FG~2fkzO5jV@C+)lpW-4&Os6)>aJ<-%je);+KO zvD04+g58^`C;XR=>R@#=yRF24<4{3o zJT{)fsra@ATrU7D{B?*qo8Hj*uSv0sj2%9SOH*k0+~b=B4edq%V446(Bl zUe&af>bojyD`lKwPVhjCgRCy0t`3Wy%VuHx=%4&innmgT?(hEHtF*fsx*ZghM0*Rd zd!sgVE_`sqp3P&f(zk1nUtPdlR^D31I`IS6_B2NW+%`!4@gTp?(flv3%#^GGVS34_ zd^7WxvL!^A$Tc_X@Gjt?%%7CPc>;k8flwaqv||=g5aN98LZ8+;Zl*Z5b2J>9L^Hi9 z$?&3-*v>W{D1UsDsVl5_;YH~F^seQhWL{`( zwLJ&l%c#q-!?G-XE#zLjHr+BFUXKg45C|z%F@9ns9nbHBui`cJB#spnhNCA<1@Mtf z*XBM-`ziSp8|FZ+Df08viGKBq{`Y0qw4&FvVjuS7bX-JD_NlMb^Tg|T9&2rR(~8@3 zlW)x-N{$`vWvgxB8yh9%)V;kbni9e8xGzeL>kf89_96@-25rN`V>bsUE)ZK zf8APc?kdTo0P*i%g$nPUy{jN_V1-U(5E6tKQQ}036)j%Gm{H?KjumP}RQRu)APW{g zc1)#gaK#7NW`u&z`A;8<${6kv)F`Wn0Ta%7l7r{R<@M zUr9mbOb)c5v!zS9jU7LR99i;Y%8{pz_4<|Wv$1^NHOyT3>gKAQLW{<$@H5)eNjJZ( zT3H}LgRN0xq}wuaSe1HRD%APYrc2!`DFA?*AH0kfXEEst}~|s9nwGMNnYpf9~JnOt2mn2WW;7**af}29pOG>{;;wd4+7Tb+U zFTVscOz1?sPOOHW`4Gb|>8hYAvTn?ZJ0+cqazcVY6w^Zal!J01!(O^iO2d#N?l8l! zG$~I-7iF|j^rBqPEbvN0bGz!ir0z`{lN?gCC?$&2QHlmlv`>Qc$}gdnSS+VFffJoJ5>k-Xn7{`fJ9X zKB}eLmH)6T3|Dc-B^T3>+|*PvOCyBTDk3NCYSWO!QFezNDjtwJpHl0h4xlK%6oMvJ<|4+d>P%v7^q*C;MPZ zD7Y0r^1DS7r$#(+#pnL#A?_H`y1K2AdbX3PI7&;LQpv*~lnA0TdHLyR}37|ya&se>^~g_0xQ_R0o80n#K8ZQF~K{H8rVt*%{wYLnywRvqq{EJ_qa zQtbQ&9FO>9PbCS2H~JxvL4_+^BE(`AHI|;(bi#5bL|6T$5=Q1ZFFKh*SpWXMIIKVyh!6XqN;9saz3vuMphd?u9 zMSAo&UE#@5;gh96s^k$zCI@mrnvs-n_cTg2)0tpG<5U>pmigt9atjgI&bH%4K}t%0 zS<~9EOo_^Jxbcm9@PkKK5{Ns(Ng-_<< zfT&^?)Z;T1y3hj=3tc~J94{MZo%wYWB^*NmAT#<4?s|w+!WNb}4)vl#vdWq`c?_qVR85;8XBCIYiLLsZEGY;1 z50J*fjo$D_>24vumiPk~{fI>$&Y{v40V0xQTgb=enx?3F%6Q@%$e(2SQ|+DO9jjsJ~QNQ)2{%7q}2 zYY>rZ{;2p!mc%Mm75nFGu#~Et5ZAu<#j5^-MpJt_c9R~OYGN%{Ug@e!e_HKmQo9-f z&%UuAHY=+I-pCJr#3CHi(?@6l3b)r{b%F)X>EDzCF>4V-bgf#VNMhBvclZUrFNSer zIJQ+wdeLK}OfXFo0-}9-uWr-j3Z-(aUhJ-tX5IRSX)Q3let>sN5g>pz^>d2{Bs9hr znTkC3gJPNJ0~dJJ;bAI32_RhY%4bG1*FK~qI(;_PTncX~Qm306Wr z1CbMsW{k{zX^sCICOrcJ!`|@5H~1yHKwAJ8p_THXAJDJd)PfI+Uh$1robJ6X*X6xy0f?BR)dDrYxov?4txC=&C%LPlUZo@$r~Rb9N(a2s$7mXC z$HpKNd9D~~<2;96y*?~{uGG1%5OCP1Km2tjxK8`a{g6`DTEGuBFREl< z+D|3!7O(>sXmP@Ys=(KN;nI8ynsPsChIowBVe}>-tJ`p^p-b}SzBoJjfeWADA{K#= z(vbx*z$VqD9HW=AVf^4WG^0pL&Bp6}2)C`hci8sl?>?{^qN;*;%}2E1eKi@kKh9~S zvBf(6$1mQK-P8gX=wJzPKp@Stzwk1NdXuu1F#jF%t2_9E45QhtJwdA~NfZ}DzYBD< z%^8s>p&P5=j5jm3YZ5k3;+coSGSx7M=gO#GQYtb~1VrNlf$#@?;D>>;06rkV0+cj* zvo{P{KympR$GEH9X`)1-3#ZXCXv4rIgt2CMG!LvOH8U=^Fb&MYBwqrQYg57c3o(0g z6Wu9?TUZu<;D;&lja$G$(5t?dU>TXq4JZQyeNz+($-l3Fm=2Ps+~^6HNH`|+!}H^! 
z@Q^~S=!$bG7Erq>{7bJ|DH1wEBq{NOT~MbL@d-X?g?QTtDiMTTXgft?J79|n2+|l- zF+dubxkus_CH%uxM7qu3CfTwx<5DbmvHuDxOe{t0xg_#JIFmFFihwvQhgK%V%D1zK9 z$YC@4h&1g%L$zbWPL#)&vl)K?ul0HWKVT-q>ctexD-+uf1`NoOjImD=LQyo65Cp>V zpu*75MDeQtF_g$;Bt7EW5}KjD62SnDOua^NLa=a}6j2GDFbNAZ$)rrb*a9WSc{s*f ztfnNF)r%?pNGs7Yz?+~)oU{N=^#2C!0u;l*fR@4)ssxK|Il@9Qi6>FYw)C`5S;Ss( ztY!3>xMW0*@tG5RxvPx1ti-o|;Yv=FM3urL0ssVOyUP)T3l0LPJy{XAgv?GuNp*3+ zWfa7+Y{7zgC)%Nq7gN}p=4<&?`~no7XoGPab?_HB(cnW;`Nsui(R>WDX!MsLxz0J2?5a-l5Ei^ZKlgP6V>Y)CM_i|+`7k;od6_&&7?Vs)Q}&g z!2~ot44{t^@q<1bP`a5>A8iX8-M7WVQjW-vYn0MFB`mls(9}>&8vQ1Zqc&trtKVF& zkzl*w>SzBDb$HC@rE`>;+CMJD0D*VNNYrKBO6(CJ!I;p$h zGaW}&l?b?n78%g?tN$%N^%C{@NlnGoEJ_k9{iZF|5xNK(IV~FAq@9_$h1v1W;zQg0sxDP0c zfa6q^xG_&jH4kqskXy~xd<7x(6IPtNlYU*EKz-Bi$WCD_*Llm(Z^*FeY@-^yj|V_K z09B`>c$R)UHuSPrebrbIk`Ne;(VSb7%aI&Yn~lCQRCBEbC<6yIR9Ab9*qQUtc{LdH zbfF~O*qco=k*(9_ichYRsgX4>4JuUbY{x`9MwncrWpy)v^* zNs#r~OUhXO;{PEY1=B=}5`Sniy{wC*-I?r^L=kP0Nkvk6qT08GO+E1e!Zd%>qHQtJhTpm#>fOU$hNh|#fOXLt!;>_J=-CaJMRvqh^(`~9X+uGv= z-{}0L?`=~;3|`8ti*CtKzs=9ne7yUpw^D&t1;ST;Wl}z^)bQ0`$E6M|R4=?KM{YgT zZrR)R?f+5mv>u-XkhxF}nbk3^fZemLOnbGvh$Eu7C3;5?x@XQ)1!-Cn*j- z+11?WJyogV!PM-L)KrdLg&O4&6M<`2WNfQKDecy_RfM$*Wd1{fL z^4YbMQi7deEB#a9 zC1a1~>07p0fo|m0nC2cfSFx01zQspPq{k9TX3r$VXJFy#%sQa$>8keWfX-(ibN|yS zZYWJG2V({z0U6A|h2Al4Y7v&^hluLC)UO=2>bFiDk)GnGt>|$cpKfvAuqKJr11MBA zG`0btw2qyoC5o&LrG19$#KssZjfihZq$yO9>fk#>3C!dEcV-4P=t;1Y{H&d$9}*g zvYgO1?c?TF#Kv2Jc{&1pY?-ENXyRMh1}NGN>)RgVm5Arr0D>ntlHZO{$11P7M(*;S z;`SSAaQs*HV=>g0!X1w8$+Nyh1){~MMv9?t_$B9sgw3h7a>b z@PTIApw(dK?iDJ&XJVf3wwnt3?v4BoWkfsbZz3H%(ur7^8bD6)5%<>=jPSs1KX!3C zWBDc)EWH$YjYyA`euTELr^GM(2 zW11Apdc4Ux@h7Gt0Y~R2Oe^8cm6E6)2{v5sDfR4L>+rtlxsdc$pZ_h41+s)#zkMQQ z2qiwdO>Z_9CW2^YW$Ou)VrAi7snL6Ed z1{NUqbUafdy@)5_e5Yn^=O^3}VuuI>Os2t(a~`)KA6K&lr}lVXNX*If&|A_;5t7nn zu~n;}x-g(`kLeic(Dfzv10<_@n~juhH1mud^ILQ-=UGvXaj9JkdAE4%H1Z=qyg4(7 z2+EXgmfVuPZGUG!wmTI<@AT56x6|YAc0I~SqVqieY_?8bX}|cJ_nz9&C`m8fh@y!`zfe$hAmVUl^OKkV;8{_=0G==xru7If$j zEUO%d3cG_w`f$w_F>XQh6I`Zg0jMI5bGiqJ3IYcbEdOZmAi{(S7cy+<@FB#A5+_ou zXz?P(j2bs`?C9|$$dDpOk}Rq5A4-9p$hKr{xIl6j+O?!LC$mu!VB90_o!i^dHz-55!>&oVadEL7EGS725YR z=+L4^lP+!gH0qo%W41K86Xml`&-9YnYqNFg$~*!6RQk2tKYsdbRoF^3@jtET8b{UI z6Hr5#o(=D;Mb_Fh2-7$fCWG~^%G&OL=YfT?stxLH>N@`{WFzw z5iU8_hjz-OQj1#(Iw+xq8hYqRO_Bj$m??>a!DUkY)DJ&uL890M=nNR{8)zt!heGW3e=) zk3Lb2N>ziH3G~dOn%-%FX4Y9(&Q?xsW-GYiiaRd3hIYqZxe)mpL2p6*^p8H47Q5$b z{s^a!KU(z@3viEtm0x^2VYm{b?s2QFP?}kQt#Ro({4m53OB~m01Ao_+SSx`ET35Fa zz#C9P5w%Aj#YStbaQ*l*U}C!N8B#-js2mqP-># z0T>wQ70v$a;RjVrXU7w$npOZHzzCKj4!iHI#L~t+i#;~kWt&Z1UKiu^GkQBc>IWYc z5RfzO=1N^o=(^ ztw;tCwLD5^_4V6-KmKQR85@1r^Q19&UZa)#AgmU+h=o7+kg13WFfjy(06N^UsF@hA zdHK5_1~aI^i(C(5l3ScxLMNx0UCjc4V2U6nQ;`Pl0t0Pw$4pjL4nB}@gEqV&4sWP9 z{n-v(=ZYR(bflRFTy8++5V3>>01>wp3a%AyDjmyaSi@TFFo#yWA{NW@!+_<5dmIbh z!Z2_k?=9>BOPq*%D!>>&04Q{S``F25b)lPOv5t1UqlJQ2t(>V3iBM~x1u$?GH15P+ zv4{m9@*~8%EihIW@**B1DM>84hg$vPz}dI;w@;8ac^T zva*#~>fl$>p2pmYTpuvL(6DnNDu%W|;5F<*Qi144qe*!OJtf28?M2iN?RS+byf&~kL z1`4XR@?aPv+?bJ|#abu@isM6+00BN2_}mgF4p1$YC|8zyY7)c1 zaw%<&d|42r%91j(X5=ZboCSquDZV4qiK5%LaO29IOSi7wyLj{J-OIPH-@kwZ3!YnZ zXU&{6$&R#83nb8~SxKIS%C%w4unrFwHk=cuPqG+;Rv@rc+&~MEQ8E@;4zi$=7BH9= zhm6wlG%M1gpzk7Q zKL0(t6=-FJ!41x2sI&UO&hS6`#2S-yL6lU1evkTeCC~_b4746dyuC-(N(t$#mbn?!06jo^Ag&1b2;f5S$=$ThHjnx}_Jeh?WR*hZ85`884#~4HFA(UQ19)Sek zW@F*#9#*;?RY7Y|os?34U~%*mP!{-c6?rx(h#Nr+9)uuTVjbjUj(~MS#)N(0nAciu zbtvYTWR_{>nP}pbT~D0xWTSfq>2w;Bjs5f6Yb;%6)=MiQMBj@Io>$SEh#45?nj&5$ zSp^Rez~5Bl5m;meN1hbHY+L2%;E0q~SrB&%O^J}Goc8q&mtDg7C0l8>>gubo#{ViS zbBM+#qLH@l76r8grBD|BDpE0Oj@a~feb^dZ_y6_C`f36P;6~?7@6B22HZxlomk+S 
z?B0x5(}>Z}9LM!caV?i^_StBst#XZqR#$Y5&#H%7j0lpn5lSEnI3$0-ng4w7p!Gf} zZp;uD>>Xp(cHLyqQ}4nnLA+Tsw0lEGE9jd_qZPD75w1ms+MtIn`sk!n==Me+60K^k zs*aS>*NTo*4uLI&B;?J2s^_;r{W6R6h%&?6ELPy6lsJ?nc?Z_i;=HzT!3LV!aJb0J z=M|~yDeRDz?~Gmg_uz*w{`eFHpD2h7$6cy||A4HU-QML17Qjbtm%DG;^R6{Uzn@OH z`dZPA62=5z$i$GfNXmB08a(2eN7yk^z6qFuvyNEkc^}xCkZRm z6^3=f6Wj^OgQVF-D;)et2|x6K3)NI^Pr4)58U=Ko629?So_SKF0D(BFJpco^G)hvG z!j#LrW*2-wqh9v@=P&R~yyXn1TUC5$`n7 zxCnGmg30859X$x3TuGxgA@!tLAs|U)Dy7x{Ni}#PYdKWfHH|=xo#;fCpY-FB*YJjG zMa`>T_gclHA@!&>B-Xx+CA$T_E!J4wY3F1~O2X4Bk5U-eg#`eC0nSRLqs64nN~2X%+i-?>nuHuP%_UsB(hps) zovnJ;%iegIPpJ^Y)oCLIJD^cYCEv->p4J#t6zL5;s38;pNg7#+R(7ozKnWjqA=jEk zMi6SH+UzuP8IO3fVXfchyfn9taU)AxsuLaS!ry*{B<2D! ziMPyU+0v{pWcb`${JJcfnG~WgE=y&fg2?f-GXH9OJL3~)Jm)AcOM=W*>R9Z?Knv)z ztcvrq0jWh_jyzU@1Ws*2uz6Un2w62Hp3us_A;Pai1C&B4mWrayH z$LJWV7u26fa17i#OEil_({pf_R>$V86k!#t&37WqPp$NDKb;jPtYx%2CQ$`cstH6X zGhFFrJNwy#DWP&t+r~?JML~VK@{i7Gs3~zft5>ZBDuDysPIGFSHxe>{u)+r;1Ny&a zsn$OPiRHzlWWthz>${a{PvHh49RARUH;ny6XdC?C2$zTsrR}v?_vU>(!<$yJjU`c0 z9Nb~S_*H(C)1{(2eK8Tl4+Y@|1Gbd1wEqF7NMaL})=c#x!p3mLPHs`cvYd38ekeN@{g4=AeggEjbZmD*kd{ASSSI)%WD z&00imxhiv9$erYB1^S)QLNwr)w&nqRzzP9GM&7N&W~Tp$Zj<+XVR8Bz@Nwl`Wtij- z3*dq)c6wZT;iC7kNyzWG&;2tmjpw9eCu-!xKnj;_V_MVaKFv>P+i2wA06Ew!a{z%edrd;l>`}VtEI$y^0Gc-7n_`jOk zWOhvMKIBCNkp(Ek;#&$sAFuIv=JhG|B6)0*XJ+<3KZIZ+vwTg~G{Mw9n`dW91zyk> zGK;rDbHaFsl{fD}n zSWwueyJ9eoIE=&ye~@?= zLuVJ1C~NpfD16dfnh`zsvqp5)8bTstECCg(b{;XOfTwptmG>jHRu#s3bf-m>!B{ zS7=o*K$T=2hHu^&HzEaixJEuSY4O0g0*F-C6@69k79`Vb3&;0>{WH;ed> zFd38N6Oayde{W(QCjaz%0s<%+Cq%l&Cob_~0|P%;2T&^Zd?6tat0-EFmN`A)G(+)L z!8LN3+cgpoP>$jzFitZX#dK}FgL)3(SyWeLcqnNHnHSt6mXO(gWN8SjGAvouF9rA#x)Cs(2t|nHjm!v*UR7Bd83DV)9aYjpgY*wFr5jKI9+Qa> zK$U!H#%;J|Dv}tP!bxQrQx0TJ7Wr`^ z$$}X-6EqerwSgqo*@Vz}dM8+0l1CDtwHVAKT-#9#f`k%)Kmej?NyYgPEs-^|7Z)cc zpCU?v8(M!F;XNgq83gz*m2+lgnH|0vf9BCyGIVMyB4i{pZYwq@f0djmv3bTKP~ZV< z-@#H9ppmE<7bbFmesP>5`lPuRkP5dZ$C;uFv0CZ5B)gMIw#7DE^@Ma%qXV&$sDYzb zvwu5kl>Q@msG(Mmmmt~LVTr>BN)ljz5vT767q^fI!q5~03Z;O0a8jBXMT4SN`YEgB za+@J>fd9js8VWZ+QxN;DAIU;4E%_ln#`Zy3VWRSrvsiur?SQkif zOGQy#f%>Zd_^H2P+T>%a3$%mZx}s67jGPz!U=E2P4N!+H|o}%U308rQEZz z_d&E3@jk}%B>NhGjkG-EQKoB1Kkpe?GFxCfrL0XGt+4U7ofWbKL0uv-w13f~ctI^l z!3RcR6izv{fV*B*i?sg0i=Eq zmvXxs1n{F2H!od!s9N`5X?%A@wjZbsI*FKH!IGa9427dR7j6705dgbZfVM0a-~k7en@|7kUeXg}weuR4F>B5i)D_!UW^A zrm%?~4Oyj!1abll7$Phd-#1JWr@KeQaOsk@zBBvV0Q2+dn6l}0R zJQ3U~#DMX2&sDMSrxb(;av2$@J5CO*z<4h1p;mxEgSs&~W(#sSU&=0k+2mEUi$z0EU zwa&MqVx9JtmCP~Zm9FQ)Z0s8sVT#T11%{XlIo-TDqFQ^%qcs>=YS3ocoz!e4^#9gG{?LaJJ<&x$ z6iPu$hzrxBY!^xkX|%Y+SRB$woi-!QzNd21J7p~UII359%w*EIF`Kc3b{08Z6h}c6 zrLhYF%Qy9lN&OrVb#x1&IE$fMnn~T(r8B4lEf68v%!?M06G1meUDfe%)mEHG8tv5) zy;+poS_w4CGlS6#!An#@i|p#wj!im?T-H)7W;qE`QYaA&!p2&7*fEH$u%TIbf>sV0 zx5xoUck#YvEKf>dg^%6ZYO||}A=v|=CB+f7kO(rKj9MmqCVVXy1$d3X_9(17)=0J&dfpW$FzP{|%3f87dRW--t~SFTLN6 zc;C<#;Dl<4TGb-&ec1!9;1*6|WQ}_c-VhtkMis7MVRPXk?%%ayx*^u#>gv?*hGVA; zg(Lprf~(&BolsJW;uK58$EYBh4N@UD;m>j4v|8RUKI9V2#WNnt#cAV*0w%!Bfu`&b z=Vgs!G5}OStH0k>8Ja|XiItcQ&r$i5$qH7!)iX9WyQ7peW>6-sVUN z;p!=_UEbtsS^?U`;3_h8!D|xuvEx5JGkZZl1#`A;Ug)ahVq5A&L$&Rap6=={;ynoD#RKiiJm-UY=Xu%^h*rA=>hJVPiTZ5T5dZIx^X>s(&jSzS zKeCBbd*EI^5DK59HskSx_+SQ~<ns0A<;o|iv8BQRIq$=3p!rTL z2+~Q?gwZkW$6##nQ?@oH{tzJ=^B=VFvJPP7Hc@(o^O(-<(95zA>?>P|@%V|)t#jKI z&Cch-^=5M958h-1=o<8xjACe^u9A2eEA9#3;GZaR^_a|EZGXkdz=Jx^l@DA z?JB0<5d56_-^2QZn~M1*_pm?p={?J(`wzCig%STrDz5QDD1i?A5DP1}YDI6EPyc+& zera$?LBmz)$)6mA?+|G3{0HCnlW9TONO+CCZA%VJE}vqHjJK*7F+ z8@FlBxeOPQEnDFonND`|9>l0IV_T5r2L3c@Fs0yHMdc1GoA>U)zc(%JJjs$^+o6Cv zTWtJT^k~whO`k@cTJ>tylxaeyIeF^RpiBSJeoWk2_wKc?>sEwKx8S>XGJ&`J_)%ri zav5`5)RjQZ4VTmBGGy_Ky5|_U+xje=qzvRkpo(7KWey 
zv3aA${qnEinNHMu2*8e_vw$O#ovX&DLRc7^zf*RemZPA z(IVqfy9#a04L2u`>hPuTUX%15lX>FDzj5dliDd##$S+wJ>L=Ca_3{V)~PE95=;S!BaUu@G}V+Z6iq}tcis2CbnJQy64bD; z4Ih2?5KsT{SCuVGb~@)&N^)y}aomjZukCGHR4FZ1_NcQx`weB~mEIQGX)U5%(`XHf z+h^n7hkx9{VOrBhkcSs4eiu)-Gv&u!4oXQpf*I+-qkNJRgC9|YmT6t_4TDW`$}tbjif7{cA$MPN2#-esDDnFU41U6>;o zq84Ztlnu~<)=O3diFdBvT?IV_E7%5ExI`xYY=o|HRc*58GF-$WhfPGv!=88(hQ!S% zud7-N4WgWql;m(9QVbBe5+%-c?tp1@*A>SYK^IPuDy_>D7WKGCgl$n&n^Rj;O41KL zW-)tybelp}_Y&GM&sFOR)BvS&z>Q2$E1mNjw+5I%V<}0E%1V@6_B5tKmeQ20LL{ST z5=f++agPnrq8*`Ffv5a~lyy58^3vrYR=$KeAW53MdbqZgMNEyZG$y4eCQGWsEpMlU zW;8LzOsY+&EWQ8qr6u(^zPNy~mVTT}lv;*BsRTwO8Cqd^uxCe1_7a($1dSl@=d3>A zPMY_`r;cJ-O^EqRjRo;XKm3tQX_l)^TXbEYUSqCjx~-q!%gh*$LXyvvF`buOCl93r z&pBRnh)~4lRs@uue3sOtEGpW(02I;+@IxQ|U?~$}DNAl1WokoO=nHYzP~a4ge96B@o$!qhGltRRufP)}-{0Qw(YMaMQ$@ zQWIEDvuFQW6FI6zx-U47{hK*~NwP|UCnK6^3_AC-Lqqnkv-~n@cSJ{6*0%Oj0Ls=p zW%kMNJPM1~aqJeK=`-bxC1)#ZtZ+n@pjvFmF$sdn;GA?TrYexMJ-U-k_FBE9Om`%5 zbZvIEyT7}Vk1DJTPcU!GOxeDKx3F5um~u)lT@tNH_!`MYxIvq`GjBp_@FI@ii&*1)hGjJZM56^@LMNNn7grz8zbpjbthgQV#;A z5;>A+2_a=I`v;Ogtr&C%vhz zbxo~9S4GiYV{|GzZOBIQ*C02|rF9rm%OOp6ldpEk0i6@-!X(HrO`c9a^brny8CZvU z{#^`fwbTS6c}}*jYOk>(4sq~-00g+juI1gSE?3v1e4A-hfXozRgS8-I9Z;+7qF(0M)o_ESs z4xh&2b&I52}z!chpJk%??_Wpo~!LrmmzAdBZJu|EcvU~DZ@{#+=2iozk0l_-gU+|{>k{04>j}VMVL^&H)RLw*^y^V zdE&1q3nH$a?OT|ST+O&;6!6K2Xh_1VSUiA+$)o}wbsOoOE4Sc-)(b*_fr$Tu_l^gC z@YhJwkPn3BE?MM`HUDb=JZhdj`Rv>6j9ldal_zpl&2#r0do_AHp0nRFjaz?}!!Ldy z1mF(Vg};9GKZ%+l4=mU@2*rX!`z{JPG`m6%l(%OsY#CVYD;J2O?{`r=vdnD76*r5*~UQae=`=1GcD}tV?sOmQcU*i-7-H-~&H!9**FH zcoV`pY>^stq7mA+SdpELfe0bt3ko^AYEpKRWNwm8s z)1K`UB$F9KIlH&iV6d?2K&)6pK>!3k_y+z|3WR=uxBvql4_PcT6eB;Jq+S`r1gt8DD!8n$I(C^u zR1AuC!vG9m#jiL%SR}_x`k+0unH?jgV#5;*l88e>Lnky3!U4MCYNfgpz0DGt<^dVO zlRx^iiUaaKYIKjax<;({y~e182!J<0*oE9HIY9_VuW-e3v`GJR3^w9}HmMlLmQb%C zAuo_)z{?6MUEzr78@;&09AJ{c)}uvFDhTX)hjIV{ATS0M5eo`Jv31iuqPDG8Iq`x%uCpt3- zW2i|=KoOjb4Qn)x!lOfz@B<83E{<43qqNH{GoBVyN?Ys`rf|wxdB5eO3y~N`mzh4d zps4%$tisd{Sh6OhOr~)%20;Jw1DT!9SwL!&spsp}s!j1qP6N*}T|z>}+tV>ucWOSB<6 zx4^uk6wW4O%GgT~Mc|8s1O#ya2Yn!bK&YaZs(>E^tjoO418o}Hn@VO%C#Upnl}8(rAwI^g>$EN&b08 zNeK_kXif~^13?Ife)xx5@B=Gq2pipsr$AF6UCsX;ElS8-7IefgM4B3VG72!MPFH)$ zDjgn-xRu6G&p_k9kifnRF9MIXh}8bhc18Zb+uHMxo$DV;asbhe&~iVVH73gClA6$F16 zC_ZS;2cSdg)YM`n!k`hez2rmS)RUg^pzygl_7c&QNt(~vj6ZGCBN4h)Q@H}M6Sk~R z(7-W;JP8Tx3<9vWO2s{6WmoMKAu3(Uslc7zN(y##IqZl$FX^2naS|sbMw3ZIErgIt zP5pfKUp&Ss6HU}xX_KU5qWkS?6mDoT%TVcqcz`D?~fL(Yu zsv|j+%erATFR3ir#&c5{R6ayn$AI%o*Q5`N)6GMpkV7HNNI?vOlrJl(z^?t)Xq_Td zshq}GB@1X#T@?q&Er$sB#-=bl#4Cpf2#QKILcX2bStYRiz1HH94hR!IWy>k=l2HFVfn2s&5}H^E40u3|7TBaCU+>k&Y)IIZh z*3p$W3Tw~xR8~CsN04P*k*&NGdDam8{mq`>rU;3UxEH&MwNY{MIQ~DpUJ4i!HjIFHM!vIG({D)?&SU#wQhaCt&2#S=r zSmXQH1@_@@b6p5ViaUMKG^1Aw_FF3r91Ig-kFb&vtHcZSz%DE`|DmRw5j(PqTNXND zLd;W9p;?Luur-612(TPky@mfqt%Y^<+#7Z`xcxU~{oy&rsEx$PrU0W7c8N?K8s4Qb zczoTiLPXqz9#sR#+x@$3wPK5)1WK^S$|Fr8J~mN3$DRDg2X>R*7_wR*W0?uDso2$0 z%RP>0w_U{mIHu!N&Op?tU<^zo9_ug4s3e+T*+8=|;rZR>Fc~7dW$itTpQuS*))Tos z2rbqhU2=$+=-Y^)5svr=wRq;}kyU~~*OBl&aJ0Hwm`+vBk-D`A1-_nE0~K#Z3UGeB zT~s$2apqcv4!7&P-)&V}raM70QP|m9?GnkgJSrkn2mwlnO{`c9z>JKT!(96b4B%#s zC1(m2T}xb^DNT(DZk7M9ddzvsiC1OJtodY5RYrm^6oUP zHK52DJln4SvFVzK7gUiGWI~Ev?TlNH%kV&8npS0R_Ed9@74Yq0b%N)yiB@H{WfbJ( zM`E)hqgnAHmBB!+f$hW&=DBH(CNo>=zGaJdHWpJ!J)}U_e&7dRO=Dc3(epcOu`XIw z@;99>=7y%=Qq>FZEr?dmii4fnC%mvcVc8g#r#PvYQ8Ns}38sW7Ws;Lk5*#o=>q3u) zy1@d=st(J4p%MR~(&H5-G|S5Tp@i zeq5bEpnvc^KEQxUMJE#X*LG`8%i*eQ;s|4)gi_OLllo#XTnI%wmFqQ#G%iH~XoXh5 zfItv%U0vP-Z!H!?%IcJ3&G|Z=QC(cBOJP8Ed3YMGGYkyZHxvfFkedw9agA z4DV3Oc=e!~u8nvite2_@sU1&Z@stPmAUN!5MsF!li3uH(vj7 
znD?xI00|U?hs-^A(>kgr{68giS#k53AL;@ebV2yW76tqF-eakh@Y6S1i64lImoYp< z@|GhBJ&jGNUWi8b2(sA_mau>bz`ddf1aAlj3G{k!+Lfffapvk*ZL$Z{7GcIqVX9T}4&SF&vB@+Hie zGH24P`EaAnf@H|#?CJAoOq?xoRuozhWUUHCmol~JR8Uf-<;n@NAV{Rre*$Y+h+0cf z1&;*yRNxA-f*?P)FkrYcH6EX+DNb=H0u; zqq(zh@BY1e&Ijd7jI8h(8PGDvOSTVxB7d1_wi#MU!Is;Axw&SQW*K=$8&pX>@KH$r zSl|yhuki5za$5nyM{^w|m7R!q9YmFh=v~Fwe=Sm$pkP&Xg&=MS_Vm~@>j|Wui#_`I zBajdsrci*L<*4I9=()5}kv*ZLl9EK$SQB^(?PU;sH#OKKL5$rtByRt#U1!yov<=9g zSye^Q)>~S6qmPCW%tg>H7<{-OT2swv;z28l^q4U#2018AV*VszUo#SvWlNqVI(jMC&EX<>3{QVnRt^4k+q~_I;^&eYS~^6C z>*c!i0M@=zlnh~-J^GVz?^3(zt2_Qr?K=HC?L`WFuh;)v>lyy^lB3R9*s2vaytt|& zB9WfzaQCTy>~D7oQPTsg5;&{KNlpeSj$1k+qk{MkA%1e5pxg(frIGM<50P9)nq|0$ z4GexhIiCn)Xc!Z=1cow{lnt{~uNJzHA+jr8SAJL!yjbUJ^BRfo5#=btykQw+EG5cU+pH8q$3%;IH*C+;wj{Hte6bccW8qIYcg8tZhK4LD zpZe^05gSemk1?~IrvCLZiL|a%kkc9qxAm}`2`PX=S=huFHlF)w#5H$0jF}Qrm*qia ziWT`y#SFp+KUl?WTjLW#QnooV0AXcxgioy)XA}Pwg2a;+=;5VyqLNqgvX_O@V|{qi zxhL-Mk1x9&zlInRR2EWyT+>$o8xl?WMF%^Qyw>W96_zX-aciZj#odTiOE><*4|sD+ zL3U9W!fh>aeKOoh6frzsvZrxO%83eVh6{f%&}@X+iRS1j(1A)5m=GBqr9>mX7B0$& z3LK;li`XT9h2=`#G||WT+zmAhc2rKv)0}a|4m<01?4m zBm_cc1fgHLRx*O3rbcS4iT}#+CJT7OA8*+VE;$u6o-*~JgOOul);AJ_N=B*%;gQiS z=NWog&O2#rl}VHHFp1uUR4nq#?~-~%0jd8KI;V;a2*ENHwOYiR1>u98{JHPv{QVLJ*a1XW?iO&_9F8eFB0|0M~C9>yn;;a zAn7B-xE5BnSffi`nnlS4mbO|Z608A#E6LzK$fUEe&Vr!hmrZ4KArkBoS?^M_#iCat zPRuP!q@y)3x|4R2!D@v=ytuJJbSD2wI$isKMAfXqS4s?xrR9b@+*0LFYkgrS%Tl*r znAB)?W4fg83VIN`Zlrg#S#Sayd>sCAgCB+Kq5R3jssNe2Y(^EWD55a%2-ksY zEo)|X_M~Tcfqz`fp9Wo5v*?1^#V{MQOq$s&N>{qmqXl4pA)?TMpcXzLE@)RB2ddOP zr^bR^h>!d9YX1c}Mz`TgpVcg6qaAtBR;EjkOY>xc#OqsU$xHsC*TGnO8bBH`=1i`J zmti-%+SWefsTaEDUGs2J)aL)RAr({0w{feyE1^n1aS5a*pP8cSBm5|(04a_y&i%M ze!@1TCn*Wsc~RTF)EVAQt^fYW!Q8uo{w<~(YUzK)ii6H;5T0wZrUsK@gywv0sU0ea&oMg9 z{oVA2lsb=t;X3F#&Ltg#`i)NQ(!7(imoM7at|GFz*dFOyY`$05-(2QkDV^>T=H1H? 
zi5}X_0%wCKuqP3E@P7Yv-WYkMz4M+&DqR_V@cATDK8Q2pv5R#`51I_g?~^ZBvO^#H zjjwlLg`-xQ!W$Pl9wJDQQOU|k^AfIo$bIRDKKe0Z(rF%PqWwJd@9w0f-f&&}tvjO* zXfysm@=Eiyam~aIH)**(C;-==v_Nh6h{+0IUtyt2m-2V`rTK)vtwkVW0Z;LJx|qc` z2(BUDWt|LrnfJ7|B0$KnD+I5=x`NYiO+SDN&=Oti} z0Nz7L;9wLJr-=X8Dh;8_bjtIk702ZsiTT;30ifAf2lshYS;S5<9UVf*T+;ay^Q})O z6<&!Y49da8e9;V82wKP)V9eP?;G~e?D4dS@#BhOI)Nz@&MB%TU1ohQK+YzB55)It( zmy5v&qtM5}^v0N(h{S>417%iDjiJufTK9Y*HPwo5aUx7C%i_(SCbD5)n1!&g1qM98 z11!gKjNO?m;J!!)I?&k;PLmNnO^$$=P@EZ=4dNk=-cQuu3<*Xd_L1f!oUW}J;a!>7 zU|~kQmE|xI=9N{AVPP1a;zxB5=-J#I{#!`7O;5;%L$NVM5I8Xkt`lW`}~yoAtX-#q)?Ji6e=US%;40`9`RY2jh@=VL%d3G9nIu&%ng}_u21WRyL;znPFi$D1lSshSO)^eSf|bD0 z*)?WWQ5r;gWX@2&M=mzR`6yulUe$Rtq_+g*8NnDJIippo-U4Bzi0qciQCRW7QDBgx z-AVu9IKJATja1-~#$1U<<~U(X$Ve^p0|8*bz>!H`*d>*D6`sWerqIT|g`l$)1~Cq% z170Bn0_4*y)nBTXW2&7!k=Qu`cD)q|*^bxDY3BUdC|}TLb!G0lJVsHi~|F zmTgjvo0Xxx9n&`5#-pj8W)7KsSqH>r=XhGpjaAo16(3tx#JIe}h1wXdjKy8NNg%)t z-iVk59;mlqPC=N|PqNjAoyUIW5fuW3{1|75GKyP@+J72os@0A`rWbCo$kUxnjY$6# zvw0;8QRs}6CARP%KE7U4poe%CpB=i?Sn$EGv?5@osC&MrMzK#ya2^nXq0;mxsf~t9 z)~LIvsX(0B+BL~SGGuc45{$K$a84#FO%c{Hi@q!=hBhf130z-t2MIOSQlQ5_sK@z@ zL<+T3$Ls z<;0h5A=^rj6?KMPi)d%&6@_|;#-olz*R09$K;B)<4SG?fEWN~v`K14Fqk{S-0z_r0R1s`JTlnC7N#_j3+B8E$cmL9V8F!~&b?NEUg>MZG9#***JJDwVCsmVj%3LqgkzLM zsTM4eY8y-pC_(h-`f15XKrFbdYQ(BaAR1IgOh>&0YW`>}teK3Di9-bOAz}1uP!t=8 z%HvF|Pz41-IfR2Z1cI7GfXg%2q_LDo073)=LSWp21&A&|aV^)XR^6`ZuYv|6 z1}RJclxmg_(#|f6Jz!3Js4fEU%i%2V{_ew?hy5li!3=313Q3^pZSX{1enn_avB+9P z#au*h;{wH~7KEcz@Bi`HE=0hDfC%1fumyC;K+P>^?$M7r1ni1df^aL{md8Ov;NZe9 zN=7D4Fs2~BE%V_o;6`M)$gZ^sFE-g+KVc_!EsK(>YeOsxaR~oMbBL9VQpUH4N$Ii# zZ@j|hcA!;=Lj=&&2ZJ#FlJL&bEO?>E&#iY98Y?^(b(IP%nL?6@F9A0f{H3B@>iM+PAnrtorpwn5I_d0 z91*8!1n-F8TJk`~gqqm%z5p^ZS8fR7hkUvVj`ZwFJEa#J1kn~O(EVk+oQ5?k^DZMS z&#J^vC!j=AEW=U+R%*tmIE~72^&8%fbaBe56T5UrT(Jc_ug8D{OY;&# zTO~z2i8BijTFdU*m2r}!HE_!BFm~#j5^yE69TT!|Nyl~Jesh%}?t!F`z9{cS@PRrd z1s^n5S6GacdZ!`+g{PpkV`6Sjq^%!l@CG9bS-bx>a8()hG@ApJ~~lPK%O_H-W+kZ z3>M_OaF1pPH$W#RR60b@h;nIXk6hoiztZ)Z7B%$Io~w$qauF~kZdglHObTIu{t1L( zTQ!?8w?QaWLIaXr`(V9_gu-NW&tWxIRbC~pO5K*kQf)IQn}zFm^6m8H4*LXn;33a`#(;#d5h#hz}E1`v(P^P?3Of zN~FZkgp1GUC_@x=b-%TO_eky9M7#93sY3raQgXeN_O6j^>$+|Q zu(nBf?d?Y(qRcD$99uk9tyyd3;@HWW2gC;drGBqBegwSkR_lFqx$~(yEK>UsmiVljH4EmVqnMnyyD$$!_(DIRQcwKgNSwvD+LmDu zydy_BJOFZi{Abs;V7RD2fX3HSMmac!B0RgygO|_bT=V@ea04crKLj5!W=?M_vzu1J zk22`#jI-zN-S+&ZQsr9BMkOMcU@gXs`GZkGZw1T=bC^X^Lwy+bddP2%ewz9nnedp~ zgljwj89V`hBWmL}&Cc3*9m)S8zt)b4>%`hOc+Ev{=L0RY2`ogUm{hLmj|XD6;`;je zpGQWBS5Wb!nUOx|Lq&9SaUq9Cf;wU%+a!|yKtx~zS~*_lhn)jOtc(i~FnXm_eq*9} z=BKfxzCePz@q6|c?dsg7jm*89&6r=bPT-WymGlEY7!^P80~L0(==MI- zUfKqph2F0`sSBd@H#m6)e=M1(9P%j~_uU zG&nM3$&Sw~cIx!*ok^D=4Zi#sljFN*EoFAJn3HEupFe>H6&f_5$f83XvQ&uj8B(WD zp+=2LlOf5NN2_9OSd{-{xr`3ImXo+qB1J)h9P-oWkLJjO6$DKr`c~oCg%;p$^r+S2 zyG~7qvZQNt@YApk1s#?;)NNs@&pM^VxtKCczLnpR=~S6>XV0HGQr&u)=d-4eCoi;_ z8gy&dulMq`Dth!xt%CG2lvu&*#)-He`eQrTZMm~QNuQ;-Fmc}4CY26FEt|GeuV@=4 zu7w@8=kC-TtOvy`}grH6iu3R$(>A>0<0m^@+;6l zp{i@lJntOa&LIc$ifcKC5?;!LDdrv%|B5Mi23ZheJCmE4UlCh({%dsQ^iEK$e5$99V$}1@( zt}&iae5kC3Ae4x^EFJqtq8O3WsyDE(Q|zG-8}jSMpae@uJve_du0s}GOzgTV@q9|o z0|Nz#M}&-0jkTPhjMB)96va}~NnL8PQ0T^sGgC@Al~JoLU+R)0yL8jb0yJkF3`X8A zmFYsn#L8&Q5L?ZuNd1bcQ`D#MYEGe8&l=OyTIt-hQ!Z(gs=i~HZ8o%Ik-F8{Y33=ZuU3t+tHn4}E&k`vS`IY~EsTG)fIDCB zL6!|1%CXz!sX1RO$eC>e zi4E#VtLaYo>7W1%k|Dn=vS?1g8P`$E$8MCi@yQXyYG$GPUg*oNO%83W?aCRK?2ay_ z$ybR25l(I1_O7bVkmhcWC6#cxYO%YQQ&FLsht1rl$f^Wfy`Z)BY0x6;^GhW%sHWU` z5c&V}9irG7GW_}Lk+m55%S|Gcw^KR(6)hEK!!@>cY0_B`Z`R_PcaAQqn|hxHlDGV* zngcypx!y03`x+gFJ-FbtC2@~KtfxQ;8j3FL3K~$hrMXkFtt=V5 
zADk>jBMTTr5aHm5KOpoEyj@Ts%Yw*|9MY)iE$VOoSWR0R=&EvT>~v`9AHx3Ul<8@Hq274xYg=sF_R#o{ z$N(X99vhjt3ZgR>y0D6idz43Dct^Sgj*6o)*@pPxqKW*FJjHuV(u}phBUY$bvs3>G z!yX&xk*+G32S9v8oV3dQq`1ma+R<>U&JuM8<|yqMp$R zUigVn!3FkPZL`#xbh;3x4ECQ8v?xwGDO4eO6(mIhDoJ0mF^2FklM3w&lO51g z=bY@sHYqeHzC>xiH7@B6`xG&)Fs*^}&Wzra5ZtJcJ2n9VbMZK*)Ou1#R@9N(f?KY7 z#Ra=kS)@CoWfLXNC$|!G?5Cu+U55Paa9};|T-I7a*j9kP1{F{M?;HPM=)UHQ+uJNu zj$&5sptVMmkt~D4o2Ly;I0nYpF&!a|`iaL)@KnD=eilqIgOmQc`MWPo)q;42 z)G9ZG#SqCdodN9>cBMF~1{DuhUWLb%pt&zuZLMOdWnoJqE+ru4a5?b|DmaOQ54Wg^ zqp!S@Tg@3sP5y|H)6~j4w?{KLL7a9K7id`*l2|FT)n8B=!$tptSSFnuaTe9Q=EQ*- zl);{|f%OX!P7{C+d?0{0{Orx?O|2oMCX}g(y35LKag<3uob}`rGt?4$*5%HRq_sG@ zV-pFIp9I2OWnv@N^zwN}?97lI23W0Li+dH@w6pmj4snzN1AySe3r#9o){+?8CrNlt zW^8W~wOVSLk&L-9zOzyq+cwD>$ZC%Ij%}T#uzR(yZ{5pQGpEGb>bz5-Z4+IE+~V21 z;h_aE0BuH%6V~AgXJB1a600h_tdCNh72h_sgwpyYcTizNP_vHW2P_8G@?d-rMigrYDY6eoN%&-f03<>jFhC&~P%j7$_L5@j zu;jI-=_%-o)0)lG#G&TWjhNsoDEf>(Hse-Mum1m3@G0tQ0PPMV#zZ*EfqncTo_@@D zlrLiDt|tI2#@eZ_pv)mQ4`cXX7p{*Xc7Xtlhuruk>JlZr7^3WGZXrHU=h7|j(90)| zrX?uvJz8%C!4Sa`upAc#3dNyj91sK6t|2gh z0+WK+>h9VoMk+Rs=KOF9)lXF1P(&^w{sJ)!ArZxhCuFR{#x|lKPRI<8X|jM%C`$0i zx=nQ6E7KYR{l16~9$+H;fgjKzE+PN~V~E!p=HX7NOh{31$mN0Hs(Gdm{SHncmhE49 zq_l?OVb~2Ypf3`Y(e4fm|AeAmvTir-f)oE25yP5J%P`OdvydY;??Y|@0|LPt0>J_T zVHccD38#k%X)W`j=cSUXlgjM;KClAP?;QP*f}U_2wQ#5mhu)U)A7c)=ek$U4!XFF5 zSAath0wEk2f(^A$7CWUA>Esa=sNb@Y3{?Yo28;mS3Ig}w0Rq7m_JG*{;Q{V3-qcYH z$tp)OO7b2^DRi+EJ*^d5q#s^y^F&D$!OIzk3wW5w(vlG%g_8D!ud*`7AXO+gBqA2{ zAr>0)27v?SCB$5OJkflIK z_OjA4V)CCl%>pW6n6kGZzI13^va&Q63fD1@7{b2E)EC=RZ z0wy;Fs|d7312Zy>5{aZmM&HOdXz-S5su(XKCMP&QBuL zNoxY|3eP~#i!ZIMjl`;F(4yqm7~gZE}E-EHOoAZ=d?+dz@+5pL{%#c zOrwR(78xf78_(_o1_-LN$&k0pjEVsVy5UA7iK2Rq_)%}^F$Uc+V+^h;;& zYN%}?0**ofVM71#(qql`xu_QAD6&s0fGovg`18Y$2Cm4Ynk%BQegU zs(!9ecNOetBP$K|ALLU%IE_R(uvnRuH@|IHeRQn!Xfe$17Q9SoE0EFZHF9m&SEu4} zY{u~fC6S;tqA*e@)GKk83>(>TPvcF$nr(Rl&I*O1zd#l`rI%M*m+71Wc3XD9q=$N3 zlRbX-b_FbRTPst{X%J~|hn#|r3d#U$vH7HeC0K3?|JGGkH{w39*_`)b2g+SJD|q9L zHFko1b25Y~m0ABn-*zl+fEIk`);I~g85X( zYPQa%_p1K{Pk;ybYtapg@(49Ru4d<#fgw|TPtAiNSSu?-o1~;41NJpm_an$phFw%V zheB+NmniSe7;{&GCsB4EVkg9uV4A{!P*{kqn1m4!gsqGUdH9GA6-F(IVL&b=ozpd9 zayGR1Av|h6K$eE%q=y67!xSoPgQp+4u%m4FiuJf+HEKBlpgLy)U-?cUPciX@ggkwgSltLr-c#muCc&D#ZDN=IYfeQqg4=iE_ z2sL^GS1%>`(~hR7bPt1%>p8(g!*CCiMmd+Q2=!ta!yoa!Jzo@!mGjT5_S|L`vRy z>V_DA76oRWp`EF0GfV>xCP=SYmHD2T8Sz3osK)gmrZ)Mc4gG}S8X;p6 zCSeD**->ioH6A#?4td=kl7r)}<>+>zjD;X}iH2?OqyhJtSz1k%k15_|iklOqtr}=x znPfFOPFQR_kZv-Rhzx{bkbUnp9ydsT8mL*=;(!!>syc#cSYAY{ki*o~#+fIZn6CeP zM{dU$emR=1uNr*q&GOKg)mjZeP+}6Qjd`9y3Ciym<<5XJG6GR+xy-czd##=!#QS`$hM#Rv{M_-hAytE&fS~Am+$9VvxW{e8>gI!}&A~;*wd-ivyBhozU%y zmzHxAv#Y$yKRj)bd@=-@40fQ|m@SzqU>;4ktj&1CXBa*us(yw|!7rs2o}d$OGG+fl;ywihE{7kfODienIKp4haU;P0z|+2$K)Syt z;{%zE5Bi}3`avJ6oxcwns$j#{Cn(m>$1Si*--1~pu1iI*V;~;=p8;z*k0+#@TgSDx z86EbliKJkT1l%q(*;9J;W~R#sB9Dk=yWpb=#bFPWX%(}*&yO`cU8AQXkF!T<3m@7* z{pov}V<7a2(xQf|4s_F>l=a5Bk416)tO_+8hHCCAX#&IxJ(}OoHg>bO`qF9(OV|W? 
zDF7pAJqcjp8^YPBe5YBpp+gDW@i9IerYRo!87CxIVj?k!Y*WOKQ4;H<;WFtJdRKm0 z#t|B6a$HVK1A1SLsbRiFV11hIkXg_lemETTQdf#!l9$jq9)zrDpN2yt*NsLEzS#AGDaUDm0-yW9_nDVUN| z&4%a$)cF~&uk0p}jy{K|t?$U&HD9n37xU07xiS@-ex!+1B9H@|;q?{jV8fObN%mVS z>VZ(X8p#G`X6eHFNC4uUz)sIPb*g|cp~8g> z8#;UlF`~qY6f0W1h%uwajT}3A{0P$GzmXD2RwOynWW$kU$RJeznR28+j59ltc?Pki zI06I`isM7zR>+Mhhhl7LG^x^wF^ev3_^x0}GM$?KdwKP#pmG)Z>BD6~P|U4aS2pGN z3@1{971BO@XOrr?q*m+Jt#J3^vzDFS@!d-`F5kXUsZxE4II-fzj2k2YN*y(K zoDQQp$H*uD==13v7rcL7YI!jR7**e4CY?$Bg+$g!wS-mw)K7GjrDx+q@4Y0EUk5p46Mz@(7vO@7 z1xaILA}&@ShV8*umXRLW78`Hnq*RUtAqKaRK=%3AqJ0Hw=b}eNp|qt_XJr)TT-|-A zoq-71R8W~o-ic?PdhQv~nhWI^Qk^9>bR3@$NvYU>hE^uqX(^hOU{;8BB%y=6ja5sC zApQ4ci!BQFnvfkSC~2Cda(R(`K^E56m?a{)Xsoi%N-Jc55_nUs7)^>8th#38r(;vu z`DAQSq8bvFDe;!+Rat8K(Whj%Hdj;~Y9}gDXs(z4rbqu3WKgwZ#+mJ}=AMggx_|PD zn0@N@iELx?y?bb|xHeZWl%*n8sXypknvx*RQrM9|Zv97RoO}(nq`m~F$|0Q!F{fR) z^j?f{#(Cbln6DZaOPa?YLrGeJC7lXWydP27l|F%3Fz~aiu`AJ92XUzIkBV~iaHNfH zBvq)Qf;#QUMjwszV;m>y7e6R)-rJCeb-zzK{shW?vDxWeDcqp!wyxP1WDDGcZ1!udFtA4PcNydAq5`cf%_-> z#k6yV9kIA0IDTznjRdp6|42^Z=L|JB{Y@2DENY84s`)&RSj#Zk36=EJ$Grq9@Lcv& zN}=#ZImHyFeCj&iQv@@pU14p54a^8xYVp1&P$GDN>)^yJcc7dRCU@bR&{?=;7lBX) zFxfK^^%7FI*BHn<%v+!je+a}Nx)6FpDNF{t7eVy#rH1gri?J939N>W`c(ps8VWfwu z_W3Mj-OANMrc%QuafcvjBc0WRh{iPks!?@BG)juRgfR;)r9cS0$mC4u!3X^VCtjdL z5(W63pph$aw_1|tii5*I)^S5N@lZkpV-QH5agCI$q$SfO$2kVZM2A757JIeC4`p(3 zys>~H2=YmzfNL;P`=i?k>Ae07#B>m04t?+!N={zplDzDtFAW8q#?Wy?UU)(+-jzzu z#D$4=93En7!Miqk1d`cuWeI~dJ}@nELx0gs#g-SBUy5^_s>P)BqgTf<;)WGP!R9(@FZJ`rK4+_(mYRj2b+~$j37|f;c z^F9{c3njMFn_9Wjf!$P^G@~V_$cXe<&3t8~Jcb%~Rit&-ncEv>iq))kl%DlWiz69& zN4M!NDfmLjgWMUFidF!GLOrSgU1}??4yBP4L=SvMwYqzn$T>Zv$zf8O$d_t$u!K#J zV;Tk6#()rbJVVn;Ko%1DWXYWoz0F!FyC}B`aABkws!@XUK!mzAqjO@ZUcRHw!m4() zz@n)rnH@@0J4jD@N%wyWdEB!d@hCn(Q(~wpp77#9Fsbm|+@u|2P zUD0?5Tp{H}*tKS%X)KWg8`$;rEsf0F^i1?s0n1pmAlBe~!aAa*n0LYY=*MMmo_L8WEn&p`A;9?>tS0KpzJsd((WZD^vdCVal;$3UZ zkSr<00=e<=T6;5JDWNQ%1PW5KxQyL6 z{vvE;3w+@EDmaf412a$a!y9~7@Olj!VTbKYzxrlKw*!HT;qrM#VoS_GQ*=8<4SI_n zz1&roV<8U@F_o}mPfVh6ptz_nu@RYM+{#Muoa>zAC*IwgYoQN)SQsIUn@|(a%L$IDqs!h&+R)>n$BjTg?7;))4Z#%M2 z&8@^p=(4y71Y;8t^&91QqzJEolD+E>^X8pAp8f1`pORBt^RL&1G_i7jhBH-{mWgS1 zNf|5c_RPP1)67l`TiNm;anHTwS>vFi1HNjD3b!0VU+ZlKubK&O3XF>xxyQF&>*gV9 zQhBjgcQC)aL(5#3a?!l;1)k5IFNI1-0U>XWGkThh_f&e3yhAj;dVYx7!my8P;T5{k ztE;IW;avAr7Hr2i9k(3tSAMxBPx2>%V#Y;tCU;QB z6@p+4&tMEN0~~FXbmTV?MsyM8g@9(IfQXlPOd~zHw`?`0HOeDm9kE|7G!Vo0KSY*F z9tb!6HcJ&af>AhN8>Cxkc6}-6P6!x!NA)yv1y>g_TnLz97j;-WSUX!sM&f6G6QLof z^k=PAMoGmv(ZMm(MH6W=J#jOHQpkq^c1~Qfh6`aEcjtHO1wtkiOHSq^J}5AAw`WQc zfvU81hjv@v_Ytmv96q=L_>ob*oUWxT+IfGR!DTb^<`vWSq0($ z1$Vh|nQmeFw(rAy= zl~?cxF^{+v??6$eh&XJ5a~n|(m=#jwxG9i?dgF+W>39_sST)XpPQB-7E9FmP0(1AM zk@&bRrWc3{5hMi}h9XH402w>n2qR|bae}0aYWRi)(;?V(J^@r6!iX(7aYZPS9rd`8 zJsDHUXM1S2GiT*|DM2j?VT&LEVB@BbK=T@uC}`uiSs@6Cw3BY-cSX!tANjWbQfcUu zU&&fQ`H?sXGgl#uCxwzv#&HO#P}IbZmDrHAR3H*5l}Z;AU}KQQqLL9ok6f88J_2JE z1C?M&nB~-zqw;%}12u06ftGlQ)D&fP*(Krh4z?FNcmXz$<2ZoWkrPINTzN!>CP-dc zn5B78$wh?@87-}365t^_^YvDP`HOK`A88pCX&?=yThCz-lXWFpVqQqcmi{G4 zR|FvZG%m7fn$0Ohjh97I=0D`cP28B25CfRK=_r+Xm~2vYxG9+%p*iFig9m9P{{TEA z5q%tpl`ADT&JlUE=bZCtn0+%oH8vCFxn;%2BFyO!;4>Gr*(1%7n+@3iBnvs4FKM91 zH61dDc)&A=^usc@mTJeTl_2P!^r@j~)S2|PRYPbQc+GTWQH{(K;8!DvYbZRAblKUny*=bkjXdem+qp!h} z@b#if_ZI*%Xk%$sf&gmXaGt*rHpLl=1XMaKscS=Orj}$Riw0s!S|8syCui4IJgRk7 znvzqB5p(H~!ox3H;SYfaAt`Z)-MJ7=QaTEgp=XMydxIm)WpI~xY~VCH_@W}V7MXVX zfh}66D%ub+>7mM}6!H@=U#gzb#EN)WsEe1Vsp?D1)u^BZpDrc;7mTT?c?t**a0^%p zX`@M^=ZLF7;$`a5sAH*Sc;PR9`WrbKh;aylT&Jqdil!)|o$Uf3G=!jY*sEJq4j7Oh z1h5N)<|UZQl$L4{qly;~tSu($hfpi#mj|nPX00b$5p2muv4@Xp`gRvR=Lw0zk 
zsgM*WHB~#9>}XfJ8!?K3jLMn zxXg)KK09Ah1i`E7K#wP1QOxs5W*xstg>cq;j8hi$v%+o6jUwrx3`=J=&eZ9SRdf#g zq!=J;DKUvq^yDW&2|AZd2QOB1nv3!k6a0vgl^1cP=vG$2QAQAhK-|o0NEbyN`UpsP z0iO$#7$qp>F;{crizvw>A6#8$g)E#>VkRUfM5b&{89H1eCuGA04bT6G*c6FbOr{^5 z$ucT}5eX1F2}*UYQ-89g6x3WZMlynHfVX)`apYMvWa34OLDA(dMY6^Kit``7VPj1g zlRjbMOf4B3k5$N0Mi$!Xg?>~_m2AbZ?lDC>&Ff4W2^v3u{-uOHq##m2+7kCQR5PMV z>3W12roj}Foi(-T&@#CZiX4=Jb>m3Y#8{Dhs%)PG!)NJ6+L|qLGpSuEUhLcmxC063 zDGXg^4*O*|VL8bug2|Ow2054Hh~pLqDhv4FQpr;4@qHrorc4t{O|{@HhELQ4wk4dO$erj8X=S(@lzHl73Vm{8G4=ctvdf*DpMuyQnP;278i+Xfzy|^lfDhec3pGF5soc6Xv(5vQN&mUn-oB@8 z22N0!`z0A=OFEpAbZ2mp9(iKHpn1Q=5BO|rECIxxh7 z*crQ)5W5T`CxNu%kH2bCt<&}Jhc}edNWHF>TGd#~#zvB(;?%o#k=jt*X_Mt(SA)B9 z;|(>^Ok=8d$NKFGTDsi?Q0UteGCJ zl&N&x%*B9?pIzgOfH9JVe8QE!t_2^C2sf&+d1Dkj~!y`+Mw*m`U5s&Gj zNULn?#DLz)m&jxu@SG(v>9Zx1%Q_V(rw6@1}P*R6PYWIBX=X(EbY@uS7&lSMsTKSyft_3c%tv;H-_7(?e;cH_X0r%_z1Nv?+#B-PMVZK*{RU#i_?uy_nZ$@NM zc`%+srNbrC)G{6J_tB9)&+keqmq?|jQdOzq85c_CJE|H_O#Z^Lw0(UYU~8Tj?3EE0`&4 z>o!IebJ@M=+McvMJdFupSvGl_=j@F z=LP;b(1qucRew`+lK1}igFQN1-*@m`^YOVGmw`J|->G{wru+4zr9eN36`Z@b4c$OFqh_ zF|Iu*+5TWttV9*D-?htk3*Q5g*H-l5y%_a;e{$lR)D6zuq?7EQcVaCTQ#%?Q? zO%LzmOMarFh=l-m3fgr<|o986mKjs9*4*d<6>5#DLz6K`>W*o0=6PRJ8%IV24WWoAc@4G<+Otg zAp{q1Z7AI1Ni5{zu+mB7VwCZOC-?INW;c}#M4IE>xm~`ao3?fw|h%bY!CH{q$lyf zvdtzc><%KMiX>1LQYnlsGmvrmy3s!D&@mfv%rePmi0%)KGeci+5aUVAFlk%j2{p9B zV1C2lBys&3Yij~CYOJQC#ZGMbEkpl0|ZuJ&NTa2sF_+}<96D0IeEvhP8 zV4=SvCNN5q0Q}%S=%|ty$(szKh7OH8p5)dvEUg~W)v!XU=;}dXZ(3N8vM98XGV*M4 zqUbbqO=0aL&2j%iXDzbOG$GheYd|t2L8C@M&uaYbB~!^FR&?^pvFRv~(9mNxFJ~fY zMv$iDh3w7bhG-{L5mA9p_X?~kZeb6;!MQYv12Ho~gRTtgV#=;$NpzARLi4Y-6f7B3 zPfQ0!B@<@e=L8XIE!_-igp*BmRn0gJP^+;9hr~vhgZ=i!{9MG9E&`TXLv*~Tb+U+m zB+`F^(BB3%P!*H3d_t{oY+B0md~%dhtqfKMa44`%BmQgKYGD`7H4yxv)vAII0HNs4 zGCQiI>m|bpBCGLX^lE1O#|zM@L3j0G*(q9Il(&-O zX>h~*Wa9sSjP;8+7GUtSG{BTj=SaP56IiKYSdjb$FH6aMBQY{rup2Y301qA6ev(j)o1oLPsKp*r$OT7}!0`{TaHdfD) ze-3dOw#!vn9)RnTaB;Qn@?APFCc(;h-hDT&1X_U&)f~yw9Rx--o!WAT#MG&GaA&7Dp z24hQdZ&ArqR2xGhvM1wVsR+4YET;(a?i*6SuEeuvPPBc+R3?l4jOjCG`J%|58 zWz-@`I4`MD>QDkWwv$lGcqDk_j&)P(Zr4@j>YIu*E`pbD(N4Gwl|LGcK1x&DVvC81 z6Cs$lPR8_cjj?IpYjP+P9T|pc2P9(x_EhUIV%`uxqBW*QXNlN&mHRE%{+M`cS4A*l zeo^CZz*(Zb!W3Q0KkQd~DNsB1ZN-g{#+xVbK3j+``;*rgl7(uu$ z+GFf2cwiHgVSuSEl8Ac7?#k?HjEQKE>gr71!4J`wmDw48+Kh$EnNM2R;Xv|@d5v~s z*`DLM;RMQ;cS%4jhW6eO+M+LmeJVWLjSnyYHJidIwM`YBSwcaWcE#dM?DhXukt~aI z%nko&fIrytb|`#H8Cpj}Ssu8ZQ5uTcOyc_VPW}=kpUOKCC64(iCTuw!B}=CD1Hz)E zoa$)qs^W7Ey1J4%4No%y{GhDLRiVXjovZnj`)-C{bCZ`)qw$D4Mo1|NA~TYZd>v`t zB$}j0!+W|frOn!jk_w*p8FARpU`UC&@^~Z)j*O$0By<{%*-ciO&ssj0cxk$EHrg#} zfvn<=w)8Ws@76VUh>CaAimREEC1fj=#GDE=k1Xo19qchZ32PDitUX(3aCu_;XLe*+ zht~z4HR6KY7K|T`uJig1OR$FzT5@(V0T)CZ`~Wr8c%r*;XyGv0bQ1q*C>IW6k%0G6 z1C4f{a?X5;Vz*UB_&z(j^XEZ3lw^0QMIz`>;@TU>5n5TBCi5eyO0ch$a5vjcf(PS2 z+_2oD^_sWCNwoPK*GI6K$1&`u zaN0M(WxSob(Kl9fV3Cu%JVgDB%B}qw2uiBa(Tp;UGjRv!Ng}A7HA(Ep`!Xg zraHKFrv?);$Xd@JVAG-ojlL){p@L~?VI0C|yv%oeIv3)`DRcj%2#CzZ3ykmAS1AXK zZ-P*B#$}{JK>=?}oNf+{k@?CD@SHkSpWG$vm0KNKvA0U$OKZob%d~1Eo6_=nLR_h(a*>c^FpUM_*iw}90%i8$m?1z zM>x>bN@dOaLC-R_saesp>9$S=zO8noc0#a!U6_jZA5uJsn#rU{chv~Vo11;#jg#Gq z<2$8&l(cisQTuENsM>Wd&w{+$C0vf6v~pnd)Rvonr5OKDDjFd}oY$);`6ineBTOqk z2LGZ>C=xHUu7x3`+!pzL<#n}qZs$8OTdfz|yF*(#+T6`qv^({g;kA!&wpF$(9tC}# zoclbn?E=%UnwT`cEa$^X2_%CQInh8yCc*Sk;yPqUkU zYu@Iyk<{TFmvDYzl5Hb$(!zUd+y}ii&0UEY+rbjOygRG7wFjfy3o}N`J&Y-O7w1ek zYKXj&!@IumrAuO=y@eSo^6A*L-@L9PrLkI)=EZU6b2yfQ0Y2=Jk#oW%Izv_fXWqIncF-_hOiegFSB-Pq2_^mOd_I4+3TR1!s5vh9`8 zFgL$6BD{T=NJq0z))gt7cKOzK$SdDFW4^byGF_^@|BBn>hk>litw(Q+%;BTNw}1cr z+Z0CSwu(aiY-wkfM?EbIUL&g0bpYaBg@FVM8a#+F;lO_k8!CK=uoD?F&mda7NYEid 
zh8QadDtGW7$bSkob~Gr+0!W1AEYv!Au_4KfCSkU0`Ep>bowa1rW_&rxudFX1bE{)Tc^YHom+%^FX3pCmTPGJh}4a%$qw0dm83p ziIT+*t~t=~JHOn?RCss&cG>BEJ3U)G`EKF9)#*wF9=Bt0jlwr$hdnyCL8m!O*Nh$% zQAO#bl1e6thmi#-twdZ;cHP%eLn#@UQ&gW3B%o$nJ;z~(9)1X7h$6m)phDY4R#tc_ z{$m(I;Hk$JWZZSq;&A|y_0BUA(P-m)30F;rhU|2PGueeijd*-QUrKG{_*`i;nDn{K`dXPnk)36Wl%oV8+jcMjCu znCtabUX97o8I+9XA?FZ~@g;cSP`dePQD$fsB%-im9ky4rf!c{!rDWu6vElw)}mrDd3ofCj4EV1?

    _NDWyw7eR|-yr&sc3MHFUcws|L-L9=oU=F=v~Rm9wd@qh1s{!c(n>$K zGC?O!#4)PEcBB`@(LRH9)>=<&bzaPPS3t6&TkB_zXA)+o;`O=0BA=(BbRBvS> zUt}`#vxjT1VAPH~lyCpDp_?VoHAz7KOA`7b)Di_DuPWDh)P+JbA(ud9b_=qe1uuv} zg2l`~J}D7<0EIQ`6zqV~G0#Mp*1ojO z!kTjsgf>Dk#D!2Y8cJl?JJ~R1i_uA!LT-{Rp43Kf2YblOS{1{WjIkg>w2Hgl<-o&1 z(M(XGjZ4l%$U|0fl9a5Zajxi>Fg}EPgaYGt_VUR+0c?;4d736!x3@H^kR!V|B!t7AQX)K`*BwL+I|m6d*rt=iHL zUBc|{0IQ{qdc<%wd!?vK#Z*BWSRq&GNdl4bQr&HexdZWs zKVaFf3DGyhRIYMbOZ>e|&grcW;R|)v7uGgmxlU|tPM*fd-uYE3v-%V;J^4DW&+OzT zKm3%LGGZW7odkzQ#oLhQTb0Lprhx;DO-%I}D@WSZ%8YKbn-tlxkk$#sEwa;y#!Tj8 znHkcMZVPF^<-guexxRBVuwRj)+>c~PIB-SNs27>%uM~GIO>}JX-bd(7;VqxL^~lUD z?C4+%+lsG4uK8B1w~Kfglq3CUk520%vZeFPC{3!WDYz1vKD(uV=15d|smC7kkRks> zf-jL57sNEN_gl(cU@XrJ5djs)Zo3_}_vYJ+snb|6wwcOP2WIN-i#*UknuJ*CZ5}L1d<8w3`s#&e59q+BawqLSU5JL40 z^O&z!$5BWmq)V~XnyEzDI8G6SH#RdaHT-||^ZyTko!3dxLN-vdK1r5rCZ=hjq;TBlS*w>~ zCF<5=+ zhg&Jt9Vb?12>3o10Wk#UceCeN4s&;Sw=nmIK{jNBO~)UOA~>wJb`1Xo68fMIF{Lws z04BoaP90HEH+C~9NND%bKl2nDv!W_yCxd7Bd{cKe#dkT&LNRtSgh|#gk}(~u=V1;Q zghID;c~^tfQ%e;na!S=xAOR985l~1-D2~Dp62=uq3j#GDU@=%Zp zBctOTgZ6|4!C@j)0kK#)Kv;9pQgAgWfs-?1Y-nx$k|vTeX8`}^T=erKtQ25E#%qif zJp*w}UYLp4v1)N7D+7TGW$0kr2$9^#NphHi4UsIzVpf15gu|Cx2cBxlN zAY_N6rbyDL9=8xVW7Ulo!5?%sS<#eLe`r{p!6kmPDg|+!1fXHX#)}Kq zEpa1n{3MA(gk_n*8JzMD{m>7+SAK*C5+`_neF;^;NJxSJ1oNgK4M`d#$CQaFicDvb z=fNT;f*93tj>~c}hf_SIl{2$Qi_CI5N+x;EQkD;(G`as{eGigcSCV|Yb%;~faDb#Z z5atc>M~RNI5>>&9LU|cS(I00b2*zMt#PK8srI^DBbFbku;6YNBW{w3&5e2w&arF}- zRfiR@EOBTV?D&9sD3-YRE|w`11n>ZEc2Z~=oq9EaK=Wxmb(mgJn%pFpk;sJp5Dw_D zjQBw`e-w?p_;t3UV?v`E$yYQJ#(2a@ppB*_G9eJ&(M1(N5f?)pUZxmsXht*WCnIGn z&8dA6qM0xeI@XDL9ch6L!3W_fVI$N!Ckk+gR1={=AeLuP+mfH%h$K3{eqm)d~NgBW(YtX;FC|Gm@eBg+Aob9egs5F@c?1 ziJ2fmbo8hcx9|Xh0AVHy90&SVqjy!+=t%oSlWA&86zUrN~jXqnA%|&OEF$;m`OO8gZg8h5z!rsVO}4zrRMl~CrL{JcpGAB0T8AS z17nAw_~aSBk?E{oRSk`czzXTCXQFA!J2O> zsUnp+5FoTHeQ}*>+I@lHgX$=HYT++91Tv&6I2;a{Xqf*etoa&j#5x!iVIIH{j@}WKe*r9|xj}YU z9bP#WfO060hI`TZ8n?g)f`AKB=nZ?-3O48%E`BNZ8<8G6)r zxkQ`6hl$`PS^A=*)+Ip};G226vihpCUbUy;0Xi4LH8dijFNJ{JrI8GV78>cHw>XNT zWe~SO0JY!^7F!P0l`HpgTGZla@c~Et!4cGG6_HAgE0m*mrK^j^Joi~ae)*DDw=c3W zT_0G6K;Q+GP-Hucw^UY3FN=iU^%p`brE#dRPJ^gWdZ|uBnQ>yR_feLB5Drop3opT~ zSEjKTs~eKy6RajPlz~G^VO;3iL5%;DXaV|a@0u?oVXzjMI9Jg){_uzurWwZIvUy9p z*+oO%=(NXbLKthKIQt}JFiBi@#V&uxqset|MRX*-n6tGjzm;@PxJyT-2Q{e3 zyOH55FVeKuA`#U23`~lp-=V2as-0ryET%$n1;Gcc@D1Mp3xFUF;s6fcK)Kgh7L}3} zn5(8@!m7goa_kCwi%4z*f{z*^M+3AVXOg`esJfzoP)Xvx=3x&Y%zBKr;_DxYMS=Th}oG+p#%YsHzS#hACD9+2S$P7$6w zfeRN~#i)11mdtpY%!+H=8}&zfjT2)-MG~mSx^pa^ZI&OaJjAJdlYbgt_KLHztj-wL zw*R>C?iE3Q<@j1=(`!btR=gwAK3|tKok;75d9FU5fw`eTZP|A*_G?R}zMr{T?+QhVmZ#sm&L^EwKdGa1$z$ zus4guQc`gEoS5o{)GExbGk%_7DDQA22Hj+ON6F9$&B(`iYS^Crvl=Lz#UwFZmv>MX zcQ8zizN_JqNPDM`Q#4I+S)Yi~VZB|Tqe&-{j&I1BD5@wrQZp9;xy|s{$ zMQ`nXouZ%4;&HWP2tzYi9QwTvMRLCX$2&a8SuNY#( z`$Wg%ZQ}GhNWH?^g1te8Oc6e)K^ryS+L57hffF}Tmg2BZ5CXLizz5>+PDTL;(JQg9 zX1&+jMw9;{Da8%a)%_%1v8%kH8nS_88GbUZ))Uq|P&OHOL)u%96hU5o;$v=2B)N|2 z;%_-}9_=kKXjV_Lco#RW5@osLpW)vapc(!>9I+C;Kwbd|9^OYz*fNyO$kw*(mTic6 zWWU-|bMs@dwI=l!qt@2lBi&PFsNiI->CQJUspZZuj>O}Mhf=}T4GU6)sW<C=8;Y%W%$$Ccl+=5zgyNX`&_0UlJ^5Ok)e3lo~NW6)r$7q1B2Bm%pLD z(`NzhXc{txIPimU=dfN7)71|IP5^zddjp~0Y_{viJv$OdrT!iu7uV58lr8l;a&b{u zQ1Z1qRF4l{l+X^^3lH>CRNlv8?GO(Z6EEt#ZiDvC>TDuEeYasaN{mvp723W_bthA-m{KmQS@SD z=AV=~3VRn!cpd)D2Y`@jB=;17um^r%mi-{bmZIMr&dULEKMrlFJ1FnL?iC&V)D8c< zy@-~pl0EJvZ+sE=MrtBu^Jt<0EPk0xE(^l8+oRj+2P)vn8TRIBcM4f|+om{D~;qiU5a*2Rps z{v&%UxIhcLynil{eAiv9Tm12e z4-tw5EqUc-er`FJ^DJm0ZU6%ma6keJH1I$KIdTuCcb@5Fy_?2^4XFg*JFcdwW@B$F z*ANoNE0q9=z^sn;!Tto{?kFwj;KsV%l@oUTGDtF-b;EVI;7DG6J8Mo8LrTq(#37rco#4YM=zw|{OS 
zU@URsl=GHbyjl{aT~L&BPqzFhE5__x+N&e`R?0C)oA!E$x;qUr3d}&og9tkvG5V*` z^DMH!p(#7^Zz9g1+;UV>OEvXWR9RcIrP!PblcokCByd%l(3BNWS|MtY02NcD^UXmU zD#uSh7M)Cx2$qYKQkR;fuG7ER-08>3=9)>dLx+oKQy(8HR3az+v(Z2EQZ@HnbkkM$ zTtp|ND$FibBD26;WkM5DLkr6FA3j+6Dx=Es{I@;}9{6*}Wu5=zR#;(v<#H>BCF~ z@{>7D35!5amTFs>x^ka&SU53Z!t2}6J#~X2CsA_$ITPo{` zd)9Qiu04a6Y|Jy)d~>nQ)(P!~rh5CdxNkD5H=%$-N#~S!F6Tr*YTS&oq49ocJhYt^ zs8K}QrK!AQ2ahl@$BXAkW3k>kRw2tR=lpr-qnG~NmOuY*Iwc23FOBV&8f;4YqDxn3 zZk70O(~q~hqeviSW5WIVYD<(4+i4T0wkK?hm+@*P4O2d&{fO&w(<5L34S2xRRPSuQ z^U{M>!##I7aCo#LK;PuHA`2R%I5m0SOLPY?`sGAX4xyOK{vy7P*e@iB(})1Yg+Lg_ zP=+%s69ZWyI@LhXOH{ed0#{Y1l(?x*8N6Etb0Z_Pfr~N0dQbh#^R%G&g>mp>qCar4 zkwW!HBwnLo7rppJFmg$Ujsjm4ZD=nZmhp(Idea5}MLvNDae)a_iN@wLm@~#?WQPk4 z@fwG=|Mf#3v8YJ$M7G5+CQ^}$Bv~Fu<42m{kTm~eWXZjJ;y8|&uOV_YNCn5zKE)vs zeg^rEU-sfiz&#N?kHn-NE8-7+gkw={0okXx$Vgn~QkMxr?fGGKtUp7R6P&sEx zPM0Qjitk@;GNPM62~9xtC2<2$B2IuO5Je3zLC7X0)alc_JG%rjDB| zIh_R2DZaavrA}VimHCWlJvy?dP+9q)b`l3gQh~-lg}miLUHVd(uCbv$+>lIoxK8V( zQ(kHGrJ3qPr(5i4n;|V+DH+mF@~kqO2i5-));d$rm_}8qQ-xbivRAPSO;nv^z+XGD zM-`j(v2pYg<&Xd~&9Jnzj@CMlTCoHYk%@|`bET_Py(&rf>2#-Im8f1j;vlf5#;X?% zX_15%G@0yipQsXyKKY! zfyCA(TAeLz0fbv$*7Rb4DQ&k{^3UBdm4l#FZh6gn-U{%>vp6yu#BA%R9!ib8@I@UA z+xy$FbZW7AJ1JY?g3ILI2i@rI_Jlk!W(@DkH2D$gu;*zQHV1rR3}-ke zov5sVC$*ByHblC&qpmxrvR(vQj3{S3s6V{XkK?7bq#CAijgvcH0~3#;6oM~bJA`A( zDwuOCUfdtU#S#331yen;(tBZgv---deJMn;c&?2U^ghD)DWbs8{CBc_-d$ ziXxHeN>3?uWTC|Fpe=oAlV$msQdHeb)H^yhbA+-!j;fxcRISrm7SpR{b><3P0S-&e z%?(9URXi7J5ucdUQpHq2?_}*^khWUb!qlRDSF?%`-XwV9*0mS8fiByOhbJHa03rDV z1quKa04%`+HUj|w0RR991OO`p1OOrV1O*BJ6#y&%0002O12+Qz2>$>p2pmYTpuvL( z6DnNDu%W|;5F<*QNU@^Div$0Ot3cq^zke7*iX2I@q{)*JKb8|HNRWkEAW_PkNwcQS zn>cgo+{v@2&!0ephSXSrk2pRRJjN8tw5ijQDl1qJRBL6eS{BMJeaf|~*RNp1iXBU~ zY}unm1@gJY_M_RiaAm4oIknu%fmh23avQg=-@kwZ3m#0kEFV66Z&moWmT%$5epS|z zD(J333m~a(h0M9L=g*)+iw4Svz+#X`K{nnzx;4*#cUxLUh*@A()~<8w-p#wW@33n- zeoU>AfH>U0lV7YmJ7KN80`)ppPQAMI>)5ZSHLZH%;li45RmTzdG0?db%6?7YZ1QuxEfe1EqAAKCfbYFsk>1Wqu|3!q^fc$MH z9dZzM=;4PTj`pBYMxlmSh-K*q+hkKw<&stqNw*<;*LD_nBaqol_(Ga zI{Mg?ge$p3m0ftXRAfQtRe+;Hli`LTSRXyp$vZ}N>E)N~5h+lR#|`-;m~z3EWQ$B5 zR9R(b3UpO;S0Xg$K{Gz3;xlAuNd}sL1}bQ7V$Qb~cnuDUQ%h_%M;mO=ITWCrd+9i- zn-wE*hFP}= zp*P#C3SCN&wrWyl*T@EKT#%LoGxQFpBdG|is4Z7*b-8GDR3d%iJO~%BaAd64nO#ziv+-cB5&9=yvQp87--uL!&amnUeHYiOh~8jM#oNi@{@WA zP)q#NK)vV@ib3QM8x7c!y)cLmet3Wli$@o&sE8_8si8Q`$SpMTP)9zDARve4OHD#; zC`8Or96!m-W_o3OS47`lva**Nxx{wD)6puMVoJnmjjec4pxjhzdlzxz80WbN! z{cMwC$vnx)1oNw9I`f|RJx8F-=0>>@L*-EC5b8eB_mR-*;uTXCNO-wIn;IkoE+ zeW4q%B)PG*OF^#=U#k=UMj%?VT`-cSf<+5>q*@RZ>Q;M*M$F=gx);u|j@c734PWW6 z9p>zZ^Gnwvc@MD>xmQxC8P;1~C2mx)03deZhX|}H${$g%TQS&Hy18?%{sHrj$4utj z?9q7h9Z&=PgOw6`%>OYd<;!`1dE}j>j}I3T1P0Qoq#IJXD5Z%24QIG2y+lAOGWJ%3 zHT$H_mif_;{tRZ?%$IQ7rXT%amXsD!XE}9x&L#}*NzP*z46wLAio}2yF>sh$bZsNe z1;pVj;D<_Okf>p%F>nnlTqH;O*TCjVZB|M!e|RG+{?LcVP^+@qO()I>?xCnl0O0ZEJ2nj<)#h zq(vb;Mr-OxmAmFQZxeZYZ^SSuUx=%X&32}{KPxx=O>QG)3{(XxHD92H`m=Kk_>Pii z{2otJS+YI;5j?A;ALoZp{E*m)JSpAF1o@d>fu7fwdQH84ZK4^P(~K(H?e>ipKX$M_ z-m<(#d;j}Lm}T4T-mAq=|6NNKmRha!chy4MW_a4^=^rHTycY3D%qAq8uBvjgR||)M z9yJ6bmlG95f@{wuKB3n{@{&BcqJ9!MQUmjLC&4%`LM;__P30k8s1!dg$6GPgM*Ia4 zx^+mj6?-FQSwM9-XXSmrBpFb(Js=27&@m|r^hB+cRf#u&KIlw$V}T|iZ~pOn%J)xt z#eRq7Z$hRrl(t_&cu{9XGc}TZbW$GL(k^7gaABq-?Q

    6(zA1Wr0uu{V;5VWfMyH zgKQWqPcu4SQ9^BYT6j@^Zw4tYCvYzZW?D9cT44dkQh5LOX5J58-fwGzc#LAUYt`hMss_jM5w_b$>)=EBIAKa75D5Ix54H0j5m*v$_$QtCiOzUeP7*Lo zn2PVW6?*t`As9;K)nB!dV$~v6*4Q!75&?|@2m}Ci@N;zhu|8-x5d9Df74m*ch7zFC zJkNNKq2+ASNM!oxY1q_{!h<%%^iOVP5^y4jw6$kjA$fIpiy^jW!E-hRnN9PDjA?c= z=#z#g@rL-=k)btb(g-KqbtTnUSh(>n4mC(CrBOB^9ePHEvlWi>R{<)BVr2v)dH<4( z3voen(k)n#W?2zV9yyd6lKA2F^vu(U6|J-wlsA(f_hL{9y*B< zwSfUIc03sHQi)knv7itAuoSX}BXFXQCK!+4VI%K?4!?32LKm65={H%jC1zn&1p{iV zb!0pREq75)6}5_?c@hw0m3o#hyd)j95D4GUns^~rM&**37-}$bCs;HYvHxHJ`cMp% z00e^6o9bybZ%COmVVOz!d25t2xHMQUAvrK5kePQAE~#)R240NVPc>x$RymlvgeU=$ zLh@sDjDidL;0+bv4Y+W58W|L8xSkkF9e48=B^e`{VNfmwRZip|rxcXC;zuYEApR*O z4X{F_ApMH13(9L~0fs2^i3pWaop5D_Ct$ zR)pcSGiB9fG9eu`c~(8RA&P~N*+L{D`9v21rXN9AZ(&jIuTd1b+ZmK&ZceWEE(7Bhj*=g3zMCb47u=T}%Q=_vvdXm7LWC9pI#fof@gK zS{N6aCTr=BpH-=jfgZkBN~C5j&jDRHVWln@0X?{Bt_n7W$0p!bQH^>LxD=H@DiX~T ztFwBoe=(Uf5fuPL5+JFh*yLQY(JYfzQ;GK~JkfT1ia`1DL}Rr_R@pxcnWBZ-og>jJ znRBbyy01(05$%N%AoC_zhY@aS9^TU&PEr9oI&2PCJ)zVS(y^i|H-}78g^rOLX1RZc zY7_4u3=(pU{2H>NQLSmRT5ZCCC6R<+xtp8?9sO`@aW-?)N)uU8lj8q?l(ey;|6m*_ z`WLsLYO7{@o29QJTeL!fuO<<&ZQ@8UN2xd)YPZrX)rkwC=r33m6ic$M%*PNhiWrfD zK+X9VG!!7>V2*xz5EW>&YI{2%0f!f%wA*tP`bQBRDk(p2ULrK}Q<7x(gPMOA(cOnTw%-hq{WNBR&x+ zPNXzK_F7_v6pNi2)iH- zti}hs;6vsc!G9rA0@Sp40ZL`!d&4)1H5FIJ2a3q9H)7>ap7WOGLC+5 zU=U2hR16mE1xIuDzKjTAHBqp+a!v7aX7o}Nh=OOw`Z?hEU-%msXVn%gsuEi?7Ki_dq?4f-Qt<%OgGY2c$xPuE zqM}rlF}HfWf=7vp5oo|%(Y!G7037!akfgr_3>mg4CRouCFUG`_9LqC7VNFv3Qv)DB zdc^WqB^Md1!ntrPJQL5s2jC?Rq`bU*5msUKlaEm&Zd?no@C~mJ$Ff|_JRxBN@vjZh zS6mgaya;$A;cvsHGYtF>C9ETE(GUK>UADIvuxu8!00?cN4;HYRPYlY|oX;46z7#Pl zm!z}{##wnh64m(5V*xE$U{1@1kNEq+{53m+- z9LyrE&+2=Bh$26~dt=j~#U)Xxg`vQTRLU`3j1Bi{LE=RZASbS}xV2Uz4D7ygT-0P; z655)S;ITF{3yej@$GxE!1VGAAG#ug(B&Bf}Yq1N|o3VXS4vX7Qdr=<^MAT&+%m1Lw zG*V6Eqf~I2(&_<-q%6;f)M{Pxz@huU8#@?>)HUZ3*9=J5g+0l&{1CN+gBKmp3pdd5 z^}1x_YR8ecsO5ClcEX%7n1^8j-}_W!9ooDt5t~#nS0cQeg*U>@9uKRV4%KElh0lRe zpQvG{y&c^aam8x66;230PQ7Jz3EGcwpGE(OK(fKys!XD*CEezI5uK=(K_e~F2Qa_I z-GpIHjba_jz1HV_-w1KV1c9`D3}-0~mvKrUf{Yylj^74u5%#!~w{mXw=S1**;1K?3 z3|ZXxg3@js;TWED!k66}!`B)9;XfGM1M7=z{oo*e;>op5JAGrj(sCxQOfQb&GENvr zUE#9#5K4U>8N3@{BTzH`;|Vgk{XKd!l^MEYycb>=p`>X^4&+YW8ClXN`!(E8Vn^lY zU8k)ZOjLyJ{p4P*974*^|Au2k6&0csdeP_DqOs*^)a76P=31_%mzj9~3{#ilL%Jm= zF)kDV#FCr3=WrhAJwY&HzK*?|5gz|`e{u^>ZocMoc^M(L_s`!JD#)Zp-4!2~IQh zeoboG~Y>!o*^{(_2>=m?M5G@6gCkb5B0pc z?ILfmI2n|hV>G$PxeYt)O`Mgs#kXEYXR4hYo5(3;ulB?@^{j4wQDz$xc^@6?&-Z~Zxy>Wxf^WWVarUH*oU~;dL>DwX^f;0(7E>I&>+aL?CIK5K+JZ}nLgB#~DUY6+Z>4)9{u^$=?Ppy9{SNz(Ys2Z=Y&RG-!=6&{5@Jms z7rgAik?0V=)1 zG~ln#h>a^8zqcN~Kmbvp*1rk`E0nWPj-Ww?4IMs&7*XOxinS~pj95$1zl9JHDx9d1 zBSn%WO;Y^#?p?={99@nq_^#74nkQ}E#F`CEr%jmHYHjvs>E6Yzc&1MwxL(BH(eH_NrueAt5*whRQc|i%uZ_C z{skOZ@LGCMrUJot zWIDYzxN_t(maYFg$$h3;uHVnSeg6g?T=;O}#f`_5O>wHA)mAHXZpiWS^K#RnJDhqP zVp-1&JM$KpUE=q}Dl5#bp!Xd^xiWL3kL~)kdh^=t-^ZU{|9<}ck=|a3BC#yn$hp#1 z6YIHJrZeb3h7jsbJMto$aJ$VYJZ&(Q-hvOJ`CNO-KM_YHu|yM3L{YJ4I$4h-ks!lJ zppB|~P7nsWy3nA|@~iNn9&wD&pdZ^>NTv#4BdNX=mt?X@C!d5e%JCMwNFk1xqmHB@ zhqRB&f)L8^p&fk;6Rg|j15ipe*JQIzH{UEQNQXYMh{nt=0x()wMMHgkXQAgnna=idS+enbirh_og2CX`4%=-kLvrjS0L~ANdeZp(6Qy*ou zRaakyH8Gla%Z;{^e)M!umMjyVTT`vILf_Tn<&*uPvY(?%3iXNtAxwqv_FCi8o8iyOWuj$ zh*xI0WtW57&^ApsOcN*O&iW2h-YhzeJ3$P-ujG>dloREbk48FarG4Y5)@re$SKSJp z(MkVMm@d89uRr&-kt&4k6Z-371=e+Gv(H95?WSC+b|#XjUe9fwyt3@F%*L{mY(Bdk zsq25Z(mTJ5rzG^;gzr^5am5!`T+*m*N)#x9%EHV>lB3eBJFzSR)JBVb2GqZDPd@jb zpEqJWb=6l_Jh#=Jj_4{ScapB4OAj_ZLA3_7RNJ2~>mBjnMi)Cg$Nt`mJJy$HzWJ45 zpXfE=W9RkK>hk>vBZCLT>`rZKbWX%_w<>xwUM;8GdG*(4pXF((N6AhbZyY>f<`la5 zE|<9f^B@B6oE~t97ARAZB`RXEob3YGJ_a_>flrd1vZC`3=}p963G~rwhBcJ|{Vo4s 
z1X_qjaG{aAL5M`a;#1((0wa$ZBz%v0%L8RN!-mE1BpgwVre^iNh>*{MA=8tUo)fTc zU9Te+2t**zK@c0gP(&!)lnU>HkUnu`fhhWiIBrpaTlk8GUj$>LY;}?ys%9dsd7T6y zk{Lsd>3+<)NE|hC8R;-55d83m0-Y!#g=NH82;AINl&2_;h@%1o-~%5B(8Vw|(vggk zS6^C^l6jQ`dK0;nrm$tQi^#}hRymzkTsRV|?2(T^E0|r7*TM;55KfBZgCWI1fPWA` z99Mi~E_HbyNnO$*Y;%n-fsznZMv)~XoKX=IVmCPA>1P}%B`(2LOc{;DGXMYL%xFxx zrY+vmAh+lyAH29ra+dQrP0JxgWM-SB*r;n2xe(}{W4cmSu9EG94h}qq{B?Fz>#y^ z1La!Fve~TMNv=)el1n}EJ4Ma3AhQGnm9D7}n%9b|Y$GCVzrd^o$&FNeD*tD4LYv`pVe3|Uy(0Ts5_Ro_oZA}AktgI^=i*Q3bnzbAfVfYZa+#ir*x zR6~jt?e@`<Dq0+RprI7)pSB)z;~fhTl`QMV8$$!Y;kgkWIF2qZC; z^?YJ!xw$f(Sh0bZk|`l|mm)!REG{jSF;mOG34#ET zNr-8YLft)=I&HP07mOofWr`{VTV5W|hPfQD{&B_3kdBIT=t&7J`=_QZsRbZz@d|%f z;1+$zj6oit>4lBa)X!d>yi`449Qqk7gf^L>$HQo?EXb}{VwFfHV*y17`cW5LkuV~l zWPJ#P0R4!Cwsw){qVN#f_eL8qCE3`%)ax&=*62Z+XxaacCA3vIVD7o*4%ofmw!S)b6E*eojr&}Hkc$-N3s^3`v zk&ggEAY#!`KD;8{2BCLRQevb4*%0JMe-~aHOYJNx2H;#V3vUv#;a+|2w+BCLp}1mG zi+Cd|^%Y0>M17Pq$)&6nM0(nvMc|=RFHPn~Ov)|mtHe4OhJqcKIDwHp8K!%E(4Vz^b-Q$Y!10e`FmqYcFG-N@& z8h!f1YF3bk$cD~Ca^Y;vg=FIyn5Z4=*$lh;9>z0}$uO@6@B`UqvY7YARLjVVUbjWpT}d7paCK*`3euJk%vet%b~lp3!f^qH46|Y=mLa8 z!hrM32}Bye)tDt;K{X*XL;l#1GL(o7S-k%)`JT)nhym0L&+`rq5-Q)*3C!Cikq`)N zF(HevvqZweU63c704H$znF!E`fq=6h!3()L!$>@*YGaL7)1UK5BmSs8-C-sM2|&B( zAheo7pm3Z^+(VCXK~&TTJ0ra1`a-ej!dq~T&LAwc=r2jcMdNTBBw521gGBte5$N!o zP{J_WV~LwtuLJyvrb9bD{3X467hMaTDr6#CyS0+Ig$LM*St7i#=_ZJ1!(0?c)OZ}( zLnm$Xk4(8SAM&H9!XQZnHDvSJZNke?x3R8P##I^ou2r#XW|14u%=zW zfP(T1>>J01Bn<9K3Aih{d6BaGxElZRfi*FiLM-8$tTBldSqUrQjrzbpaJ0775wq&J z2=a3x46})BRKN$ofG#p5KG=n2(?^HY$;6TkxiE=~DiO=L!tJ{fbQF{CIKh2M5EhJ% zXYc|bxG|=@35HA^kODU<$_i!l2?FRwm#YQCi>1FI0GHg!vMdY$$;sZh5jg}Ng3Cyi z>YP0R9WT;7W)eRdaz&Eq6@Uo}aJoHryCsWgxU@9O`n$`C07*9SmQG9<|Cyu6vLJrk zlm5{Zi|CLlAxy;)6j1uEpy);!46KSnFB@5;!ZgjB&_9UE%zs;u%5h2P_?(y{F2FfP zk0c8V;I;pgO-DjAv$!XqxP|}W`-gt$hjB4H%gM%jIn2|9zHl@OrQ<(eqbjmA7nm@{ z3?#u#u_AE^K7o@8soXlHb{mQPfRm$)GR+$jGci-g<0jo?GDA{vh>is37Vb7+M<&Y+$E4=FQLdr zKu9ET;0JKng9?ZRepm?ijGmL*&?cR{{96coyuKvt5256}ca$?B`pu2%h|M{!t|~|j zST{PPqT_;){Mim|Q3U@)t48?LO!6EG0*C-W5QlOI2Ym>K!K;Nl3Qjj|QVpa^VCpXS zUPK!-b! z)8|vtKULLdOp=*MiC-)^OPoZ;ghIQTP0s*0jtDV%x-lTds;-LEz#6VaLm1Ms0AuJh zCNj##Lr5a~QLK7DYV*@oJ+X_SHbZ3+UO_>LFs0xDMIf3SK}blgz!VDrQaFp$Ijx0+ z3`}qNqp09RPT5qo69hk)zu^-Eg<3wuEJCQ6R%(4HGy)Hi3qcX7otaY(zqn2zF$;f5 z2u0c|7W#+ttFQkEz#y%{DxN_gzsMg>5yoXzJB=VLE^5Vm1zCu|N6CsvYX#WoBUF3A zRxF}blXxL;yVa{8rWj=oKk%#061Z-u)P#zFup}V})Edkgh)kK+v=aodt0k)thlEVk zHB=)aEQk?pSzI)+fB=V9P0s9OR0bez<^My1tlxUO3f<1RYd5Z zB6oZc)DeV->!77Yx;h)a7wu7=z>TV1PQg{(z{t+Sl{A%YOK*9#yTO24+J!Al*Wkn| zW|dnf-B^_w8F3?S%>j@8kRLx>01!fVtC5QPhM`PbEz-jgVz06LusqSZybCY=4S zc`HahI0sy)R}83t$IXm+LnI}8r@Ru-%Td<6OVH@EI&`sG+90s$#oyn6L!4NuDRNDX zB$*@nu&A}zpNWNjxCJq_0LMin!rBEsSYJb1Sf#}rkif(1O5L8&3xfcWTg=}N{)|MM zp6&yl){F=Q0wN0-U_PKCTnL9i$czV=H`L22Swb?9=uChS7-1|b&Slj9`kUgB9JRQO z)4kf01Ys1*;3feaP&ElsS}D*tF3ebACyanyfV0}=1I}5psLhTXKB{f0BbDGFC;l(W zT4MjI^^A^#393wD|3bTkBUa_?#Ee;$LXnCAc3VLRhi~wQEcQF(sa+XtKn?0lL`$xY zeKuh|aXTujrs!AYKagO&o|^F~L}? 
z2npg|<_Yd07`C(!A^M>UDhC&h1;09nSJ)ygCXfe+g;?+dMnX`n>{hwG65lyl!{WmO zoTIdS5BVsF`NZVZJS_e5$-;m@Y`zPvC}qL`GGRf~iWul;q$0+xFT;xk@y#l~>O%h{ ztJhnYP(O&lkW%Nf2*sp5ph$M8mf}kxu~np?j~kNaGkj8ian^SF8#;6pWe`TjssZs5@8_h7k^mwZDM2u z`RJ6EA?MJDv9LR)z>7f%ClnY()ss4+7a#x$yM94MK=8^W!P77X>spbC*ZYL|T zo1Er}YpPzgu#^$=H1h(KS*ifKPAI}MsxS3U(O@EDAOkOe*OE3Y7BY*=!fm7g+-aNb z^pI_&iP^*GTOxwGQvOFXZVSyIz~wwx&l$ySu>kVr>kaz{sYOQu;92rCM$+aJ=>USg z@EhMYr(gDmjEgs_NK3A*jI2FxoT|v%MvHC5Jk4#02c{(x?G`&z zTM#1vKzOTLr6UTl!%IV91TQBVd=77z&`x^Y#qeNi6=rjC#>7}^$?CQebcq)-nxmLZ z_r{92y2gcV2t^{0L~?6L6(_NclAqtpNXc>)7&6=5$f> z@m`ivG^@3-*=?C0?VjhzO7fxzjg(TW{|yQ0W{6ozNaba5f$OmRa-}&>n5rHY1Uc4f zLTiVzwMCDNM>`Bub7MDe9+zmkw;^w)xQs_b9tLr5o`K9fCkn_k91u4NC_k-T5Obbx zT^HdO&W*=?qMkNym`1jpPSW*~)N}@NoC;@%$YF}U1s={&A&^2xK5q&y^OU;rTAS!T zTn6qesdl|24J<=#AcGNwrOlP&%>qhlMh|wiQMh;AB-V)bW6w59zytl z@@oOPkPbTNps4l)VO|5pc$8Hac6x^yZLi%i6`|~-M`k54S&?Lexx*d#{+6K$vYp_p)8>d;I_x$idj z(%zlZ$#4%}O)mo}G9PYcl6Ngpp_vC1tw}ZoTcdO?t`66wD@pIGCq-F(B*bQn^tQZs z-|a^Yb<`2@gkFkG0^8v}pXoT!iBRo`!0KzRh`p?)U(Tf#+G+gQcpy&rh~fIgz_7VI zi;g(aWKX5YE*5%kcTipY)%$q+*?eX16c52#otROtL7xB5e-?xPsLlrq!-l>dKTV29 zi~X%b?F_E*Ql6NuZ>69NCSN^~r_RTZTji9dKj{pFX!ZLd1_MF}{2EFQvwJot~|%Y4aw|oH}>%Ecoo% z%br4q621AXlgpw?aas@=bfTbg|3U^uSg@QRLA5MMg-KBB%UTeF9_+f5pxB^My>|Rp zu7ZWA9=AGWD_3Bx3Tz9KA%k;kg>wD8WzE|3>Dm9piWhrqS}-l+i_gdaMQqV>=u0R*FK(MsUZk5W|b;PwEU%r*C60l|C zijn(^1=np;)}yCW@0b^L%G%Rq{seuoWon|htCKH3{N-o!>Xo9d(2{+`PGpi&*Lr&| z)`AvTQM|NcJ?F1VHucMevnnfU{+KL zbsv@&)tQrP4I(7SEdu$00jqov@M?*G%DNz1aQfpsRRHRc~dX}RQ1^QN8o+<@cLf;`9r$(oWSn8K3 zakXs)sj^$Fp6@)f;y@Y#7gnt1>Fd|1_!?a7OUEXAaGIzooY-Umua{rH+2)o~aR@a@ z(2s1D#ch(6VoEJroSIuEKSuG39ZB);%GPE9;zsG;E~B!_)YYFW!sfLqqg%P=b~)(i!w+;7V4(W%y;tP*<7LN)cKGQR zg}(aj|4FexZ!vmU^UBBBF!4tFi$(PHEU9Sq137yRHF2Fe7>qOg)C zd80_4n6??*$fQL`>(w%UD4$^=E1TY|5QoOckV&k~hi-P$HhIpesKTKxs~diTV7Up!%|~gEI6}y_^VE;ABg4ijy_<69*uy zbv&H;PB9OW%O$jh!6k+fe?t8$h3U=kzNI!$&pqjRVgj0=|;M0)q_23 zU1L3KO|U}IMI!%ctq{#tF2zANVgi+;FXhZ_%0U1=AY>O0$RQuT6aiAAl{5m;YDZea zRiSQXdW9ugOLY3!X)+dZDiq=gtwv6cWKC_i-~&@>hDos&tCgGq&@l<%2RK=8Dlvnp zH=BjZnkdYUm1WG~U_05+ILxfO-Hg$63oI;BM6Hs#ZTW~}fz-W(56Zm71ER$+^kh{u z)AEKsR0}8DaHSTn{q01~=0TlAEUPO*ZjJ~F-h3e|vdvm=W`OxYpHgHw$*5cPw6w#CMug%h<6&}>mFNmrl{*s3S0Sn1$RNg@@I{j;XL{kK>@JD9tnWj) zi(acag%AG`@WWnx7_$J|=#h$@PF4X{_ zN$ymz&6d9OpR82exS*3sLaRjtK%0=%5g;WI*pQmVGdRbV=O8hwmmeOWLmUov!+&eq zQI5jTNDqXS&FZI0SQ*@c_!!g1KDJUmtgR};M3Ed32L>)!>QcLAOqzm`?p$}YMw>^F zokIT&lrH>h&O|VSm2ucUf$b5k0(-*9K1;FR{o7kU4lXXFH%7QD5-0n?-Zc|pjFu+e zRHw}~hbafS{5RQ%ysnh;p^RS`J&-`FB!Z#jOT#%rT6wbsj2uUHrA#xm+2RqETMv!t682h`iyz9;(T zFE|!uF+pnfk7;3G57GWd)r&WFIu(nsAJOA0Ti2|G0&eH;&573KPHa}Ly|j|Y7rpdeFue$B>7)eCb)gmaYx zqTK@gG|{LCT<*9exL?HKj*jRbK{!pIScd%dUyP`qdwkC7fgqq5*ItB7$Sm1I z6vSr*Mcok7Y*654twf!OU`-58P!O16xP_@0!_WPY&j|#w#GT!+n%XfQ2b%xjhum64 zm{R32p{!h1{9H}P%ozce%HC9fKiopt@Iq8F5$dwKe0M`VD zB+ZJbN5~>6Q+R+C3 z)|t^_6Q0tr;2}4L&=20`7R9hdhFDz6ezgAtSZvS*x=$wx zlm}Jc5@KIC`ieBE9ykVMj8Ng6?aze>5*Ol1pty$N<%IljjZ`RGPT(L*EK6p@#%uTk zI$%l5X%Il-PRy-@?+^QsqSqDX!#Fyjin-i&jJhLt;d6pp2&o$B*#Din!Y+ zX`)h4(pcz&KH$?mY7kKJ6#Kd2JHn(%UR>5aA5$PD6K5$MEqlc)WN$|`O@1z88{#NVg8WDP2Sbh*I zWmR|fAhYZsc|PdDEXj|cUopndW2}Ts!@Uk>;@k+9pxMM|;XF)1>Pa+3;h7Fi zx2R18^+tgJ5gB-aqLqt&(J4(SYD}z)N^XpqcA^{VL{eISKWq=cX=mUx6WU>EVFBk) zkl@A5VObuQm!^_o8ca}hSg~fRrm7%UJWA1Y0wstGqr}a@nd(4Pr^@8$KxiqFdIfME z2<4!}t-yxDB~3M9T3-aKMf_=|eyg(b4hjMW$x#0X#X#g^f(44M#3}^}yB=2FBvpHW zDDW&KOk@tCW=4ips=U1_HwqPxDeSu*Or`i`mK9)NaHv+~>qm$e^<~wsKH=J-2b~~c zPjr`pR0UGCM`a2bMhYx9CTkx~=fOrSUNx&;OyqQI&IrN8J3Ijp+>x)H+g@}gMeyN{ z%tvVu?M{frk(43l5DmJ`p;*%9ZuP4_VQQo%k;}@6qMF8M_{HtfZ1`9Y1(gRD#g2R! 
zgH0%w+YMyI5e|kDpPk+N6=66^(_7w@WbH|>q+u%tP!Q%H z>CMNQ%di<(MRTV z34XeVOX5Ua9#5WFZ8n`9vfQD(QC+^I08CoRg zwkCyNz^q3kh4`*(-cTA`Cd}gLY4L*LUiA>q^6hMF6ZLKG(Dd$R5TYqz$HVyr{7G>9 z>h7JU$FD9g1>Y}tbyx%u?{CRfo{9g1IM~pZDbrS=rv^jc`UcB30njqGXatck4kH_C z@Q4mG4&i|Cg*h)}VDW{0)-8wwxty9Hh+oMxiJ>$Jw<1nyyomXt*%bS!LS5_PX0bQ5 zlG0`g-u-RsK2}8V3mF&LUeQ%K1QwShleZ2lO+B9A`UI0sY5+9};N=+%@1dCDF%h0F zA7>xnR>UNk;qJo3A2(DXN7(zyTg`aSE=bvGNl2&>a3xm+K!Wk@kgPx$alJ$tjPymH zg02dOG84ilmf(UQ_(Nr}Pf6DC;2JX667hw}SS{>=a}hwGHV~i21T`m(yH>;t?s3ts z8WEDSF+UcTuu113=@Z^nN=5$+QgkuL@j-ju(A#i~CE-S8L381t@IcVbCX*N5PD1`%jP`;m-^3!X(8c3=eol(bkLaQ)pf^abI#T?F0qmqu5BFe2a2 ziB~J9bRh}}Y79m%;tGG*UUg})P*F>sMORPHDnF2m1?bR5)3ZOxbeia3I8($cF!O!8k zF`97?Imd;Y1)#%-w5RE~=S{rCmG}3KjQNHg7}Vf}`T4<1SjCrPLLY!-oIZQ&QV2p@6CV5Tpu$+T)A=XS%!Uc+%W-ZuDOjX(ow=xTR0boauD>%- zq@nPh$v6jedFabeJlvwY`YW30L%;+YNefrQ*A{2B6n<9Ip^BjqQ(o-(R*hv|(hk84 z%7fQSQak@Ub*S&aL^!oGp*vYjmEPDhwff~nnmxL-d`smTIYG)rSr8CU!c~HBzeK=+%&QBgSVx|o@c|az(QmAi^Yy3`(8OkG! zc!B>FC>wroA{=Ab<{U0L(bVw4Q+|5lnCKtd#&({p`&CEQ zC(Sk`tm!BJGRZyat_z-e?*iisiacHhr@!1W{-g1`W<>U096Cl=6WX_!;sHeji9bM8 z5IB%vL4yYoCRDhPVMB)xAx4xqk)p(Z;`k5{*NF_7XBFO&>2&a4$%-dYB7FDmBS()b zJ3V9hE@e%dH*w}%II@CBnH8RW)LAiPr+)>59v%9yWJfX;N-AACRpGyxAbkp@x|RQH zSFc~eh7~)Otj>!a#S!3R^_|p%D@~Tg$+2flnsV{x9U9dt!>D=>c62NDBh8@CL=FZ_ z^eGvZ9E~oPJehK3%a<`5{M)dkxC(p(6S#FtFjU1rpPtT)u(0aYujM}L6qBb`gRLzG zHBB;ZPLp&87KP3EA7Y^YN)K1QoOyHSd6ARpo$y^uK77Q1N)8zTQAjh`k z(NB%;RHQ=lnc*6&{>(MewrtZUEdH@0Q2+S>3{b!Un+xfwhG^o#Gt{2J=%TfZQw%Zs z0yM8a0x>M;pV@+9=_iF&S}i$(in?&336KP@9BV!UdmwZvsNtt62)SCpcKoCJH5F}8G`tp%69KVW6xBNo% zNXP$3?bX*`)8l9dafC^%s9T^yObIr}EE3q^?(%6*l*$3rPYWujc2wR79mu1T(Dce# z0H6BKz-Q4-SKZt?8TOq?gh_F9MUZW;9NTsdnF~F!pP_fcKF2~X1OcV;j+6zOr&XVvH%DmR1H`vR*yRBzFPb1b;`E} zs*>)-8DCdWC7#J>XmJE{@SnKJ6uI%K#JhEEf=r<4~w0F!9LFsCY#=9#J@~ zJBWn{f;|TY=NX9z93Hl9ZZx5*}}3JAq6nc7gNZJMcHJRkkZ>kebaT$)*)Wi3Mx^&<8*M zfvx97(JUbnPLf(l67!{pC3o{7R&E25Qckm!vBTor7Q)GeIjuU1x<$vtA;_Q!V^TXj zBE+7S!>+WfKRM)|^NhJ!UlT>~d21lp_)K;!${#PM!#r(yjk=g`4wSR3Q8ekoXQnLzlvos<xN2QGBE5mVDi9)uZaJbSF_UC|dVhCIfp ztkD(I!ivI*;zX#U<04Uq($d6KN>&~0t8!L19jiR*v3RxY9I=U2sQv7!7i8*G$->AA z_`@F>Ef`0aQj++PltTv5l~FBINhRg!X_vLFKv#Fc4cYa!TiId-_@NJf2-Ay*O>D9X zl`EJv^)X7SqQYDm&E8H|j<@uz#4v~@(3UlT{oqGw1K6huq;)Ta(^VT2cPd#mr(3tY zQQ|5~UHRTlYj3jPSTWeG;h45nf-qgHu*Ltm`*f%+knvZO;##1m9Y}HM9PE4{+$)=c zWV?)eN`=c7E0nG#p731Nf2jgeqf|;L4Q^_V9(+P7`6Q|Ysb_>?ytyX*)rjIrF{u*b8AKOkRl#|} zUADM&=cpV*^iEIGbF?MM5WWBFb$^y#$C7rpU#4oL^9MS}oA!#~cT#!2Q(WjSACuq- zlWMp7|1}KU#c1fy5x@-znpeNo0OCxUf$Dbd6BX0X(z67Sh&i?tgi2NYm z7f2vt;Z;n>zh*C8O6u}RZ?Y7IJ?v=sA`dH!BDz$JGfaiT$WQ(-aQ*^?`+NyU`ff5# z&mdZ6AiN=fs)k^k2VMVSr$k~zwen>j3p-Y-Ke zBv+0i(=-Gw*rQyeZj?}K5&_P?#tTq3uk)}a1);-GS|$o}ai08dQV|&v`2&NA4B0d@dbi53>0#K50QBsZ~ zPYe-PPKv_71yLmNB-W@IX(}t4DjuoH0&K4v|E&NC1|V03Ac5{H01+YjPAd4p0^Vth z;*dFcBlmASk`W&-P=->3lM=_Wyf3^?Qrc3`%R)lbO2WS+P%IWQ zA*zK|Ub0M%ks0-IlhSD@CBjrrtz@n;A-v{0yaoR5>I0iH+I&xh0PRmc@GL{@_#Q&E zTm>pX5hwoz@ovr#R}x1oPedfXaxWN9BB08}-m)>3CmjodAPev!h3zCHY4!$?+gk81 z4|CqM!Y2o1TLRNY%4Y|uaV;G)HE*Xcy{II73?c5(ArNsQ2BtQ}Z!=%8-D>2bATcKb zYYB;r{~B#quJE!9Mm3=gk_N*9-oY3+5gP+1x2A%VhVmfWaq3>M_~@kB>INjs0@70F zA!uXEkh44)r|CAsbP5VQ7RW|?K=~BK(j*X^QH)s{Thfrj^jy8MjFjCKo{o( zqr!B6?K4wj?zUniUJh8AL}6rxD{EsiVv{M%Lr&VGK?BNL@>4)PR8lxX5v-`+He_LRMN!XNXFM6ae6bK*i7qz4)F9nb99BK1{& z1xZ2VI0_CQTjWzIqBO;V_?it@ZzmA^Ays~_O?~rQ6!98iBmyteNny2FVM!oG zQ5H%FNC?OL~5fzosM73c_wnkttkH#k~1r6T1rHvNW)(Dj{OeDZuHJoDPW?u zOw%$#*E4njbA|*<{S$BTQeB)@FS@eG2x5z#fgOZPO2Y zqe^I}#maXwcA)}-=O`ZFflGn_c0p?rlJQ*D6z`5Qm`XR9&H{{q44i@YWH);MJQib` zY?Tr*0s~lpyMiN2Ha@iJR$DkB2mk|2P$S;SAJQTMjH_0S1bRtk9)EEGkCqOB;3wXx 
zdb!AIgF~T=>C~Lqg)Jj0fbwUPqr}2&|$iUDJ3*lUle>6RR=T+V}HEn{BzAdF+-8~ouL9GFBQ zbHN6~<17ks%Yrv4Vsze#j`gskPWUELczgLcSbB3gT&RX`2boOF(Eu4G2!LREI2QEb z752au1R(+dVImDzpA?CW(}lsH{U6suNPaH zpi-j(apWVanIdih0|F;~A7Bq!VQOpPl_ME(U}S4Caze{7RJ~OY0G9XmFc7{=DYlV^ z4D-^S`K4CoAWd@4z+;9f?*Cw#W zk%FrPk+7&ztl;=#c|kf~9``l;paufnZ8X}NIBLw0W zTA?2-Abp3#p>IP_)x~6vSalR;VNJ)81|l2+!MKQPO6@k%ZV#^CmD7f_sMh+Ub1A86 zLn#cYg>N`6M&tXC`#KRC+QJIZ%+LTE1x)RwjjtvkyzVEF3SgInSak zz#D*RM)$HZ1#L9h`!%T5A)@&vo`ektLXzy8WCR(2P4AQc$AS+cpcYm;f}5@){*)`3 zQJrV?#K7Vb-)XdPArKx3AU_g1E1a3~(`r-Z97}qCTimvK$gTLunaQGu9oPcI!C*dn z#Z0rW`xyOTTtB#bk;1D_0&KwFSOz1o$FrNtY2~g#hs@6hMId8$?Gi8Qg#f}h!Ycwv z%fV$n+)rT*>{u%H1i}8w8!vVa&Bc5%uyrDqnjr?<_acdEqfDj73c=0=GsIyR05Zy1 zTu?ygpYNo$uUtN~!i{}{F9e|&eyL!-?2=zp&XN$ahvkljY8RSEI zzC^02$ek-Xp(RzOX7^Cjha(GL;@m$IP}BRxT`G+(9=19JOk@+ddHiPuhS9-lzhl(P z6XKi~;>@oGhK`;J3+LjgUN0cB0sXuJlBw8uL5LwYRBu8Gg#P!+LC|%-r+g{Rk{j*t~Y=jkI%w8hQTD1L-n>b;{l?Az;^-*8a#+Fp~82a1d{1QP@%+woyb&p_w3of zjT|>JB$KgX!-5?rnl$(fVufcGpI!X_cQU5TnKWzKyoocX&Ye6-*0NA5VL1!sHugM< zG%3xYK#eLDSWZwarxkuyp!zhc)~!k-u7p{TrADqefl9^t^sGO9{G<*oTJS7bvN``T z4C|Gy!haSk^xgVzSxpaozJ_|#i4@e_^(Rn-TTg!=)9(R6v?HqwY)2}a#{ z-$6zYMj`En-g*};wiS9SRRGzC2=2(^f&cszn|~S_*yB+!nU_$0*G)tefK!c@6>~C1 zMO=|qYW3Ge2ay5B1y;zD4|~j2byZwdJ|*Qp`snjkRZgYpUSQM3+1FuD$>~>$Pp$Zl zYAoT{7+o+m^j)7iUWsU;T6IQWO@cIesG>H>*wa}X5*VpYY0gEVq@K7L5mY}Ma^X^k=uSsY*>-8L<|7im`YEWrokDA zKUk$pE>3dwqYo~%JZGl5WD!e-iR+q#Cz48elo&$K^l6cdE-IYWp9_|kL2hf@YRU6UzBxm2yTUl5Ez(m0bQ-@^Mz+}9IDI$qMiqf1yWqNC*4byF(FAvp6=a(`f@d4a zQ0WE*_;+zl0e)Yjx4Kd` zlklrG&vRatn#aHciLEb$noHgg_aK1~Lj;frUuGnC zV#YHDE{)i65>;qf%F+xfFX?rHwT5C=t2YdxnSPF_*bC*QB^8qzckA zEYG@$xr~y{R!#&~+N=^T_sKgYi3BnBfn@~{_YON6q!T0P#P&{-Jb}_DHs)JPAOdl$ zKW4`NI6&%0AOwL6T)<>#eR~*#jE6F`g@v2dJ10hOha&_Y%P4ufi~(P{qIn`Ti`z^{ z@2nLQFbdU2#S_n@R@%~{-egTip_xFWI#jRu#UmRk=0 z8SX^vVovXxgp-Jg*h_}BG9$_}p15nRYj+32z2MU(6#Q#@E_jfPh6N(6#4j%Sg*H{MYVNaVv1EieQoG_jaFazzn}z)C+XfDQxm#4NbbM=S)< z4{x9&O&gTtOm}2n@oMI450NoO<4BRlMo%xXeO46h``(%<^+QKhrK2GAF&iZ*t@x$M z7-`eNmjU^VEtRr}5en3d3}&klQ3-Sqw~c48Km_uzhY29D4tj(&xLav)Rq}Nwwpqn3 z76`|Q0~v^bn${$Q+HP*Vay6R9R3~=fnvEsW5xd|xY=6m7qTQL;hctwW=MA#|Ync*8 z+BmQ$oL!)j3-als{5Nn>s`6?reMv3VxW_B0Q(zX95XAXH1w@De8Srri8?1xPI3$j( zD&*$OHix8w@JBUMiVK2OxhCBdtuFJcl|z$6f6itYJ6~kpoC45^aa4?A7~B(#kj*bG znKG)k72n_?3nw#pNa92RV0%V}g_>&?Ejd!mKX=~q^6 z?`Vl@T-8bolC?;c7>PLE4_6Ak4;SBk( z0T>*I87VPGf+3$d)Y!(FaKQxGdaVkJ^_l9x8qaDrtl4#$f69hwO0% zX3zs4_|Sz2f?xteU_I+x?@8ofSY3%&$W}u^hlvBx52?sSN;9$A#t#O?YTE?6$gJeV z2u5R3%>HT>WWv5;pP0vbj^ zE<)sS9rt@R;%5-_aX8X;{6Rv5V<_m67{~W+JW*U@^LN)rPcDOTHkWDLha{eaL$g$V zJOMB@L`YZ>HT2YE#nmFy7h5x85rtwvCg&H2vJd&tM-{LS_dpNw@PE@{DbmDDd&Vys z!W_2uG{)h8W%6D1!z01BcDk~09|s;X#zegIa-!lQk2eqzVHCW?cqmv(QK3$kb9lQG zYOJ>&0d*8Ls9L1gd-?ZJB$WyN)boOxK6q_i#rlGE2tDh<~BNaFT7}rZPXdFl0{BY zg1VA05~C4)_ewW%6;72fuXrW!V-`OoE`U%0elUXqMqPYmdxX*TF8}cV00eLg1n>ZsI1vbVg@J$z=)fB3 z_fp`P8D0ouNrw{%LzUJBF+-FRc&CB#$dpCLP&KJV!qJN?(}D`ILm>u~wKNggb{nHM zf;rJffyh9=q!7dC9WYUhvG|B0ScJ@17-^*_e`Hsz@mzes2Ve;Z1ONy*c!>pND;8ir zu3>QtVLT-niZ3=P3NnW5lx^X05R8#%7eQHE5h^Hin0|R5adS5$vr`k1dtzxNF>)o0 zXw)-#fhC3Dl|8OHGq74QHN5CKwgnt*_IvNt5%C_}Lj z4wNHhgLZ+YfpokvMT1ro8iz{S!InEEca^mNA`np?TS7uUR)=I^HC|<$Su`d%xk^{{ zJdsjg#mP;csY~R-hnMnKtw(;rI8>L3X#JK!BLt0F^b2#p%DrkFc><)$uhnMbd3j)mgW?FCr_Xjo*x4x9MlS^sUf@Y59pv$6=0(xBNJ&7 zKg`h&&t-$3VGL39mVV)&D=HKDIa-DWJcTA0;4yR)@ubg!a=iH!2SX4_iKthKrMOWW zsu>e@RbVU=3qE>%KRFYQXMJ?FPl?w5Q#b+=<HIBM7#)Q$gVcjJg@!G?#V3tP)X>z$2i*Q=ro^sWMTiPT?C*+L-Q`h@SeXr2&iv zBBJC6H-lLa=x~SErhrWbNVQx36|baO@P1$UD-2)Xb*9QAxO73;aSVs zx9k*~uGoW$5zN@BL|S^+x{meztT1h5C|Ny0qA96`ZX 
zdKC+#Q6QTkL=M^iib!gkC&6~eWN5=xmrP2&Rt$}N>}nNUBa%itgDhk{0bo&FZ}Vt* z=wu90c51t;7x%jv`^KFG8A>qemtDrmq;Xu(dkX`53$oD^K2AZwog^uL5U|c0OMYqlp;aOlS%+o?8nBvSV>l*$zAxT@4O_rm~95@py z6qq5Ip_t3+NN%q*q*=wfBYX=+;SaqU6&Rqqn$khbi&r(&qIatlw7Dn~RTwfc$Cp%& zsLl1!euw5y_jfZ3;-4kO zd3-3w3LU?T83So0*n?%zoDgJVA~yQGkp0bBan!7#xZ0wYSs|ocVPH1aF6(QmZw!7YlqCV&{sxDd(4fh$6nAwv268N#>Z731bXCRwGq9GSx zR=T%f5xQ^mXk0;>TixJIvUPQ4K5s(dJV6)-jIsWSzgmt?o4u%JR76xcEV~&VieYzW zu4P$*n~0d=4NTE({^E#old|K96QqJuliNiS(L5H1pe>6C0Vh@dR92p|2m+n;y5#Fp zlr&lkfuMkc`lA&t$2uX`|9KMP$S~EK*6|EBWu+Lh8MghW*djM6f!7Y5$t z=_tBhE}L+PP8oY|*D9&=>|_Njw)}-tB0EJm{TF14>L&}IYkPo#4jYR^T{1gV4FQ3w z;Vk4bXQVOPF~RLdMKz}`T<w)0XvNxX*3kukioS(3L1*$i03x4Hf=wWw8pxN}15EA!Bxezc?%Wjf zTaxRzfxCzv@BAJ!`h-&YK+fnBpNZ9$W609@&YwU2+1x$DPG3pSO6;VK+SQ}~E7#$h z{>&YmWYaev@|RKbH;yUfG9WX`6yoA=H!kbZVKAyuj745M(tO;)KC0Pr?@6)e6Or%i z!MRxlwpy>e6;yl4-Wq~V6IgyF*&P#p)-LCywO|gFur<0?TsqtajU;P}Pg>YtA0a=% zHkLR@Di%S*Qxvr1^JA0qQp!Jtp5gu`rUpr0gY8IceII#0?35Dl<4NUf0;c!bv0RS# zYR6r|&(3G<5GGNId-37-hK!R>awN`Z?gAFvcHG6{kX)beoNpSeQOF)z^mJJ8`eNf- z$v5R-{+yF4@xYnHtbTjapk^c8xp@zkyNXi~E`I#_wRH-2r^UJN$P8qtS7zhJH5c!M znR8*oPLqvX34=-zqq6Fv%|#2(+AcP(c$?5A>0;E;F7SHXk*KCfv>=?o z{3)wE69=kqE;4fL$)d=3TFfh&qU6cSwJg#vCWQd(Z>Ao@1n;7lU}LPb1@$73EtgD7 z@<%x194NQ1j5BYi-<(QH5Og3?WHF@<3i78K-{h$x4NFQXGegZ<2s=4_S_;S59-WY) zEhGI=%rQ0fDaa#-)F&LAK-CGdycSzhHi|&X$sjD(YDl{OfB2)d)q%c*=&p`Lsz|5R zKOdv(o9en3Bp+C%ihu%P7SDm_S@odGmh3WD zT{HX-v5Yd-cw=)RQkWx#CUZ6~0zv!d*c9_sSubg^r3tF`E?f(_zRIH3Duj@0xhELG z640lgXR~x=gw{>ByK#x8@ZO`_EsS45(lgTLeP>d&=_h042&{+U%T=_Am5f)Sjxf$> zqtG<-sMnP;E)XJ<3mjQ4n^ZMuv7VLIo4lUF&FQ!QJ7t<`-??VsYZLfp8Z4#awI zrHgG2a?0gmy7Fpkj;a=Z^qG3|-WoG_@5Z_=(7n-X%@xbAF|zVM&k737=LBQ#n6Z@2 zV(CnWa>x94nRY|V+L-rB$SS{sq?0c?|6!ge7J-+(Xhx^E{wt;*_mr=t1c@cE!Ne*N zuNuiU3)Zhpr%C;kb}}ENyERj&CHZl^2sE&>7c(VueI@Dk;>AZ%HmN}Mu6ID9?94aK zYfEr+7L(#6$0}JNoO9YD5(HvSBJ0Xv2YHgc>i8}#0^^In_^}=fz(pVnBiVq4^1gYY zWl6T$mHix28D8}&SihQyuohIS{vl|0O3Iu6T1HZmf}Blz9>kmicLJioRiG)1!%cCH z2O^+shl!Xwkb*|HMFif(VnvM0yQcFLA^F1_o+J^)TjnFBFv&oI^hK%0O1KvAOj=82(54BM0h?SoZKr$|u{7NFux zq*cM9vFlartDG1~C&zQ@jZ%?HT6f0(H>X;JLm$KeWdRc+7dzgufabJX``+`auR&y3 zdkki-@RuPQer9%Dx);QJWzCi>Qk#tVA$foxh+X)h0=HNx27&|2d`3!_ad}>ca?-MY zAn!y4X_O^x!%&VY2sGvaYD|sl791UwCP+HSU&0ZHSfmg&a71c@fcZAlSjHgPiJ^aJ zGCw6HQ;&Wu8)p32qiCk}hvKQ*V_;(}Vmj_DIqeW11|p7w+=34iEUHrTGElAdStq3&W>AD4bh?wln(1I-x>)U&s!du8 zeUHv);yB8>i^9b{HltQPAg;(iOE*#carp0i;-EQYwI-_dmPepNgm{ut5HYC>)X$jK3KGmoL86_?W2Ckwqzno6k%xNX056z z(QNwmwM(=BAS{3Z0<2Wi1bBc20>K-BaDfLHfS8MMJtv=-IVd|LFI?JbSvRv&x)E9D zo!|X$*<2S|3Z%$l@aFHiG|GObl9fz`J?tK?PQ9x`(L(yjXh2fzGdmq5W3WlKk-7=B z1UcQ~YVm+@vl|fqO%W%#kw*q46jurDb~nI}oxr8OG&lG5tMjI~?;96{c{R6r(Safm zOUKDUBxPuy*NfgVQihjs+i|k#*W>#wXyKn>4XR5?O(rYKaVG;HvyzA~#{MI@(=5CEqokg{-#{d-zoBP4vX)La>lnfYvrB5UgvBrl8h%*96F-kd5P5^M& zwTCcOe7)o?c;{`F@SN0E?~TtY#pZ?gUs0CG{7qnmmKI{mF8uTx`p`1bF(LeM_K*cq zWQ*jTcenlj;X|10@hXw6W6Q4qUdb(p2eiT)-uMPTp5hO5+#bsjc*5VX{1jGKpikF+e*JEvdNL zVwAGdIQpWAjuV!;0IBplFxSHt;_?G;ivR-n22lH#K=_A#&azgyggDj zf?x=+^1May475p-?YNAv^Em7{B1wy<){C;GB7h+jgxGt9Rv>^@NRNT|hFAE8frx9|`?udX2K*l{_#0szlLqtS=z_M_| ziW`Hzu5g_zB*j#dwc$Gq7Lv7E$O;Y#MO@TIbCe5)U_8f2kQF)_(!!b;QpKK7uvmguL-8%?dh`)aUM0S&mjk7Vx zBQpbxi;bj;eA>jXU=dHew{@z*b394MXcg&03vIf|@wJmEMtf{GZCfO2Oh$0Cg@1vFSctzpcz_C^N`~VN(aA`?>qws2 ziAy{npPMxqF@~L!u75&Fv{aLYq7E4>tg9r7m23$zlnMB3%We{048IJ7q^%f zAG!>;DWulp2&3$wVJW%X_%=SkD{w$WhzNvm0uG%p7Pty7KjcQ$tc7Y&zu$y zOH0!H%c+}=f-{LmxyLM;l17ofxa5)*s*GXd4~AKdlteuNi?k_8$i!qpia`iUumBvy ztAX%`^mxjF&<9ws0F*<<%G{bqX_E;wI?WW6v-nI80!gsoPSG^U3}V01RGMY~$wp~A zovgV@R;)?OU`=~LI~tc4_t^n446Hl5DRQPzNC0A2hBu;xK7IP 
z&LRy>p(x1+r4SDC(CKMM5<9yzX-}|N(RqZ=>cfxQm`SW^i@35$5wws5VTh0+Ih7z2 zpsY!7ytZJee{fE6Lr|c}LxIc8mwJxmXhN`*QnLulZX7`G+M=NU!P4Kshz?Db z&@e9*olEON(GH|XJTjkQOQwpVl2;VZ#JEF)5{O=u)1lmtI*q4hK?%GP2SBI*<@!?I z7#aXXo+9d|j(kw-IWfSiP%uH%uh7sot5Qh)iG8Y&)acSO*}^cSmqWo+k6}-*iLdMH zRI&K65BvxNV%EEn8e%kOk4SO`APowa2a~DG0QRR)aUQ;7TUl8(`%S zTf-h=l@vlPKJYT5WQ7w^v=B#f)?ngP4-^+MtqJ+Ojk;X6vOtWSR81Z6C5b}SeJsYd zIoBs6kY|v_2(YOE2@rWjQVwYkU;PRv1s-5sr%x1EPDGAy914Q}%^A)4u7fSuAHh{N zeLRQL)X{KFp8(VSFj}B!MLepchjl26m6KttsAd^T{vwux{8%L@x0jtqu}fLwTPJFK zSBTI|uB?}R)i+FZODX)}aAXr>gm&a`-ux>>4VqTk2TQ(k!@< z2`!xE+kmvzP6bYBJq)V)GsGZV8Ff71{Mb_|8{kriSH&|Gfc-VxGptR>3eDuGrhq)E|W z+R9TeUII+trPiP2Ph#RPJ(S%bA`|^hi}|_RBrIO`W56UHiW5T)#_3t#FpuJkUhkk@ z$T4DNeZ|U=W9I4%hP`2lXp=u{oC>ZyD1P0B!q(XTEgy(c6>k+o^II9;+(T7)(^q+1 zjLitlbQE6%+vB@jB^4axRT;J@V4izaFxiur`dqQxks-z$#{Dj!o!c9#7Y^kNU-RBX z2Hy+brL20WGQGaGkPi)XC^vmtP5D^wKSgL=g8NbX%{>G zAWtm5w*z99Y1GQeS<<|h1{%XWo4CnlD(oJf;N}X^+;`>1skl(% zC{*P|9GdlKzuf5Ky9wV_4NJ05g9_xlUF4yaC()rrD*?l+8Qw^dFCJozmnP#@T}w`m z;5iLco;F^{5$c-V6Qc;t6xyr^#vVAv=qFWZr}iLhgezk_T6#W8IZ|5tU{8}SG?Q z?6=4skU;M1qH~ZgAqko@HUz#(nT+QOX5Fn$Uu3Q!*EDRsX-l|i6Wu*ZFA5@M;cRtr zh;+242;>WXy=%)+RHu-tbb-Q6yrtv+NDj&hYGP%o%F$!m?mdrqV1mva<8EC!nr@mL zX7wyI#skw84YbD|>4ZDzJ%p0OTrIMQD%YCnC-+{+a_J=;~qT{k3c<= zQ|PYhbaC;Dat{^1OBB`85VlDO;idASaoWI@B7a+%hB}Tt>;j=_>>y<9J}{-sLK$x8 zs8|{*4{utsiwpne3i!dnhzj%nXzeLStS8!SqJi5i2S8u4=4P3|;I`nxzF>Xs;Mh=F z8du%Oi@d8AHmU}e)vMM3-$#_8>8Re}`&6H5Dr(G5zp?}o^{jnxGEv}e*_fNQP6vEwHxBffsJ*^u-dQ8+QgRRFkO;YriwTuz;*(q@b~o&0_H)v6 z2=Q3zu92kUZ}}GH^8IuU7Ijh2Th41=*?0}d$kC~@u}p5ut@&yp_lVB;J|t&Qq%c_= z2jHNfECDnQJI-(;kqLkQnE5}SF$^nqY+>{dW$VTYGK(?zEz&I*wdoOuY(mNQ+;tBZ zH*SUpL3+7rHRmNTryS&do5`3GOU9krF*`AHu_Z&YW#1xETKQimTjwbbcH(Y(2f6{x zj`U~?U8jn^QdDwwGY5|ox>b`-7JBQ^X#VjfliA|f?DV8%aWb!B)=lhAAKZhwc>XrA zlMlL=R_^@T`t~@?(<4!Yx<+6e`@}o3DI#qO->jEXCmAOF;mB~A2n>j*>wb@ST(4Q? 
zWg@U^LX-9amGbXv^yV z3l?P1|fpXs9c@>d&aCY|?*b=~*(yOwm22oRD( z4QW^UMP_<{sPODxg@Ojj2qb9m8Nzo95h{FEkRU^W0uf3iLuR1CjvhaP4C&FKNQ3|W zy?dvyB+G&f5qj)Yuu~_D8d-9@>GEIAPAXTn4BGJ-(T1Hk3QekXDbuD-pF%bHuUrKS zf~YnGA)w-p{WyQh1&&H)}ER2rb@(~kLIqBd-mhz&Yz0}%hzdLuI2g`6m&ZKc9#6~ zbr1QyJ6yYt=}xVB73$PlcCkZ`A2Ox<`u6YRH|+T|^VS-XmzLCKIQ2%EYidcacGj)%`UB++2`!5E`cU$H^HQ!jZ`A~ zq#zdm)Iw)TC#5%COK=fHDSG`uYS(E-J{qZ$S(Q_Zb%~N%s;Q^S*i&lqofO<{6tZL{ zoZ)@+S*^69xzd3yajE8n|JkXLN-3R0tViSI(I$unZTXUiHubrtavbWkQ=46~SSU(A zq1tV8cQNYdM@_<45|Z5Y*N;B^F!xv!DwCd;>#e@~^4o7nJ#iUdO8Yg4 z>#WSVXkccbEzE4d5GqvKXxHXPRBsL|SXQI2RePJWwPgsPZAFnmP_=7P+@(gT)x@p8 zFVA>YxO0)~6p!e(q%M)(>4T1_i#BQjoi#H_m838uowU+0W(paHDMN*jg8xY^ZkP}M zhXkf(%Q43FhGgz%;=*E1+4P7pi6x5!xukeegdeJW*zWCxV3zCW?F|HU)!x}Frfz=OnVb)}RO`)&3uFlc@RKB!r zWqEL5AG<4^+xoVq|J250l}XLrm^DDaDDZRRQ(ENi@-w^`hEtllmOk{+y&`GWvU>LZVJDmwo1?msk@T5A|A*f;b6HUNM<2mI$Cn9}XSr&ef0Radj zAIj@p+=SJj;GHaf!aCDJfG8BA0P2P*+?V1gXPCszi!S`|#`ii!BSwLbCE43zQqsr9 zH|FPow&9Z&2?HAzxn>~2IS~P+)ki8a%wU395dCcRL|$>kjj7V3W-2fa2arGrNN``< z3N@3Sm<}RQG)dodgEuR-ByM?t6NF{doZ^ymet$=xMiO{#@epjI6dmNF>E z0c9?G$rO#GtXjMb%7==h6F&ZHA_o)6#B4~&m9*}F-LcFrqg2d3p#&2D1`t9Q$Z!VK z(dJDrfui3C_K{(gXjydx%(JqUF-7`vN*Yw$VKk`0kNBe>{>V}EZmGMtz%L{pjHm6E z1F3@&G@*dmq%M21&cckyCnC}b>AnU?r%CWus1s8(@#oRc{IGE`TG_>7A`wgoAqf+W zmQu_~BHaK_PK2^h#^hA6nUL}+f;kRRh>}x<%12ZyStVk?=%|yRBM?V(rO|lwyb+r5 zJwQ!LjVxf*S`KrnZ~STL_P3a0s)nNC@~UmH<`Er^iB9(m=|v3Hx*vLolK0u$$v)SK zPedbQQc+&V-qsLdvB{w)(<4U8hfKr_l0-1!s#crR#en*Ss6=i55;}q;P*uLqHYlBSeY4t<0DjMVr z(r^YJSF=RkbjXRh$*(jEbT=5gAIfGs=vWAwR;_&UlSktJ)!fLuxm>`SeM?d6;Lq z5sVXMw<*Ho6HG(X6X-6dt_>WR41eR^&bayG0z zBMa&n1#mnJH6|kGfPS+u*8&wXvGk6*uR#-us;uLnc(*F!Ma(IU_LYLT%d}0ETp8P_ z+4Sm|mUy|&R^0q%&k9q`x)dvHV8X|+_2;%_{jsE1$7IemNQgN>nUt54VEh;l(+hcn zKAN!(-c)qaD@*R&d;-`eYxs!-5#9_JGa{*F^w7?Etea1w9lmG~NF8aCFARf??jLerCIBz7P|r1k0cWJ!oVHnjd(7;@@e$?<|~j$ zsKX6lg&G!fBP>y7I5lx=U`JeSPyY9^!U(+Ri{;<#Jve&O}K_?Z{*zS1s(PPR~9oOQ{+MYPxZI?_S^Mso(9Fj6j4 zL}OPHB@ZRQWkM_55BNE@3ajo$`MJzh8&Pckdt|se(Q8wvCe21?%Wu;H4z&t(n~b@% z){2=naXxK&s7M`WOVJ8H1!rRMT7GeByuL?hRE`e%8tmlfc!m^@bS>3}H@AuOfSq>3 z`2H8}xTkxj?GAG`EuFv&Ur4A!&J~$c#Gl^?%Z25h-2_&j7*m|c#$$ZQ>WI|(HHFi) zNsNWwE7b_0*~RAx)Qn-E)i8=TkP-D@px6P*(}3I*wV+4zM>B0rlF8mh3BHIKp2W3Q-vVKDVsxBO{mmD8@8eU7E0d#A>F=-NQcQ!Wpvv^*kGV=b@gw2Kh(A^A#A?^fot;|u(R{*w6Y^=<3HD1_Jo{VuGoQamvwAc2L zpcYmd9Ks<(mb+(nV&`pFR8 zZJZwxUEt;2xKSD{;-Id0O)e@B^9@JL`JW|1NBP9wZ(s=T-A!}dBT8^h^y!sNvDt|< zq8(Y+Oc2T@YMll;99V3YI6f8sSTqViF~=94<42O;vtW`$nh$F%m;>by+j*OR$zvbZ zqt~>YtTADRiCK$~PtgViZe0mY$Av(UL?QMwVhn#8Nmq+$U;fWLl$T6s9!M z6xo&K)y%~yy5#4;T}%pu?JjM9_*NpMt|#Ry{E(4D#QpI~Xr-S|XGgqlOBU6v#g zI7Q}DE@tPA-$&fVR~QcedP!z+eq{V*p??I2h8z~SRY>)mqI0&DLVV_`w45>C9df`_ zWwwyt0E%w@#cDd@c=k<|0aq_3+ zz2$ovhLtqm>V&0q>Lhf^qjbXE+;M~h=9_KC+{aA`3r(N$EeC>e4Y;uht+8cEC0&CQ zop0GD*BlggG7T)sNPdo1E1lPMa0OO^Vpwbi8un+6YTY=+=ULesYhs~j$RmKMq>?FU zfRWrhPG=K_O%qk2iKa?9dD@d2N@4+^@%US4rzGsVC(D>h7o3`ZOLt(P?zq~4^@*rN|VD_3!B+Q@99nQdFo%li&f0b zc@SrIh)SnQiJ)cQVy_{;VLaI+F(Du#Fxvrr?P_8*9;1*K2=8-!{vs!U8`-4^6lC)5(C%sQ~G z9hfFE&5V*4_a<#Yz-~u0+!LShsf3{vBZ`d`iB>q~6h8@%A)+)EDXqD1@GdSyx)1(3 zCl5NP|An5$*U~H;HJJ|&7!VUFaw$+ktQ5RH#vR?Au_ngc${_cuFP)4poiwrb95NT1 z5?t7=w4$yT(URbnsUky(61HmD1!nu?%?nS9qZsJMvX!|uBD)seOQi5{Zqa9e+Ycjf zfVtF}sNHInUZxOBvI26LzQbNNsG;asphnfOqk~!W7@b%;PYwi9&MazoHP9@ z#V-p_O6?&rb~D z|9j>gG+$Rno$mgQtn(r--0+P-N$3d)kFA)qlW~SROGZh_L}M)89U|88@SGgS^rcWV8B)U>nNNa-h@edJ{PTJFMgadaKi=|0NC(dn zbaqURi{?!s$3;}@HCF(&R5!qGPiwX@9xqZ!A)ST zFe>Ac_Zfq1LrSQeK%pSA@l6c{Ysck5E=)&N+6_w$K-M;}j1pHt3T>XX5L;yfUrS1y zG`#>fg5Pa|Gv+^896T-SDJ88<53&hUa#Pkbp4G@=;|IKqP+u1_h=-uBm2h{<2guCz 
zYs4T#7I8Lv>HA_d@pAMzXUuuBroWx!b6I#lt_m(T%qMeBsA=ak8xXar(Lr&ghsvN? zUaT6_fwvp)mW7>;_MOWf1554!yHyWw?4EO{A6M@aJtgWP& zIm~A-;A9NVo5l&cEhB{jset`hv2aNIlu0LCD{h^b12n7y zMVa4+hxY}D-#Qe7_`Ju(%gca5_TcOxSAVyR#RvqE9R#3Qdfes7;*5@LgcawG#Mj(eaQF4VQyi%qEcG}F zJ{$L{7yMJ`yKKPvV+XR6P=&3h`3Pgx2k&?=b8^Kx+LiqTxrb%H{iveX-@ZX4^z9ot zDX+?#a>jr0@8Dx30B-`mKL)=8e1==(uwk*!wimNaf0(CuurSn6 zKs-zCZAPPaI^Sme;x}80Ok`w)=MPr-pSb+|*O39lGcuk2RWN8!U?+iODm?4N@EyZ{ z1uI0XNbz7sjTnKpI$6lzqdQ>j+9TA`MOf3<)boJf=_#;Z+d3O%@0rq`@p4VJxW z!9q`(Ma7P_dlzqBy?gJjd{=jGM393&ZhU6&FGq`s|1%0^Y!D=4!2}Bmtl;w@<%bnP zHhefxD>I}v^U1M-GPFpc&O~cSq?JZv z(6rzPG|#}1UTbYd6&*xqLctI#s6xady9_f350vU1(h93Aq~#n;ZX@h+^2)iGsM;zg zrHtw+Iwubb>m}4QlJdkXwcK(lD%sLUANZ&|{|_xCWjgXFG`+&FskI7%3p_66oRdx- zpIHgOheTYixbSM!D50em>y0;qDx`73e@@!-P7fLU@EMV2v$D>rTq8(DJR#F9BZv$O z2}p)03v{C%|N7@oyl9i?pOtd+>L1Gt#YjK#o?DW?w#LgSChNq4?>kC$6}Hl1jV&)O zpfK_Wm-lwm%f9^LTJzR3>1!&xqO!yB*lxX@(?s(sloUAO0<9~WTt06;mZG(mC7sN4s=K)O+m+>@g}&>s z2vv(nB~(jmOt?We(v!kOZG1N+sV|fzYrt zN$)fzLOI>^MN;*4P|||dWhpu5$q;?2$v6sWp@c8GDDbL`+*vI+pA`AOpN^{XK$M4H z|5%qPKYXjs^}^1qDW|nH=liP)|D7i4Qf423-bxUT{&$rNNzZkDdR}8%2N{dOXnR!~ z)$f#6yozm2YdxYDy}|}7_F3jPoDz$_Ow%Ibg%C!fLYklka}toeC@&~f5klr>pjcr= zO;gDjN63}~6)=EFB+uezHYKjSPc=0=OSCML8JtMj{}k8ov6gnR*Lf0|qd_*Uf$U=88mmS%QBCA+f5BP>>j;sB zU@vTuisl}XSS%2-5ru~l<6_VV7#X$3TukH3+nAG_l3Xx11M%J^o0FYeb%$+0#EJ0c z8LaReF_-z=%X*qP5w4&zHT2WS=b)2A`ho8%+scmN^jRcD1+Ss0p-mdelbPC)6!Nv#;IdMF%-aY}2rqgDtdTYaCq{zvs+`(o zGE1xBo*J@}R#~!RMik25uri*+$+KkW^N9K)T2-ou>3sMK?;)Xnh-=G zH)m5*LlIMNbM4JVWaU@{fu@B<4Nii3dM~NK5;&8S-^;)w&yFnxaYa-dL)qF|n|LW; zVu~DJR%ykqNKq|Jjiq*kq8*q5kc_S^4;g-*G&u4NbV{IBBk*OZ6>lDF$Ts}w@lCq6%-QYe9w2o+buor@SpW&D2+2>RKGBjpWapX;*Yx%r(F!?NQPj4e;*hVH^Yb#=luB zyxwcm@B|8YFV$0L^on1;7?LrP^ejL05#{`{6G^=tt>XT>6fZZ`Wa%TYoUIDEQ<`$0 z`V&w1SXoKDkQx_U)!eS^Og0SDx+#&e%%tDy-aAQ3qf;B(XHauu<-+TMe3kAZMH)>a z(`&|Q|2?*V=0=g<$nd-;1War$Z7>-rL}?CbX=X@F9^B+6Qldi63;V$je|Q9B2*nP6 zubh-DqlT(4BVw0Xi|mNHuP$TG%ab6Czci2a*Ri-&gOT&~dJ61DH?9{J6SM6Hp% zj2d(gTeX*Bn=-)DOt%SxQo_LdnJ1~Y0M{zh@yc-7bGGciYQxg*a(hqHD|S<~rXqUl zn@BvYRnCG^aDca(JFJZ=D2FoZkdu<1*MfLn(41Q+8I(ZNxGtiU=Ah6b#ku4lB~T-$~I3=~2$z>aBAxuP(n4j;B~>LcvxDYR7&1 z|4q`Vo6qDsk1V+kf2cf@m#z9HC?i~OIvK=;-?QOz2m8xTNjcO9t#!)<{L2W#F~Vj(OZo zk5FDhi@s5!KweQ78p?Otl&eA7GZ9o@#`M~;`hE=OB=G!0h|DL}rpoJzk9{IcW1eX9 z(rj~%1h+tE|EdBgV!|k>1#>XY01Xgn%54Er;+vF-FisB8c1}Vl>fMl{`cjB85XEA4 z=QT2|x~@(67R|%x4nSmxMaZkN3}fglt&n~vAv&re@C5nB0?&eE0|5xDc&cPt$u7zcI%Gm~Sn2k>=Vs6hw-!fhmSZWFZMh&z`X1|nV9r7W4`Hrrx@yiuT1e**PY%fggnDee8Y`zr=u;|9yFMcR zdJM8+sA#|@NIYvf{2>r9kV#IDtbipeYC=p-iz2qJ2ovrNNe8wN;~8YD1E=mKgo5pO zEsDSpCn5}EBrJ(Is{>mRB@*IP$fGnSD5I7F`t~l`{IEAV?eCVcm(b7R@*yYoZd-BV;5b1Wh7nQ7v8~sh~wDIti<2{{poT>_|LK z3x%<9WX9uCV!;5=F3M1>s!-S5LN#*FAQe(P&X8<~%O6$4GLpvqV29h9Fa4gc4;?6i zJhB+KEgF|^8e@5FDF06 z4!P|t%IFdw>n2k$g+%bsQ1BX|Z@XUVx(?C9n5|gE51X!s{1nS0QpEU5<91Zc63L4% zI1LG(;a2)B%A6xx_QLcKY&)z2pVp!roFf2rf-WLX9l;VnZ12`+|8FeN?kJ`%th#FL zQt32LGk_wcGd)u=G;b^$!*qgUX&6KAn(O7#Z|AB{L*fW7Pm&N74Kz$7=S0OdYw$Hy zLoa!VBP8!Hdr&QlM^T&%<#;JGoXm78pda)h9O^MB@nV&H!bytq|LO;Gc10=kgX>_k zea;6pByLH3u`LFN_Z|>3e{WjW5Chi-(%f?|I%+n5?B=#_&+sY_Khh)jMK%#54$-m& zu}$1i5)osCG`i2plx;aJr4fG)jc~C1Di1^>3z{Iq(FW)&!?HsvB6(H%QE?~m6VmgvRLNX9TW%*)d22*mg7Ekl! 
zggDBkFI!SMU~>GPB~XY1dmzTHHgx>vrUHQNP|E?z2(@IQBQE?wANT=goCGcwY61ac zJp-^*jU{9VN&$6fNjc{h3n~JuVqK4IpsVD5c6?i^WaCjw-GQhmh2Yj#MugQfF3^_t>YTz^`Wa zBF~uhRaZxkN~%pEC?rde1h0(`MbKStl|^eVO+AB6?(_a|MqI5HV~J#LL-S@9PRtI(Y;!uUhCvd zpVMuN^(NBo1OHW>0_6VYu4l>uN^oL30FVlQ!XVG6DuE(WCzn7>MV1^l&6-qKcy@|r zbS$W{KC7@7rPpzS3whg0KoybGVhkgxR*jm`T?*DgNz!1cX=;>14b%Mm47_&4Q~=l}`k^0!|Iu&G1Rr|~QU8G-0zn>Gj|#t{9+~ojjd+Cj zSBIlwU571|7_eO>ut}dJUNx9>EOLv@BZHJyLW}B=c4~#Kv|wQMnbfx}@fh1~)!dFV zCEiy#MK6#AL{=uxyM~ocEw*)u4lw92zn~^dO11+tCP|E>J{1lf++$F!I47n z{Rxfp89j~-ABUH}s?2A9_I}W!sVHy)l_)tQ)}=q~cB8i0@@7+hscj27R`pmyNw7=> zMWKZmUE)`D4e?$?n1CD&C5o^qG{vfC_hSDgqMu2qW#v@2q`0Q6l2dx1bfW5lBT2ZT zw-^{cRN5-B_owYGUu_~=bTKHjW1g8pt2<(N56V+(dU&kst`*V7R>x2IM0NUgg?Cek z5wDKv5_XhNm~Eq&8&)Vc|LgniC87@|uf3Y0qj|i#y1WkL-a0n=Ze#J>*d}}ei9al3 zYCCb?q=7>@h`QDOUP6fUPm2?~b1~W`GO1CkxPbo6)OaQ}pNNB7x^YJ8p@lmyjxn&W z;zC=)j+L!sCX%8YS|~Vnh4BZUQjt6geD( zUZp3vHDb0U*^8W`?b`0j%tDsbdWxJha2goaV4J!XWB#UMp}vT>_^3ORxPWl$bJ%24 z0mwCn@WEB%I0kyC5h7neSB;dek$;&s1+R|TcQ*W3L&KKFYw+G6nqeJku}6+Wt-`c< zkiIqOQo@C^^u>IS|9mebIh(inCeWI31DIF-T8lPH^htOXBsth{wJHY zS#)+9bVRW&pgVh+dCQq|-;i1_Ob&r=nJkf<4-v5(E&ID?c-@ZE*}j&ua5#Q>*|oE} zp&>&h!t$%hu}Gx&YVQX1{zW zQW}X!ZMg;8Q{h^tb&aQgoYZ~XA@Yrmz4yqr({h(qsM`|1RLXJ@HnU45b*DPHjD65! z+nNpiVPDf&5rZj$jw#+o#Ve+3Y@Mss>ALY^S^=ZB-x|#$aH-~zcMk$(uz1!hMJLWG zh+?`a7!xIK{}G-FJKdWSX@JMUB~*@rr`KOqsoS#G5AWBX8mo;@*?ebyz4lH}+f2>r zHUxXyqy3qondumPI#+~mZUf$Kn6gy^DZr#Irc9u6mpz+eTVGvc&VAJDT~1ob%h4V8 z^rtKIGnHAM)(nJ&X<5a#8^twyq33+pn;Kmxbl5$9q@u3r*3svB*!YZ`qETDg;Se&i zQId27hcSZAQbTT&nEu+MIZo?yNd7F^B#1!Wuakl(SKj8cWIhP2e@b~iTe%h~P@UIa z18Wkl*ckdw++hGV#s7V&0e?+Ryaqcvbs$=VQDuhgJHEGi;(3_3hdXO#PDC=dy-fq~ z?V@Ba|9vN>Ow;wc$=e-Cpj@ozTD9z6PB1t=-F+&kw@KAxT|v0NQD1s?{mm(eoB|ql zNjz&qqTg#Q^ZmG`!ggaq80jBAwnNDGqq9IryMPq^Cg@9QY-~d04gB`>N})`Uqr-9% zHw*t-sR+)MBCwrT8ucd?)jbIdiTBcfuj+6yiLn3t(PK{wW3+YY$avoNt5#U<4j}#s z{8yo%!DnO=R(Lj$ph1K0o^?v7utKM2&)&Uj$g$(Vh@Bo%%=oaRL4of=Rv^>qok@rW z$-JCN^I*-F2rD?e`4C|zhZ8rxqxf^@!=V*6@?6UFU%3i`$~m=A;b~N+RBQgLRq(3L z|5_HfGOcPsY6YqS$Esb+wyoQ@aO28_Th4+YL1_)%wUGDWz?oaC77Ps4V7a?<6D#gX zhT-DJkRwZ;3>RU>i42WSM#xei=aQLee*SBcb7Rs+O)o@^(=-Y;|mzy=5vjIkY(Cli>ngwaV4&(z3x1pB3vA>eIEg z@NQMF`MUe}@Z&2!ET~rBx_A3l;HS@Dd!2|CBwknsfhCrF2>t^cf($n3pnExSw$Xo( zahBOcupOiyXbQ1*kZv0mloCa&c~}}oucfHsinr~?Q$xeC)Ld{2^0eTLHNi$<|7nA< zrQA`SS@hd~NbNY5iGB%Ufj9nv1r>ozrKc5Fc0E;|cWkAX7JCnN>E)M^)dH4OJEa#@ zU;O#=(o+YX=hZ)Vod>35K^Zycop?G}okT;vH5AF?#zOs{43AD;?cgyc&a z%{JR@l**=2jW?krk#B}(ilbVX;#AP5H-)s^SVaO^l#r*Ebeo+#sp%hJ2I_QPtxXjM zn4IUq3Eo<$X-SZJc@}HzvGwU>)Lw*^G-iS08J6s^iv>mPwb;6s5J9KGH_@npR&y|V?s~mO{N}SEx9sxE@=bJGWAcfs z5m5%MDw;x}Av#jeIK5^OyX_{{TTC*B`qDo*?mL&iH|gtfNy@cXXLTW_3Rge<47%`F zSz*Z&o3xhnbz7G01fB(9)mJpkaK~NXT^-MT=6@3l*p;*q%dOSheh1!iPJl1=8Mxw( zTJ&c?mqwzZqPeCxM~i(FW718fR1nQ$L0uAXp7Xd6TZGf}>bC6_IdxOMS}CW+Op&ed zVQlmLFk2BVm~2!^o~7*Sf(KvhUS#T}WQcVgNM)O_3ooLh&PV?;|KWwt6_L1%f>!jO z(6y@?gElyKV1fzU zg8}rR2;yYSeuJS(mMC93U)b)Hlr+kw^aKm?!DnsL&?Pf61gXivl7| z0C=8vjwPOhq20r5+7^K*0ti$Y;7h_{6{{o+qDY;~{{_+5H?#PPc|vtr#Rg-POR*%W zSUo0Yf*F?)6(@1pgB2jh$f&9{WKIggks{~Sl;Oy9r6TH_)Y@W1A<8BrTpgQ$B3320 z>~gOFRFXY`Rj~lwq&`_OUS6}xSTog4Fko~_oCcE~SGKHg!b&G&H!B&RKvSLf>=sxB z1~}C~lV6e}nuCtEIe1}HC2S>TT<>|*`89@rm7~&Nzhky}x^A*^5r}WkVp#;%6D@|} z3urluRN|$@E(BGe8|7A)vk>U97O^OEx9b+H1#+>>`OS!wsW3ib=rn>98eY%(9@fT7 zyISeVTHtHCxRBGfnqv)kUm2~q$Vsv*JLp`>|I=5!0j8;ND&F3f%Uyuh2QQU9Xj3U$ zK$xw^Jk_nrejCi;&wVR~8(xZ61@htdmA0bb1*d+Y9oy`4nEUv%!wpUz7$35+SMD}j zUMyvLjB#Zwn|Z!0d1;X80ZJr>= z@Psmh`ql5-vmO~N*R|@=BxCg$_t0Ct|JtZ%eB`qsM`>b1)^=rR6y15NxhJ860+Ig`nZy zobUZO_O8V?1g|DFDaCML5&My`_0d}e*3zTb6~~S7F|tnxlr%RRE?FsTP74ENY!($F 
z8~Lkn*~WG%-pw#uB6QA?nmm_7|Eo0)IrZG@&AoJ$5VoUbNJWiFbe1yc81nE_2i{X3t*-BfDT-(Yq;WlPWHhO=Z;6%Jy}KSyPO7j; z2E_8$BNc^yN6=`0{29W(@0o|cR$p`o`N&(O_=;AgpqcHQ4k`O@3j{2A*eMvjSCOB6 zNy!s5=#&SVr^ri-R#wPVmk`_DXy%7#7`Z(Y22%R`XZ}{A<-;_v!c^!2*ErAEC48pE zWQm`IBgl6YZ!uT)rEDUlYauo)g=HlQlvr+o7#))r2SQ*1lqpuB9#OG!g;9I(*Hh(C z3kr5_6Ei^i;ej8BRhFSR|230!_Lmvs#X{j#biwB(n(}`GcoGZpb!PK4ilH@+WG6_$ zbHGv`OD1Vp@_u6jVFV>yV@Fixv3|2NSzuIU8um)*mx4!9So~CP^CWwuA!XL5g`oB) zC4z!EA}=LIT4`l?Tvb*{Q-k#9UVz4YaiJ1Z=Qz%xd_jgA-g8xQae-$eVTti+^W-*` zc6L+JMuNZ}fuKdif?QltV!*cg^5ETazJLFe-;eZx{hHvL&f<;uo^;bD%7bdYJ z`hX62v1M=PiIGu>|6gbpXC@xgQWBf!BxOg9u$B@ivSyUQJ(w~aq>*dtRA*{TWOif^>3SD;i5L|U$}}7q6L8>#BH|+(HS}V7hC}k0gMLv`;Dn2CaU8lh zaquRCnc^{-C>4sRH)9xq26a?eB72=TQvf0r(kOQu34&4BlXZcO4MrG)^$x}`eoN_k z-_nbcflP=3G%>hdGYN-{Vkw^`Y{Lc;oH7zF`I0d*kGM9I2B=Dl_7WjMD5#;8C!viD z16=RXJ4onH{||GJH=!lo5C{w?Sstl{3p0p%fnCf+AKSPzQWB6?d2SYhAgVDlx0rE- zVpbt0O@ve_=2RPzvMHBiO&PJ4-+P$3Xo zW}OHbk^kwRYy*{9C6%|+97<7B`DvhL_AQC&Z2GbwNTIW1pIFjGmL^yiDmMp2 zmmH^C6AGjqMiCw=mw2Qv+e9cPnRQY6F=^Fr=gB_yh$zcvdCylW&z5(dI)=6Mgrjh;`F|8z7b}>(t8Zs9al>NXDv+_!{qo}WEA&mne3)d){v#dRFUNV-Y z|929Pt)&?LmyZ=99Y<#_A_7yOF(6dYlcofEk5N-?a%EN-h$5(5S~8#%32MCBF$%O- zRq;SPDOfzoHovN+f(2!1VqAQQtQ-kb@ftLWq(|^VgrLE1=0dFxl2-LXqP1mX1!xhF zL5|M09Dnjz$_8C3LunTY83kq#pEj;X)*c>eg=S%#YYL8Q#}$Hk5(m<&%($ReGA0L$ zErj|K{-7o%N3ie*5?r^l`?C=C0<9;qQL5E{uvmw3Q6uL$L_g@BpUOl*fiW2OVErXA z)i_)x5fxTsAV11zY_WkF2&WD?iPl)Mw!&8%lQ&fMc@z0hvG-q^F(8R5wDpD={|wu6 zqt%_OBSX^)v6JCmGFCZMml*P+sVjOh(Bl(nBOGL~WHkz$iy?Zc*H>PFoX_#M<+WG; z;G@drxiITn4QN1T5s_#Lg-K|-ax`=Cge2|8xAulL5yhQ*baXPrimhm=*7|srLyL)V z9EQWK-4!Gw)FKX1CCxG{U(0?s@i6@;bs9q;1X_IGJ;A* zaxy^upb!1v5L0!kwu^J($ROI8LOmuvzH7LI6}$@v8QB`9u&Efkh&!OVE%NC*6L^B~ z`(mBgynwm3HQ^8XP+Ny#B~yWzMw?WNNv?0pjNvN?l;8zQ0zo~TMw05OL@m=~`UgvW;QHQPC1c;0q8TtsDkcuM9(L25RyOm>9(|crP>rP7f zHWJJa;;Wl~tfgAk2rvx_c1V%vB9iB^IT}VdjSEH%cq%WMt5Fsc>k$yyrzwU^{ zfAbM6X;SLrzkjyF#)1@^Iuq7%RY5`)W-(79h{mx)HwWq_ayN}2rWV8M4;8E|ctGa4kt1A`KRcg8f726A^Aq1Hfc!HCu#?*Mjpz>>>h=Q}h%4AuVX8BR4sYn#t zNU#Wi+6u%~R~ZnBSD2O-PbtjamxceRj4^5wttYxW@j+@4uvf7q|B%?P|CGg-tc@DO zq^bK5>||7dvA}*qa~1HjeKB);i^}rbqVho-nfa@OL`{WsNUmj4?Rd)y0}@adGDyLm zTCqxI>Lg5Mep7K3BWq-&`Vi%(83mdYnHnonc zt}n6B%#6cD2(JmmV!-Zn0BTNo9cplf$z`btahfU^$q$i`K^CH;XQ!G9$LQ=jp$ z8B84_B`J(&sWT10*`gBm%%z~JkowWn3=CX)cO}w`(0~cM|G})BpbSA(2D>ds)c~YR zNwz!2I=3BUfn?ETwe$|Xvmxf()!_I+wdpNt7-t$%*24EGdc4bciFGz98LucDB|+Eb z8%#5cxqGU_GqoW^9WSd>lp`!Ayfe|TmU4ObV8^<;#+u2CZD~0>#=?}c7F64mUD+8i zxn(_8@v_;RT`Z=(P2Lh216(1T@ej|2+Jam-1KlgM;@AKxs7*Z2i#R~4$Barn*nn-3 zh{0~0JZVzx&5G#B!=zJb(H*s$+^=^%I9ZP0q7?OehQgGsClxnTTaLCCD#KhR%9Uy4 z3T1-1z)CIHFT%kcNH($Ea{J_jMqE3|BGkd%fvJ>S|07sNNYQHQanAgmg56Wr5|S2& z6C2;msT3Ku}V*dzWsAOwbG@ zYC`qNy6Vx~48aywL<#Z57ue!gxhc5i9vA|H1@~THEj>5B5)J(r(Vd-+r_5+kCS^9W z29~SmVaSY(g;S$$fk7r|@!irw&9ns2VnMG3Qm@J~PZ~2=4IVsOu|3aFmFp_zOIZ>8 z+!n=@79Un;XBg+#(dJNFjn{!Aob6eqNTaC1-Gp8e5Qygmy~G=G#o?Vm&=ph{9Vg3z z+psn=Y!ek^#${J-x!JrF$0O?4c2I!h=$Kxa|9rCMr@nJ2Q4zkTL(={-m)cr6{vaAV z8TLa&%M8kYnxF=T=XtIcvEAp82Gt`cw%3Tkg|6&u-o@;8+JPzD7{0ka4mULgZI*WlCkO#vWEbQ zUBW#RRhXdQd~&e;#0G-y7PM5{&F-&ukRx15Y***rT;gnzJO%H+Ay=wgKJygn-(@{s zp`o25#EyW%@T>KT4*&3Y{2mEjGovjr|JXOsb^dK}>KBO-+hvQ_91NMAr>hvW-e#t4 z7Fj`(4AolR7U`z#63ERYZ1%PbgnkkmGTtazBST0l%M3!tY1zkcVc?riXI5MEzHUkv z&n8^5U|STniP6Lf-P!?kJOB7p4pC)9z2R7v`*|7c4>S;t&e2LG^B}WiBfC#Zj`@1( zNd=E@3h((8;`S`&5r<3qNCWil(!<>jSE6nzz_Q79F3fskPaD6vPam^VpV5xLH)T8S z9fs#RI%R@&?29q^A*0>^Q9N2`h3^!Cgea1XK$8kPt*r1Y|6$5!FUj1zS)tRjmI-~%#P~DeyLUAQ;_TGv9m%Yn5J*6iAaZ`aPn3$zwO z3tr3hZEIEVV2XGLijBLFYutZ^t^VywnCaew4^IY`oLTc`&YeAf)>xG2NUxG@>hy{A zaOsCdZQ{(ilV?w%wQUnk+UY1#rCL2ZJ*rf*?h2SU>#kZ+5aX{t2Xg(I*rA2Tkp(tB 
z$hPc4#9^|f9F6$hQZBVx1H=bp7-!EXD(2HC1_D)9WnxA^`42QWYZ@q1?{ z)&?suwwZ9!$)t;jstGm;{|CxVwr4KHFt^=cONuA|5=u%S;RX~;HQy#$&Y<|h`bi+_ z=sQa=zyz}EyzjW13%mUYD(|Gd#A^#k5{JYNyzsVr=*P{dJ8(d|q>J&ZA*ZCWN-L{e z$v6aa(kZE;a*|2QiZXmnLNm|2$xEV6Oz$6~D&W$yXVODXO#T2XM@6vmjE*hijI^r9 z?+|0Cv9HQ;tS;*+ppHp`9K901`3}2Lz`x{c62QM0g>fN3BLy{7QAhPiwH7nO3a0CH z0y9&Nio!5fS=XG>rQq1ijJVHC%&5ok4pNb=O-tKLvbXSh620r}>kcAc$tuV^KS|xJ zS@#YVkUg$k#V@TL{~3Bm5rICXwp?@11r;j~9ULyUFyVx$C3LNF^C(;in=__+B@-zo zJ0TMC*Zut5%ej9+(vhtjd+bifh#FfoF~1g_7vc9Pl`q?i73vf~v823?wezT}xMY)0 z_U|Qj+r25we{@@kq=QkOh&VF#Mesk}NZRPxw+`w`tjN9<46(Qz6AxKzE%WKwV()uS zS(-JP)IBIQMe^bS|A8*8e)#bxNVldYyKJ*Nnv+|rQY9`zs??AIylR zymZs!R{2!T|56oErL5pSyFd}s%U4&>kn;#q9ucagZ*o5}v^*81MhqdyBoc>!TPjfg zmI(Z~#V5-Anz726*Gtse01XSC%E~(R$DeUeC%=5uZ?0)&mW4}@CD^B~*T35D#y2aE z3dZa&pb#C5>++Bd+FT4mqlevs58@Dr0TJ^64BSE-|A^1D26w$SIj3lt>t43(b2+*U zi87=+4ByN*LK31ZOP^^G>ttp=*$E_H>apEto|2r7RF6jzEZU8#roX%lDk2D6UdZ?W z2(Abs0v33{F2=H`y=gF77PQ#as^+ncS#E3kiO9%0RzfazF;YwM(EC18pm&|-U8Z~4 zKs|C`}wP9yRNSXk3UlmyRkfa+SpCU=-y5CDP2K@1@HAeIOaK#B2rQDAhZsj($e zi~w6&ajf_s|FtJkPFl?G!ZSE8cG8npdWj3wXPTI`BtfsE;JFg=LO8-Ol~(E33q3-| zLp@R=cC;fwu+taE*pMIyBJ;3FK#QSH?-<3RR~E{0xeSoC zHaDeBWh{%I1ZOw}bfK7#frhz?O-oo;0eO+lDO#Z$Xtc7%lvOi}IJ}%KA=0cV76&hJ zxmtO+)5kydN0)v4%36#_0D~}bkLnrV@RqY2KxQO^*TEbk>2gsIVlheM6K6<8svgnU z|8pa!>5`iiWYQ}+MOWseU;ildA6YVmmgiw9jw+))jA9BhrP-S}_z=u-9YiQYHK3l# zVZh};4U0LQ4B!glIYvzriW8Y)k+!C}jBd3umKm8QM>sktaI|6p+o z+{$v9Nq#76U3C(pNX8J|cx$!qh3^=Z)@HjKqGU@~MV68PNheYl_N#*%lH>}A0|O6G+$|jF6M}xY0{YO0 zSqiyKV3h*{R&AOyEz9ahoo9>S#W$wJRGw$4CxcqGQh!E!>P_<6;B_w0P5}Aql)UD( z<%9`LX@kOKzjWCo!Y^)zgy`xVsNaQp4@aLo7SJ-JiOOV)xEE)@SEKbGu?U19{BXcq z2!eqTVAK&+MDl7sCWKfVGeb&j;?C}smOnl(t*O+_PKoT`PuDe3|BfAzSdoS$sv!tf zXcK6U`~){yr)!}}r0fEt%7vZzc$T%)i#rOkkhBExP{({~RmE_M$o3>Uo*jc?Vqe=} z+99Q1lf~0i_8-R&rUU-@W^UTuo|=WHP1d$Vns^HglxFWU0q>#u_>Hg`O1%~U1O|4o z@Hb0lWT~e2L(ttFR!cL@N$a^_cG&h>HPj(0BXQ=%AK#11$$ckTCp+CXA)R1oO6kK<7;bijqv%8q|{ z0LNO9lFJNJBOp|hK7DwO-WeG*8y)R0xcT97o@SNONo`5iPN|V zQ@W6(Koj<(5ZghM+z>k}5xbUQy#R|B&;Ty zmnMOgnkbVUjLE@(2MP=-`v>I`hqQ=~ zjUuQc8K%!LwYGS^G1LcsXejcyEGTk3HRO(@+aeEw|FJr>#i#=`Mr?>vVvsdynNYGN zv3tKDG?%sr5h~#=Qrshe62LZ86uQ8`iwG`3Dvc;ip6Kb1yU4Vmda{9Ak^&SVjPM73 z=!dBS!-3f!SUmEF3MxA_lW37&#MK}i&r-0KKB0t5v-iR2-Gj~T^o zi^!kBNF<}PQv8UaYZPiqL(bEtpA?m8b0Phr|BqQ@$*?S;v%<%Rph35SssFOcQF%$7 zS(X0)lFfk}!2?QcVhXuaGH@fe0|EpuOd#cPN&vBudK0nx2{Ddr8WN1PTr!lWB#~iJ z2z6PacofUZEFUt$!@+PkJyWThd5IOe%(+R#L<^9$NQ;D+3+qrw$FzuTfiMd=Am#EM zph%npYACSe45#`E)z+1bjFb@ znQ3ba)=bX+oSIY_r2unI``Q_mIH$N7|IKg$K}2dbv+yhK%PZP53k{2aBSNOcc_0Jo z%Z2<5jZ_TyoQ(bO9EIRHh~X=eWKU7q6oK>47Ud$9aU=Hol8Qo5(flGSv7-e=B-?XG zn_86OX($6CfT0?ycUwvT(Lg%GN3=+b%i59pw1}(}vFR)|iYho4#ZoTH%)!XP*BCv2 zIh%}Y5KvhORoNPFtP8iJOsQBhf0B}n5{Fyx1Dq>NB!$mLNl}d$QT@=JtF#CDtF|moe(`-ZAb>8}&vDUIxnB>2iaGlh} zIP8TJW@F3Pi5^`wdzX|B>0U~5Y3U9T0Rd5|rF-d)m0U_X1!<%~q+#hLmlRas@P9v^nKSdwoX^kK z`+DYy`?`PA%^SoE%^!~Ar>E%$os>HJ$_iHfyz<0(bF8yEa-e~2=}vVW=4Eg6$gmu&zB zdueW%*7<2qlwZ(*mg7gDW*)P=6KqE-X>v~68mAnb#3nuj;w$IYBS+@BQbTQacD|K% zbiuYZoYu((vwft>Kq>Qxw<$zjGPtiFsZcx#J?T5|3F1 zWt?7KE}GSZv~^WDE^(SAz0m%p?O1}cucerCM?7oGt_M>mk}7s8Ykd(fdnKLsd(C?O zw0#m;eLzlT<;NGtQVR%kn?Cpoxb|1Vbg$rzRC2b4keWT7T?RK$+LPMl0;%r5Y(5!H zgd;?1F4w)x;=De+9@r3WENe0?dhvNgxN z`kYjb>l{vd6A5X1*OIER>%8Xt3@wEFH!e>DGNM5yD5uvqyz^w!%49R(z#K?i_$k%R zZeI-ML0sjI-q15X=9$*!oOGd&VZM`8k8m|_tL!+KbviwAbV(j*JXBrNrsmFU>X@}f zsEM@4pU%(<5R=6Wxem|)o}k=@z7|^`V;YO%D2+v zp2IoZDh=P!?b-P$b-Lm@EXGpICZ_oV)3`Lja0Od5GMuZ!=1uG1Ie%U1l`Xfwo!KV_ zafM6RD=yZ!M2oh#p{>0AD(Rfs>B;*)cBsv+Nt_sQ(VnnP+)uYu>3#-adp7|ZU+MPj zIa$x<2q@!zNfxqX#fi2#pF}9StMaU0hE?-_@cH`mAWwDHE~dS8KJS_rVB`7 
zx-yz-ARe`DbbqY=e6T=BLb{cFji&gH?DMJu^C5dNA28e2vE36(rA9*M%>k`gGOCC7 zI_eW%Zm)shE0~sXrau}!=L?d{)qwo)>!X6b6poK-^o8W)kF+!}Ib7xw{AWgQ&hW;f zb6WTC{C~MmljZkqCAzl0zcT|)p_=EB!(a(UVu{Q~@wAN{Vh-kXv2a>0bUX1miROMh zz<%$whChwqjrVK+bA(~i*42mo0N+?B>4IV`7)UI@=(AhmY@4Ktf%5ZG3wfTYp)Gu- zL!Qvp*}8KKkjRK&3v55VEqIlbwHjz3%n5BfGmnxHBQJ03%Hrt1hGt)N>s7pE)wO0_ z>3D%sYkz!qE$n#CCG;l8(RsnIkmkc%;!6efj(Y-;As@Bh!UK#mM?~3)D68>$F2u_w<)KrV5>4@2vqhV#P)UwTrSfm)K$+|u) zYGxs+hXtMNSteooxnB9b#4RxX=+uh#yWiwK;UH5C-^A3+g%o(zR8B)ccR?2RCkZPj zN&6!&-MDJOkLcdNnCmT0Kd=0(jlWl0`h*54evvVsl`K&8jcTs~V3nTh7 z6%IMyqYH}VdjtWEu!m`Ae19}Kw*lPcoNXxnIUNDHQJ?*Q5A_2+Tu+M>6v3uSeuZaX zuMWlH?^>0t&l?Zl#rj!g>|;|yK4g=uCAKfPuG=PR%_WIvdMax6Gg71-)z zr@2AiV-%(!{uuJiem3XzRXV!iUxg|16N*V*c_>K|@~*zuyy+n@*;J<^TX z_t>X=9|KUMNdbNzj^h@^^`G@Ud%#>Gb3o;MEjO(5x|jA}B7J6lGT%ahtTXOTWepd5 z`KTD``J?2`#sjl=nhCB}X23Ez)dzy4J?=3uic735**z0>t;Y8B6EDY%spc`i@`LtRID)>Col%xE~@u7s$pqRX8G=P$~1{~d_Q)z04gVY zeMJ!UqTZM>i<#9(dH_c%h^O5_xhNf1AA)H%xDZwLu{_;0KbdUZueOGU8fq(y&205< z!g#|J?P#ixW zCjGNCbz4(LJD2=S%s_uYxs=>{jF@VY#q;+6wyV{cHJyV2`%aVSa{(j#%Y4v|QEs`u^t}!0v1F zGHS@YFr8u#x!C?zf}-CQZG|dwkX}dePK3ky1K8oMR|u=|iWUkTUByd&kd0E84Os3y zAhr^G>1z~&odGqJwm<}t%I3f$N0mgGCcK6IO_yoKzN45VaQ%>+yh`oI^M)DtG+4C( z2GTF=YY(E-r7kz2CmrGgSiRHlycb{1yx+D3;e?ADM?+JwQ7+SN^NgT)ug7drb4CNn ztyC<+`B$sK1v^y=?GNm*;OBou+x6t`wu!dYr04UNBx1FV#&H_1W*1q2;?CPOoex23 za-VfI`YlIG`v8D>|~-C4hz;bzhXu!Cw}<8TEAAgdt~avmep% zyTxljw*b1pSy)+Wyz6&qjp!G6WRRd57ko^&@YPJl~25=FYqA_A3Z089^7=~oZ zq8|`iE#i@~k@XHv=5xB@8zU=HRgYo!$9%*wod-DCp)O zc_t6#V(c?ETj2|kGet|>?^x1>LB~cY$-w?9F?3W0G%|^CugNKX#g=7pul5=wn}X7@ z<7dk6Rla5o1?OhWAM}7cUEe%ZrEzg`eLD%60dTahWDq;@j|5;cS~k*Ud$6mAdk}(m z2kb>@C>ZT>2IBXmZ7?XdYY2h{nD4#WZ$maA?>P>#9{xEGJW95ch9Fe6oIv|Z9l~mV zSesa9!Pb|iNM z`wWaoecy$zf=yLiN(SVV<5my>C#n+VE~@rAAS8MawEQ~&HrGDuXCiYn?tgQ`31qD) zHu$~+9>YlKxl4al+@)7Hpomt{@}0yW-;H-Nv@YiDl^wAn2WY@~_y<`n^ZiH$c@!nI z;j1BQcx7CBk&iq zk*hvsI7*qBxs%#uP^uZ6Zy24eTCQX>-{Mc;1fMh3*s)QgS4#B)FzORq$5RgGPPTNP zdQNln?FibhQM1Ni@Yc_dz&{}*;H74-=7O(DGy@R$f*(M9T{u=h3KHztJM&s5N6jJ@ zFYSMA@9u;x&W1nTd5%x??Spr0cqYGSpUDcvB3Ah79U86EJhH&4ODpvR4rbO{8$X_e-bX})F@{r46K6HEovZQ8SUjy?hB$I z_N+l+>1&r{;+6^352@9BI(7>~w9dfFG`9KiAa-a%`&_}0b^$X)f?_ta$AT4J@6@&8 z0d_Qvz9xkE2Fnw~R1EtQ>6Q(&;@l~8_56{3ZQ1T2y`mQibyjNjOw%+f$ zw$;fhlyAyAPtpccckf_&D>T;AsJKUe8jPv757;(fr#x;FG!=gS@5lQn28o*Sc#?EN z3s^9vFRn=d3x;CVv{eSz6fo7))-?^FIqVS@SUKES>(>@<$tR?(W1M=|`cm-fy$3pr zBqp_fM)0c!;FoC0+g{vqHHB0E%$Ml@rjH>rLe=!RLZAO(YEAo1zwj46cZ}zKBLMc@@@6~;g@i}&p2#4yI2ljhtm82)ht+gF0<>_HbkwLyl0Qt@so6fl+DiIEC5la_jxhzxkSNXMUEu0!4gDJznGy$UAmb z=*cQx-KP$Yc3Krei}pb&G6X(yfTWxro7!H!c8$@V`Wusva-=C+mSOd*uK!S2pe+*w zF(%BU%LQpfzWZl&#mIv38tcF_^`qw~1j{m&Qv2jd^7m!F2%bdZFDVQ#R;Bd|*z{r( zV=*5qT@9$7vW`T9AWY{BF=m8j*s~`{)kdO}r`~-ZkHu5YBI45`(R4a2Vk7Z%}oRf;T!M-hi10eQS#ePx* z!gYpzXX(hGM?L4l@_)R>A&RsW?bagL!y0OeK>u3IeQ2YfzwGdDs$ua5qxit*`d{(@ zFflp<$eT!Vx$@oesC!wqN%D))46#$d#2_Q6SU9KVQY*cFd zszuU(jkW0du4umA?V!JBrhsgz{DZw^7Xm{`11L-G68BZR%08(qPnvefha3c{0l+XWB<>51p@R;R1>=w`4CR|+&B|F5SJ+8`sP$pmN<4JZGY*QVehCc+_G?Ql-)$qdTyJ01>GS*qo1Y3fV8TBtQV2VxD@^$p ztB3W`+F(u_@VZhlgL4|BBL<)0BF0nzM>Ak!F8+($aj2wdcKykBLtzN77n>8q7Z<5T zTPKzC&|=vOl3x-18WocWS7V#ny@ft{FwTy9P7+eKxIQHU+a{!tHG&0A2^-~9cwe?= z+Y8(UC2K$T8sAm3`KcqB8qeyO%VPWZlMQm$|C^BAu8G~fF43kcs(V zE$=4=-s9NXo15%6F=ti_+QGf5C(dzENi8zTq>WjK*Vm`A^r?GoO`M& z+s-{iCkRG#K9{gW```ym7SfPpq`i8`ru`W7&2hO!u!&m9AkD(|yY*5fjuCWKyO4r| zm?hd!y8&lXu4DKHtRx46NE5Fz-tJ&db5JNbZcj85-4kUTtiA(hU z+>v;Pjv+W-z;5|GDyyEQ6;~?XT;1YJuwQ?5GX!vtyl@jZRYMZZsANQp09@g^?pF8I zjtB8+J+7xR*toOf@=vGStPMrD9X6minV5TI@vsd8DeZ;xY5B{ik^wF6=gkZ5v98vR znTN?DX0(xvtBHKf2T 
[GIT binary patch data omitted]
+                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
+                parts = re.split(pat, line, maxsplit=1)
+                parts = [p.strip() for p in parts]
+
+                info['package'] = parts[0]
+                if len(parts) > 1:
+                    op, rest = parts[1:]
+                    if ';' in rest:
+                        # Handle platform specific dependencies
+                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
+                        version, platform_deps = map(str.strip,
+                                                     rest.split(';'))
+                        info['platform_deps'] = platform_deps
+                    else:
+                        version = rest  # NOQA
+                    info['version'] = (op, version)
+                yield info
+
+    def parse_require_file(fpath):
+        with open(fpath, 'r') as f:
+            for line in f.readlines():
+                line = line.strip()
+                if line and not line.startswith('#'):
+                    for info in parse_line(line):
+                        yield info
+
+    def gen_packages_items():
+        if exists(require_fpath):
+            for info in parse_require_file(require_fpath):
+                parts = [info['package']]
+                if with_version and 'version' in info:
+                    parts.extend(info['version'])
+                    if not sys.version.startswith('3.4'):
+                        # apparently package_deps are broken in 3.4
+                        platform_deps = info.get('platform_deps')
+                        if platform_deps is not None:
+                            parts.append(';' + platform_deps)
+                item = ''.join(parts)
+                yield item
+
+    packages = list(gen_packages_items())
+    return packages
+
+
+def add_mim_extention():
+    """Add extra files that are required to support MIM into the package.
+
+    These files will be added by creating a symlink to the originals if the
+    package is installed in `editable` mode (e.g. pip install -e .), or by
+    copying from the originals otherwise.
+    """
+
+    # parse installment mode
+    if 'develop' in sys.argv:
+        # installed by `pip install -e .`
+        if platform.system() == 'Windows':
+            # set `copy` mode here since symlink fails on Windows.
+ mode = 'copy' + else: + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + mode = 'copy' + else: + return + + filenames = ['tools', 'configs', 'demo', 'model-index.yml'] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmdet3d', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + os.symlink(src_relpath, tar_path) + elif mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extention() + setup( + name='mmdet3d', + version=get_version(), + description=("OpenMMLab's next-generation platform" + 'for general 3D object detection.'), + long_description=readme(), + long_description_content_type='text/markdown', + author='MMDetection3D Contributors', + author_email='zwwdev@gmail.com', + keywords='computer vision, 3D object detection', + url='https://github.com/open-mmlab/mmdetection3d', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + include_package_data=True, + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + ], + license='Apache License 2.0', + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'build': parse_requirements('requirements/build.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + 'mim': parse_requirements('requirements/mminstall.txt'), + }, + ext_modules=[], + cmdclass={'build_ext': BuildExtension}, + zip_safe=False) diff --git a/tests/test_apis/test_inferencers/test_lidar_det3d_inferencer.py b/tests/test_apis/test_inferencers/test_lidar_det3d_inferencer.py new file mode 100755 index 0000000..7d76b62 --- /dev/null +++ b/tests/test_apis/test_inferencers/test_lidar_det3d_inferencer.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +import tempfile +from unittest import TestCase + +import mmengine +import numpy as np +import torch +from mmengine.utils import is_list_of + +from mmdet3d.apis import LidarDet3DInferencer +from mmdet3d.structures import Det3DDataSample + + +class TestLidarDet3DInferencer(TestCase): + + def setUp(self): + # init from alias + self.inferencer = LidarDet3DInferencer('pointpillars_kitti-3class') + + def test_init(self): + # init from metafile + LidarDet3DInferencer('pointpillars_waymod5-3class') + # init from cfg + LidarDet3DInferencer( + 'configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py', # noqa + weights= # noqa + 'https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306-37dc2420.pth' # noqa + ) + + def assert_predictions_equal(self, preds1, preds2): + for pred1, pred2 in zip(preds1, preds2): + if 'bboxes_3d' in pred1: + self.assertTrue( + np.allclose(pred1['bboxes_3d'], pred2['bboxes_3d'], 0.1)) + if 'scores_3d' in pred1: + self.assertTrue( + np.allclose(pred1['scores_3d'], pred2['scores_3d'], 0.1)) + if 'labels_3d' in pred1: + self.assertTrue( + np.allclose(pred1['labels_3d'], pred2['labels_3d'])) + + def test_call(self): + if not torch.cuda.is_available(): + return + # single point cloud + inputs = dict(points='tests/data/kitti/training/velodyne/000000.bin') + res_path = self.inferencer(inputs, return_vis=True) + # ndarray + pts_bytes = mmengine.fileio.get(inputs['points']) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, 4) + points = points[:, :4] + inputs = dict(points=points) + res_ndarray = self.inferencer(inputs, return_vis=True) + self.assert_predictions_equal(res_path['predictions'], + res_ndarray['predictions']) + self.assertIn('visualization', res_path) + self.assertIn('visualization', res_ndarray) + + # multiple point clouds + inputs = [ + dict(points='tests/data/kitti/training/velodyne/000000.bin'), + dict(points='tests/data/kitti/training/velodyne/000000.bin') + ] + res_path = self.inferencer(inputs, return_vis=True) + # list of ndarray + all_points = [] + for p in inputs: + pts_bytes = mmengine.fileio.get(p['points']) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, 4) + all_points.append(dict(points=points)) + res_ndarray = self.inferencer(all_points, return_vis=True) + self.assert_predictions_equal(res_path['predictions'], + res_ndarray['predictions']) + self.assertIn('visualization', res_path) + self.assertIn('visualization', res_ndarray) + + # point cloud dir, test different batch sizes + pc_dir = dict(points='tests/data/kitti/training/velodyne/') + res_bs2 = self.inferencer(pc_dir, batch_size=2, return_vis=True) + self.assertIn('visualization', res_bs2) + self.assertIn('predictions', res_bs2) + + def test_visualize(self): + if not torch.cuda.is_available(): + return + inputs = dict(points='tests/data/kitti/training/velodyne/000000.bin'), + # img_out_dir + with tempfile.TemporaryDirectory() as tmp_dir: + self.inferencer(inputs, img_out_dir=tmp_dir) + # TODO: For LiDAR-based detection, the saved image only exists when + # show=True. 
+ # self.assertTrue(osp.exists(osp.join(tmp_dir, '000000.png'))) + + def test_postprocess(self): + if not torch.cuda.is_available(): + return + # return_datasample + inputs = dict(points='tests/data/kitti/training/velodyne/000000.bin') + res = self.inferencer(inputs, return_datasamples=True) + self.assertTrue(is_list_of(res['predictions'], Det3DDataSample)) + + # pred_out_file + with tempfile.TemporaryDirectory() as tmp_dir: + pred_out_file = osp.join(tmp_dir, 'tmp.json') + res = self.inferencer( + inputs, print_result=True, pred_out_file=pred_out_file) + dumped_res = mmengine.load(pred_out_file) + self.assert_predictions_equal(res['predictions'], + dumped_res['predictions']) diff --git a/tests/test_apis/test_inferencers/test_lidar_seg3d_inferencer.py b/tests/test_apis/test_inferencers/test_lidar_seg3d_inferencer.py new file mode 100755 index 0000000..df49fa7 --- /dev/null +++ b/tests/test_apis/test_inferencers/test_lidar_seg3d_inferencer.py @@ -0,0 +1,105 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import tempfile +from unittest import TestCase + +import mmengine +import numpy as np +import torch +from mmengine.utils import is_list_of + +from mmdet3d.apis import LidarSeg3DInferencer +from mmdet3d.structures import Det3DDataSample + + +class TestLiDARSeg3DInferencer(TestCase): + + def setUp(self): + # init from alias + self.inferencer = LidarSeg3DInferencer('pointnet2-ssg_s3dis-seg') + + def test_init(self): + # init from metafile + LidarSeg3DInferencer('pointnet2-ssg_s3dis-seg') + # init from cfg + LidarSeg3DInferencer( + 'configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py', + 'https://download.openmmlab.com/mmdetection3d/v0.1.0_models/pointnet2/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class_20210514_144205-995d0119.pth' # noqa + ) + + def assert_predictions_equal(self, preds1, preds2): + for pred1, pred2 in zip(preds1, preds2): + self.assertTrue( + np.allclose(pred1['pts_semantic_mask'], + pred2['pts_semantic_mask'])) + + def test_call(self): + if not torch.cuda.is_available(): + return + # single point cloud + inputs = dict(points='tests/data/s3dis/points/Area_1_office_2.bin') + torch.manual_seed(0) + res_path = self.inferencer(inputs, return_vis=True) + # ndarray + pts_bytes = mmengine.fileio.get(inputs['points']) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, 6) + inputs = dict(points=points) + torch.manual_seed(0) + res_ndarray = self.inferencer(inputs, return_vis=True) + self.assert_predictions_equal(res_path['predictions'], + res_ndarray['predictions']) + self.assertIn('visualization', res_path) + self.assertIn('visualization', res_ndarray) + + # multiple point clouds + inputs = [ + dict(points='tests/data/s3dis/points/Area_1_office_2.bin'), + dict(points='tests/data/s3dis/points/Area_1_office_2.bin') + ] + torch.manual_seed(0) + res_path = self.inferencer(inputs, return_vis=True) + # list of ndarray + all_points = [] + for p in inputs: + pts_bytes = mmengine.fileio.get(p['points']) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, 6) + all_points.append(dict(points=points)) + torch.manual_seed(0) + res_ndarray = self.inferencer(all_points, return_vis=True) + self.assert_predictions_equal(res_path['predictions'], + res_ndarray['predictions']) + self.assertIn('visualization', res_path) + self.assertIn('visualization', res_ndarray) + + # point cloud dir, test different batch sizes + pc_dir = 
dict(points='tests/data/s3dis/points/') + res_bs2 = self.inferencer(pc_dir, batch_size=2, return_vis=True) + self.assertIn('visualization', res_bs2) + self.assertIn('predictions', res_bs2) + + def test_visualizer(self): + if not torch.cuda.is_available(): + return + inputs = dict(points='tests/data/s3dis/points/Area_1_office_2.bin') + # img_out_dir + with tempfile.TemporaryDirectory() as tmp_dir: + self.inferencer(inputs, img_out_dir=tmp_dir) + + def test_post_processor(self): + if not torch.cuda.is_available(): + return + # return_datasample + inputs = dict(points='tests/data/s3dis/points/Area_1_office_2.bin') + res = self.inferencer(inputs, return_datasamples=True) + self.assertTrue(is_list_of(res['predictions'], Det3DDataSample)) + + # pred_out_file + with tempfile.TemporaryDirectory() as tmp_dir: + pred_out_file = osp.join(tmp_dir, 'tmp.json') + res = self.inferencer( + inputs, print_result=True, pred_out_file=pred_out_file) + dumped_res = mmengine.load(pred_out_file) + self.assert_predictions_equal(res['predictions'], + dumped_res['predictions']) diff --git a/tests/test_apis/test_inferencers/test_mono_det3d_inferencer.py b/tests/test_apis/test_inferencers/test_mono_det3d_inferencer.py new file mode 100755 index 0000000..71df09f --- /dev/null +++ b/tests/test_apis/test_inferencers/test_mono_det3d_inferencer.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import tempfile +from unittest import TestCase + +import mmcv +import mmengine +import numpy as np +from mmengine.utils import is_list_of +from parameterized import parameterized + +from mmdet3d.apis import MonoDet3DInferencer +from mmdet3d.structures import Det3DDataSample + + +class TestMonoDet3DInferencer(TestCase): + + def test_init(self): + # init from metafile + MonoDet3DInferencer('pgd_kitti') + # init from cfg + MonoDet3DInferencer( + 'configs/pgd/pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py', + 'https://download.openmmlab.com/mmdetection3d/v1.0.0_models/pgd/' + 'pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d/' + 'pgd_r101_caffe_fpn_gn-head_3x4_4x_kitti-mono3d_' + '20211022_102608-8a97533b.pth') + + def assert_predictions_equal(self, preds1, preds2): + for pred1, pred2 in zip(preds1, preds2): + if 'bboxes_3d' in pred1: + self.assertTrue( + np.allclose(pred1['bboxes_3d'], pred2['bboxes_3d'], 0.1)) + if 'scores_3d' in pred1: + self.assertTrue( + np.allclose(pred1['scores_3d'], pred2['scores_3d'], 0.1)) + if 'labels_3d' in pred1: + self.assertTrue( + np.allclose(pred1['labels_3d'], pred2['labels_3d'])) + + @parameterized.expand(['pgd_kitti']) + def test_call(self, model): + # single img + img_path = 'demo/data/kitti/000008.png' + calib_path = 'demo/data/kitti/000008.txt' + inferencer = MonoDet3DInferencer(model) + inputs = dict(img=img_path, calib=calib_path) + res_path = inferencer(inputs, return_vis=True) + # ndarray + img = mmcv.imread(img_path) + inputs = dict(img=img, calib=calib_path) + res_ndarray = inferencer(inputs, return_vis=True) + self.assert_predictions_equal(res_path['predictions'], + res_ndarray['predictions']) + self.assertIn('visualization', res_path) + self.assertIn('visualization', res_ndarray) + + # multiple images + inputs = [ + dict( + img='demo/data/kitti/000008.png', + calib='demo/data/kitti/000008.txt'), + dict( + img='demo/data/kitti/000008.png', + calib='demo/data/kitti/000008.txt') + ] + res_path = inferencer(inputs, return_vis=True) + # list of ndarray + imgs = [mmcv.imread(p['img']) for p in inputs] + inputs[0]['img'] = imgs[0] + inputs[1]['img'] = 
imgs[1] + res_ndarray = inferencer(inputs, return_vis=True) + self.assert_predictions_equal(res_path['predictions'], + res_ndarray['predictions']) + self.assertIn('visualization', res_path) + self.assertIn('visualization', res_ndarray) + + @parameterized.expand(['pgd_kitti']) + def test_visualize(self, model): + inputs = [ + dict( + img='demo/data/kitti/000008.png', + calib='demo/data/kitti/000008.txt'), + dict( + img='demo/data/kitti/000008.png', + calib='demo/data/kitti/000008.txt') + ] + inferencer = MonoDet3DInferencer(model) + # img_out_dir + with tempfile.TemporaryDirectory() as tmp_dir: + inferencer(inputs, img_out_dir=tmp_dir) + for img_dir in ['000008.png', '000008.png']: + self.assertTrue(osp.exists(osp.join(tmp_dir, img_dir))) + + @parameterized.expand(['pgd_kitti']) + def test_postprocess(self, model): + # return_datasample + img_path = 'demo/data/kitti/000008.png' + calib_path = 'demo/data/kitti/000008.txt' + inputs = dict(img=img_path, calib=calib_path) + inferencer = MonoDet3DInferencer(model) + res = inferencer(inputs, return_datasamples=True) + self.assertTrue(is_list_of(res['predictions'], Det3DDataSample)) + + # pred_out_file + with tempfile.TemporaryDirectory() as tmp_dir: + pred_out_file = osp.join(tmp_dir, 'tmp.json') + res = inferencer( + inputs, print_result=True, pred_out_file=pred_out_file) + dumped_res = mmengine.load(pred_out_file) + self.assert_predictions_equal(res['predictions'], + dumped_res['predictions']) diff --git a/tests/test_apis/test_inferencers/test_multi_modality_det3d_inferencer.py b/tests/test_apis/test_inferencers/test_multi_modality_det3d_inferencer.py new file mode 100755 index 0000000..e0be781 --- /dev/null +++ b/tests/test_apis/test_inferencers/test_multi_modality_det3d_inferencer.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +import tempfile +from unittest import TestCase + +import mmcv +import mmengine +import numpy as np +import torch +from mmengine.utils import is_list_of + +from mmdet3d.apis import MultiModalityDet3DInferencer +from mmdet3d.structures import Det3DDataSample + + +class TestMultiModalityDet3DInferencer(TestCase): + + def setUp(self): + # init from alias + self.inferencer = MultiModalityDet3DInferencer('mvxnet_kitti-3class') + + def test_init(self): + # init from metafile + MultiModalityDet3DInferencer('mvxnet_kitti-3class') + # init from cfg + MultiModalityDet3DInferencer( + 'configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py', # noqa + weights= # noqa + 'https://download.openmmlab.com/mmdetection3d/v1.0.0_models/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class_20210831_060805-83442923.pth' # noqa + ) + + def assert_predictions_equal(self, preds1, preds2): + for pred1, pred2 in zip(preds1, preds2): + if 'bboxes_3d' in pred1: + self.assertTrue( + np.allclose(pred1['bboxes_3d'], pred2['bboxes_3d'], 0.1)) + if 'scores_3d' in pred1: + self.assertTrue( + np.allclose(pred1['scores_3d'], pred2['scores_3d'], 0.1)) + if 'labels_3d' in pred1: + self.assertTrue( + np.allclose(pred1['labels_3d'], pred2['labels_3d'])) + + def test_call(self): + if not torch.cuda.is_available(): + return + calib_path = 'tests/data/kitti/training/calib/000000.pkl' + points_path = 'tests/data/kitti/training/velodyne/000000.bin' + img_path = 'tests/data/kitti/training/image_2/000000.png' + # single img & point cloud + inputs = dict(points=points_path, img=img_path, calib=calib_path) + res_path = self.inferencer(inputs, return_vis=True) + + # ndarray + pts_bytes = mmengine.fileio.get(inputs['points']) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, 4) + points = points[:, :4] + img = mmcv.imread(inputs['img']) + inputs = dict(points=points, img=img, calib=calib_path) + res_ndarray = self.inferencer(inputs, return_vis=True) + self.assert_predictions_equal(res_path['predictions'], + res_ndarray['predictions']) + self.assertIn('visualization', res_path) + self.assertIn('visualization', res_ndarray) + + # multiple imgs & point clouds + inputs = [ + dict(points=points_path, img=img_path, calib=calib_path), + dict(points=points_path, img=img_path, calib=calib_path) + ] + res_path = self.inferencer(inputs, return_vis=True) + # list of ndarray + all_inputs = [] + for p in inputs: + pts_bytes = mmengine.fileio.get(p['points']) + points = np.frombuffer(pts_bytes, dtype=np.float32) + points = points.reshape(-1, 4) + img = mmcv.imread(p['img']) + all_inputs.append(dict(points=points, img=img, calib=p['calib'])) + + res_ndarray = self.inferencer(all_inputs, return_vis=True) + self.assert_predictions_equal(res_path['predictions'], + res_ndarray['predictions']) + self.assertIn('visualization', res_path) + self.assertIn('visualization', res_ndarray) + + def test_visualize(self): + if not torch.cuda.is_available(): + return + inputs = dict( + points='tests/data/kitti/training/velodyne/000000.bin', + img='tests/data/kitti/training/image_2/000000.png', + calib='tests/data/kitti/training/calib/000000.pkl'), + # img_out_dir + with tempfile.TemporaryDirectory() as tmp_dir: + self.inferencer(inputs, img_out_dir=tmp_dir) + # TODO: For results of LiDAR-based detection, the saved image only + # exists when show=True. 
+ # self.assertTrue(osp.exists(osp.join(tmp_dir, '000000.png'))) + + def test_postprocess(self): + if not torch.cuda.is_available(): + return + # return_datasample + inputs = dict( + points='tests/data/kitti/training/velodyne/000000.bin', + img='tests/data/kitti/training/image_2/000000.png', + calib='tests/data/kitti/training/calib/000000.pkl') + res = self.inferencer(inputs, return_datasamples=True) + self.assertTrue(is_list_of(res['predictions'], Det3DDataSample)) + + # pred_out_file + with tempfile.TemporaryDirectory() as tmp_dir: + pred_out_file = osp.join(tmp_dir, 'tmp.json') + res = self.inferencer( + inputs, print_result=True, pred_out_file=pred_out_file) + dumped_res = mmengine.load(pred_out_file) + self.assert_predictions_equal(res['predictions'], + dumped_res['predictions']) diff --git a/tests/test_datasets/test_dataset_wrappers.py b/tests/test_datasets/test_dataset_wrappers.py new file mode 100755 index 0000000..7fd8eea --- /dev/null +++ b/tests/test_datasets/test_dataset_wrappers.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import numpy as np +import pytest +from mmcv.transforms.base import BaseTransform +from mmengine.structures import InstanceData + +from mmdet3d.datasets import CBGSDataset, NuScenesDataset +from mmdet3d.registry import DATASETS, TRANSFORMS +from mmdet3d.structures import Det3DDataSample + + +def is_equal(dict_a, dict_b): + for key in dict_a: + if key not in dict_b: + return False + if isinstance(dict_a[key], dict): + return is_equal(dict_a[key], dict_b[key]) + elif isinstance(dict_a[key], np.ndarray): + if not (dict_a[key] == dict_b[key]).any(): + return False + else: + if not (dict_a[key] == dict_b[key]): + return False + return True + + +@TRANSFORMS.register_module() +class Identity(BaseTransform): + + def transform(self, info): + packed_input = dict(data_samples=Det3DDataSample()) + if 'ann_info' in info: + packed_input['data_samples'].gt_instances_3d = InstanceData() + packed_input['data_samples'].gt_instances_3d.labels_3d = info[ + 'ann_info']['gt_labels_3d'] + return packed_input + + +@DATASETS.register_module() +class CustomDataset(NuScenesDataset): + pass + + +class TestCBGSDataset: + + def setup(self): + dataset = NuScenesDataset + self.dataset = dataset( + data_root=osp.join(osp.dirname(__file__), '../data/nuscenes'), + ann_file='nus_info.pkl', + data_prefix=dict( + pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP'), + pipeline=[dict(type=Identity)]) + + self.sample_indices = [0, 0, 1, 1, 1] + # test init + self.cbgs_datasets = CBGSDataset(dataset=self.dataset) + self.cbgs_datasets.sample_indices = self.sample_indices + + def test_init(self): + # Test build dataset from cfg + dataset_cfg = dict( + type=CustomDataset, + data_root=osp.join(osp.dirname(__file__), '../data/nuscenes'), + ann_file='nus_info.pkl', + data_prefix=dict( + pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP'), + pipeline=[dict(type=Identity)]) + cbgs_datasets = CBGSDataset(dataset=dataset_cfg) + cbgs_datasets.sample_indices = self.sample_indices + cbgs_datasets.dataset.pipeline = self.dataset.pipeline + assert len(cbgs_datasets) == len(self.cbgs_datasets) + for i in range(len(cbgs_datasets)): + assert is_equal( + cbgs_datasets.get_data_info(i), + self.cbgs_datasets.get_data_info(i)) + assert (cbgs_datasets[i]['data_samples'].gt_instances_3d.labels_3d + == self.cbgs_datasets[i] + ['data_samples'].gt_instances_3d.labels_3d).any() + + with pytest.raises(TypeError): + CBGSDataset(dataset=[0]) + + def 
test_full_init(self): + self.cbgs_datasets.full_init() + self.cbgs_datasets.sample_indices = self.sample_indices + assert len(self.cbgs_datasets) == len(self.sample_indices) + # Reinit `sample_indices` + self.cbgs_datasets._fully_initialized = False + self.cbgs_datasets.sample_indices = self.sample_indices + assert len(self.cbgs_datasets) != len(self.sample_indices) + + with pytest.raises(NotImplementedError): + self.cbgs_datasets.get_subset_(1) + + with pytest.raises(NotImplementedError): + self.cbgs_datasets.get_subset(1) + + def test_metainfo(self): + assert self.cbgs_datasets.metainfo == self.dataset.metainfo + + def test_length(self): + assert len(self.cbgs_datasets) == len(self.sample_indices) + + def test_getitem(self): + for i in range(len(self.sample_indices)): + assert (self.cbgs_datasets[i]['data_samples'].gt_instances_3d. + labels_3d == self.dataset[self.sample_indices[i]] + ['data_samples'].gt_instances_3d.labels_3d).any() + + def test_get_data_info(self): + for i in range(len(self.sample_indices)): + assert is_equal( + self.cbgs_datasets.get_data_info(i), + self.dataset.get_data_info(self.sample_indices[i])) + + def test_get_cat_ids(self): + for i in range(len(self.sample_indices)): + assert self.cbgs_datasets.get_cat_ids( + i) == self.dataset.get_cat_ids(self.sample_indices[i]) diff --git a/tests/test_datasets/test_kitti_dataset.py b/tests/test_datasets/test_kitti_dataset.py new file mode 100755 index 0000000..ea7488d --- /dev/null +++ b/tests/test_datasets/test_kitti_dataset.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import numpy as np +import torch +from mmcv.transforms.base import BaseTransform +from mmengine.registry import TRANSFORMS +from mmengine.structures import InstanceData + +from mmdet3d.datasets import KittiDataset +from mmdet3d.structures import Det3DDataSample, LiDARInstance3DBoxes + + +def _generate_kitti_dataset_config(): + data_root = 'tests/data/kitti' + ann_file = 'kitti_infos_train.pkl' + classes = ['Pedestrian', 'Cyclist', 'Car'] + # wait for pipline refactor + + if 'Identity' not in TRANSFORMS: + + @TRANSFORMS.register_module() + class Identity(BaseTransform): + + def transform(self, info): + if 'ann_info' in info: + info['gt_labels_3d'] = info['ann_info']['gt_labels_3d'] + data_sample = Det3DDataSample() + gt_instances_3d = InstanceData() + gt_instances_3d.labels_3d = info['gt_labels_3d'] + data_sample.gt_instances_3d = gt_instances_3d + info['data_samples'] = data_sample + return info + + pipeline = [ + dict(type='Identity'), + ] + + modality = dict(use_lidar=True, use_camera=False) + data_prefix = dict(pts='training/velodyne_reduced', img='training/image_2') + return data_root, ann_file, classes, data_prefix, pipeline, modality + + +def test_getitem(): + np.random.seed(0) + data_root, ann_file, classes, data_prefix, \ + pipeline, modality, = _generate_kitti_dataset_config() + modality['use_camera'] = True + + kitti_dataset = KittiDataset( + data_root, + ann_file, + data_prefix=dict( + pts='training/velodyne_reduced', + img='training/image_2', + ), + pipeline=pipeline, + metainfo=dict(classes=classes), + modality=modality) + + kitti_dataset.prepare_data(0) + input_dict = kitti_dataset.get_data_info(0) + kitti_dataset[0] + # assert the the path should contains data_prefix and data_root + assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path'] + assert data_root in input_dict['lidar_points']['lidar_path'] + for cam_id, img_info in input_dict['images'].items(): + if 'img_path' in img_info: + assert 
data_prefix['img'] in img_info['img_path'] + assert data_root in img_info['img_path'] + + ann_info = kitti_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + assert 'instances' in ann_info + + # only one instance + assert 'gt_labels_3d' in ann_info + assert ann_info['gt_labels_3d'].dtype == np.int64 + + assert 'gt_bboxes_3d' in ann_info + assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes) + assert torch.allclose(ann_info['gt_bboxes_3d'].tensor.sum(), + torch.tensor(7.2650)) + assert 'centers_2d' in ann_info + assert ann_info['centers_2d'].dtype == np.float32 + assert 'depths' in ann_info + assert ann_info['depths'].dtype == np.float32 + + car_kitti_dataset = KittiDataset( + data_root, + ann_file, + data_prefix=dict( + pts='training/velodyne_reduced', + img='training/image_2', + ), + pipeline=pipeline, + metainfo=dict(classes=['Car']), + modality=modality) + + input_dict = car_kitti_dataset.get_data_info(0) + ann_info = car_kitti_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + assert 'instances' in ann_info + assert ann_info['gt_labels_3d'].dtype == np.int64 + # all instance have been filtered by classes + assert len(ann_info['gt_labels_3d']) == 0 + assert len(car_kitti_dataset.metainfo['classes']) == 1 diff --git a/tests/test_datasets/test_lyft_dataset.py b/tests/test_datasets/test_lyft_dataset.py new file mode 100755 index 0000000..b99b718 --- /dev/null +++ b/tests/test_datasets/test_lyft_dataset.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmcv.transforms.base import BaseTransform +from mmengine.registry import TRANSFORMS +from mmengine.structures import InstanceData + +from mmdet3d.datasets import LyftDataset +from mmdet3d.structures import Det3DDataSample, LiDARInstance3DBoxes + + +def _generate_nus_dataset_config(): + data_root = 'tests/data/lyft' + ann_file = 'lyft_infos.pkl' + classes = [ + 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', + 'motorcycle', 'bicycle', 'pedestrian', 'animal' + ] + if 'Identity' not in TRANSFORMS: + + @TRANSFORMS.register_module() + class Identity(BaseTransform): + + def transform(self, info): + packed_input = dict(data_samples=Det3DDataSample()) + if 'ann_info' in info: + packed_input[ + 'data_samples'].gt_instances_3d = InstanceData() + packed_input[ + 'data_samples'].gt_instances_3d.labels_3d = info[ + 'ann_info']['gt_labels_3d'] + return packed_input + + pipeline = [ + dict(type='Identity'), + ] + modality = dict(use_lidar=True, use_camera=False) + data_prefix = dict(pts='lidar', img='', sweeps='sweeps/LIDAR_TOP') + return data_root, ann_file, classes, data_prefix, pipeline, modality + + +def test_getitem(): + np.random.seed(0) + data_root, ann_file, classes, data_prefix, pipeline, modality = \ + _generate_nus_dataset_config() + + lyft_dataset = LyftDataset( + data_root, + ann_file, + data_prefix=data_prefix, + pipeline=pipeline, + metainfo=dict(classes=classes), + modality=modality) + + lyft_dataset.prepare_data(0) + input_dict = lyft_dataset.get_data_info(0) + # assert the the path should contains data_prefix and data_root + assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path'] + assert data_root in input_dict['lidar_points']['lidar_path'] + + ann_info = lyft_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + assert 'gt_labels_3d' in ann_info + assert ann_info['gt_labels_3d'].dtype == np.int64 + assert len(ann_info['gt_labels_3d']) == 3 + + assert 
'gt_bboxes_3d' in ann_info + assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes) + + assert len(lyft_dataset.metainfo['classes']) == 9 diff --git a/tests/test_datasets/test_nuscenes_dataset.py b/tests/test_datasets/test_nuscenes_dataset.py new file mode 100755 index 0000000..4b85f34 --- /dev/null +++ b/tests/test_datasets/test_nuscenes_dataset.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmcv.transforms.base import BaseTransform +from mmengine.registry import TRANSFORMS +from mmengine.structures import InstanceData + +from mmdet3d.datasets import NuScenesDataset +from mmdet3d.structures import Det3DDataSample, LiDARInstance3DBoxes + + +def _generate_nus_dataset_config(): + data_root = 'tests/data/nuscenes' + ann_file = 'nus_info.pkl' + classes = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' + ] + if 'Identity' not in TRANSFORMS: + + @TRANSFORMS.register_module() + class Identity(BaseTransform): + + def transform(self, info): + packed_input = dict(data_samples=Det3DDataSample()) + if 'ann_info' in info: + packed_input[ + 'data_samples'].gt_instances_3d = InstanceData() + packed_input[ + 'data_samples'].gt_instances_3d.labels_3d = info[ + 'ann_info']['gt_labels_3d'] + return packed_input + + pipeline = [ + dict(type='Identity'), + ] + modality = dict(use_lidar=True, use_camera=True) + data_prefix = dict( + pts='samples/LIDAR_TOP', + img='samples/CAM_BACK_LEFT', + sweeps='sweeps/LIDAR_TOP') + return data_root, ann_file, classes, data_prefix, pipeline, modality + + +def test_getitem(): + np.random.seed(0) + data_root, ann_file, classes, data_prefix, pipeline, modality = \ + _generate_nus_dataset_config() + + nus_dataset = NuScenesDataset( + data_root=data_root, + ann_file=ann_file, + data_prefix=data_prefix, + pipeline=pipeline, + metainfo=dict(classes=classes), + modality=modality) + + nus_dataset.prepare_data(0) + input_dict = nus_dataset.get_data_info(0) + # assert the the path should contains data_prefix and data_root + assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path'] + assert data_root in input_dict['lidar_points']['lidar_path'] + + for cam_id, img_info in input_dict['images'].items(): + if 'img_path' in img_info: + assert data_prefix['img'] in img_info['img_path'] + assert data_root in img_info['img_path'] + + ann_info = nus_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + assert 'gt_labels_3d' in ann_info + assert ann_info['gt_labels_3d'].dtype == np.int64 + assert len(ann_info['gt_labels_3d']) == 37 + + assert 'gt_bboxes_3d' in ann_info + assert isinstance(ann_info['gt_bboxes_3d'], LiDARInstance3DBoxes) + + assert len(nus_dataset.metainfo['classes']) == 10 + + assert input_dict['token'] == 'fd8420396768425eabec9bdddf7e64b6' + assert input_dict['timestamp'] == 1533201470.448696 diff --git a/tests/test_datasets/test_s3dis_dataset.py b/tests/test_datasets/test_s3dis_dataset.py new file mode 100755 index 0000000..542953a --- /dev/null +++ b/tests/test_datasets/test_s3dis_dataset.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest + +import numpy as np +import torch +from mmengine.testing import assert_allclose + +from mmdet3d.datasets import S3DISDataset, S3DISSegDataset +from mmdet3d.structures import DepthInstance3DBoxes +from mmdet3d.utils import register_all_modules + + +def _generate_s3dis_seg_dataset_config(): + data_root = './tests/data/s3dis/' + ann_file = 's3dis_infos.pkl' + classes = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter') + palette = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0], + [255, 0, 255], [100, 100, 255], [200, 200, 100], + [170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100], + [200, 200, 200], [50, 50, 50]] + scene_idxs = [0 for _ in range(20)] + modality = dict(use_lidar=True, use_camera=False) + pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict(type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=5, + block_size=1.0, + ignore_index=len(classes), + use_normalized_coord=True, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) + ] + + data_prefix = dict( + pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask') + + return (data_root, ann_file, classes, palette, scene_idxs, data_prefix, + pipeline, modality) + + +def _generate_s3dis_dataset_config(): + data_root = 'tests/data/s3dis' + ann_file = 's3dis_infos.pkl' + classes = ('table', 'chair', 'sofa', 'bookcase', 'board') + modality = dict(use_lidar=True, use_camera=False) + pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=5), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=1.0, + flip_ratio_bev_vertical=1.0), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0]), + dict(type='NormalizePointsColor', color_mean=None), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'pts_semantic_mask', 'gt_bboxes_3d', 'gt_labels_3d', + 'pts_instance_mask' + ]) + ] + data_prefix = dict( + pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask') + return data_root, ann_file, classes, data_prefix, pipeline, modality + + +class TestS3DISDataset(unittest.TestCase): + + def test_s3dis(self): + np.random.seed(0) + data_root, ann_file, classes, data_prefix, \ + pipeline, modality = _generate_s3dis_dataset_config() + register_all_modules() + s3dis_dataset = S3DISDataset( + data_root, + ann_file, + data_prefix=data_prefix, + pipeline=pipeline, + metainfo=dict(classes=classes), + modality=modality) + + s3dis_dataset.prepare_data(0) + input_dict = s3dis_dataset.get_data_info(0) + s3dis_dataset[0] + # assert the path should contains data_prefix and data_root + self.assertIn(data_prefix['pts'], + input_dict['lidar_points']['lidar_path']) + self.assertIn(data_root, input_dict['lidar_points']['lidar_path']) + + ann_info = 
s3dis_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + except_label = np.array([1, 1, 3, 1, 2, 0, 0, 0, 3]) + + self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64) + assert_allclose(ann_info['gt_labels_3d'], except_label) + self.assertIsInstance(ann_info['gt_bboxes_3d'], DepthInstance3DBoxes) + assert len(ann_info['gt_bboxes_3d']) == 9 + assert torch.allclose(ann_info['gt_bboxes_3d'].tensor.sum(), + torch.tensor([63.0455])) + + no_class_s3dis_dataset = S3DISDataset( + data_root, ann_file, metainfo=dict(classes=['table'])) + + input_dict = no_class_s3dis_dataset.get_data_info(0) + ann_info = no_class_s3dis_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + self.assertIn('gt_labels_3d', ann_info) + # assert mapping to -1 or 1 + assert (ann_info['gt_labels_3d'] <= 0).all() + self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64) + # all instance have been filtered by classes + self.assertEqual(len(ann_info['gt_labels_3d']), 9) + self.assertEqual(len(no_class_s3dis_dataset.metainfo['classes']), 1) + + def test_s3dis_seg(self): + data_root, ann_file, classes, palette, scene_idxs, data_prefix, \ + pipeline, modality, = _generate_s3dis_seg_dataset_config() + + register_all_modules() + np.random.seed(0) + + s3dis_seg_dataset = S3DISSegDataset( + data_root, + ann_file, + metainfo=dict(classes=classes, palette=palette), + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + scene_idxs=scene_idxs) + + input_dict = s3dis_seg_dataset.prepare_data(0) + + points = input_dict['inputs']['points'] + data_sample = input_dict['data_samples'] + pts_semantic_mask = data_sample.gt_pts_seg.pts_semantic_mask + + expected_points = torch.tensor([[ + 0.0000, 0.0000, 3.1720, 0.4706, 0.4431, 0.3725, 0.4624, 0.7502, + 0.9543 + ], + [ + 0.2880, -0.5900, 0.0650, 0.3451, + 0.3373, 0.3490, 0.5119, 0.5518, + 0.0196 + ], + [ + 0.1570, 0.6000, 3.1700, 0.4941, + 0.4667, 0.3569, 0.4893, 0.9519, + 0.9537 + ], + [ + -0.1320, 0.3950, 0.2720, 0.3216, + 0.2863, 0.2275, 0.4397, 0.8830, + 0.0818 + ], + [ + -0.4860, -0.0640, 3.1710, 0.3843, + 0.3725, 0.3059, 0.3789, 0.7286, + 0.9540 + ]]) + + expected_pts_semantic_mask = np.array([0, 1, 0, 8, 0]) + + assert torch.allclose(points, expected_points, 1e-2) + self.assertTrue( + (pts_semantic_mask.numpy() == expected_pts_semantic_mask).all()) diff --git a/tests/test_datasets/test_scannet_dataset.py b/tests/test_datasets/test_scannet_dataset.py new file mode 100755 index 0000000..fdbf213 --- /dev/null +++ b/tests/test_datasets/test_scannet_dataset.py @@ -0,0 +1,229 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest + +import numpy as np +import torch +from mmengine.testing import assert_allclose + +from mmdet3d.datasets import ScanNetDataset, ScanNetSegDataset +from mmdet3d.structures import DepthInstance3DBoxes +from mmdet3d.utils import register_all_modules + + +def _generate_scannet_seg_dataset_config(): + data_root = './tests/data/scannet/' + ann_file = 'scannet_infos.pkl' + classes = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', + 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', + 'bathtub', 'otherfurniture') + palette = [ + [174, 199, 232], + [152, 223, 138], + [31, 119, 180], + [255, 187, 120], + [188, 189, 34], + [140, 86, 75], + [255, 152, 150], + [214, 39, 40], + [197, 176, 213], + [148, 103, 189], + [196, 156, 148], + [23, 190, 207], + [247, 182, 210], + [219, 219, 141], + [255, 127, 14], + [158, 218, 229], + [44, 160, 44], + [112, 128, 144], + [227, 119, 194], + [82, 84, 163], + ] + scene_idxs = [0] + modality = dict(use_lidar=True, use_camera=False) + pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict(type='PointSegClassMapping'), + dict( + type='IndoorPatchPointSample', + num_points=5, + block_size=1.5, + ignore_index=len(classes), + use_normalized_coord=True, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) + ] + + data_prefix = dict( + pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask') + return (data_root, ann_file, classes, palette, scene_idxs, data_prefix, + pipeline, modality) + + +def _generate_scannet_dataset_config(): + data_root = 'tests/data/scannet' + ann_file = 'scannet_infos.pkl' + classes = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin') + modality = dict(use_lidar=True, use_camera=False) + pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True), + dict(type='GlobalAlignment', rotation_axis=2), + dict(type='PointSegClassMapping'), + dict(type='PointSample', num_points=5), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=1.0, + flip_ratio_bev_vertical=1.0), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0], + shift_height=True), + dict( + type='Pack3DDetInputs', + keys=[ + 'points', 'pts_semantic_mask', 'gt_bboxes_3d', 'gt_labels_3d', + 'pts_instance_mask' + ]) + ] + data_prefix = dict( + pts='points', + pts_instance_mask='instance_mask', + pts_semantic_mask='semantic_mask') + return data_root, ann_file, classes, data_prefix, pipeline, modality + + +class TestScanNetDataset(unittest.TestCase): + + def test_scannet(self): + np.random.seed(0) + data_root, ann_file, classes, data_prefix, \ + pipeline, modality, = _generate_scannet_dataset_config() + register_all_modules() + scannet_dataset = ScanNetDataset( + data_root, + ann_file, + data_prefix=data_prefix, + 
pipeline=pipeline, + metainfo=dict(classes=classes), + modality=modality) + + scannet_dataset.prepare_data(0) + input_dict = scannet_dataset.get_data_info(0) + scannet_dataset[0] + # assert the the path should contains data_prefix and data_root + self.assertIn(data_prefix['pts'], + input_dict['lidar_points']['lidar_path']) + self.assertIn(data_root, input_dict['lidar_points']['lidar_path']) + + ann_info = scannet_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + except_label = np.array([ + 6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, + 0, 0, 0, 0, 0, 5, 5, 5 + ]) + + self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64) + assert_allclose(ann_info['gt_labels_3d'], except_label) + self.assertIsInstance(ann_info['gt_bboxes_3d'], DepthInstance3DBoxes) + assert len(ann_info['gt_bboxes_3d']) == 27 + assert torch.allclose(ann_info['gt_bboxes_3d'].tensor.sum(), + torch.tensor([107.7353])) + + no_class_scannet_dataset = ScanNetDataset( + data_root, ann_file, metainfo=dict(classes=['cabinet'])) + + input_dict = no_class_scannet_dataset.get_data_info(0) + ann_info = no_class_scannet_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + self.assertIn('gt_labels_3d', ann_info) + # assert mapping to -1 or 1 + assert (ann_info['gt_labels_3d'] <= 0).all() + self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64) + # all instance have been filtered by classes + self.assertEqual(len(ann_info['gt_labels_3d']), 27) + self.assertEqual(len(no_class_scannet_dataset.metainfo['classes']), 1) + + def test_scannet_seg(self): + data_root, ann_file, classes, palette, scene_idxs, data_prefix, \ + pipeline, modality, = _generate_scannet_seg_dataset_config() + + register_all_modules() + np.random.seed(0) + scannet_seg_dataset = ScanNetSegDataset( + data_root, + ann_file, + metainfo=dict(classes=classes, palette=palette), + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality, + scene_idxs=scene_idxs) + + input_dict = scannet_seg_dataset.prepare_data(0) + + points = input_dict['inputs']['points'] + data_sample = input_dict['data_samples'] + pts_semantic_mask = data_sample.gt_pts_seg.pts_semantic_mask + + expected_points = torch.tensor([[ + 0.0000, 0.0000, 1.2427, 0.6118, 0.5529, 0.4471, -0.6462, -1.0046, + 0.4280 + ], + [ + 0.1553, -0.0074, 1.6077, 0.5882, + 0.6157, 0.5569, -0.6001, -1.0068, + 0.5537 + ], + [ + 0.1518, 0.6016, 0.6548, 0.1490, + 0.1059, 0.0431, -0.6012, -0.8309, + 0.2255 + ], + [ + -0.7494, 0.1033, 0.6756, 0.5216, + 0.4353, 0.3333, -0.8687, -0.9748, + 0.2327 + ], + [ + -0.6836, -0.0203, 0.5884, 0.5765, + 0.5020, 0.4510, -0.8491, -1.0105, + 0.2027 + ]]) + expected_pts_semantic_mask = np.array([13, 13, 12, 2, 0]) + + assert torch.allclose(points, expected_points, 1e-2) + self.assertTrue( + (pts_semantic_mask.numpy() == expected_pts_semantic_mask).all()) diff --git a/tests/test_datasets/test_semantickitti_dataset.py b/tests/test_datasets/test_semantickitti_dataset.py new file mode 100755 index 0000000..d334870 --- /dev/null +++ b/tests/test_datasets/test_semantickitti_dataset.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest + +import numpy as np + +from mmdet3d.datasets import SemanticKittiDataset +from mmdet3d.utils import register_all_modules + + +def _generate_semantickitti_dataset_config(): + data_root = './tests/data/semantickitti/' + ann_file = 'semantickitti_infos.pkl' + classes = ('car', 'bicycle', 'motorcycle', 'truck', 'bus', 'person', + 'bicyclist', 'motorcyclist', 'road', 'parking', 'sidewalk', + 'other-ground', 'building', 'fence', 'vegetation', 'trunck', + 'terrian', 'pole', 'traffic-sign') + + seg_label_mapping = { + 0: 19, # "unlabeled" + 1: 19, # "outlier" mapped to "unlabeled" --------------mapped + 10: 0, # "car" + 11: 1, # "bicycle" + 13: 4, # "bus" mapped to "other-vehicle" --------------mapped + 15: 2, # "motorcycle" + 16: 4, # "on-rails" mapped to "other-vehicle" ---------mapped + 18: 3, # "truck" + 20: 4, # "other-vehicle" + 30: 5, # "person" + 31: 6, # "bicyclist" + 32: 7, # "motorcyclist" + 40: 8, # "road" + 44: 9, # "parking" + 48: 10, # "sidewalk" + 49: 11, # "other-ground" + 50: 12, # "building" + 51: 13, # "fence" + 52: 19, # "other-structure" mapped to "unlabeled" ------mapped + 60: 8, # "lane-marking" to "road" ---------------------mapped + 70: 14, # "vegetation" + 71: 15, # "trunk" + 72: 16, # "terrain" + 80: 17, # "pole" + 81: 18, # "traffic-sign" + 99: 19, # "other-object" to "unlabeled" ----------------mapped + 252: 0, # "moving-car" to "car" ------------------------mapped + 253: 6, # "moving-bicyclist" to "bicyclist" ------------mapped + 254: 5, # "moving-person" to "person" ------------------mapped + 255: 7, # "moving-motorcyclist" to "motorcyclist" ------mapped + 256: 4, # "moving-on-rails" mapped to "other-vehic------mapped + 257: 4, # "moving-bus" mapped to "other-vehicle" -------mapped + 258: 3, # "moving-truck" to "truck" --------------------mapped + 259: 4 # "moving-other"-vehicle to "other-vehicle"-----mapped + } + max_label = 259 + modality = dict(use_lidar=True, use_camera=False) + pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + shift_height=True, + load_dim=4, + use_dim=[0, 1, 2]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + seg_3d_dtype='np.int32'), + dict(type='PointSegClassMapping'), + dict(type='Pack3DDetInputs', keys=['points', 'pts_semantic_mask']) + ] + + data_prefix = dict( + pts='sequences/00/velodyne', pts_semantic_mask='sequences/00/labels') + + return (data_root, ann_file, classes, data_prefix, pipeline, modality, + seg_label_mapping, max_label) + + +class TestSemanticKittiDataset(unittest.TestCase): + + def test_semantickitti(self): + (data_root, ann_file, classes, data_prefix, pipeline, modality, + seg_label_mapping, + max_label) = _generate_semantickitti_dataset_config() + + register_all_modules() + np.random.seed(0) + semantickitti_dataset = SemanticKittiDataset( + data_root, + ann_file, + metainfo=dict( + classes=classes, + seg_label_mapping=seg_label_mapping, + max_label=max_label), + data_prefix=data_prefix, + pipeline=pipeline, + modality=modality) + + input_dict = semantickitti_dataset.prepare_data(0) + + points = input_dict['inputs']['points'] + data_sample = input_dict['data_samples'] + pts_semantic_mask = data_sample.gt_pts_seg.pts_semantic_mask + self.assertEqual(points.shape[0], pts_semantic_mask.shape[0]) + + expected_pts_semantic_mask = np.array([ + 12, 12, 12, 14, 14, 12, 19, 12, 14, 12, 12, 14, 15, 19, 14, 12, 12, + 12, 12, 19, 12, 12, 12, 12, 12, 14, 12, 15, 12, 14, 14, 17, 12, 14, + 14, 14, 15, 14, 12, 12, 
14, 12, 17, 14, 12, 14, 12, 14, 14, 12 + ]) + + self.assertTrue( + (pts_semantic_mask.numpy() == expected_pts_semantic_mask).all()) diff --git a/tests/test_datasets/test_sunrgbd_dataset.py b/tests/test_datasets/test_sunrgbd_dataset.py new file mode 100755 index 0000000..f8519ea --- /dev/null +++ b/tests/test_datasets/test_sunrgbd_dataset.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import unittest + +import numpy as np +import torch +from mmengine.testing import assert_allclose + +from mmdet3d.datasets import SUNRGBDDataset +from mmdet3d.structures import DepthInstance3DBoxes + + +def _generate_scannet_dataset_config(): + data_root = 'tests/data/sunrgbd' + ann_file = 'sunrgbd_infos.pkl' + + classes = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', + 'night_stand', 'bookshelf', 'bathtub') + + from mmcv.transforms.base import BaseTransform + from mmengine.registry import TRANSFORMS + + if 'Identity' not in TRANSFORMS: + + @TRANSFORMS.register_module() + class Identity(BaseTransform): + + def transform(self, info): + if 'ann_info' in info: + info['gt_labels_3d'] = info['ann_info']['gt_labels_3d'] + return info + + modality = dict(use_camera=True, use_lidar=True) + pipeline = [ + dict(type='Identity'), + ] + data_prefix = dict(pts='points', img='sunrgbd_trainval') + return data_root, ann_file, classes, data_prefix, pipeline, modality + + +class TestScanNetDataset(unittest.TestCase): + + def test_sunrgbd_ataset(self): + np.random.seed(0) + data_root, ann_file, classes, data_prefix, \ + pipeline, modality, = _generate_scannet_dataset_config() + scannet_dataset = SUNRGBDDataset( + data_root, + ann_file, + data_prefix=data_prefix, + pipeline=pipeline, + metainfo=dict(classes=classes), + modality=modality) + + scannet_dataset.prepare_data(0) + input_dict = scannet_dataset.get_data_info(0) + scannet_dataset[0] + # assert the the path should contains data_prefix and data_root + assert data_prefix['pts'] in input_dict['lidar_points']['lidar_path'] + assert data_root in input_dict['lidar_points']['lidar_path'] + for cam_id, img_info in input_dict['images'].items(): + if 'img_path' in img_info: + assert data_prefix['img'] in img_info['img_path'] + assert data_root in img_info['img_path'] + + ann_info = scannet_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + except_label = np.array([0, 7, 6]) + + self.assertEqual(ann_info['gt_labels_3d'].dtype, np.int64) + assert_allclose(ann_info['gt_labels_3d'], except_label) + self.assertIsInstance(ann_info['gt_bboxes_3d'], DepthInstance3DBoxes) + + self.assertEqual(len(ann_info['gt_bboxes_3d']), 3) + assert_allclose(ann_info['gt_bboxes_3d'].tensor.sum(), + torch.tensor(19.2575)) + + classes = ['bed'] + bed_scannet_dataset = SUNRGBDDataset( + data_root, + ann_file, + data_prefix=data_prefix, + pipeline=pipeline, + metainfo=dict(classes=classes), + modality=modality) + + input_dict = bed_scannet_dataset.get_data_info(0) + ann_info = bed_scannet_dataset.parse_ann_info(input_dict) + + # assert the keys in ann_info and the type + self.assertIn('gt_labels_3d', ann_info) + # assert mapping to -1 or 1 + assert (ann_info['gt_labels_3d'] <= 0).all() + assert ann_info['gt_labels_3d'].dtype == np.int64 + # all instance have been filtered by classes + self.assertEqual(len(ann_info['gt_labels_3d']), 3) + self.assertEqual(len(bed_scannet_dataset.metainfo['classes']), 1) diff --git a/tests/test_datasets/test_transforms/test_formating.py b/tests/test_datasets/test_transforms/test_formating.py new file mode 
100755 index 0000000..d306fa4 --- /dev/null +++ b/tests/test_datasets/test_transforms/test_formating.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import unittest + +import torch +from mmengine.testing import assert_allclose + +from mmdet3d.datasets.transforms.formating import Pack3DDetInputs +from mmdet3d.structures import LiDARInstance3DBoxes +from mmdet3d.testing import create_data_info_after_loading + + +class TestPack3DDetInputs(unittest.TestCase): + + def test_packinputs(self): + ori_data_info = create_data_info_after_loading() + pack_input = Pack3DDetInputs( + keys=['points', 'gt_labels_3d', 'gt_bboxes_3d']) + packed_results = pack_input(ori_data_info) + inputs = packed_results['inputs'] + + # annotations + gt_instances = packed_results['data_samples'].gt_instances_3d + self.assertIn('points', inputs) + self.assertIsInstance(inputs['points'], torch.Tensor) + assert_allclose(inputs['points'].sum(), torch.tensor(13062.6436)) + # assert to_tensor + self.assertIsInstance(inputs['points'], torch.Tensor) + self.assertIn('labels_3d', gt_instances) + assert_allclose(gt_instances.labels_3d, torch.tensor([1])) + # assert to_tensor + self.assertIsInstance(gt_instances.labels_3d, torch.Tensor) + + self.assertIn('bboxes_3d', gt_instances) + self.assertIsInstance(gt_instances.bboxes_3d, LiDARInstance3DBoxes) + assert_allclose(gt_instances.bboxes_3d.tensor.sum(), + torch.tensor(7.2650)) diff --git a/tests/test_datasets/test_transforms/test_loading.py b/tests/test_datasets/test_transforms/test_loading.py new file mode 100755 index 0000000..d071223 --- /dev/null +++ b/tests/test_datasets/test_transforms/test_loading.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import unittest + +import numpy as np +import torch +from mmengine.testing import assert_allclose + +from mmdet3d.datasets.transforms import PointSegClassMapping +from mmdet3d.datasets.transforms.loading import (LoadAnnotations3D, + LoadPointsFromFile) +from mmdet3d.structures import DepthPoints, LiDARPoints +from mmdet3d.testing import create_dummy_data_info + + +class TestLoadPointsFromFile(unittest.TestCase): + + def test_load_points_from_file(self): + use_dim = 3 + backend_args = None + load_points_transform = LoadPointsFromFile( + coord_type='LIDAR', + load_dim=4, + use_dim=use_dim, + backend_args=backend_args) + data_info = create_dummy_data_info() + info = load_points_transform(data_info) + self.assertIn('points', info) + self.assertIsInstance(info['points'], LiDARPoints) + load_points_transform = LoadPointsFromFile( + coord_type='DEPTH', + load_dim=4, + use_dim=use_dim, + backend_args=backend_args) + info = load_points_transform(data_info) + self.assertIsInstance(info['points'], DepthPoints) + self.assertEqual(info['points'].shape[-1], use_dim) + load_points_transform = LoadPointsFromFile( + coord_type='DEPTH', + load_dim=4, + use_dim=use_dim, + shift_height=True, + backend_args=backend_args) + info = load_points_transform(data_info) + # extra height dim + self.assertEqual(info['points'].shape[-1], use_dim + 1) + + repr_str = repr(load_points_transform) + self.assertIn('shift_height=True', repr_str) + self.assertIn('use_color=False', repr_str) + self.assertIn('load_dim=4', repr_str) + + +class TestLoadAnnotations3D(unittest.TestCase): + + def test_load_points_from_file(self): + backend_args = None + + load_anns_transform = LoadAnnotations3D( + with_bbox_3d=True, + with_label_3d=True, + with_panoptic_3d=True, + seg_offset=2**16, + dataset_type='semantickitti', + seg_3d_dtype='np.uint32', 
+ backend_args=backend_args) + self.assertIs(load_anns_transform.with_seg, False) + self.assertIs(load_anns_transform.with_bbox_3d, True) + self.assertIs(load_anns_transform.with_label_3d, True) + data_info = create_dummy_data_info() + info = load_anns_transform(data_info) + self.assertIn('gt_bboxes_3d', info) + assert_allclose(info['gt_bboxes_3d'].tensor.sum(), + torch.tensor(7.2650)) + self.assertIn('gt_labels_3d', info) + assert_allclose(info['gt_labels_3d'], torch.tensor([1])) + self.assertIn('pts_semantic_mask', info) + self.assertIn('pts_instance_mask', info) + assert_allclose( + info['pts_semantic_mask'], + np.array([ + 50, 50, 50, 70, 70, 50, 0, 50, 70, 50, 50, 70, 71, 52, 70, 50, + 50, 50, 50, 0, 50, 50, 50, 50, 50, 70, 50, 71, 50, 70, 70, 80, + 50, 70, 70, 70, 71, 70, 50, 50, 70, 50, 80, 70, 50, 70, 50, 70, + 70, 50 + ])) + assert_allclose( + info['pts_instance_mask'], + np.array([ + 50, 50, 50, 70, 70, 50, 0, 50, 70, 50, 50, 70, 71, 52, 70, 50, + 50, 50, 50, 0, 50, 50, 50, 50, 50, 70, 50, 71, 50, 70, 70, 80, + 50, 70, 70, 70, 71, 70, 50, 50, 70, 50, 80, 70, 50, 70, 50, 70, + 70, 50 + ])) + repr_str = repr(load_anns_transform) + self.assertIn('with_bbox_3d=True', repr_str) + self.assertIn('with_label_3d=True', repr_str) + self.assertIn('with_bbox_depth=False', repr_str) + self.assertIn('with_panoptic_3d=True', repr_str) + + +class TestPointSegClassMapping(unittest.TestCase): + + def test_point_seg_class_mapping(self): + results = dict() + results['pts_semantic_mask'] = np.array([1, 2, 3, 4, 5]) + results['seg_label_mapping'] = np.array([3, 0, 1, 2, 3, 3]) + point_seg_mapping_transform = PointSegClassMapping() + results = point_seg_mapping_transform(results) + assert_allclose(results['pts_semantic_mask'], np.array([0, 1, 2, 3, + 3])) diff --git a/tests/test_datasets/test_transforms/test_transforms_3d.py b/tests/test_datasets/test_transforms/test_transforms_3d.py new file mode 100755 index 0000000..94d2e0c --- /dev/null +++ b/tests/test_datasets/test_transforms/test_transforms_3d.py @@ -0,0 +1,303 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
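+# Unit tests for the 3D augmentation transforms: GlobalRotScaleTrans,
+# RandomFlip3D, GlobalAlignment, PolarMix and LaserMix.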
+import copy
+import unittest
+
+import numpy as np
+import torch
+from mmengine.testing import assert_allclose
+
+from mmdet3d.datasets import (GlobalAlignment, RandomFlip3D,
+                              SemanticKittiDataset)
+from mmdet3d.datasets.transforms import GlobalRotScaleTrans, LaserMix, PolarMix
+from mmdet3d.structures import LiDARPoints
+from mmdet3d.testing import create_data_info_after_loading
+from mmdet3d.utils import register_all_modules
+
+register_all_modules()
+
+
+class TestGlobalRotScaleTrans(unittest.TestCase):
+
+    def test_global_rotation_scale_trans(self):
+        rot_trans = GlobalRotScaleTrans(
+            rot_range=[-0.78, 0.78], scale_ratio_range=[1, 1])
+        scale_trans = GlobalRotScaleTrans(
+            rot_range=[0, 0], scale_ratio_range=[0.95, 1.05])
+
+        ori_data_info = create_data_info_after_loading()
+
+        data_info = copy.deepcopy(ori_data_info)
+        rot_data_info = rot_trans(data_info)
+        self.assertIn('pcd_rotation', rot_data_info)
+        self.assertIn('pcd_rotation_angle', rot_data_info)
+        self.assertIn('pcd_scale_factor', rot_data_info)
+        self.assertEqual(rot_data_info['pcd_scale_factor'], 1)
+        self.assertIs(-0.79 < rot_data_info['pcd_rotation_angle'] < 0.79, True)
+
+        # the yaw of gt boxes should change by no more than the rot_range bound
+        before_rot_gt_bbox_3d = ori_data_info['gt_bboxes_3d']
+        after_rot_gt_bbox_3d = rot_data_info['gt_bboxes_3d']
+        assert (after_rot_gt_bbox_3d.tensor[:, -1] -
+                before_rot_gt_bbox_3d.tensor[:, -1]).abs().max() < 0.79
+
+        data_info = copy.deepcopy(ori_data_info)
+        scale_data_info = scale_trans(data_info)
+        # no rotation should be applied when rot_range is [0, 0]
+        before_scale_gt_bbox_3d = ori_data_info['gt_bboxes_3d'].tensor
+        after_scale_gt_bbox_3d = scale_data_info['gt_bboxes_3d'].tensor
+        before_scale_points = ori_data_info['points'].tensor
+        after_scale_points = scale_data_info['points'].tensor
+        self.assertEqual(scale_data_info['pcd_rotation_angle'], 0)
+        # the applied scale factor should stay within scale_ratio_range
+        assert (0.94 < (after_scale_points / before_scale_points)).all()
+        assert (1.06 >
+                (after_scale_gt_bbox_3d / before_scale_gt_bbox_3d)).all()
+
+
+class TestRandomFlip3D(unittest.TestCase):
+
+    def test_random_flip3d(self):
+        ori_data_info = create_data_info_after_loading()
+        no_flip_transform = RandomFlip3D(flip_ratio_bev_horizontal=0.)
+        always_flip_transform = RandomFlip3D(flip_ratio_bev_horizontal=1.)
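+        # flip_ratio 0 should leave points and boxes unchanged; flip_ratio 1
+        # should negate only the y coordinates of both.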
+ data_info = copy.deepcopy(ori_data_info) + data_info = no_flip_transform(data_info) + self.assertIn('pcd_horizontal_flip', data_info) + assert_allclose(data_info['points'].tensor, + ori_data_info['points'].tensor) + + torch.allclose(data_info['gt_bboxes_3d'].tensor, + ori_data_info['gt_bboxes_3d'].tensor) + data_info = copy.deepcopy(ori_data_info) + data_info = always_flip_transform(data_info) + assert_allclose(data_info['points'].tensor[:, 0], + ori_data_info['points'].tensor[:, 0]) + assert_allclose(data_info['points'].tensor[:, 1], + -ori_data_info['points'].tensor[:, 1]) + assert_allclose(data_info['points'].tensor[:, 2], + ori_data_info['points'].tensor[:, 2]) + + assert_allclose(data_info['gt_bboxes_3d'].tensor[:, 0], + ori_data_info['gt_bboxes_3d'].tensor[:, 0]) + assert_allclose(data_info['gt_bboxes_3d'].tensor[:, 1], + -ori_data_info['gt_bboxes_3d'].tensor[:, 1]) + assert_allclose(data_info['gt_bboxes_3d'].tensor[:, 2], + ori_data_info['gt_bboxes_3d'].tensor[:, 2]) + + +class TestGlobalAlignment(unittest.TestCase): + + def test_global_alignment(self): + data_info = create_data_info_after_loading() + global_align_transform = GlobalAlignment(rotation_axis=2) + data_info['axis_align_matrix'] = np.array( + [[0.945519, 0.325568, 0., -5.38439], + [-0.325568, 0.945519, 0., -2.87178], [0., 0., 1., -0.06435], + [0., 0., 0., 1.]], + dtype=np.float32) + global_align_transform(data_info) + + data_info['axis_align_matrix'] = np.array( + [[0.945519, 0.325568, 0., -5.38439], [0, 2, 0., -2.87178], + [0., 0., 1., -0.06435], [0., 0., 0., 1.]], + dtype=np.float32) + # assert the rot metric + with self.assertRaises(AssertionError): + global_align_transform(data_info) + + +class TestPolarMix(unittest.TestCase): + + def setUp(self): + self.pre_transform = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + seg_3d_dtype='np.int32'), + dict(type='PointSegClassMapping'), + ] + classes = ('car', 'bicycle', 'motorcycle', 'truck', 'bus', 'person', + 'bicyclist', 'motorcyclist', 'road', 'parking', 'sidewalk', + 'other-ground', 'building', 'fence', 'vegetation', 'trunck', + 'terrian', 'pole', 'traffic-sign') + seg_label_mapping = { + 0: 0, # "unlabeled" + 1: 0, # "outlier" mapped to "unlabeled" --------------mapped + 10: 1, # "car" + 11: 2, # "bicycle" + 13: 5, # "bus" mapped to "other-vehicle" --------------mapped + 15: 3, # "motorcycle" + 16: 5, # "on-rails" mapped to "other-vehicle" ---------mapped + 18: 4, # "truck" + 20: 5, # "other-vehicle" + 30: 6, # "person" + 31: 7, # "bicyclist" + 32: 8, # "motorcyclist" + 40: 9, # "road" + 44: 10, # "parking" + 48: 11, # "sidewalk" + 49: 12, # "other-ground" + 50: 13, # "building" + 51: 14, # "fence" + 52: 0, # "other-structure" mapped to "unlabeled" ------mapped + 60: 9, # "lane-marking" to "road" ---------------------mapped + 70: 15, # "vegetation" + 71: 16, # "trunk" + 72: 17, # "terrain" + 80: 18, # "pole" + 81: 19, # "traffic-sign" + 99: 0, # "other-object" to "unlabeled" ----------------mapped + 252: 1, # "moving-car" to "car" ------------------------mapped + 253: 7, # "moving-bicyclist" to "bicyclist" ------------mapped + 254: 6, # "moving-person" to "person" ------------------mapped + 255: 8, # "moving-motorcyclist" to "motorcyclist" ------mapped + 256: 5, # "moving-on-rails" mapped to "other-vehic------mapped + 257: 5, # "moving-bus" mapped to "other-vehicle" -------mapped + 258: 4, # 
"moving-truck" to "truck" --------------------mapped + 259: 5 # "moving-other"-vehicle to "other-vehicle"-----mapped + } + max_label = 259 + self.dataset = SemanticKittiDataset( + './tests/data/semantickitti/', + 'semantickitti_infos.pkl', + metainfo=dict( + classes=classes, + seg_label_mapping=seg_label_mapping, + max_label=max_label), + data_prefix=dict( + pts='sequences/00/velodyne', + pts_semantic_mask='sequences/00/labels'), + pipeline=[], + modality=dict(use_lidar=True, use_camera=False)) + points = np.random.random((100, 4)) + self.results = { + 'points': LiDARPoints(points, points_dim=4), + 'pts_semantic_mask': np.random.randint(0, 20, (100, )), + 'dataset': self.dataset + } + + def test_transform(self): + # test assertion for invalid instance_classes + with self.assertRaises(AssertionError): + transform = PolarMix(instance_classes=1) + + with self.assertRaises(AssertionError): + transform = PolarMix(instance_classes=[1.0, 2.0]) + + transform = PolarMix( + instance_classes=[15, 16, 17], + swap_ratio=1.0, + pre_transform=self.pre_transform) + results = transform.transform(copy.deepcopy(self.results)) + self.assertTrue(results['points'].shape[0] == + results['pts_semantic_mask'].shape[0]) + + +class TestLaserMix(unittest.TestCase): + + def setUp(self): + self.pre_transform = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True, + seg_3d_dtype='np.int32'), + dict(type='PointSegClassMapping'), + ] + classes = ('car', 'bicycle', 'motorcycle', 'truck', 'bus', 'person', + 'bicyclist', 'motorcyclist', 'road', 'parking', 'sidewalk', + 'other-ground', 'building', 'fence', 'vegetation', 'trunck', + 'terrian', 'pole', 'traffic-sign') + seg_label_mapping = { + 0: 0, # "unlabeled" + 1: 0, # "outlier" mapped to "unlabeled" --------------mapped + 10: 1, # "car" + 11: 2, # "bicycle" + 13: 5, # "bus" mapped to "other-vehicle" --------------mapped + 15: 3, # "motorcycle" + 16: 5, # "on-rails" mapped to "other-vehicle" ---------mapped + 18: 4, # "truck" + 20: 5, # "other-vehicle" + 30: 6, # "person" + 31: 7, # "bicyclist" + 32: 8, # "motorcyclist" + 40: 9, # "road" + 44: 10, # "parking" + 48: 11, # "sidewalk" + 49: 12, # "other-ground" + 50: 13, # "building" + 51: 14, # "fence" + 52: 0, # "other-structure" mapped to "unlabeled" ------mapped + 60: 9, # "lane-marking" to "road" ---------------------mapped + 70: 15, # "vegetation" + 71: 16, # "trunk" + 72: 17, # "terrain" + 80: 18, # "pole" + 81: 19, # "traffic-sign" + 99: 0, # "other-object" to "unlabeled" ----------------mapped + 252: 1, # "moving-car" to "car" ------------------------mapped + 253: 7, # "moving-bicyclist" to "bicyclist" ------------mapped + 254: 6, # "moving-person" to "person" ------------------mapped + 255: 8, # "moving-motorcyclist" to "motorcyclist" ------mapped + 256: 5, # "moving-on-rails" mapped to "other-vehic------mapped + 257: 5, # "moving-bus" mapped to "other-vehicle" -------mapped + 258: 4, # "moving-truck" to "truck" --------------------mapped + 259: 5 # "moving-other"-vehicle to "other-vehicle"-----mapped + } + max_label = 259 + self.dataset = SemanticKittiDataset( + './tests/data/semantickitti/', + 'semantickitti_infos.pkl', + metainfo=dict( + classes=classes, + seg_label_mapping=seg_label_mapping, + max_label=max_label), + data_prefix=dict( + pts='sequences/00/velodyne', + pts_semantic_mask='sequences/00/labels'), + pipeline=[], + modality=dict(use_lidar=True, 
use_camera=False)) + points = np.random.random((100, 4)) + self.results = { + 'points': LiDARPoints(points, points_dim=4), + 'pts_semantic_mask': np.random.randint(0, 20, (100, )), + 'dataset': self.dataset + } + + def test_transform(self): + # test assertion for invalid num_areas + with self.assertRaises(AssertionError): + transform = LaserMix(num_areas=3, pitch_angles=[-20, 0]) + + with self.assertRaises(AssertionError): + transform = LaserMix(num_areas=[3.0, 4.0], pitch_angles=[-20, 0]) + + # test assertion for invalid pitch_angles + with self.assertRaises(AssertionError): + transform = LaserMix(num_areas=[3, 4], pitch_angles=[-20]) + + with self.assertRaises(AssertionError): + transform = LaserMix(num_areas=[3, 4], pitch_angles=[0, -20]) + + transform = LaserMix( + num_areas=[3, 4, 5, 6], + pitch_angles=[-20, 0], + pre_transform=self.pre_transform) + results = transform.transform(copy.deepcopy(self.results)) + self.assertTrue(results['points'].shape[0] == + results['pts_semantic_mask'].shape[0]) diff --git a/tests/test_datasets/test_transforms/utils.py b/tests/test_datasets/test_transforms/utils.py new file mode 100755 index 0000000..a54c64d --- /dev/null +++ b/tests/test_datasets/test_transforms/utils.py @@ -0,0 +1,192 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np + +# create a dummy `results` to test the pipeline +from mmdet3d.datasets import LoadAnnotations3D, LoadPointsFromFile +from mmdet3d.datasets.transforms.loading import LoadImageFromFileMono3D +from mmdet3d.structures import LiDARInstance3DBoxes + + +def create_dummy_data_info(with_ann=True): + + ann_info = { + 'gt_bboxes': + np.array([[712.4, 143., 810.73, 307.92]]), + 'gt_labels': + np.array([1]), + 'gt_bboxes_3d': + LiDARInstance3DBoxes( + np.array( + [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, + -1.5808]])), + 'gt_labels_3d': + np.array([1]), + 'centers_2d': + np.array([[765.04, 214.56]]), + 'depths': + np.array([8.410]), + 'num_lidar_pts': + np.array([377]), + 'difficulty': + np.array([0]), + 'truncated': + np.array([0]), + 'occluded': + np.array([0]), + 'alpha': + np.array([-0.2]), + 'score': + np.array([0.]), + 'index': + np.array([0]), + 'group_id': + np.array([0]) + } + data_info = { + 'sample_id': + 0, + 'images': { + 'CAM0': { + 'cam2img': [[707.0493, 0.0, 604.0814, 0.0], + [0.0, 707.0493, 180.5066, 0.0], + [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] + }, + 'CAM1': { + 'cam2img': [[707.0493, 0.0, 604.0814, -379.7842], + [0.0, 707.0493, 180.5066, 0.0], + [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] + }, + 'CAM2': { + 'img_path': + 'tests/data/kitti/training/image_2/000000.png', + 'height': + 370, + 'width': + 1224, + 'cam2img': [[707.0493, 0.0, 604.0814, 45.75831], + [0.0, 707.0493, 180.5066, -0.3454157], + [0.0, 0.0, 1.0, 0.004981016], [0.0, 0.0, 0.0, 1.0]] + }, + 'CAM3': { + 'cam2img': [[707.0493, 0.0, 604.0814, -334.1081], + [0.0, 707.0493, 180.5066, 2.33066], + [0.0, 0.0, 1.0, 0.003201153], [0.0, 0.0, 0.0, 1.0]] + }, + 'R0_rect': [[ + 0.9999127984046936, 0.010092630051076412, + -0.008511931635439396, 0.0 + ], + [ + -0.010127290152013302, 0.9999405741691589, + -0.004037670791149139, 0.0 + ], + [ + 0.008470674976706505, 0.0041235219687223434, + 0.9999555945396423, 0.0 + ], [0.0, 0.0, 0.0, 1.0]] + }, + 'lidar_points': { + 'num_pts_feats': + 4, + 'lidar_path': + 'tests/data/kitti/training/velodyne_reduced/000000.bin', + 'lidar2cam': [[ + -0.0015960992313921452, -0.9999162554740906, + -0.012840436771512032, -0.022366708144545555 + ], + [ + -0.00527064548805356, 0.012848696671426296, + 
-0.9999035596847534, -0.05967890843749046 + ], + [ + 0.9999848008155823, -0.0015282672829926014, + -0.005290712229907513, -0.33254900574684143 + ], [0.0, 0.0, 0.0, 1.0]], + 'Tr_velo_to_cam': [[ + 0.006927963811904192, -0.9999722242355347, -0.0027578289154917, + -0.024577289819717407 + ], + [ + -0.0011629819637164474, + 0.0027498360723257065, -0.9999955296516418, + -0.06127237156033516 + ], + [ + 0.999975323677063, 0.006931141018867493, + -0.0011438990477472544, -0.33210289478302 + ], [0.0, 0.0, 0.0, 1.0]], + 'Tr_imu_to_velo': [[ + 0.999997615814209, 0.0007553070900030434, + -0.002035825978964567, -0.8086758852005005 + ], + [ + -0.0007854027207940817, 0.9998897910118103, + -0.014822980388998985, 0.3195559084415436 + ], + [ + 0.002024406101554632, 0.014824540354311466, + 0.9998881220817566, -0.7997230887413025 + ], [0.0, 0.0, 0.0, 1.0]] + }, + 'instances': [{ + 'bbox': [712.4, 143.0, 810.73, 307.92], + 'bbox_label': + -1, + 'bbox_3d': [ + 1.840000033378601, 1.4700000286102295, 8.40999984741211, + 1.2000000476837158, 1.8899999856948853, 0.47999998927116394, + 0.009999999776482582 + ], + 'bbox_label_3d': + -1, + 'center_2d': [765.04, 214.56], + 'depth': + 8.410, + 'num_lidar_pts': + 377, + 'difficulty': + 0, + 'truncated': + 0, + 'occluded': + 0, + 'alpha': + -0.2, + 'score': + 0.0, + 'index': + 0, + 'group_id': + 0 + }], + 'plane': + None + } + if with_ann: + data_info['ann_info'] = ann_info + return data_info + + +def create_data_info_after_loading(): + load_anns_transform = LoadAnnotations3D( + with_bbox_3d=True, with_label_3d=True) + load_points_transform = LoadPointsFromFile( + coord_type='LIDAR', load_dim=4, use_dim=3) + data_info = create_dummy_data_info() + data_info = load_points_transform(data_info) + data_info_after_loading = load_anns_transform(data_info) + return data_info_after_loading + + +def create_mono3d_data_info_after_loading(): + load_anns_transform = LoadAnnotations3D( + with_bbox=True, + with_label=True, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True) + load_img_transform = LoadImageFromFileMono3D() + data_info = create_dummy_data_info() + data_info = load_img_transform(data_info) + data_info_after_loading = load_anns_transform(data_info) + return data_info_after_loading diff --git a/tests/test_engine/test_hooks/test_disable_object_sample_hook.py b/tests/test_engine/test_hooks/test_disable_object_sample_hook.py new file mode 100755 index 0000000..fcc1e3c --- /dev/null +++ b/tests/test_engine/test_hooks/test_disable_object_sample_hook.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
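+# Unit tests for DisableObjectSampleHook: the ObjectSample transform should be
+# disabled once runner.epoch reaches disable_after_epoch, and the dataloader
+# should only be restarted when persistent_workers is enabled.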
+from unittest import TestCase +from unittest.mock import Mock + +from mmdet3d.datasets.transforms import ObjectSample +from mmdet3d.engine.hooks import DisableObjectSampleHook + + +class TestDisableObjectSampleHook(TestCase): + + runner = Mock() + runner.train_dataloader = Mock() + runner.train_dataloader.dataset = Mock() + runner.train_dataloader.dataset.pipeline = Mock() + runner.train_dataloader._DataLoader__initialized = True + runner.train_dataloader.dataset.pipeline.transforms = [ + ObjectSample( + db_sampler=dict( + data_root='tests/data/waymo/kitti_format', + info_path= # noqa + 'tests/data/waymo/kitti_format/waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5)), + classes=['Car'], + sample_groups=dict(Car=15), + )) + ] + + def test_is_model_wrapper_and_persistent_workers_on(self): + self.runner.train_dataloader.dataset.pipeline.transforms[ + 0].disabled = False + self.runner.train_dataloader.persistent_workers = True + hook = DisableObjectSampleHook(disable_after_epoch=15) + self.runner.epoch = 14 + hook.before_train_epoch(self.runner) + self.assertFalse(self.runner.train_dataloader.dataset.pipeline. + transforms[0].disabled) # noqa: E501 + + self.runner.epoch = 15 + hook.before_train_epoch(self.runner) + self.assertTrue(self.runner.train_dataloader.dataset.pipeline. + transforms[0].disabled) # noqa: E501 + self.assertTrue(hook._restart_dataloader) + self.assertFalse(self.runner.train_dataloader._DataLoader__initialized) + + self.runner.epoch = 16 + hook.before_train_epoch(self.runner) + self.assertTrue(self.runner.train_dataloader._DataLoader__initialized) + self.assertTrue(self.runner.train_dataloader.dataset.pipeline. + transforms[0].disabled) # noqa: E501 + + def test_not_model_wrapper_and_persistent_workers_off(self): + self.runner.train_dataloader.dataset.pipeline.transforms[ + 0].disabled = False + self.runner.train_dataloader.persistent_workers = False + hook = DisableObjectSampleHook(disable_after_epoch=15) + self.runner.epoch = 14 + hook.before_train_epoch(self.runner) + self.assertFalse(self.runner.train_dataloader.dataset.pipeline. + transforms[0].disabled) # noqa: E501 + + self.runner.epoch = 15 + hook.before_train_epoch(self.runner) + self.assertTrue(self.runner.train_dataloader.dataset.pipeline. + transforms[0].disabled) # noqa: E501 + self.assertFalse(hook._restart_dataloader) + self.assertTrue(self.runner.train_dataloader._DataLoader__initialized) + + self.runner.epoch = 16 + hook.before_train_epoch(self.runner) + self.assertTrue(self.runner.train_dataloader._DataLoader__initialized) + self.assertTrue(self.runner.train_dataloader.dataset.pipeline. + transforms[0].disabled) # noqa: E501 diff --git a/tests/test_engine/test_hooks/test_visualization_hook.py b/tests/test_engine/test_hooks/test_visualization_hook.py new file mode 100755 index 0000000..7bd1da5 --- /dev/null +++ b/tests/test_engine/test_hooks/test_visualization_hook.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
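+# Unit tests for Det3DVisualizationHook, covering after_val_iter and
+# after_test_iter with and without drawing results to test_out_dir.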
+import os.path as osp +import shutil +import time +from unittest import TestCase +from unittest.mock import Mock + +import numpy as np +import torch +from mmengine.structures import InstanceData + +from mmdet3d.engine.hooks import Det3DVisualizationHook +from mmdet3d.structures import Det3DDataSample, LiDARInstance3DBoxes +from mmdet3d.visualization import Det3DLocalVisualizer + + +class TestVisualizationHook(TestCase): + + def setUp(self) -> None: + Det3DLocalVisualizer.get_instance('visualizer') + + pred_instances_3d = InstanceData() + pred_instances_3d.bboxes_3d = LiDARInstance3DBoxes( + torch.tensor( + [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]])) + pred_instances_3d.labels_3d = torch.tensor([0]) + pred_instances_3d.scores_3d = torch.tensor([0.8]) + + pred_det3d_data_sample = Det3DDataSample() + pred_det3d_data_sample.set_metainfo({ + 'num_pts_feats': + 4, + 'lidar2img': + np.array([[ + 6.02943734e+02, -7.07913286e+02, -1.22748427e+01, + -1.70942724e+02 + ], + [ + 1.76777261e+02, 8.80879902e+00, -7.07936120e+02, + -1.02568636e+02 + ], + [ + 9.99984860e-01, -1.52826717e-03, -5.29071223e-03, + -3.27567990e-01 + ], + [ + 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, + 1.00000000e+00 + ]]), + 'img_path': + osp.join( + osp.dirname(__file__), + '../../data/kitti/training/image_2/000000.png'), + 'lidar_path': + osp.join( + osp.dirname(__file__), + '../../data/kitti/training/velodyne_reduced/000000.bin') + }) + pred_det3d_data_sample.pred_instances_3d = pred_instances_3d + self.outputs = [pred_det3d_data_sample] * 2 + + def test_after_val_iter(self): + runner = Mock() + runner.iter = 1 + hook = Det3DVisualizationHook() + hook.after_val_iter(runner, 1, {}, self.outputs) + + def test_after_test_iter(self): + runner = Mock() + runner.iter = 1 + hook = Det3DVisualizationHook(draw=True) + hook.after_test_iter(runner, 1, {}, self.outputs) + self.assertEqual(hook._test_index, 2) + + # test + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + test_out_dir = timestamp + '1' + runner.work_dir = timestamp + runner.timestamp = '1' + hook = Det3DVisualizationHook(draw=False, test_out_dir=test_out_dir) + hook.after_test_iter(runner, 1, {}, self.outputs) + self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}')) + + hook = Det3DVisualizationHook(draw=True, test_out_dir=test_out_dir) + hook.after_test_iter(runner, 1, {}, self.outputs) + self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}')) + shutil.rmtree(f'{timestamp}') diff --git a/tests/test_evaluation/test_functional/test_instance_seg_eval.py b/tests/test_evaluation/test_functional/test_instance_seg_eval.py new file mode 100755 index 0000000..89f93dc --- /dev/null +++ b/tests/test_evaluation/test_functional/test_instance_seg_eval.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
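+# Unit tests for instance_seg_eval on synthetic ScanNet-style masks: perfect
+# predictions should score AP 1.0, and corrupted predictions should lower the
+# per-class AP.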
+import numpy as np +import torch + +from mmdet3d.evaluation import instance_seg_eval + + +def test_instance_seg_eval(): + valid_class_ids = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, + 36, 39) + class_labels = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', + 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', + 'sink', 'bathtub', 'garbagebin') + n_points_list = [3300, 3000] + gt_labels_list = [[0, 0, 0, 0, 0, 0, 14, 14, 2, 1], + [13, 13, 2, 1, 3, 3, 0, 0, 0]] + gt_instance_masks = [] + gt_semantic_masks = [] + pred_instance_masks = [] + pred_instance_labels = [] + pred_instance_scores = [] + for n_points, gt_labels in zip(n_points_list, gt_labels_list): + gt_instance_mask = np.ones(n_points, dtype=np.int64) * -1 + gt_semantic_mask = np.ones(n_points, dtype=np.int64) * -1 + pred_instance_mask = np.ones(n_points, dtype=np.int64) * -1 + labels = [] + scores = [] + for i, gt_label in enumerate(gt_labels): + begin = i * 300 + end = begin + 300 + gt_instance_mask[begin:end] = i + gt_semantic_mask[begin:end] = gt_label + pred_instance_mask[begin:end] = i + labels.append(gt_label) + scores.append(.99) + gt_instance_masks.append(torch.tensor(gt_instance_mask)) + gt_semantic_masks.append(torch.tensor(gt_semantic_mask)) + pred_instance_masks.append(torch.tensor(pred_instance_mask)) + pred_instance_labels.append(torch.tensor(labels)) + pred_instance_scores.append(torch.tensor(scores)) + + ret_value = instance_seg_eval( + gt_semantic_masks=gt_semantic_masks, + gt_instance_masks=gt_instance_masks, + pred_instance_masks=pred_instance_masks, + pred_instance_labels=pred_instance_labels, + pred_instance_scores=pred_instance_scores, + valid_class_ids=valid_class_ids, + class_labels=class_labels) + for label in [ + 'cabinet', 'bed', 'chair', 'sofa', 'showercurtrain', 'toilet' + ]: + metrics = ret_value['classes'][label] + assert metrics['ap'] == 1.0 + assert metrics['ap50%'] == 1.0 + assert metrics['ap25%'] == 1.0 + + pred_instance_masks[1][2240:2700] = -1 + pred_instance_masks[0][2700:3000] = 8 + pred_instance_labels[0][9] = 2 + ret_value = instance_seg_eval( + gt_semantic_masks=gt_semantic_masks, + gt_instance_masks=gt_instance_masks, + pred_instance_masks=pred_instance_masks, + pred_instance_labels=pred_instance_labels, + pred_instance_scores=pred_instance_scores, + valid_class_ids=valid_class_ids, + class_labels=class_labels) + assert abs(ret_value['classes']['cabinet']['ap50%'] - 0.72916) < 0.01 + assert abs(ret_value['classes']['cabinet']['ap25%'] - 0.88888) < 0.01 + assert abs(ret_value['classes']['bed']['ap50%'] - 0.5) < 0.01 + assert abs(ret_value['classes']['bed']['ap25%'] - 0.5) < 0.01 + assert abs(ret_value['classes']['chair']['ap50%'] - 0.375) < 0.01 + assert abs(ret_value['classes']['chair']['ap25%'] - 1.0) < 0.01 diff --git a/tests/test_evaluation/test_functional/test_kitti_eval.py b/tests/test_evaluation/test_functional/test_kitti_eval.py new file mode 100755 index 0000000..f8608af --- /dev/null +++ b/tests/test_evaluation/test_functional/test_kitti_eval.py @@ -0,0 +1,266 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
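+# Unit tests for the KITTI evaluation utilities (do_eval, kitti_eval and
+# eval_class) on a single hand-crafted ground-truth/detection pair.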
+import numpy as np +import pytest +import torch + +from mmdet3d.evaluation import do_eval, eval_class, kitti_eval + + +def test_do_eval(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and CUDA') + gt_name = np.array( + ['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car', 'DontCare', 'DontCare']) + gt_truncated = np.array([0., 0., 0., -1., -1., -1., -1.]) + gt_occluded = np.array([0, 0, 3, -1, -1, -1, -1]) + gt_alpha = np.array([-1.57, 1.85, -1.65, -10., -10., -10., -10.]) + gt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134], + [676.21954, 165.70988, 691.63745, 193.83748], + [389.4093, 182.48041, 421.49072, 202.13422], + [232.0577, 186.16724, 301.94623, 217.4024], + [758.6537, 172.98509, 816.32434, 212.76743], + [532.37, 176.35, 542.68, 185.27], + [559.62, 175.83, 575.4, 183.15]]) + gt_dimensions = np.array([[12.34, 2.85, 2.63], [3.69, 1.67, 1.87], + [2.02, 1.86, 0.6], [-1., -1., -1.], + [-1., -1., -1.], [-1., -1., -1.], + [-1., -1., -1.]]) + gt_location = np.array([[4.700e-01, 1.490e+00, 6.944e+01], + [-1.653e+01, 2.390e+00, 5.849e+01], + [4.590e+00, 1.320e+00, 4.584e+01], + [-1.000e+03, -1.000e+03, -1.000e+03], + [-1.000e+03, -1.000e+03, -1.000e+03], + [-1.000e+03, -1.000e+03, -1.000e+03], + [-1.000e+03, -1.000e+03, -1.000e+03]]) + gt_rotation_y = [-1.56, 1.57, -1.55, -10., -10., -10., -10.] + gt_anno = dict( + name=gt_name, + truncated=gt_truncated, + occluded=gt_occluded, + alpha=gt_alpha, + bbox=gt_bbox, + dimensions=gt_dimensions, + location=gt_location, + rotation_y=gt_rotation_y) + + dt_name = np.array(['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car']) + dt_truncated = np.array([0., 0., 0., 0., 0.]) + dt_occluded = np.array([0, 0, 0, 0, 0]) + dt_alpha = np.array([1.0744612, 1.2775835, 1.82563, 2.1145396, -1.7676563]) + dt_dimensions = np.array([[1.4441837, 1.7450154, 0.53160036], + [1.6501029, 1.7540325, 0.5162356], + [3.9313498, 1.4899347, 1.5655756], + [4.0111866, 1.5350999, 1.585221], + [3.7337692, 1.5117968, 1.5515774]]) + dt_location = np.array([[4.6671643, 1.285098, 45.836895], + [4.658241, 1.3088846, 45.85148], + [-16.598526, 2.298814, 58.618088], + [-18.629122, 2.2990575, 39.305355], + [7.0964046, 1.5178275, 29.32426]]) + dt_rotation_y = np.array( + [1.174933, 1.3778262, 1.550529, 1.6742425, -1.5330327]) + dt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134], + [676.21954, 165.70988, 691.63745, 193.83748], + [389.4093, 182.48041, 421.49072, 202.13422], + [232.0577, 186.16724, 301.94623, 217.4024], + [758.6537, 172.98509, 816.32434, 212.76743]]) + dt_score = np.array( + [0.18151495, 0.57920843, 0.27795696, 0.23100418, 0.21541929]) + dt_anno = dict( + name=dt_name, + truncated=dt_truncated, + occluded=dt_occluded, + alpha=dt_alpha, + bbox=dt_bbox, + dimensions=dt_dimensions, + location=dt_location, + rotation_y=dt_rotation_y, + score=dt_score) + current_classes = [1, 2, 0] + min_overlaps = np.array([[[0.5, 0.5, 0.7], [0.5, 0.5, 0.7], + [0.5, 0.5, 0.7]], + [[0.5, 0.5, 0.7], [0.25, 0.25, 0.5], + [0.25, 0.25, 0.5]]]) + eval_types = ['bbox', 'bev', '3d', 'aos'] + mAP11_bbox, mAP11_bev, mAP11_3d, mAP11_aos, mAP40_bbox,\ + mAP40_bev, mAP40_3d, mAP40_aos = do_eval([gt_anno], [dt_anno], + current_classes, min_overlaps, + eval_types) + expected_mAP11_bbox = np.array([[[0., 0.], [9.09090909, 9.09090909], + [9.09090909, 9.09090909]], + [[0., 0.], [9.09090909, 9.09090909], + [9.09090909, 9.09090909]], + [[0., 0.], [9.09090909, 9.09090909], + [9.09090909, 9.09090909]]]) + expected_mAP40_bbox = np.array([[[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], 
[0., 0.], [0., 0.]], + [[0., 0.], [2.5, 2.5], [2.5, 2.5]]]) + expected_mAP11_bev = np.array([[[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]]]) + expected_mAP40_bev = np.array([[[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]]]) + expected_mAP11_3d = np.array([[[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]]]) + expected_mAP40_3d = np.array([[[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]]]) + expected_mAP11_aos = np.array([[[0., 0.], [0.55020816, 0.55020816], + [0.55020816, 0.55020816]], + [[0., 0.], [8.36633862, 8.36633862], + [8.36633862, 8.36633862]], + [[0., 0.], [8.63476893, 8.63476893], + [8.63476893, 8.63476893]]]) + expected_mAP40_aos = np.array([[[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [0., 0.], [0., 0.]], + [[0., 0.], [1.58140643, 1.58140643], + [1.58140643, 1.58140643]]]) + assert np.allclose(mAP11_bbox, expected_mAP11_bbox) + assert np.allclose(mAP11_bev, expected_mAP11_bev) + assert np.allclose(mAP11_3d, expected_mAP11_3d) + assert np.allclose(mAP11_aos, expected_mAP11_aos) + assert np.allclose(mAP40_bbox, expected_mAP40_bbox) + assert np.allclose(mAP40_bev, expected_mAP40_bev) + assert np.allclose(mAP40_3d, expected_mAP40_3d) + assert np.allclose(mAP40_aos, expected_mAP40_aos) + + +def test_kitti_eval(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and CUDA') + gt_name = np.array( + ['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car', 'DontCare', 'DontCare']) + gt_truncated = np.array([0., 0., 0., -1., -1., -1., -1.]) + gt_occluded = np.array([0, 0, 3, -1, -1, -1, -1]) + gt_alpha = np.array([-1.57, 1.85, -1.65, -10., -10., -10., -10.]) + gt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134], + [676.21954, 165.70988, 691.63745, 193.83748], + [389.4093, 182.48041, 421.49072, 202.13422], + [232.0577, 186.16724, 301.94623, 217.4024], + [758.6537, 172.98509, 816.32434, 212.76743], + [532.37, 176.35, 542.68, 185.27], + [559.62, 175.83, 575.4, 183.15]]) + gt_dimensions = np.array([[12.34, 2.85, 2.63], [3.69, 1.67, 1.87], + [2.02, 1.86, 0.6], [-1., -1., -1.], + [-1., -1., -1.], [-1., -1., -1.], + [-1., -1., -1.]]) + gt_location = np.array([[4.700e-01, 1.490e+00, 6.944e+01], + [-1.653e+01, 2.390e+00, 5.849e+01], + [4.590e+00, 1.320e+00, 4.584e+01], + [-1.000e+03, -1.000e+03, -1.000e+03], + [-1.000e+03, -1.000e+03, -1.000e+03], + [-1.000e+03, -1.000e+03, -1.000e+03], + [-1.000e+03, -1.000e+03, -1.000e+03]]) + gt_rotation_y = [-1.56, 1.57, -1.55, -10., -10., -10., -10.] 
+ gt_anno = dict( + name=gt_name, + truncated=gt_truncated, + occluded=gt_occluded, + alpha=gt_alpha, + bbox=gt_bbox, + dimensions=gt_dimensions, + location=gt_location, + rotation_y=gt_rotation_y) + + dt_name = np.array(['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car']) + dt_truncated = np.array([0., 0., 0., 0., 0.]) + dt_occluded = np.array([0, 0, 0, 0, 0]) + dt_alpha = np.array([1.0744612, 1.2775835, 1.82563, 2.1145396, -1.7676563]) + dt_dimensions = np.array([[1.4441837, 1.7450154, 0.53160036], + [1.6501029, 1.7540325, 0.5162356], + [3.9313498, 1.4899347, 1.5655756], + [4.0111866, 1.5350999, 1.585221], + [3.7337692, 1.5117968, 1.5515774]]) + dt_location = np.array([[4.6671643, 1.285098, 45.836895], + [4.658241, 1.3088846, 45.85148], + [-16.598526, 2.298814, 58.618088], + [-18.629122, 2.2990575, 39.305355], + [7.0964046, 1.5178275, 29.32426]]) + dt_rotation_y = np.array( + [1.174933, 1.3778262, 1.550529, 1.6742425, -1.5330327]) + dt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134], + [676.21954, 165.70988, 691.63745, 193.83748], + [389.4093, 182.48041, 421.49072, 202.13422], + [232.0577, 186.16724, 301.94623, 217.4024], + [758.6537, 172.98509, 816.32434, 212.76743]]) + dt_score = np.array( + [0.18151495, 0.57920843, 0.27795696, 0.23100418, 0.21541929]) + dt_anno = dict( + name=dt_name, + truncated=dt_truncated, + occluded=dt_occluded, + alpha=dt_alpha, + bbox=dt_bbox, + dimensions=dt_dimensions, + location=dt_location, + rotation_y=dt_rotation_y, + score=dt_score) + + current_classes = [1, 2, 0] + result, ret_dict = kitti_eval([gt_anno], [dt_anno], current_classes) + assert np.isclose(ret_dict['KITTI/Overall_2D_AP11_moderate'], + 9.090909090909092) + assert np.isclose(ret_dict['KITTI/Overall_2D_AP11_hard'], + 9.090909090909092) + assert np.isclose(ret_dict['KITTI/Overall_2D_AP40_moderate'], + 0.8333333333333334) + assert np.isclose(ret_dict['KITTI/Overall_2D_AP40_hard'], + 0.8333333333333334) + + +def test_eval_class(): + gt_name = np.array( + ['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car', 'DontCare', 'DontCare']) + gt_truncated = np.array([0., 0., 0., -1., -1., -1., -1.]) + gt_occluded = np.array([0, 0, 3, -1, -1, -1, -1]) + gt_alpha = np.array([-1.57, 1.85, -1.65, -10., -10., -10., -10.]) + gt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134], + [676.21954, 165.70988, 691.63745, 193.83748], + [389.4093, 182.48041, 421.49072, 202.13422], + [232.0577, 186.16724, 301.94623, 217.4024], + [758.6537, 172.98509, 816.32434, 212.76743], + [532.37, 176.35, 542.68, 185.27], + [559.62, 175.83, 575.4, 183.15]]) + gt_anno = dict( + name=gt_name, + truncated=gt_truncated, + occluded=gt_occluded, + alpha=gt_alpha, + bbox=gt_bbox) + + dt_name = np.array(['Pedestrian', 'Cyclist', 'Car', 'Car', 'Car']) + dt_truncated = np.array([0., 0., 0., 0., 0.]) + dt_occluded = np.array([0, 0, 0, 0, 0]) + dt_alpha = np.array([1.0744612, 1.2775835, 1.82563, 2.1145396, -1.7676563]) + dt_bbox = np.array([[674.9179, 165.48549, 693.23694, 193.42134], + [676.21954, 165.70988, 691.63745, 193.83748], + [389.4093, 182.48041, 421.49072, 202.13422], + [232.0577, 186.16724, 301.94623, 217.4024], + [758.6537, 172.98509, 816.32434, 212.76743]]) + dt_score = np.array( + [0.18151495, 0.57920843, 0.27795696, 0.23100418, 0.21541929]) + dt_anno = dict( + name=dt_name, + truncated=dt_truncated, + occluded=dt_occluded, + alpha=dt_alpha, + bbox=dt_bbox, + score=dt_score) + current_classes = [1, 2, 0] + difficultys = [0, 1, 2] + metric = 0 + min_overlaps = np.array([[[0.5, 0.5, 0.7], [0.5, 0.5, 0.7], + [0.5, 0.5, 
0.7]], + [[0.5, 0.5, 0.7], [0.25, 0.25, 0.5], + [0.25, 0.25, 0.5]]]) + + ret_dict = eval_class([gt_anno], [dt_anno], current_classes, difficultys, + metric, min_overlaps, True, 1) + recall_sum = np.sum(ret_dict['recall']) + precision_sum = np.sum(ret_dict['precision']) + orientation_sum = np.sum(ret_dict['orientation']) + assert np.isclose(recall_sum, 16) + assert np.isclose(precision_sum, 16) + assert np.isclose(orientation_sum, 10.252829201850309) diff --git a/tests/test_evaluation/test_functional/test_panoptic_seg_eval.py b/tests/test_evaluation/test_functional/test_panoptic_seg_eval.py new file mode 100755 index 0000000..d67abe6 --- /dev/null +++ b/tests/test_evaluation/test_functional/test_panoptic_seg_eval.py @@ -0,0 +1,101 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch + +from mmdet3d.evaluation.functional.panoptic_seg_eval import panoptic_seg_eval + + +def test_panoptic_seg_eval(): + if not torch.cuda.is_available(): + pytest.skip() + + classes = ['unlabeled', 'person', 'dog', 'grass', 'sky'] + label2cat = { + 0: 'unlabeled', + 1: 'person', + 2: 'dog', + 3: 'grass', + 4: 'sky', + } + + thing_classes = ['person', 'dog'] + stuff_classes = ['grass', 'sky'] + ignore_index = [0] # only ignore ignore class + min_points = 1 # for this example we care about all points + offset = 2**16 + + # generate ground truth and prediction + semantic_preds = [] + instance_preds = [] + gt_semantic = [] + gt_instance = [] + + # some ignore stuff + num_ignore = 50 + semantic_preds.extend([0 for i in range(num_ignore)]) + instance_preds.extend([0 for i in range(num_ignore)]) + gt_semantic.extend([0 for i in range(num_ignore)]) + gt_instance.extend([0 for i in range(num_ignore)]) + + # grass segment + num_grass = 50 + num_grass_pred = 40 # rest is sky + semantic_preds.extend([1 for i in range(num_grass_pred)]) # grass + semantic_preds.extend([2 + for i in range(num_grass - num_grass_pred)]) # sky + instance_preds.extend([0 for i in range(num_grass)]) + gt_semantic.extend([1 for i in range(num_grass)]) # grass + gt_instance.extend([0 for i in range(num_grass)]) + + # sky segment + num_sky = 50 + num_sky_pred = 40 # rest is grass + semantic_preds.extend([2 for i in range(num_sky_pred)]) # sky + semantic_preds.extend([1 for i in range(num_sky - num_sky_pred)]) # grass + instance_preds.extend([0 for i in range(num_sky)]) # first instance + gt_semantic.extend([2 for i in range(num_sky)]) # sky + gt_instance.extend([0 for i in range(num_sky)]) # first instance + + # wrong dog as person prediction + num_dog = 50 + num_person = num_dog + semantic_preds.extend([3 for i in range(num_person)]) + instance_preds.extend([35 for i in range(num_person)]) + gt_semantic.extend([4 for i in range(num_dog)]) + gt_instance.extend([22 for i in range(num_dog)]) + + # two persons in prediction, but three in gt + num_person = 50 + semantic_preds.extend([3 for i in range(6 * num_person)]) + instance_preds.extend([8 for i in range(4 * num_person)]) + instance_preds.extend([95 for i in range(2 * num_person)]) + gt_semantic.extend([3 for i in range(6 * num_person)]) + gt_instance.extend([33 for i in range(3 * num_person)]) + gt_instance.extend([42 for i in range(num_person)]) + gt_instance.extend([11 for i in range(2 * num_person)]) + + # gt and pred to numpy + semantic_preds = np.array(semantic_preds, dtype=int).reshape(1, -1) + instance_preds = np.array(instance_preds, dtype=int).reshape(1, -1) + gt_semantic = np.array(gt_semantic, dtype=int).reshape(1, -1) + gt_instance = 
np.array(gt_instance, dtype=int).reshape(1, -1) + + gt_labels = [{ + 'pts_semantic_mask': gt_semantic, + 'pts_instance_mask': gt_instance + }] + + seg_preds = [{ + 'pts_semantic_mask': semantic_preds, + 'pts_instance_mask': instance_preds + }] + + ret_value = panoptic_seg_eval(gt_labels, seg_preds, classes, thing_classes, + stuff_classes, min_points, offset, label2cat, + ignore_index) + + assert np.isclose(ret_value['pq'], 0.47916666666666663) + assert np.isclose(ret_value['rq_mean'], 0.6666666666666666) + assert np.isclose(ret_value['sq_mean'], 0.5520833333333333) + assert np.isclose(ret_value['miou'], 0.5476190476190476) diff --git a/tests/test_evaluation/test_functional/test_seg_eval.py b/tests/test_evaluation/test_functional/test_seg_eval.py new file mode 100755 index 0000000..d2b4485 --- /dev/null +++ b/tests/test_evaluation/test_functional/test_seg_eval.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch + +from mmdet3d.evaluation.functional.seg_eval import seg_eval + + +def test_indoor_eval(): + if not torch.cuda.is_available(): + pytest.skip() + seg_preds = [ + np.array([ + 0, 0, 1, 0, 0, 2, 1, 3, 1, 2, 1, 0, 2, 2, 2, 2, 1, 3, 0, 3, 3, 4, 0 + ]) + ] + gt_labels = [ + np.array([ + 0, 0, 0, 4, 0, 0, 1, 1, 1, 4, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4 + ]) + ] + + label2cat = { + 0: 'car', + 1: 'bicycle', + 2: 'motorcycle', + 3: 'truck', + 4: 'unlabeled' + } + ret_value = seg_eval(gt_labels, seg_preds, label2cat, ignore_index=4) + + assert np.isclose(ret_value['car'], 0.428571429) + assert np.isclose(ret_value['bicycle'], 0.428571429) + assert np.isclose(ret_value['motorcycle'], 0.6666667) + assert np.isclose(ret_value['truck'], 0.5) + + assert np.isclose(ret_value['acc'], 0.65) + assert np.isclose(ret_value['acc_cls'], 0.65) + assert np.isclose(ret_value['miou'], 0.50595238) diff --git a/tests/test_evaluation/test_metrics/test_indoor_metric.py b/tests/test_evaluation/test_metrics/test_indoor_metric.py new file mode 100755 index 0000000..a6c1fdd --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_indoor_metric.py @@ -0,0 +1,65 @@ +import unittest +from io import StringIO +from unittest.mock import patch + +import numpy as np +import torch + +from mmdet3d.evaluation.metrics import IndoorMetric +from mmdet3d.structures import DepthInstance3DBoxes + + +class TestIndoorMetric(unittest.TestCase): + + @patch('sys.stdout', new_callable=StringIO) + def test_process(self, stdout): + indoor_metric = IndoorMetric() + eval_ann_info = { + 'gt_bboxes_3d': + DepthInstance3DBoxes( + torch.tensor([ + [2.3578, 1.7841, -0.0987, 0.5532, 0.4948, 0.6474, 0.0000], + [-0.2773, -2.1403, 0.0615, 0.4786, 0.5170, 0.3842, 0.0000], + [0.0259, -2.7954, -0.0157, 0.3869, 0.4361, 0.5229, 0.0000], + [-2.3968, 1.1040, 0.0945, 2.5563, 1.5989, 0.9322, 0.0000], + [ + -0.3173, -2.7770, -0.0134, 0.5473, 0.8569, 0.5577, + 0.0000 + ], + [-2.4882, -1.4437, 0.0987, 1.2199, 0.4859, 0.6461, 0.0000], + [-3.4702, -0.1315, 0.2463, 1.3137, 0.8022, 0.4765, 0.0000], + [1.9786, 3.0196, -0.0934, 1.6129, 0.5834, 1.4662, 0.0000], + [2.3835, 2.2691, -0.1376, 0.5197, 0.5099, 0.6896, 0.0000], + [2.5986, -0.5313, 1.4269, 0.0696, 0.2933, 0.3104, 0.0000], + [0.4555, -3.1278, -0.0637, 2.0247, 0.1292, 0.2419, 0.0000], + [0.4655, -3.1941, 0.3769, 2.1132, 0.3536, 1.9803, 0.0000] + ])), + 'gt_labels_3d': + np.array([2, 2, 2, 3, 4, 17, 4, 7, 2, 8, 17, 11]) + } + + pred_instances_3d = dict() + pred_instances_3d['scores_3d'] = torch.ones( + len(eval_ann_info['gt_bboxes_3d'])) + 
pred_instances_3d['bboxes_3d'] = eval_ann_info['gt_bboxes_3d'] + pred_instances_3d['labels_3d'] = torch.Tensor( + eval_ann_info['gt_labels_3d']) + pred_dict = dict() + pred_dict['pred_instances_3d'] = pred_instances_3d + pred_dict['eval_ann_info'] = eval_ann_info + + indoor_metric.dataset_meta = { + 'classes': ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', + 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', + 'sink', 'bathtub', 'garbagebin'), + 'box_type_3d': + 'Depth', + } + + indoor_metric.process({}, [pred_dict]) + + eval_results = indoor_metric.evaluate(1) + for v in eval_results.values(): + # map == 1 + self.assertEqual(1, v) diff --git a/tests/test_evaluation/test_metrics/test_instance_seg_metric.py b/tests/test_evaluation/test_metrics/test_instance_seg_metric.py new file mode 100755 index 0000000..1ae1dce --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_instance_seg_metric.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import unittest + +import numpy as np +import torch +from mmengine.structures import BaseDataElement + +from mmdet3d.evaluation.metrics import InstanceSegMetric +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestInstanceSegMetric(unittest.TestCase): + + def _demo_mm_model_output(self): + """Create a superset of inputs needed to run test or train batches.""" + + n_points = 3300 + gt_labels = [0, 0, 0, 0, 0, 0, 14, 14, 2, 1] + gt_instance_mask = np.ones(n_points, dtype=np.int64) * -1 + gt_semantic_mask = np.ones(n_points, dtype=np.int64) * -1 + for i, gt_label in enumerate(gt_labels): + begin = i * 300 + end = begin + 300 + gt_instance_mask[begin:end] = i + gt_semantic_mask[begin:end] = gt_label + + ann_info_data = dict() + ann_info_data['pts_instance_mask'] = torch.tensor(gt_instance_mask) + ann_info_data['pts_semantic_mask'] = torch.tensor(gt_semantic_mask) + + results_dict = dict() + n_points = 3300 + gt_labels = [0, 0, 0, 0, 0, 0, 14, 14, 2, 1] + pred_instance_mask = np.ones(n_points, dtype=np.int64) * -1 + labels = [] + scores = [] + for i, gt_label in enumerate(gt_labels): + begin = i * 300 + end = begin + 300 + pred_instance_mask[begin:end] = i + labels.append(gt_label) + scores.append(.99) + + results_dict['pts_instance_mask'] = torch.tensor(pred_instance_mask) + results_dict['instance_labels'] = torch.tensor(labels) + results_dict['instance_scores'] = torch.tensor(scores) + data_sample = Det3DDataSample() + data_sample.pred_pts_seg = PointData(**results_dict) + data_sample.eval_ann_info = ann_info_data + batch_data_samples = [data_sample] + + predictions = [] + for pred in batch_data_samples: + if isinstance(pred, BaseDataElement): + pred = pred.to_dict() + predictions.append(pred) + + return predictions + + def test_evaluate(self): + data_batch = {} + predictions = self._demo_mm_model_output() + seg_valid_class_ids = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, + 33, 34, 36, 39) + class_labels = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', + 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', + 'sink', 'bathtub', 'garbagebin') + dataset_meta = dict( + seg_valid_class_ids=seg_valid_class_ids, classes=class_labels) + instance_seg_metric = InstanceSegMetric() + instance_seg_metric.dataset_meta = dataset_meta + instance_seg_metric.process(data_batch, predictions) + res = instance_seg_metric.evaluate(1) + self.assertIsInstance(res, dict) diff --git 
a/tests/test_evaluation/test_metrics/test_kitti_metric.py b/tests/test_evaluation/test_metrics/test_kitti_metric.py new file mode 100755 index 0000000..0703141 --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_kitti_metric.py @@ -0,0 +1,89 @@ +import numpy as np +import pytest +import torch +from mmengine.structures import InstanceData + +from mmdet3d.evaluation.metrics import KittiMetric +from mmdet3d.structures import Det3DDataSample, LiDARInstance3DBoxes + +data_root = 'tests/data/kitti' + + +def _init_evaluate_input(): + metainfo = dict(sample_idx=0) + predictions = Det3DDataSample() + pred_instances_3d = InstanceData() + pred_instances_3d.bboxes_3d = LiDARInstance3DBoxes( + torch.tensor( + [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]])) + pred_instances_3d.scores_3d = torch.Tensor([0.9]) + pred_instances_3d.labels_3d = torch.Tensor([0]) + + predictions.pred_instances_3d = pred_instances_3d + predictions.pred_instances = InstanceData() + predictions.set_metainfo(metainfo) + predictions = predictions.to_dict() + return {}, [predictions] + + +def _init_multi_modal_evaluate_input(): + metainfo = dict(sample_idx=0) + predictions = Det3DDataSample() + pred_instances_3d = InstanceData() + pred_instances = InstanceData() + pred_instances.bboxes = torch.tensor([[712.4, 143, 810.7, 307.92]]) + pred_instances.scores = torch.Tensor([0.9]) + pred_instances.labels = torch.Tensor([0]) + pred_instances_3d.bboxes_3d = LiDARInstance3DBoxes( + torch.tensor( + [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]])) + + pred_instances_3d.scores_3d = torch.Tensor([0.9]) + pred_instances_3d.labels_3d = torch.Tensor([0]) + + predictions.pred_instances_3d = pred_instances_3d + predictions.pred_instances = pred_instances + predictions.set_metainfo(metainfo) + predictions = predictions.to_dict() + return {}, [predictions] + + +def test_multi_modal_kitti_metric(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + kittimetric = KittiMetric( + data_root + '/kitti_infos_train.pkl', metric=['mAP']) + kittimetric.dataset_meta = dict(classes=['Pedestrian', 'Cyclist', 'Car']) + data_batch, predictions = _init_multi_modal_evaluate_input() + kittimetric.process(data_batch, predictions) + ap_dict = kittimetric.compute_metrics(kittimetric.results) + assert np.isclose(ap_dict['pred_instances_3d/KITTI/Overall_3D_AP11_easy'], + 3.0303030303030307) + assert np.isclose(ap_dict['pred_instances_3d/KITTI/Overall_BEV_AP11_easy'], + 3.0303030303030307) + assert np.isclose(ap_dict['pred_instances_3d/KITTI/Overall_2D_AP11_easy'], + 3.0303030303030307) + assert np.isclose(ap_dict['pred_instances/KITTI/Overall_2D_AP11_easy'], + 3.0303030303030307) + assert np.isclose(ap_dict['pred_instances/KITTI/Overall_2D_AP11_moderate'], + 3.0303030303030307) + assert np.isclose(ap_dict['pred_instances/KITTI/Overall_2D_AP11_hard'], + 3.0303030303030307) + + +def test_kitti_metric_mAP(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + kittimetric = KittiMetric( + data_root + '/kitti_infos_train.pkl', metric=['mAP']) + kittimetric.dataset_meta = dict(classes=['Pedestrian', 'Cyclist', 'Car']) + data_batch, predictions = _init_evaluate_input() + kittimetric.process(data_batch, predictions) + ap_dict = kittimetric.compute_metrics(kittimetric.results) + assert np.isclose(ap_dict['pred_instances_3d/KITTI/Overall_3D_AP11_easy'], + 3.0303030303030307) + assert np.isclose( + ap_dict['pred_instances_3d/KITTI/Overall_3D_AP11_moderate'], + 
3.0303030303030307) + assert np.isclose(ap_dict['pred_instances_3d/KITTI/Overall_3D_AP11_hard'], + 3.0303030303030307) diff --git a/tests/test_evaluation/test_metrics/test_panoptic_seg_metric.py b/tests/test_evaluation/test_metrics/test_panoptic_seg_metric.py new file mode 100755 index 0000000..dbf8f6d --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_panoptic_seg_metric.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import unittest + +import numpy as np +import torch +from mmengine.structures import BaseDataElement + +from mmdet3d.evaluation.metrics import PanopticSegMetric +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestPanopticSegMetric(unittest.TestCase): + + def _demo_mm_model_output(self): + """Create a superset of inputs needed to run test or train batches.""" + # generate ground truth and prediction + semantic_preds = [] + instance_preds = [] + gt_semantic = [] + gt_instance = [] + + # some ignore stuff + num_ignore = 50 + semantic_preds.extend([0 for i in range(num_ignore)]) + instance_preds.extend([0 for i in range(num_ignore)]) + gt_semantic.extend([0 for i in range(num_ignore)]) + gt_instance.extend([0 for i in range(num_ignore)]) + + # grass segment + num_grass = 50 + num_grass_pred = 40 # rest is sky + semantic_preds.extend([1 for i in range(num_grass_pred)]) # grass + semantic_preds.extend([2 for i in range(num_grass - num_grass_pred) + ]) # sky + instance_preds.extend([0 for i in range(num_grass)]) + gt_semantic.extend([1 for i in range(num_grass)]) # grass + gt_instance.extend([0 for i in range(num_grass)]) + + # sky segment + num_sky = 50 + num_sky_pred = 40 # rest is grass + semantic_preds.extend([2 for i in range(num_sky_pred)]) # sky + semantic_preds.extend([1 for i in range(num_sky - num_sky_pred) + ]) # grass + instance_preds.extend([0 for i in range(num_sky)]) # first instance + gt_semantic.extend([2 for i in range(num_sky)]) # sky + gt_instance.extend([0 for i in range(num_sky)]) # first instance + + # wrong dog as person prediction + num_dog = 50 + num_person = num_dog + semantic_preds.extend([3 for i in range(num_person)]) + instance_preds.extend([35 for i in range(num_person)]) + gt_semantic.extend([4 for i in range(num_dog)]) + gt_instance.extend([22 for i in range(num_dog)]) + + # two persons in prediction, but three in gt + num_person = 50 + semantic_preds.extend([3 for i in range(6 * num_person)]) + instance_preds.extend([8 for i in range(4 * num_person)]) + instance_preds.extend([95 for i in range(2 * num_person)]) + gt_semantic.extend([3 for i in range(6 * num_person)]) + gt_instance.extend([33 for i in range(3 * num_person)]) + gt_instance.extend([42 for i in range(num_person)]) + gt_instance.extend([11 for i in range(2 * num_person)]) + + # gt and pred to numpy + semantic_preds = np.array(semantic_preds, dtype=int).reshape(1, -1) + instance_preds = np.array(instance_preds, dtype=int).reshape(1, -1) + gt_semantic = np.array(gt_semantic, dtype=int).reshape(1, -1) + gt_instance = np.array(gt_instance, dtype=int).reshape(1, -1) + + pred_pts_semantic_mask = torch.Tensor(semantic_preds) + pred_pts_instance_mask = torch.Tensor(instance_preds) + pred_pts_seg_data = dict( + pts_semantic_mask=pred_pts_semantic_mask, + pts_instance_mask=pred_pts_instance_mask) + data_sample = Det3DDataSample() + data_sample.pred_pts_seg = PointData(**pred_pts_seg_data) + + ann_info_data = dict( + pts_semantic_mask=gt_semantic, pts_instance_mask=gt_instance) + data_sample.eval_ann_info = ann_info_data + + batch_data_samples 
= [data_sample] + + predictions = [] + for pred in batch_data_samples: + if isinstance(pred, BaseDataElement): + pred = pred.to_dict() + predictions.append(pred) + + return predictions + + def test_evaluate(self): + data_batch = {} + predictions = self._demo_mm_model_output() + + classes = ['unlabeled', 'person', 'dog', 'grass', 'sky'] + label2cat = { + 0: 'unlabeled', + 1: 'person', + 2: 'dog', + 3: 'grass', + 4: 'sky', + } + + ignore_index = [0] # only ignore ignore class + min_num_points = 1 # for this example we care about all points + id_offset = 2**16 + + dataset_meta = dict( + label2cat=label2cat, ignore_index=ignore_index, classes=classes) + panoptic_seg_metric = PanopticSegMetric( + thing_class_inds=[0, 1], + stuff_class_inds=[2, 3], + min_num_points=min_num_points, + id_offset=id_offset, + ) + panoptic_seg_metric.dataset_meta = dataset_meta + panoptic_seg_metric.process(data_batch, predictions) + res = panoptic_seg_metric.evaluate(1) + self.assertIsInstance(res, dict) diff --git a/tests/test_evaluation/test_metrics/test_seg_metric.py b/tests/test_evaluation/test_metrics/test_seg_metric.py new file mode 100755 index 0000000..6f2507a --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_seg_metric.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import unittest + +import numpy as np +import torch +from mmengine.structures import BaseDataElement + +from mmdet3d.evaluation.metrics import SegMetric +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestSegMetric(unittest.TestCase): + + def _demo_mm_model_output(self): + """Create a superset of inputs needed to run test or train batches.""" + pred_pts_semantic_mask = torch.Tensor([ + 0, 0, 1, 0, 0, 2, 1, 3, 1, 2, 1, 0, 2, 2, 2, 2, 1, 3, 0, 3, 3, 3, 3 + ]) + pred_pts_seg_data = dict(pts_semantic_mask=pred_pts_semantic_mask) + data_sample = Det3DDataSample() + data_sample.pred_pts_seg = PointData(**pred_pts_seg_data) + + gt_pts_semantic_mask = np.array([ + 0, 0, 0, 255, 0, 0, 1, 1, 1, 255, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, + 3, 255 + ]) + ann_info_data = dict(pts_semantic_mask=gt_pts_semantic_mask) + data_sample.eval_ann_info = ann_info_data + + batch_data_samples = [data_sample] + + predictions = [] + for pred in batch_data_samples: + if isinstance(pred, BaseDataElement): + pred = pred.to_dict() + predictions.append(pred) + + return predictions + + def test_evaluate(self): + data_batch = {} + predictions = self._demo_mm_model_output() + label2cat = { + 0: 'car', + 1: 'bicycle', + 2: 'motorcycle', + 3: 'truck', + } + dataset_meta = dict(label2cat=label2cat, ignore_index=255) + seg_metric = SegMetric() + seg_metric.dataset_meta = dataset_meta + seg_metric.process(data_batch, predictions) + res = seg_metric.evaluate(1) + self.assertIsInstance(res, dict) diff --git a/tests/test_models/test_backbones/test_cylinder3d_backbone.py b/tests/test_models/test_backbones/test_cylinder3d_backbone.py new file mode 100755 index 0000000..ea6b3e7 --- /dev/null +++ b/tests/test_models/test_backbones/test_cylinder3d_backbone.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
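+# Unit test for the Asymm3DSpconv (Cylinder3D) backbone: random voxel features
+# should produce sparse output features of shape (50, 128); requires CUDA.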
+import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_cylinder3d(): + if not torch.cuda.is_available(): + pytest.skip() + cfg = dict( + type='Asymm3DSpconv', + grid_size=[48, 32, 4], + input_channels=16, + base_channels=32, + norm_cfg=dict(type='BN1d', eps=1e-5, momentum=0.1)) + self = MODELS.build(cfg) + self.cuda() + + batch_size = 1 + coorx = torch.randint(0, 48, (50, 1)) + coory = torch.randint(0, 36, (50, 1)) + coorz = torch.randint(0, 4, (50, 1)) + coorbatch = torch.zeros(50, 1) + coors = torch.cat([coorbatch, coorx, coory, coorz], dim=1).cuda() + voxel_features = torch.rand(50, 16).cuda() + + # test forward + feature = self(voxel_features, coors, batch_size) + + assert feature.features.shape == (50, 128) + assert feature.indices.data.shape == (50, 4) diff --git a/tests/test_models/test_backbones/test_dgcnn.py b/tests/test_models/test_backbones/test_dgcnn.py new file mode 100755 index 0000000..27d7cff --- /dev/null +++ b/tests/test_models/test_backbones/test_dgcnn.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_dgcnn_gf(): + if not torch.cuda.is_available(): + pytest.skip() + + # DGCNNGF used in segmentation + cfg = dict( + type='DGCNNBackbone', + in_channels=6, + num_samples=(20, 20, 20), + knn_modes=['D-KNN', 'F-KNN', 'F-KNN'], + radius=(None, None, None), + gf_channels=((64, 64), (64, 64), (64, )), + fa_channels=(1024, ), + act_cfg=dict(type='ReLU')) + + self = MODELS.build(cfg) + self.cuda() + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', dtype=np.float32) + xyz = torch.from_numpy(xyz).view(1, -1, 6).cuda() # (B, N, 6) + # test forward + ret_dict = self(xyz) + gf_points = ret_dict['gf_points'] + fa_points = ret_dict['fa_points'] + + assert len(gf_points) == 4 + assert gf_points[0].shape == torch.Size([1, 100, 6]) + assert gf_points[1].shape == torch.Size([1, 100, 64]) + assert gf_points[2].shape == torch.Size([1, 100, 64]) + assert gf_points[3].shape == torch.Size([1, 100, 64]) + assert fa_points.shape == torch.Size([1, 100, 1216]) diff --git a/tests/test_models/test_backbones/test_dla.py b/tests/test_models/test_backbones/test_dla.py new file mode 100755 index 0000000..915c024 --- /dev/null +++ b/tests/test_models/test_backbones/test_dla.py @@ -0,0 +1,26 @@ +import torch + +from mmdet3d.registry import MODELS + + +def test_dla_net(): + # test DLANet used in SMOKE + # test list config + cfg = dict( + type='DLANet', + depth=34, + in_channels=3, + norm_cfg=dict(type='GN', num_groups=32)) + + img = torch.randn((4, 3, 32, 32)) + self = MODELS.build(cfg) + self.init_weights() + + results = self(img) + assert len(results) == 6 + assert results[0].shape == torch.Size([4, 16, 32, 32]) + assert results[1].shape == torch.Size([4, 32, 16, 16]) + assert results[2].shape == torch.Size([4, 64, 8, 8]) + assert results[3].shape == torch.Size([4, 128, 4, 4]) + assert results[4].shape == torch.Size([4, 256, 2, 2]) + assert results[5].shape == torch.Size([4, 512, 1, 1]) diff --git a/tests/test_models/test_backbones/test_mink_resnet.py b/tests/test_models/test_backbones/test_mink_resnet.py new file mode 100755 index 0000000..04552b7 --- /dev/null +++ b/tests/test_models/test_backbones/test_mink_resnet.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
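# Background for the MinkResNet test below: sparse tensors keep one row per
# occupied voxel, so coarser strides merge more points into the same cell and
# the per-level feature matrices shrink (the 900 -> 472 -> 105 -> 16 row
# counts asserted there). A minimal numpy sketch of that quantization step,
# which MinkowskiEngine performs internally (illustrative only):
import numpy as np


def quantize(points, voxel_size):
    """Map continuous xyz points to unique integer voxel coordinates."""
    coords = np.floor(points / voxel_size).astype(np.int32)
    unique_coords, point2voxel = np.unique(
        coords, axis=0, return_inverse=True)
    return unique_coords, point2voxel


points = np.random.rand(500, 3) * 100
print(quantize(points, voxel_size=16.0)[0].shape)  # fewer than 500 rows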
+import numpy as np +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_mink_resnet(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + + try: + import MinkowskiEngine as ME + except ImportError: + pytest.skip('test requires MinkowskiEngine installation') + + coordinates, features = [], [] + np.random.seed(42) + # batch of 2 point clouds + for i in range(2): + c = torch.from_numpy(np.random.rand(500, 3) * 100) + coordinates.append(c.float().cuda()) + f = torch.from_numpy(np.random.rand(500, 3)) + features.append(f.float().cuda()) + tensor_coordinates, tensor_features = ME.utils.sparse_collate( + coordinates, features) + x = ME.SparseTensor( + features=tensor_features, coordinates=tensor_coordinates) + + # MinkResNet34 with 4 outputs + cfg = dict(type='MinkResNet', depth=34, in_channels=3) + self = MODELS.build(cfg).cuda() + self.init_weights() + + y = self(x) + assert len(y) == 4 + assert y[0].F.shape == torch.Size([900, 64]) + assert y[0].tensor_stride[0] == 8 + assert y[1].F.shape == torch.Size([472, 128]) + assert y[1].tensor_stride[0] == 16 + assert y[2].F.shape == torch.Size([105, 256]) + assert y[2].tensor_stride[0] == 32 + assert y[3].F.shape == torch.Size([16, 512]) + assert y[3].tensor_stride[0] == 64 + + # MinkResNet50 with 2 outputs + cfg = dict( + type='MinkResNet', depth=34, in_channels=3, num_stages=2, pool=False) + self = MODELS.build(cfg).cuda() + self.init_weights() + + y = self(x) + assert len(y) == 2 + assert y[0].F.shape == torch.Size([985, 64]) + assert y[0].tensor_stride[0] == 4 + assert y[1].F.shape == torch.Size([900, 128]) + assert y[1].tensor_stride[0] == 8 diff --git a/tests/test_models/test_backbones/test_minkunet_backbone.py b/tests/test_models/test_backbones/test_minkunet_backbone.py new file mode 100755 index 0000000..086c269 --- /dev/null +++ b/tests/test_models/test_backbones/test_minkunet_backbone.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +import torch.nn.functional as F + +from mmdet3d.registry import MODELS + + +def test_minkunet_backbone(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + + try: + import torchsparse # noqa: F401 + except ImportError: + pytest.skip('test requires Torchsparse installation') + + coordinates, features = [], [] + for i in range(2): + c = torch.randint(0, 10, (100, 3)).int() + c = F.pad(c, (0, 1), mode='constant', value=i) + coordinates.append(c) + f = torch.rand(100, 4) + features.append(f) + features = torch.cat(features, dim=0).cuda() + coordinates = torch.cat(coordinates, dim=0).cuda() + + cfg = dict(type='MinkUNetBackbone') + self = MODELS.build(cfg).cuda() + self.init_weights() + + y = self(features, coordinates) + assert y.F.shape == torch.Size([200, 96]) + assert y.C.shape == torch.Size([200, 4]) diff --git a/tests/test_models/test_backbones/test_multi_backbone.py b/tests/test_models/test_backbones/test_multi_backbone.py new file mode 100755 index 0000000..02186fa --- /dev/null +++ b/tests/test_models/test_backbones/test_multi_backbone.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
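# Note on coordinate layouts in this patch: the torchsparse-based backbone
# tests (MinkUNet above, SPVCNN later) append the batch index as the LAST
# coordinate column, while the spconv-style tests (e.g. Cylinder3D) put it
# FIRST. A minimal sketch of the batching step used in the torchsparse tests
# (illustrative only):
import torch
import torch.nn.functional as F

batched = []
for batch_idx in range(2):
    coords = torch.randint(0, 10, (100, 3)).int()
    # pad one trailing column that stores the sample index within the batch
    coords = F.pad(coords, (0, 1), mode='constant', value=batch_idx)
    batched.append(coords)
coordinates = torch.cat(batched, dim=0)
assert coordinates.shape == (200, 4)
assert coordinates[150, 3] == 1  # rows of the second sample carry batch index 1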
+import numpy as np +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_multi_backbone(): + if not torch.cuda.is_available(): + pytest.skip() + + # test list config + cfg_list = dict( + type='MultiBackbone', + num_streams=4, + suffixes=['net0', 'net1', 'net2', 'net3'], + backbones=[ + dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(256, 128, 64, 32), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d')), + dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(256, 128, 64, 32), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d')), + dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(256, 128, 64, 32), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d')), + dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(256, 128, 64, 32), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d')) + ]) + + self = MODELS.build(cfg_list) + self.cuda() + + assert len(self.backbone_list) == 4 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', dtype=np.float32) + xyz = torch.from_numpy(xyz).view(1, -1, 6).cuda() # (B, N, 6) + # test forward + ret_dict = self(xyz[:, :, :4]) + + assert ret_dict['hd_feature'].shape == torch.Size([1, 256, 128]) + assert ret_dict['fp_xyz_net0'][-1].shape == torch.Size([1, 128, 3]) + assert ret_dict['fp_features_net0'][-1].shape == torch.Size([1, 256, 128]) + + # test dict config + cfg_dict = dict( + type='MultiBackbone', + num_streams=2, + suffixes=['net0', 'net1'], + aggregation_mlp_channels=[512, 128], + backbones=dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(256, 128, 64, 32), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d'))) + + self = MODELS.build(cfg_dict) + self.cuda() + + assert len(self.backbone_list) == 2 + + # test forward + ret_dict = self(xyz[:, :, :4]) + + assert ret_dict['hd_feature'].shape == torch.Size([1, 128, 128]) + assert ret_dict['fp_xyz_net0'][-1].shape == torch.Size([1, 128, 3]) + assert ret_dict['fp_features_net0'][-1].shape == torch.Size([1, 256, 128]) + + # Length of backbone configs list should be equal to num_streams + with pytest.raises(AssertionError): + cfg_list['num_streams'] = 3 + MODELS.build(cfg_list) + + # Length of suffixes list should be equal to num_streams + with pytest.raises(AssertionError): + cfg_dict['suffixes'] = ['net0', 'net1', 'net2'] + MODELS.build(cfg_dict) + + # Type of 'backbones' should be Dict or List[Dict]. 
+ with pytest.raises(AssertionError): + cfg_dict['backbones'] = 'PointNet2SASSG' + MODELS.build(cfg_dict) diff --git a/tests/test_models/test_backbones/test_pointnet2_sa_msg.py b/tests/test_models/test_backbones/test_pointnet2_sa_msg.py new file mode 100755 index 0000000..c8e8750 --- /dev/null +++ b/tests/test_models/test_backbones/test_pointnet2_sa_msg.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_pointnet2_sa_msg(): + if not torch.cuda.is_available(): + pytest.skip() + + # PN2MSG used in 3DSSD + cfg = dict( + type='PointNet2SAMSG', + in_channels=4, + num_points=(256, 64, (32, 32)), + radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)), + num_samples=((8, 8, 16), (8, 8, 16), (8, 8, 8)), + sa_channels=(((8, 8, 16), (8, 8, 16), + (8, 8, 16)), ((16, 16, 32), (16, 16, 32), (16, 24, 32)), + ((32, 32, 64), (32, 24, 64), (32, 64, 64))), + aggregation_channels=(16, 32, 64), + fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')), + fps_sample_range_lists=((-1), (-1), (64, -1)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)) + + self = MODELS.build(cfg) + self.cuda() + assert self.SA_modules[0].mlps[0].layer0.conv.in_channels == 4 + assert self.SA_modules[0].mlps[0].layer0.conv.out_channels == 8 + assert self.SA_modules[0].mlps[1].layer1.conv.out_channels == 8 + assert self.SA_modules[2].mlps[2].layer2.conv.out_channels == 64 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', dtype=np.float32) + xyz = torch.from_numpy(xyz).view(1, -1, 6).cuda() # (B, N, 6) + # test forward + ret_dict = self(xyz[:, :, :4]) + sa_xyz = ret_dict['sa_xyz'][-1] + sa_features = ret_dict['sa_features'][-1] + sa_indices = ret_dict['sa_indices'][-1] + + assert sa_xyz.shape == torch.Size([1, 64, 3]) + assert sa_features.shape == torch.Size([1, 64, 64]) + assert sa_indices.shape == torch.Size([1, 64]) + + # out_indices should smaller than the length of SA Modules. 
+ with pytest.raises(AssertionError): + MODELS.build( + dict( + type='PointNet2SAMSG', + in_channels=4, + num_points=(256, 64, (32, 32)), + radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)), + num_samples=((8, 8, 16), (8, 8, 16), (8, 8, 8)), + sa_channels=(((8, 8, 16), (8, 8, 16), (8, 8, 16)), + ((16, 16, 32), (16, 16, 32), (16, 24, 32)), + ((32, 32, 64), (32, 24, 64), (32, 64, 64))), + aggregation_channels=(16, 32, 64), + fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')), + fps_sample_range_lists=((-1), (-1), (64, -1)), + out_indices=(2, 3), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False))) + + # PN2MSG used in segmentation + cfg = dict( + type='PointNet2SAMSG', + in_channels=6, # [xyz, rgb] + num_points=(1024, 256, 64, 16), + radii=((0.05, 0.1), (0.1, 0.2), (0.2, 0.4), (0.4, 0.8)), + num_samples=((16, 32), (16, 32), (16, 32), (16, 32)), + sa_channels=(((16, 16, 32), (32, 32, 64)), ((64, 64, 128), (64, 96, + 128)), + ((128, 196, 256), (128, 196, 256)), ((256, 256, 512), + (256, 384, 512))), + aggregation_channels=(None, None, None, None), + fps_mods=(('D-FPS'), ('D-FPS'), ('D-FPS'), ('D-FPS')), + fps_sample_range_lists=((-1), (-1), (-1), (-1)), + dilated_group=(False, False, False, False), + out_indices=(0, 1, 2, 3), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)) + + self = MODELS.build(cfg) + self.cuda() + ret_dict = self(xyz) + sa_xyz = ret_dict['sa_xyz'] + sa_features = ret_dict['sa_features'] + sa_indices = ret_dict['sa_indices'] + + assert len(sa_xyz) == len(sa_features) == len(sa_indices) == 5 + assert sa_xyz[0].shape == torch.Size([1, 100, 3]) + assert sa_xyz[1].shape == torch.Size([1, 1024, 3]) + assert sa_xyz[2].shape == torch.Size([1, 256, 3]) + assert sa_xyz[3].shape == torch.Size([1, 64, 3]) + assert sa_xyz[4].shape == torch.Size([1, 16, 3]) + assert sa_features[0].shape == torch.Size([1, 3, 100]) + assert sa_features[1].shape == torch.Size([1, 96, 1024]) + assert sa_features[2].shape == torch.Size([1, 256, 256]) + assert sa_features[3].shape == torch.Size([1, 512, 64]) + assert sa_features[4].shape == torch.Size([1, 1024, 16]) + assert sa_indices[0].shape == torch.Size([1, 100]) + assert sa_indices[1].shape == torch.Size([1, 1024]) + assert sa_indices[2].shape == torch.Size([1, 256]) + assert sa_indices[3].shape == torch.Size([1, 64]) + assert sa_indices[4].shape == torch.Size([1, 16]) diff --git a/tests/test_models/test_backbones/test_pointnet2_sa_ssg.py b/tests/test_models/test_backbones/test_pointnet2_sa_ssg.py new file mode 100755 index 0000000..cd4d993 --- /dev/null +++ b/tests/test_models/test_backbones/test_pointnet2_sa_ssg.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
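# Background for the channel assertions in the PointNet2SASSG test below: a
# feature-propagation (FP) MLP consumes the concatenation of the skip
# features from the shallower stage and the features being upsampled from the
# deeper stage. A sketch of that bookkeeping for the config used below, where
# in_channels=6 leaves 3 non-xyz feature channels as the first skip
# (illustrative arithmetic, not the module itself):
def fp_in_channels(skip_channels, upsampled_channels):
    """Input width of an FP MLP."""
    return skip_channels + upsampled_channels


assert fp_in_channels(16, 16) == 32  # FP_modules[0]: SA1 out (16) + SA2 out (16)
assert fp_in_channels(3, 16) == 19   # FP_modules[1]: point feats (3) + FP0 out (16)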
+import numpy as np +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_pointnet2_sa_ssg(): + if not torch.cuda.is_available(): + pytest.skip() + + cfg = dict( + type='PointNet2SASSG', + in_channels=6, + num_points=(32, 16), + radius=(0.8, 1.2), + num_samples=(16, 8), + sa_channels=((8, 16), (16, 16)), + fp_channels=((16, 16), (16, 16))) + self = MODELS.build(cfg) + self.cuda() + assert self.SA_modules[0].mlps[0].layer0.conv.in_channels == 6 + assert self.SA_modules[0].mlps[0].layer0.conv.out_channels == 8 + assert self.SA_modules[0].mlps[0].layer1.conv.out_channels == 16 + assert self.SA_modules[1].mlps[0].layer1.conv.out_channels == 16 + assert self.FP_modules[0].mlps.layer0.conv.in_channels == 32 + assert self.FP_modules[0].mlps.layer0.conv.out_channels == 16 + assert self.FP_modules[1].mlps.layer0.conv.in_channels == 19 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', dtype=np.float32) + xyz = torch.from_numpy(xyz).view(1, -1, 6).cuda() # (B, N, 6) + # test forward + ret_dict = self(xyz) + fp_xyz = ret_dict['fp_xyz'] + fp_features = ret_dict['fp_features'] + fp_indices = ret_dict['fp_indices'] + sa_xyz = ret_dict['sa_xyz'] + sa_features = ret_dict['sa_features'] + sa_indices = ret_dict['sa_indices'] + assert len(fp_xyz) == len(fp_features) == len(fp_indices) == 3 + assert len(sa_xyz) == len(sa_features) == len(sa_indices) == 3 + assert fp_xyz[0].shape == torch.Size([1, 16, 3]) + assert fp_xyz[1].shape == torch.Size([1, 32, 3]) + assert fp_xyz[2].shape == torch.Size([1, 100, 3]) + assert fp_features[0].shape == torch.Size([1, 16, 16]) + assert fp_features[1].shape == torch.Size([1, 16, 32]) + assert fp_features[2].shape == torch.Size([1, 16, 100]) + assert fp_indices[0].shape == torch.Size([1, 16]) + assert fp_indices[1].shape == torch.Size([1, 32]) + assert fp_indices[2].shape == torch.Size([1, 100]) + assert sa_xyz[0].shape == torch.Size([1, 100, 3]) + assert sa_xyz[1].shape == torch.Size([1, 32, 3]) + assert sa_xyz[2].shape == torch.Size([1, 16, 3]) + assert sa_features[0].shape == torch.Size([1, 3, 100]) + assert sa_features[1].shape == torch.Size([1, 16, 32]) + assert sa_features[2].shape == torch.Size([1, 16, 16]) + assert sa_indices[0].shape == torch.Size([1, 100]) + assert sa_indices[1].shape == torch.Size([1, 32]) + assert sa_indices[2].shape == torch.Size([1, 16]) + + # test only xyz input without features + cfg['in_channels'] = 3 + self = MODELS.build(cfg) + self.cuda() + ret_dict = self(xyz[..., :3]) + assert len(fp_xyz) == len(fp_features) == len(fp_indices) == 3 + assert len(sa_xyz) == len(sa_features) == len(sa_indices) == 3 + assert fp_features[0].shape == torch.Size([1, 16, 16]) + assert fp_features[1].shape == torch.Size([1, 16, 32]) + assert fp_features[2].shape == torch.Size([1, 16, 100]) + assert sa_features[0].shape == torch.Size([1, 3, 100]) + assert sa_features[1].shape == torch.Size([1, 16, 32]) + assert sa_features[2].shape == torch.Size([1, 16, 16]) diff --git a/tests/test_models/test_backbones/test_spvcnn_backbone.py b/tests/test_models/test_backbones/test_spvcnn_backbone.py new file mode 100755 index 0000000..504f2cc --- /dev/null +++ b/tests/test_models/test_backbones/test_spvcnn_backbone.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch +import torch.nn.functional as F + +from mmdet3d.registry import MODELS + + +def test_spvcnn_backbone(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + + try: + import torchsparse # noqa: F401 + except ImportError: + pytest.skip('test requires Torchsparse installation') + + coordinates, features = [], [] + for i in range(2): + c = torch.randint(0, 10, (100, 3)).int() + c = F.pad(c, (0, 1), mode='constant', value=i) + coordinates.append(c) + f = torch.rand(100, 4) + features.append(f) + features = torch.cat(features, dim=0).cuda() + coordinates = torch.cat(coordinates, dim=0).cuda() + + cfg = dict(type='SPVCNNBackbone') + self = MODELS.build(cfg).cuda() + self.init_weights() + + y = self(features, coordinates) + assert y.F.shape == torch.Size([200, 96]) + assert y.C.shape == torch.Size([200, 4]) diff --git a/tests/test_models/test_data_preprocessors/test_data_preprocessor.py b/tests/test_models/test_data_preprocessors/test_data_preprocessor.py new file mode 100755 index 0000000..3db374c --- /dev/null +++ b/tests/test_models/test_data_preprocessors/test_data_preprocessor.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import pytest +import torch + +from mmdet3d.models.data_preprocessors import Det3DDataPreprocessor +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestDet3DDataPreprocessor(TestCase): + + def test_init(self): + # test mean is None + processor = Det3DDataPreprocessor() + self.assertTrue(not hasattr(processor, 'mean')) + self.assertTrue(processor._enable_normalize is False) + + # test mean is not None + processor = Det3DDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1]) + self.assertTrue(hasattr(processor, 'mean')) + self.assertTrue(hasattr(processor, 'std')) + self.assertTrue(processor._enable_normalize) + + # please specify both mean and std + with self.assertRaises(AssertionError): + Det3DDataPreprocessor(mean=[0, 0, 0]) + + # bgr2rgb and rgb2bgr cannot be set to True at the same time + with self.assertRaises(AssertionError): + Det3DDataPreprocessor(bgr_to_rgb=True, rgb_to_bgr=True) + + def test_forward(self): + processor = Det3DDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1]) + + points = torch.randn((5000, 3)) + image = torch.randint(0, 256, (3, 11, 10)).float() + inputs_dict = dict(points=[points], img=[image]) + + data = {'inputs': inputs_dict, 'data_samples': [Det3DDataSample()]} + out_data = processor(data) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + + self.assertEqual(batch_inputs['imgs'].shape, (1, 3, 11, 10)) + self.assertEqual(len(batch_inputs['points']), 1) + self.assertEqual(len(batch_data_samples), 1) + + # test image channel_conversion + processor = Det3DDataPreprocessor( + mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True) + out_data = processor(data) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + self.assertEqual(batch_inputs['imgs'].shape, (1, 3, 11, 10)) + self.assertEqual(len(batch_data_samples), 1) + + # test image padding + data = { + 'inputs': { + 'points': [torch.randn((5000, 3)), + torch.randn((5000, 3))], + 'img': [ + torch.randint(0, 256, (3, 10, 11)), + torch.randint(0, 256, (3, 9, 14)) + ] + } + } + processor = Det3DDataPreprocessor( + mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True) + out_data = processor(data) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + 
self.assertEqual(batch_inputs['imgs'].shape, (2, 3, 10, 14)) + self.assertIsNone(batch_data_samples) + + # test pad_size_divisor + data = { + 'inputs': { + 'points': [torch.randn((5000, 3)), + torch.randn((5000, 3))], + 'img': [ + torch.randint(0, 256, (3, 10, 11)), + torch.randint(0, 256, (3, 9, 24)) + ] + }, + 'data_samples': [Det3DDataSample()] * 2 + } + processor = Det3DDataPreprocessor( + mean=[0., 0., 0.], std=[1., 1., 1.], pad_size_divisor=5) + out_data = processor(data) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + self.assertEqual(batch_inputs['imgs'].shape, (2, 3, 10, 25)) + self.assertEqual(len(batch_data_samples), 2) + for data_sample, expected_shape in zip(batch_data_samples, [(10, 15), + (10, 25)]): + self.assertEqual(data_sample.pad_shape, expected_shape) + + # test cylindrical voxelization + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and CUDA') + point_cloud_range = [0, -180, -4, 50, 180, 2] + grid_shape = [480, 360, 32] + voxel_layer = dict( + grid_shape=grid_shape, + point_cloud_range=point_cloud_range, + max_num_points=-1, + max_voxels=-1) + processor = Det3DDataPreprocessor( + voxel=True, voxel_type='cylindrical', + voxel_layer=voxel_layer).cuda() + num_points = 5000 + xy = torch.rand(num_points, 2) * 140 - 70 + z = torch.rand(num_points, 1) * 9 - 6 + ref = torch.rand(num_points, 1) + points = [torch.cat([xy, z, ref], dim=-1)] * 2 + data_sample = Det3DDataSample() + gt_pts_seg = PointData() + gt_pts_seg.pts_semantic_mask = torch.randint(0, 10, (num_points, )) + data_sample.gt_pts_seg = gt_pts_seg + data_samples = [data_sample] * 2 + inputs = dict(inputs=dict(points=points), data_samples=data_samples) + out_data = processor(inputs) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + self.assertEqual(batch_inputs['voxels']['voxels'].shape, (10000, 6)) + self.assertEqual(batch_inputs['voxels']['coors'].shape, (10000, 4)) diff --git a/tests/test_models/test_decode_heads/test_cylinder3d_head.py b/tests/test_models/test_decode_heads/test_cylinder3d_head.py new file mode 100755 index 0000000..3bb62c5 --- /dev/null +++ b/tests/test_models/test_decode_heads/test_cylinder3d_head.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
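# Background for the cylindrical voxelization exercised above: points are
# binned in (rho, phi, z) rather than (x, y, z). A minimal sketch of the
# underlying coordinate change (illustrative only; the angular unit, clipping
# to point_cloud_range and the integer-grid mapping are handled inside the
# preprocessor):
import torch


def cart_to_cylindrical(points_xyz):
    """Convert Cartesian xyz to (rho, phi, z) with phi in radians."""
    rho = torch.sqrt(points_xyz[:, 0]**2 + points_xyz[:, 1]**2)
    phi = torch.atan2(points_xyz[:, 1], points_xyz[:, 0])
    return torch.stack([rho, phi, points_xyz[:, 2]], dim=-1)


print(cart_to_cylindrical(torch.tensor([[3.0, 4.0, 1.0]])))  # rho=5, phi~0.93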
+from unittest import TestCase + +import pytest +import torch +from mmcv.ops import SparseConvTensor + +from mmdet3d.models.decode_heads import Cylinder3DHead +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestCylinder3DHead(TestCase): + + def test_cylinder3d_head_loss(self): + """Tests Cylinder3D head loss.""" + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + cylinder3d_head = Cylinder3DHead( + channels=128, + num_classes=20, + loss_ce=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, + loss_weight=1.0), + loss_lovasz=dict( + type='LovaszLoss', loss_weight=1.0, reduction='none'), + ).cuda() + + voxel_feats = torch.rand(50, 128).cuda() + coorx = torch.randint(0, 480, (50, 1)).int().cuda() + coory = torch.randint(0, 360, (50, 1)).int().cuda() + coorz = torch.randint(0, 32, (50, 1)).int().cuda() + coorbatch0 = torch.zeros(50, 1).int().cuda() + coors = torch.cat([coorbatch0, coorx, coory, coorz], dim=1) + grid_size = [480, 360, 32] + batch_size = 1 + + sparse_voxels = SparseConvTensor(voxel_feats, coors, grid_size, + batch_size) + # Test forward + seg_logits = cylinder3d_head.forward(sparse_voxels) + + self.assertEqual(seg_logits.features.shape, torch.Size([50, 20])) + + # When truth is non-empty then losses + # should be nonzero for random inputs + voxel_semantic_mask = torch.randint(0, 20, (50, )).long().cuda() + gt_pts_seg = PointData(voxel_semantic_mask=voxel_semantic_mask) + + datasample = Det3DDataSample() + datasample.gt_pts_seg = gt_pts_seg + + losses = cylinder3d_head.loss_by_feat(seg_logits, [datasample]) + + loss_ce = losses['loss_ce'].item() + loss_lovasz = losses['loss_lovasz'].item() + + self.assertGreater(loss_ce, 0, 'ce loss should be positive') + self.assertGreater(loss_lovasz, 0, 'lovasz loss should be positive') + + batch_inputs_dict = dict(voxels=dict(voxel_coors=coors)) + datasample.gt_pts_seg.point2voxel_map = torch.randint( + 0, 50, (100, )).int().cuda() + point_logits = cylinder3d_head.predict(sparse_voxels, + batch_inputs_dict, [datasample]) + assert point_logits[0].shape == torch.Size([100, 20]) diff --git a/tests/test_models/test_decode_heads/test_dgcnn_head.py b/tests/test_models/test_decode_heads/test_dgcnn_head.py new file mode 100755 index 0000000..727f82a --- /dev/null +++ b/tests/test_models/test_decode_heads/test_dgcnn_head.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
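# Background for the predict step in the Cylinder3DHead test above: the head
# produces one logit row per voxel, and point2voxel_map gathers those rows
# back to per-point logits by plain indexing. Minimal sketch (illustrative
# only; the map itself is produced during voxelization):
import torch

voxel_logits = torch.randn(50, 20)               # one row per voxel, 20 classes
point2voxel_map = torch.randint(0, 50, (100, ))  # voxel id for every point
point_logits = voxel_logits[point2voxel_map]
assert point_logits.shape == (100, 20)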
+from unittest import TestCase + +import torch + +from mmdet3d.models.decode_heads import DGCNNHead +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestDGCNNHead(TestCase): + + def test_dgcnn_head_loss(self): + """Tests DGCNN head loss.""" + + dgcnn_head = DGCNNHead( + fp_channels=(1024, 512), + channels=256, + num_classes=13, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='LeakyReLU', negative_slope=0.2), + loss_decode=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, + loss_weight=1.0), + ignore_index=13) + + # DGCNN head expects dict format features + fa_points = torch.rand(1, 4096, 1024).float() + feat_dict = dict(fa_points=fa_points) + + # Test forward + seg_logits = dgcnn_head.forward(feat_dict) + + self.assertEqual(seg_logits.shape, torch.Size([1, 13, 4096])) + + # When truth is non-empty then losses + # should be nonzero for random inputs + pts_semantic_mask = torch.randint(0, 13, (4096, )).long() + gt_pts_seg = PointData(pts_semantic_mask=pts_semantic_mask) + + datasample = Det3DDataSample() + datasample.gt_pts_seg = gt_pts_seg + + gt_losses = dgcnn_head.loss_by_feat(seg_logits, [datasample]) + + gt_sem_seg_loss = gt_losses['loss_sem_seg'].item() + + self.assertGreater(gt_sem_seg_loss, 0, + 'semantic seg loss should be positive') diff --git a/tests/test_models/test_decode_heads/test_minkunet_head.py b/tests/test_models/test_decode_heads/test_minkunet_head.py new file mode 100755 index 0000000..c684565 --- /dev/null +++ b/tests/test_models/test_decode_heads/test_minkunet_head.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import pytest +import torch +import torch.nn.functional as F + +from mmdet3d.models.decode_heads import MinkUNetHead +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestMinkUNetHead(TestCase): + + def test_minkunet_head_loss(self): + """Tests PAConv head loss.""" + + try: + import torchsparse + except ImportError: + pytest.skip('test requires Torchsparse installation') + if torch.cuda.is_available(): + minkunet_head = MinkUNetHead(channels=4, num_classes=19) + + minkunet_head.cuda() + coordinates, features = [], [] + for i in range(2): + c = torch.randint(0, 10, (100, 3)).int() + c = F.pad(c, (0, 1), mode='constant', value=i) + coordinates.append(c) + f = torch.rand(100, 4) + features.append(f) + features = torch.cat(features, dim=0).cuda() + coordinates = torch.cat(coordinates, dim=0).cuda() + x = torchsparse.SparseTensor(feats=features, coords=coordinates) + + # Test forward + seg_logits = minkunet_head.forward(x) + + self.assertEqual(seg_logits.shape, torch.Size([200, 19])) + + # When truth is non-empty then losses + # should be nonzero for random inputs + voxel_semantic_mask = torch.randint(0, 19, (100, )).long().cuda() + gt_pts_seg = PointData(voxel_semantic_mask=voxel_semantic_mask) + + datasample = Det3DDataSample() + datasample.gt_pts_seg = gt_pts_seg + + gt_losses = minkunet_head.loss(x, [datasample, datasample], {}) + + gt_sem_seg_loss = gt_losses['loss_sem_seg'].item() + + self.assertGreater(gt_sem_seg_loss, 0, + 'semantic seg loss should be positive') diff --git a/tests/test_models/test_decode_heads/test_paconv_head.py b/tests/test_models/test_decode_heads/test_paconv_head.py new file mode 100755 index 0000000..92286b6 --- /dev/null +++ b/tests/test_models/test_decode_heads/test_paconv_head.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
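# Background for the decode-head tests in this patch (DGCNN above, PAConv and
# PointNet2 below): ignore_index is set to num_classes, so points carrying the
# "unannotated" label contribute nothing to the cross-entropy loss. Minimal
# sketch of that behaviour with plain PyTorch (illustrative only):
import torch
import torch.nn.functional as F

num_classes = 13
logits = torch.randn(2, num_classes, 8)    # (batch, classes, points)
labels = torch.randint(0, num_classes, (2, 8))
labels[0, :3] = num_classes                # mark three points as ignored
loss = F.cross_entropy(logits, labels, ignore_index=num_classes)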
+from unittest import TestCase + +import torch + +from mmdet3d.models.decode_heads import PAConvHead +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestPAConvHead(TestCase): + + def test_paconv_head_loss(self): + """Tests PAConv head loss.""" + + if torch.cuda.is_available(): + paconv_head = PAConvHead( + fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), + (128 + 6, 128, 128, 128)), + channels=128, + num_classes=20, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + loss_decode=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, + loss_weight=1.0), + ignore_index=20) + + paconv_head.cuda() + # PAConv head expects dict format features + sa_xyz = [ + torch.rand(1, 4096, 3).float().cuda(), + torch.rand(1, 1024, 3).float().cuda(), + torch.rand(1, 256, 3).float().cuda(), + torch.rand(1, 64, 3).float().cuda(), + torch.rand(1, 16, 3).float().cuda(), + ] + sa_features = [ + torch.rand(1, 6, 4096).float().cuda(), + torch.rand(1, 64, 1024).float().cuda(), + torch.rand(1, 128, 256).float().cuda(), + torch.rand(1, 256, 64).float().cuda(), + torch.rand(1, 512, 16).float().cuda(), + ] + feat_dict = dict(sa_xyz=sa_xyz, sa_features=sa_features) + + # Test forward + seg_logits = paconv_head.forward(feat_dict) + + self.assertEqual(seg_logits.shape, torch.Size([1, 20, 4096])) + + # When truth is non-empty then losses + # should be nonzero for random inputs + pts_semantic_mask = torch.randint(0, 20, (4096, )).long().cuda() + gt_pts_seg = PointData(pts_semantic_mask=pts_semantic_mask) + + datasample = Det3DDataSample() + datasample.gt_pts_seg = gt_pts_seg + + gt_losses = paconv_head.loss_by_feat(seg_logits, [datasample]) + + gt_sem_seg_loss = gt_losses['loss_sem_seg'].item() + + self.assertGreater(gt_sem_seg_loss, 0, + 'semantic seg loss should be positive') diff --git a/tests/test_models/test_decode_heads/test_pointnet2_head.py b/tests/test_models/test_decode_heads/test_pointnet2_head.py new file mode 100755 index 0000000..c10ae1f --- /dev/null +++ b/tests/test_models/test_decode_heads/test_pointnet2_head.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmdet3d.models.decode_heads import PointNet2Head +from mmdet3d.structures import Det3DDataSample, PointData + + +class TestPointNet2Head(TestCase): + + def test_paconv_head_loss(self): + """Tests PAConv head loss.""" + + if torch.cuda.is_available(): + pointnet2_head = PointNet2Head( + fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), + (128, 128, 128, 128)), + channels=128, + num_classes=20, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + loss_decode=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, + loss_weight=1.0), + ignore_index=20) + + pointnet2_head.cuda() + + # DGCNN head expects dict format features + sa_xyz = [ + torch.rand(1, 4096, 3).float().cuda(), + torch.rand(1, 1024, 3).float().cuda(), + torch.rand(1, 256, 3).float().cuda(), + torch.rand(1, 64, 3).float().cuda(), + torch.rand(1, 16, 3).float().cuda(), + ] + sa_features = [ + torch.rand(1, 6, 4096).float().cuda(), + torch.rand(1, 64, 1024).float().cuda(), + torch.rand(1, 128, 256).float().cuda(), + torch.rand(1, 256, 64).float().cuda(), + torch.rand(1, 512, 16).float().cuda(), + ] + feat_dict = dict(sa_xyz=sa_xyz, sa_features=sa_features) + + # Test forward + seg_logits = pointnet2_head.forward(feat_dict) + + self.assertEqual(seg_logits.shape, torch.Size([1, 20, 4096])) + + # When truth is non-empty then losses + # should be nonzero for random inputs + pts_semantic_mask = torch.randint(0, 20, (4096, )).long().cuda() + gt_pts_seg = PointData(pts_semantic_mask=pts_semantic_mask) + + datasample = Det3DDataSample() + datasample.gt_pts_seg = gt_pts_seg + + gt_losses = pointnet2_head.loss_by_feat(seg_logits, [datasample]) + + gt_sem_seg_loss = gt_losses['loss_sem_seg'].item() + + self.assertGreater(gt_sem_seg_loss, 0, + 'semantic seg loss should be positive') diff --git a/tests/test_models/test_dense_heads/test_anchor3d_head.py b/tests/test_models/test_dense_heads/test_anchor3d_head.py new file mode 100755 index 0000000..82fda62 --- /dev/null +++ b/tests/test_models/test_dense_heads/test_anchor3d_head.py @@ -0,0 +1,196 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
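# Background for the output-shape assertions in the Anchor3DHead test below:
# the head places num_sizes * num_rotations anchors at every location, and
# each anchor carries class scores, a 7-dim box delta and a 2-bin direction
# score. Illustrative arithmetic only:
num_classes = 3
num_sizes = 3                             # one size prior per anchor range
num_rotations = 2                         # rotations=[0, 1.57]
box_code_size = 7                         # DeltaXYZWLHRBBoxCoder
num_anchors = num_sizes * num_rotations   # 6 anchors per location
assert num_anchors * num_classes == 18    # cls_scores channels
assert num_anchors * box_code_size == 42  # bbox_preds channels
assert num_anchors * 2 == 12              # dir_cls_preds channels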
+from unittest import TestCase + +import torch +from mmengine import Config +from mmengine.structures import InstanceData + +from mmdet3d import * # noqa +from mmdet3d.models.dense_heads import Anchor3DHead +from mmdet3d.structures import Box3DMode, LiDARInstance3DBoxes + + +class TestAnchor3DHead(TestCase): + + def test_anchor3d_head_loss(self): + """Test anchor head loss when truth is empty and non-empty.""" + + cfg = Config( + dict( + assigner=[ + dict( # for Pedestrian + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Cyclist + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Car + type='Max3DIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False)) + + anchor3d_head = Anchor3DHead( + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=0.2), + train_cfg=cfg) + + # Anchor head expects a multiple levels of features per image + feats = (torch.rand([1, 512, 200, 176], dtype=torch.float32), ) + (cls_scores, bbox_preds, dir_cls_preds) = anchor3d_head.forward(feats) + + self.assertEqual(cls_scores[0].shape, torch.Size([1, 18, 200, 176])) + self.assertEqual(bbox_preds[0].shape, torch.Size([1, 42, 200, 176])) + self.assertEqual(dir_cls_preds[0].shape, torch.Size([1, 12, 200, 176])) + + # # Test that empty ground truth encourages the network to + # # predict background + gt_instances = InstanceData() + gt_bboxes_3d = LiDARInstance3DBoxes(torch.empty((0, 7))) + gt_labels_3d = torch.tensor([]) + input_metas = dict(sample_idx=1234) + # fake input_metas + gt_instances.bboxes_3d = gt_bboxes_3d + gt_instances.labels_3d = gt_labels_3d + + empty_gt_losses = anchor3d_head.loss_by_feat(cls_scores, bbox_preds, + dir_cls_preds, + [gt_instances], + [input_metas]) + + # When there is no truth, the cls loss should be nonzero but + # there should be no box and dir loss. 
+ self.assertGreater(empty_gt_losses['loss_cls'][0], 0, + 'cls loss should be non-zero') + self.assertEqual( + empty_gt_losses['loss_bbox'][0], 0, + 'there should be no box loss when there are no true boxes') + self.assertEqual( + empty_gt_losses['loss_dir'][0], 0, + 'there should be no dir loss when there are no true dirs') + + # When truth is non-empty then both cls and box loss + # should be nonzero for random inputs + gt_instances = InstanceData() + gt_bboxes_3d = LiDARInstance3DBoxes( + torch.tensor( + [[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, -0.9091]], + dtype=torch.float32)) + gt_labels_3d = torch.tensor([1], dtype=torch.int64) + gt_instances.bboxes_3d = gt_bboxes_3d + gt_instances.labels_3d = gt_labels_3d + + gt_losses = anchor3d_head.loss_by_feat(cls_scores, bbox_preds, + dir_cls_preds, [gt_instances], + [input_metas]) + + self.assertGreater(gt_losses['loss_cls'][0], 0, + 'cls loss should be non-zero') + self.assertGreater(gt_losses['loss_bbox'][0], 0, + 'box loss should be non-zero') + self.assertGreater(gt_losses['loss_dir'][0], 0, + 'dir loss should be none-zero') + + def test_anchor3d_head_predict(self): + + cfg = Config( + dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) + + anchor3d_head = Anchor3DHead( + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=0.2), + test_cfg=cfg) + + feats = (torch.rand([2, 512, 200, 176], dtype=torch.float32), ) + (cls_scores, bbox_preds, dir_cls_preds) = anchor3d_head.forward(feats) + # fake input_metas + input_metas = [{ + 'sample_idx': 1234, + 'box_type_3d': LiDARInstance3DBoxes, + 'box_mode_3d': Box3DMode.LIDAR + }, { + 'sample_idx': 2345, + 'box_type_3d': LiDARInstance3DBoxes, + 'box_mode_3d': Box3DMode.LIDAR + }] + # test get_boxes + cls_scores[0] -= 1.5 # too many positive samples may cause cuda oom + results = anchor3d_head.predict_by_feat(cls_scores, bbox_preds, + dir_cls_preds, input_metas) + pred_instances = results[0] + scores_3d = pred_instances.scores_3d + + assert (scores_3d > 0.3).all() diff --git a/tests/test_models/test_dense_heads/test_fcaf3d_head.py b/tests/test_models/test_dense_heads/test_fcaf3d_head.py new file mode 100755 index 0000000..eec8317 --- /dev/null +++ b/tests/test_models/test_dense_heads/test_fcaf3d_head.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
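# Background for diff_rad_by_sin=True in the anchor-based heads above: the
# rotation residual is regressed through sin(pred - gt), expanded with
# sin(a - b) = sin(a)cos(b) - cos(a)sin(b), so nearly identical headings on
# either side of +/-pi give a small target instead of a ~2*pi jump. Minimal
# sketch (illustrative only):
import torch

pred_yaw = torch.tensor([3.10, -3.10])
gt_yaw = torch.tensor([-3.10, 3.10])
encoded = (torch.sin(pred_yaw) * torch.cos(gt_yaw) -
           torch.cos(pred_yaw) * torch.sin(gt_yaw))
assert torch.all(encoded.abs() < 0.1)  # even though the raw difference is ~6.2 rad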
+from unittest import TestCase + +import pytest +import torch + +from mmdet3d import * # noqa +from mmdet3d.models.dense_heads import FCAF3DHead +from mmdet3d.testing import create_detector_inputs + + +class TestFCAF3DHead(TestCase): + + def test_fcaf3d_head_loss(self): + """Test fcaf3d head loss when truth is empty and non-empty.""" + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + + try: + import MinkowskiEngine as ME + except ImportError: + pytest.skip('test requires MinkowskiEngine installation') + + # build head + fcaf3d_head = FCAF3DHead( + in_channels=(64, 128, 256, 512), + out_channels=128, + voxel_size=1., + pts_prune_threshold=1000, + pts_assign_threshold=27, + pts_center_threshold=18, + num_classes=18, + num_reg_outs=6, + test_cfg=dict(nms_pre=1000, iou_thr=.5, score_thr=.01), + center_loss=dict(type='mmdet.CrossEntropyLoss', use_sigmoid=True), + bbox_loss=dict(type='AxisAlignedIoULoss'), + cls_loss=dict(type='mmdet.FocalLoss'), + ) + fcaf3d_head = fcaf3d_head.cuda() + + # fake input of head + coordinates, features = [torch.randn(500, 3).cuda() * 100 + ], [torch.randn(500, 3).cuda()] + tensor_coordinates, tensor_features = ME.utils.sparse_collate( + coordinates, features) + x = ME.SparseTensor( + features=tensor_features, coordinates=tensor_coordinates) + # backbone + conv1 = ME.MinkowskiConvolution( + 3, 64, kernel_size=3, stride=2, dimension=3).cuda() + conv2 = ME.MinkowskiConvolution( + 64, 128, kernel_size=3, stride=2, dimension=3).cuda() + conv3 = ME.MinkowskiConvolution( + 128, 256, kernel_size=3, stride=2, dimension=3).cuda() + conv4 = ME.MinkowskiConvolution( + 256, 512, kernel_size=3, stride=2, dimension=3).cuda() + + # backbone outputs of 4 levels + x1 = conv1(x) + x2 = conv2(x1) + x3 = conv3(x2) + x4 = conv4(x3) + x = (x1, x2, x3, x4) + + # fake annotation + packed_inputs = create_detector_inputs( + with_points=False, + with_img=False, + num_gt_instance=3, + num_classes=1, + points_feat_dim=6, + gt_bboxes_dim=6) + data_samples = [ + sample.cuda() for sample in packed_inputs['data_samples'] + ] + + gt_losses = fcaf3d_head.loss(x, data_samples) + print(gt_losses) + self.assertGreaterEqual(gt_losses['cls_loss'], 0, + 'cls loss should be non-zero') + self.assertGreaterEqual(gt_losses['bbox_loss'], 0, + 'box loss should be non-zero') + self.assertGreaterEqual(gt_losses['center_loss'], 0, + 'dir loss should be none-zero') diff --git a/tests/test_models/test_dense_heads/test_fcos_mono3d_head.py b/tests/test_models/test_dense_heads/test_fcos_mono3d_head.py new file mode 100755 index 0000000..47ad88c --- /dev/null +++ b/tests/test_models/test_dense_heads/test_fcos_mono3d_head.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
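# Background for the monocular 3D head tests below: img_metas carries a
# cam2img intrinsic matrix, which maps a camera-frame 3D point to pixels via
# the pinhole model. Minimal numpy sketch (illustrative; the intrinsics are
# rounded from the test below and the 3D point is made up):
import numpy as np

cam2img = np.array([[1260.8, 0.0, 808.0],
                    [0.0, 1260.8, 495.3],
                    [0.0, 0.0, 1.0]])
point_cam = np.array([2.0, 1.0, 10.0])   # x right, y down, z forward, metres
uvw = cam2img @ point_cam
u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]  # pixel coordinates
print(u, v)                              # ~1060.2, ~621.4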
+from unittest import TestCase + +import mmengine +import numpy as np +import torch +from mmengine.structures import InstanceData + +from mmdet3d.models.dense_heads import FCOSMono3DHead +from mmdet3d.structures import CameraInstance3DBoxes + + +class TestFCOSMono3DHead(TestCase): + + def test_fcos_mono3d_head_loss(self): + """Tests FCOS3D head loss and inference.""" + + img_metas = [ + dict( + cam2img=[[1260.8474446004698, 0.0, 807.968244525554], + [0.0, 1260.8474446004698, 495.3344268742088], + [0.0, 0.0, 1.0]], + scale_factor=np.array([1., 1., 1., 1.], dtype=np.float32), + box_type_3d=CameraInstance3DBoxes) + ] + + train_cfg = dict( + allowed_border=0, + code_weight=[1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05], + pos_weight=-1, + debug=False) + + test_cfg = dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=1000, + nms_thr=0.8, + score_thr=0.05, + min_bbox_size=0, + max_per_img=200) + + train_cfg = mmengine.Config(train_cfg) + test_cfg = mmengine.Config(test_cfg) + + fcos_mono3d_head = FCOSMono3DHead( + num_classes=10, + in_channels=32, + stacked_convs=2, + feat_channels=32, + use_direction_classifier=True, + diff_rad_by_sin=True, + pred_attrs=True, + pred_velo=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + strides=[8, 16, 32, 64, 128], + group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo + cls_branch=(32, ), + reg_branch=( + (32, ), # offset + (32, ), # depth + (32, ), # size + (32, ), # rot + () # velo + ), + dir_branch=(32, ), + attr_branch=(32, ), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_attr=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_centerness=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + bbox_coder=dict(type='FCOS3DBBoxCoder', code_size=9), + norm_on_bbox=True, + centerness_on_reg=True, + center_sampling=True, + conv_bias=True, + dcn_on_last_conv=False, + train_cfg=train_cfg, + test_cfg=test_cfg) + + # FCOS3D head expects a multiple levels of features per image + feats = [ + torch.rand([1, 32, 116, 200], dtype=torch.float32), + torch.rand([1, 32, 58, 100], dtype=torch.float32), + torch.rand([1, 32, 29, 50], dtype=torch.float32), + torch.rand([1, 32, 15, 25], dtype=torch.float32), + torch.rand([1, 32, 8, 13], dtype=torch.float32) + ] + + # Test forward + ret_dict = fcos_mono3d_head.forward(feats) + + self.assertEqual( + len(ret_dict), 5, 'the length of forward feature should be 5') + self.assertEqual( + len(ret_dict[0]), 5, 'each feature should have 5 levels') + self.assertEqual( + ret_dict[0][0].shape, torch.Size([1, 10, 116, 200]), + 'the fist level feature shape should be [1, 10, 116, 200]') + + # When truth is non-empty then all losses + # should be nonzero for random inputs + gt_instances_3d = InstanceData() + gt_instances = InstanceData() + + gt_bboxes = torch.rand([3, 4], dtype=torch.float32) + gt_bboxes_3d = CameraInstance3DBoxes(torch.rand([3, 9]), box_dim=9) + gt_labels = torch.randint(0, 10, [3]) + gt_labels_3d = gt_labels + centers_2d = torch.rand([3, 2], dtype=torch.float32) + depths = torch.rand([3], dtype=torch.float32) + + attr_labels = torch.randint(0, 9, [3]) + + gt_instances_3d.bboxes_3d = gt_bboxes_3d + gt_instances_3d.labels_3d = gt_labels_3d + gt_instances.bboxes = gt_bboxes + gt_instances.labels 
= gt_labels + gt_instances_3d.centers_2d = centers_2d + gt_instances_3d.depths = depths + gt_instances_3d.attr_labels = attr_labels + + gt_losses = fcos_mono3d_head.loss_by_feat(*ret_dict, [gt_instances_3d], + [gt_instances], img_metas) + + gt_cls_loss = gt_losses['loss_cls'].item() + gt_siz_loss = gt_losses['loss_size'].item() + gt_ctr_loss = gt_losses['loss_centerness'].item() + gt_off_loss = gt_losses['loss_offset'].item() + gt_dep_loss = gt_losses['loss_depth'].item() + gt_rot_loss = gt_losses['loss_rotsin'].item() + gt_vel_loss = gt_losses['loss_velo'].item() + gt_dir_loss = gt_losses['loss_dir'].item() + gt_atr_loss = gt_losses['loss_attr'].item() + + self.assertGreater(gt_cls_loss, 0, 'cls loss should be positive') + self.assertGreater(gt_siz_loss, 0, 'size loss should be positive') + self.assertGreater(gt_ctr_loss, 0, + 'centerness loss should be positive') + self.assertGreater(gt_off_loss, 0, 'offset loss should be positive') + self.assertGreater(gt_dep_loss, 0, 'depth loss should be positive') + self.assertGreater(gt_rot_loss, 0, 'rotsin loss should be positive') + self.assertGreater(gt_vel_loss, 0, 'velocity loss should be positive') + self.assertGreater(gt_dir_loss, 0, 'direction loss should be positive') + self.assertGreater(gt_atr_loss, 0, 'attribue loss should be positive') + + # test get_results + results_list_3d, results_list_2d = fcos_mono3d_head.predict_by_feat( + *ret_dict, img_metas) + self.assertEqual(len(results_list_3d), 1, 'batch size should be 1') + self.assertEqual(results_list_2d, None, + 'there is no 2d result in fcos3d') + results = results_list_3d[0] + pred_bboxes_3d = results.bboxes_3d + pred_scores_3d = results.scores_3d + pred_labels_3d = results.labels_3d + pred_attr_labels = results.attr_labels + self.assertEqual( + pred_bboxes_3d.tensor.shape, torch.Size([200, 9]), + 'the shape of predicted 3d bboxes should be [200, 9]') + self.assertEqual( + pred_scores_3d.shape, torch.Size([200]), + 'the shape of predicted 3d bbox scores should be [200]') + self.assertEqual( + pred_labels_3d.shape, torch.Size([200]), + 'the shape of predicted 3d bbox labels should be [200]') + self.assertEqual( + pred_attr_labels.shape, torch.Size([200]), + 'the shape of predicted 3d bbox attribute labels should be [200]') diff --git a/tests/test_models/test_dense_heads/test_freeanchors.py b/tests/test_models/test_dense_heads/test_freeanchors.py new file mode 100755 index 0000000..3d95434 --- /dev/null +++ b/tests/test_models/test_dense_heads/test_freeanchors.py @@ -0,0 +1,80 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestFreeAnchor(unittest.TestCase): + + def test_freeanchor(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models.dense_heads, 'FreeAnchor3DHead') + DefaultScope.get_instance('test_freeanchor', scope_name='mmdet3d') + setup_seed(0) + freeanchor_cfg = get_detector_cfg( + 'free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor' + '_sbn-all_8xb4-2x_nus-3d.py') + # decrease channels to reduce cuda memory. 
+ freeanchor_cfg.pts_voxel_encoder.feat_channels = [1, 1] + freeanchor_cfg.pts_middle_encoder.in_channels = 1 + freeanchor_cfg.pts_backbone.base_channels = 1 + freeanchor_cfg.pts_backbone.stem_channels = 1 + freeanchor_cfg.pts_neck.out_channels = 1 + freeanchor_cfg.pts_bbox_head.feat_channels = 1 + freeanchor_cfg.pts_bbox_head.in_channels = 1 + model = MODELS.build(freeanchor_cfg) + num_gt_instance = 3 + packed_inputs = create_detector_inputs( + num_gt_instance=num_gt_instance, gt_bboxes_dim=9) + + # TODO: Support aug_test + # aug_data = [ + # create_detector_inputs( + # num_gt_instance=num_gt_instance, gt_bboxes_dim=9), + # create_detector_inputs( + # num_gt_instance=num_gt_instance + 1, gt_bboxes_dim=9) + # ] + # # test_aug_test + # metainfo = { + # 'pcd_scale_factor': 1, + # 'pcd_horizontal_flip': 1, + # 'pcd_vertical_flip': 1, + # 'box_type_3d': LiDARInstance3DBoxes + # } + # for item in aug_data: + # item['data_sample'].set_metainfo(metainfo) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # TODO: Support aug_test + # batch_inputs, data_samples = model.data_preprocessor( + # aug_data, True) + # aug_results = model.forward( + # batch_inputs, data_samples, mode='predict') + # self.assertEqual(len(results), len(data)) + # self.assertIn('bboxes_3d', aug_results[0].pred_instances_3d) + # self.assertIn('scores_3d', aug_results[0].pred_instances_3d) + # self.assertIn('labels_3d', aug_results[0].pred_instances_3d) + # self.assertIn('bboxes_3d', aug_results[1].pred_instances_3d) + # self.assertIn('scores_3d', aug_results[1].pred_instances_3d) + # self.assertIn('labels_3d', aug_results[1].pred_instances_3d) + + losses = model.forward(**data, mode='loss') + + self.assertGreaterEqual(losses['positive_bag_loss'], 0) + self.assertGreaterEqual(losses['negative_bag_loss'], 0) diff --git a/tests/test_models/test_dense_heads/test_imvoxel_head.py b/tests/test_models/test_dense_heads/test_imvoxel_head.py new file mode 100755 index 0000000..0d55a78 --- /dev/null +++ b/tests/test_models/test_dense_heads/test_imvoxel_head.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import pytest +import torch + +from mmdet3d import * # noqa +from mmdet3d.models.dense_heads import ImVoxelHead +from mmdet3d.testing import create_detector_inputs + + +class TestImVoxelHead(TestCase): + + def test_imvoxel_head_loss(self): + """Test imvoxel head loss when truth is empty and non-empty.""" + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + + # build head + prior_generator = dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-3.2, -0.2, -2.28, 3.2, 6.2, 0.28]], + rotations=[.0]) + imvoxel_head = ImVoxelHead( + n_classes=1, + n_levels=1, + n_channels=32, + n_reg_outs=7, + pts_assign_threshold=27, + pts_center_threshold=18, + prior_generator=prior_generator, + center_loss=dict(type='mmdet.CrossEntropyLoss', use_sigmoid=True), + bbox_loss=dict(type='RotatedIoU3DLoss'), + cls_loss=dict(type='mmdet.FocalLoss'), + ) + imvoxel_head = imvoxel_head.cuda() + + # fake input of head + # (x, valid_preds) + x = [ + torch.randn(1, 32, 10, 10, 4).cuda(), + torch.ones(1, 1, 10, 10, 4).cuda() + ] + + # fake annotation + num_gt_instance = 1 + packed_inputs = create_detector_inputs( + with_points=False, + with_img=True, + img_size=(128, 128), + num_gt_instance=num_gt_instance, + with_pts_semantic_mask=False, + with_pts_instance_mask=False) + data_samples = [ + sample.cuda() for sample in packed_inputs['data_samples'] + ] + + losses = imvoxel_head.loss(x, data_samples) + print(losses) + self.assertGreaterEqual(losses['center_loss'], 0) + self.assertGreaterEqual(losses['bbox_loss'], 0) + self.assertGreaterEqual(losses['cls_loss'], 0) diff --git a/tests/test_models/test_dense_heads/test_monoflex_head.py b/tests/test_models/test_dense_heads/test_monoflex_head.py new file mode 100755 index 0000000..e146cc7 --- /dev/null +++ b/tests/test_models/test_dense_heads/test_monoflex_head.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
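# Background for the 50-channel regression output asserted in the MonoFlex
# test below: the widths in group_reg_dims simply add up, with the nested
# (8, 8) entry contributing 8 + 8 channels. Illustrative arithmetic only:
group_reg_dims = ((4, ), (2, ), (20, ), (3, ), (3, ), (8, 8), (1, ), (1, ))
total = sum(sum(group) for group in group_reg_dims)
assert total == 50  # matches out_reg[0].shape == (1, 50, 32, 32)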
+from unittest import TestCase + +import numpy as np +import torch + +from mmdet3d.models.dense_heads import MonoFlexHead + + +class TestMonoFlexHead(TestCase): + + def test_monoflex_head_loss(self): + """Tests MonoFlex head loss and inference.""" + + input_metas = [dict(img_shape=(110, 110), pad_shape=(128, 128))] + + monoflex_head = MonoFlexHead( + num_classes=3, + in_channels=64, + use_edge_fusion=True, + edge_fusion_inds=[(1, 0)], + edge_heatmap_ratio=1 / 8, + stacked_convs=0, + feat_channels=64, + use_direction_classifier=False, + diff_rad_by_sin=False, + pred_attrs=False, + pred_velo=False, + dir_offset=0, + strides=None, + group_reg_dims=((4, ), (2, ), (20, ), (3, ), (3, ), (8, 8), (1, ), + (1, )), + cls_branch=(256, ), + reg_branch=((256, ), (256, ), (256, ), (256, ), (256, ), (256, ), + (256, ), (256, )), + num_attrs=0, + bbox_code_size=7, + dir_branch=(), + attr_branch=(), + bbox_coder=dict( + type='MonoFlexCoder', + depth_mode='exp', + base_depth=(26.494627, 16.05988), + depth_range=[0.1, 100], + combine_depth=True, + uncertainty_range=[-10, 10], + base_dims=((3.8840, 1.5261, 1.6286, 0.4259, 0.1367, 0.1022), + (0.8423, 1.7607, 0.6602, 0.2349, 0.1133, 0.1427), + (1.7635, 1.7372, 0.5968, 0.1766, 0.0948, 0.1242)), + dims_mode='linear', + multibin=True, + num_dir_bins=4, + bin_centers=[0, np.pi / 2, np.pi, -np.pi / 2], + bin_margin=np.pi / 6, + code_size=7), + conv_bias=True, + dcn_on_last_conv=False) + + # Monoflex head expects a single level of features per image + feats = [torch.rand([1, 64, 32, 32], dtype=torch.float32)] + + # Test forward + cls_score, out_reg = monoflex_head.forward(feats, input_metas) + + self.assertEqual(cls_score[0].shape, torch.Size([1, 3, 32, 32]), + 'the shape of cls_score should be [1, 3, 32, 32]') + self.assertEqual(out_reg[0].shape, torch.Size([1, 50, 32, 32]), + 'the shape of out_reg should be [1, 50, 32, 32]') diff --git a/tests/test_models/test_dense_heads/test_pgd_head.py b/tests/test_models/test_dense_heads/test_pgd_head.py new file mode 100755 index 0000000..0c33535 --- /dev/null +++ b/tests/test_models/test_dense_heads/test_pgd_head.py @@ -0,0 +1,210 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
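# Background for the depth branch of the PGD head below: with
# division='uniform', depth_unit=10 and depth_bins=8 the head also predicts a
# distribution over evenly spaced depth values, from which a soft depth
# estimate can be read off as the probability-weighted mean. Minimal sketch
# (illustrative only; how this is fused with the directly regressed depth is
# up to the head/coder):
import torch

depth_bins = 8
depth_unit = 10
bin_values = torch.arange(depth_bins, dtype=torch.float32) * depth_unit  # 0..70
probs = torch.softmax(torch.randn(depth_bins), dim=0)
soft_depth = (probs * bin_values).sum()
assert 0.0 <= float(soft_depth) <= 70.0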
+from unittest import TestCase + +import mmengine +import numpy as np +import torch +from mmengine.structures import InstanceData + +from mmdet3d.models.dense_heads import PGDHead +from mmdet3d.structures import CameraInstance3DBoxes + + +class TestFGDHead(TestCase): + + def test_pgd_head_loss(self): + """Tests PGD head loss and inference.""" + + img_metas = [ + dict( + img_shape=[384, 1248], + cam2img=[[721.5377, 0.0, 609.5593, 44.85728], + [0.0, 721.5377, 172.854, 0.2163791], + [0.0, 0.0, 1.0, 0.002745884], [0.0, 0.0, 0.0, 1.0]], + scale_factor=np.array([1., 1., 1., 1.], dtype=np.float32), + box_type_3d=CameraInstance3DBoxes) + ] + + train_cfg = dict(code_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, + 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 1.0, 1.0, 1.0, + 1.0 + ]) + + test_cfg = dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=100, + nms_thr=0.05, + score_thr=0.001, + min_bbox_size=0, + max_per_img=20) + + train_cfg = mmengine.Config(train_cfg) + test_cfg = mmengine.Config(test_cfg) + + pgd_head = PGDHead( + num_classes=3, + in_channels=256, + stacked_convs=2, + feat_channels=256, + use_direction_classifier=True, + bbox_code_size=7, + diff_rad_by_sin=True, + pred_attrs=False, + pred_velo=False, + pred_bbox2d=True, + pred_keypoints=True, + use_onlyreg_proj=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + strides=(4, 8, 16, 32), + regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 1e8)), + group_reg_dims=(2, 1, 3, 1, 16, + 4), # offset, depth, size, rot, kpts, bbox2d + cls_branch=(256, ), + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + (256, ), # kpts + (256, ) # bbox2d + ), + dir_branch=(256, ), + attr_branch=(256, ), + centerness_branch=(256, ), + loss_cls=dict( + type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict( + type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_attr=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_centerness=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + norm_on_bbox=True, + centerness_on_reg=True, + center_sampling=True, + conv_bias=True, + dcn_on_last_conv=False, + use_depth_classifier=True, + depth_branch=(256, ), + depth_range=(0, 70), + depth_unit=10, + division='uniform', + depth_bins=8, + weight_dim=1, + loss_depth=dict( + type='UncertainSmoothL1Loss', + alpha=1.0, + beta=3.0, + loss_weight=1.0), + bbox_coder=dict( + type='PGDBBoxCoder', + base_depths=((28.01, 16.32), ), + base_dims=((0.8, 1.73, 0.6), (1.76, 1.73, 0.6), (3.9, 1.56, + 1.6)), + code_size=7), + train_cfg=train_cfg, + test_cfg=test_cfg) + + # PGD head expects a multiple levels of features per image + feats = [ + torch.rand([1, 256, 96, 312], dtype=torch.float32), + torch.rand([1, 256, 48, 156], dtype=torch.float32), + torch.rand([1, 256, 24, 78], dtype=torch.float32), + torch.rand([1, 256, 12, 39], dtype=torch.float32), + ] + + # Test forward + ret_dict = pgd_head.forward(feats) + + self.assertEqual( + len(ret_dict), 7, 'the length of forward feature should be 7') + self.assertEqual( + len(ret_dict[0]), 4, 'each feature should have 4 levels') + self.assertEqual( + ret_dict[0][0].shape, torch.Size([1, 3, 96, 312]), + 'the fist level feature shape should be [1, 3, 96, 312]') + + # When truth is non-empty then all losses + # should be nonzero for 
random inputs + gt_instances_3d = InstanceData() + gt_instances = InstanceData() + + gt_bboxes = torch.rand([3, 4], dtype=torch.float32) + gt_bboxes_3d = CameraInstance3DBoxes(torch.rand([3, 7]), box_dim=7) + gt_labels = torch.randint(0, 3, [3]) + gt_labels_3d = gt_labels + centers_2d = torch.rand([3, 2], dtype=torch.float32) + depths = torch.rand([3], dtype=torch.float32) + + gt_instances_3d.bboxes_3d = gt_bboxes_3d + gt_instances_3d.labels_3d = gt_labels_3d + gt_instances.bboxes = gt_bboxes + gt_instances.labels = gt_labels + gt_instances_3d.centers_2d = centers_2d + gt_instances_3d.depths = depths + + gt_losses = pgd_head.loss_by_feat(*ret_dict, [gt_instances_3d], + [gt_instances], img_metas) + + gt_cls_loss = gt_losses['loss_cls'].item() + gt_siz_loss = gt_losses['loss_size'].item() + gt_ctr_loss = gt_losses['loss_centerness'].item() + gt_off_loss = gt_losses['loss_offset'].item() + gt_dep_loss = gt_losses['loss_depth'].item() + gt_rot_loss = gt_losses['loss_rotsin'].item() + gt_kpt_loss = gt_losses['loss_kpts'].item() + gt_dir_loss = gt_losses['loss_dir'].item() + gt_box_loss = gt_losses['loss_bbox2d'].item() + gt_cos_loss = gt_losses['loss_consistency'].item() + + self.assertGreater(gt_cls_loss, 0, 'cls loss should be positive') + self.assertGreater(gt_siz_loss, 0, 'size loss should be positive') + self.assertGreater(gt_ctr_loss, 0, + 'centerness loss should be positive') + self.assertGreater(gt_off_loss, 0, 'offset loss should be positive') + self.assertGreater(gt_dep_loss, 0, 'depth loss should be positive') + self.assertGreater(gt_rot_loss, 0, 'rotsin loss should be positive') + self.assertGreater(gt_kpt_loss, 0, 'keypoints loss should be positive') + self.assertGreater(gt_dir_loss, 0, 'direction loss should be positive') + self.assertGreater(gt_box_loss, 0, '2d bbox loss should be positive') + self.assertGreater(gt_cos_loss, 0, + 'consistency loss should be positive') + + # test get_results + results_list_3d, results_list_2d = pgd_head.predict_by_feat( + *ret_dict, img_metas) + self.assertEqual(len(results_list_3d), 1, 'batch size should be 1') + self.assertEqual(len(results_list_2d), 1, 'batch size should be 1') + results = results_list_3d[0] + results_2d = results_list_2d[0] + pred_bboxes_3d = results.bboxes_3d + pred_scores_3d = results.scores_3d + pred_labels_3d = results.labels_3d + pred_bboxes_2d = results_2d.bboxes + self.assertEqual(pred_bboxes_3d.tensor.shape, torch.Size([20, 7]), + 'the shape of predicted 3d bboxes should be [20, 7]') + self.assertEqual( + pred_scores_3d.shape, torch.Size([20]), + 'the shape of predicted 3d bbox scores should be [20]') + self.assertEqual( + pred_labels_3d.shape, torch.Size([20]), + 'the shape of predicted 3d bbox labels should be [20]') + self.assertEqual( + pred_bboxes_2d.shape, torch.Size([20, 4]), + 'the shape of predicted 2d bbox attribute labels should be [20, 4]' + ) diff --git a/tests/test_models/test_dense_heads/test_smoke_mono3d_head.py b/tests/test_models/test_dense_heads/test_smoke_mono3d_head.py new file mode 100755 index 0000000..70c0a85 --- /dev/null +++ b/tests/test_models/test_dense_heads/test_smoke_mono3d_head.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
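+# Like the other dense-head tests in this patch, the checks below need no GPU
+# and no dataset on disk; a single-file run such as
+#     pytest tests/test_models/test_dense_heads/test_smoke_mono3d_head.py
+# should be enough to exercise them.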
+from unittest import TestCase + +import numpy as np +import torch +from mmengine.structures import InstanceData + +from mmdet3d.models.dense_heads import SMOKEMono3DHead +from mmdet3d.structures import CameraInstance3DBoxes + + +class TestSMOKEMono3DHead(TestCase): + + def test_smoke_mono3d_head_loss(self): + """Tests SMOKE head loss and inference.""" + + img_metas = [ + dict( + cam2img=[[1260.8474446004698, 0.0, 807.968244525554, 40.1111], + [0.0, 1260.8474446004698, 495.3344268742088, 2.34422], + [0.0, 0.0, 1.0, 0.00333333], [0.0, 0.0, 0.0, 1.0]], + scale_factor=np.array([1., 1., 1., 1.], dtype=np.float32), + pad_shape=[128, 128], + trans_mat=np.array( + [[0.25, 0., 0.], [0., 0.25, 0], [0., 0., 1.]], + dtype=np.float32), + affine_aug=False, + box_type_3d=CameraInstance3DBoxes) + ] + + smoke_mono3d_head = SMOKEMono3DHead( + num_classes=3, + in_channels=64, + dim_channel=[3, 4, 5], + ori_channel=[6, 7], + stacked_convs=0, + feat_channels=64, + use_direction_classifier=False, + diff_rad_by_sin=False, + pred_attrs=False, + pred_velo=False, + dir_offset=0, + strides=None, + group_reg_dims=(8, ), + cls_branch=(256, ), + reg_branch=((256, ), ), + num_attrs=0, + bbox_code_size=7, + dir_branch=(), + attr_branch=(), + bbox_coder=dict( + type='SMOKECoder', + base_depth=(28.01, 16.32), + base_dims=((0.88, 1.73, 0.67), (1.78, 1.70, 0.58), (3.88, 1.63, + 1.53)), + code_size=7), + loss_cls=dict(type='mmdet.GaussianFocalLoss', loss_weight=1.0), + loss_bbox=dict( + type='mmdet.L1Loss', reduction='sum', loss_weight=1 / 300), + loss_dir=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_attr=None, + conv_bias=True, + dcn_on_last_conv=False) + + # SMOKE head expects a single level of features per image + feats = [torch.rand([1, 64, 32, 32], dtype=torch.float32)] + + # Test forward + ret_dict = smoke_mono3d_head.forward(feats) + + self.assertEqual( + len(ret_dict), 2, 'the length of forward feature should be 2') + self.assertEqual( + len(ret_dict[0]), 1, 'each feature should have 1 level') + self.assertEqual( + ret_dict[0][0].shape, torch.Size([1, 3, 32, 32]), + 'the fist level feature shape should be [1, 3, 32, 32]') + + # When truth is non-empty then all losses + # should be nonzero for random inputs + gt_instances_3d = InstanceData() + gt_instances = InstanceData() + + gt_bboxes = torch.Tensor([[1.0, 2.0, 20.0, 40.0], + [45.0, 50.0, 80.0, 70.1], + [34.0, 39.0, 65.0, 64.0]]) + gt_bboxes_3d = CameraInstance3DBoxes(torch.rand([3, 7]), box_dim=7) + gt_labels = torch.randint(0, 3, [3]) + gt_labels_3d = gt_labels + centers_2d = torch.randint(0, 60, (3, 2)) + depths = torch.rand([3], dtype=torch.float32) + + gt_instances_3d.bboxes_3d = gt_bboxes_3d + gt_instances_3d.labels_3d = gt_labels_3d + gt_instances.bboxes = gt_bboxes + gt_instances.labels = gt_labels + gt_instances_3d.centers_2d = centers_2d + gt_instances_3d.depths = depths + + gt_losses = smoke_mono3d_head.loss_by_feat(*ret_dict, + [gt_instances_3d], + [gt_instances], img_metas) + + gt_cls_loss = gt_losses['loss_cls'].item() + gt_box_loss = gt_losses['loss_bbox'].item() + + self.assertGreater(gt_cls_loss, 0, 'cls loss should be positive') + self.assertGreater(gt_box_loss, 0, 'bbox loss should be positive') + + # test get_results + results_list = smoke_mono3d_head.predict_by_feat(*ret_dict, img_metas) + self.assertEqual( + len(results_list), 1, 'there should be one image results') + results = results_list[0] + pred_bboxes_3d = results.bboxes_3d + pred_scores_3d = results.scores_3d + pred_labels_3d = results.labels_3d 
+ + self.assertEqual( + pred_bboxes_3d.tensor.shape, torch.Size([100, 7]), + 'the shape of predicted 3d bboxes should be [100, 7]') + self.assertEqual( + pred_scores_3d.shape, torch.Size([100]), + 'the shape of predicted 3d bbox scores should be [100]') + self.assertEqual( + pred_labels_3d.shape, torch.Size([100]), + 'the shape of predicted 3d bbox labels should be [100]') diff --git a/tests/test_models/test_dense_heads/test_ssn.py b/tests/test_models/test_dense_heads/test_ssn.py new file mode 100755 index 0000000..80a440d --- /dev/null +++ b/tests/test_models/test_dense_heads/test_ssn.py @@ -0,0 +1,79 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestSSN(unittest.TestCase): + + def test_ssn(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models.dense_heads, 'ShapeAwareHead') + DefaultScope.get_instance('test_ssn', scope_name='mmdet3d') + setup_seed(0) + ssn_cfg = get_detector_cfg( + 'ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py') + ssn_cfg.pts_voxel_encoder.feat_channels = [1, 1] + ssn_cfg.pts_middle_encoder.in_channels = 1 + ssn_cfg.pts_backbone.in_channels = 1 + ssn_cfg.pts_backbone.out_channels = [1, 1, 1] + ssn_cfg.pts_neck.in_channels = [1, 1, 1] + ssn_cfg.pts_neck.out_channels = [1, 1, 1] + ssn_cfg.pts_bbox_head.in_channels = 3 + ssn_cfg.pts_bbox_head.feat_channels = 1 + model = MODELS.build(ssn_cfg) + num_gt_instance = 50 + packed_inputs = create_detector_inputs( + num_gt_instance=num_gt_instance, gt_bboxes_dim=9) + + # TODO: Support aug_test + # aug_data = [ + # create_detector_inputs( + # num_gt_instance=num_gt_instance, gt_bboxes_dim=9), + # create_detector_inputs( + # num_gt_instance=num_gt_instance + 1, gt_bboxes_dim=9) + # ] + # test_aug_test + # metainfo = { + # 'pcd_scale_factor': 1, + # 'pcd_horizontal_flip': 1, + # 'pcd_vertical_flip': 1, + # 'box_type_3d': LiDARInstance3DBoxes + # } + # for item in aug_data: + # item['data_sample'].set_metainfo(metainfo) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # TODO: Support aug_test + # batch_inputs, data_samples = model.data_preprocessor( + # aug_data, True) + # aug_results = model.forward( + # batch_inputs, data_samples, mode='predict') + # self.assertEqual(len(results), len(data)) + # self.assertIn('bboxes_3d', aug_results[0].pred_instances_3d) + # self.assertIn('scores_3d', aug_results[0].pred_instances_3d) + # self.assertIn('labels_3d', aug_results[0].pred_instances_3d) + # self.assertIn('bboxes_3d', aug_results[1].pred_instances_3d) + # self.assertIn('scores_3d', aug_results[1].pred_instances_3d) + # self.assertIn('labels_3d', aug_results[1].pred_instances_3d) + + losses = model.forward(**data, mode='loss') + + self.assertGreaterEqual(losses['loss_cls'][0], 0) + self.assertGreaterEqual(losses['loss_bbox'][0], 0) + self.assertGreaterEqual(losses['loss_dir'][0], 0) diff --git a/tests/test_models/test_detectors/test_3dssd.py b/tests/test_models/test_detectors/test_3dssd.py new file mode 100755 index 0000000..627994d --- /dev/null +++ b/tests/test_models/test_detectors/test_3dssd.py @@ -0,0 
+1,39 @@
+import unittest
+
+import torch
+from mmengine import DefaultScope
+
+from mmdet3d.registry import MODELS
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)
+
+
+class Test3DSSD(unittest.TestCase):
+
+    def test_3dssd(self):
+        import mmdet3d.models
+
+        assert hasattr(mmdet3d.models, 'SSD3DNet')
+        DefaultScope.get_instance('test_ssd3d', scope_name='mmdet3d')
+        setup_seed(0)
+        voxel_net_cfg = get_detector_cfg('3dssd/3dssd_4xb4_kitti-3d-car.py')
+        model = MODELS.build(voxel_net_cfg)
+        num_gt_instance = 3
+        packed_inputs = create_detector_inputs(
+            num_gt_instance=num_gt_instance, num_classes=1)
+
+        if torch.cuda.is_available():
+            model = model.cuda()
+            # test simple_test
+            with torch.no_grad():
+                data = model.data_preprocessor(packed_inputs, True)
+                torch.cuda.empty_cache()
+                results = model.forward(**data, mode='predict')
+            self.assertEqual(len(results), 1)
+            self.assertIn('bboxes_3d', results[0].pred_instances_3d)
+            self.assertIn('scores_3d', results[0].pred_instances_3d)
+            self.assertIn('labels_3d', results[0].pred_instances_3d)
+
+            losses = model.forward(**data, mode='loss')
+
+            self.assertGreater(losses['centerness_loss'], 0)
diff --git a/tests/test_models/test_detectors/test_center_point.py b/tests/test_models/test_detectors/test_center_point.py
new file mode 100755
index 0000000..cab2b79
--- /dev/null
+++ b/tests/test_models/test_detectors/test_center_point.py
@@ -0,0 +1,63 @@
+import unittest
+
+import torch
+from mmengine import DefaultScope
+
+from mmdet3d.registry import MODELS
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)
+
+
+class TestCenterPoint(unittest.TestCase):
+
+    def test_center_point(self):
+        import mmdet3d.models
+
+        assert hasattr(mmdet3d.models, 'CenterPoint')
+
+        setup_seed(0)
+        DefaultScope.get_instance('test_center_point', scope_name='mmdet3d')
+        centerpoint_net_cfg = get_detector_cfg(
+            'centerpoint/centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py'  # noqa
+        )
+        model = MODELS.build(centerpoint_net_cfg)
+        num_gt_instance = 50
+        packed_inputs = create_detector_inputs(
+            with_img=True, num_gt_instance=num_gt_instance, points_feat_dim=5)
+
+        for sample_id in range(len(packed_inputs['data_samples'])):
+            det_sample = packed_inputs['data_samples'][sample_id]
+            num_instances = len(det_sample.gt_instances_3d.bboxes_3d)
+            bbox_3d_class = det_sample.gt_instances_3d.bboxes_3d.__class__
+            det_sample.gt_instances_3d.bboxes_3d = bbox_3d_class(
+                torch.rand(num_instances, 9), box_dim=9)
+
+        if torch.cuda.is_available():
+
+            model = model.cuda()
+            # test simple_test
+
+            data = model.data_preprocessor(packed_inputs, True)
+            with torch.no_grad():
+                torch.cuda.empty_cache()
+                losses = model.forward(**data, mode='loss')
+                assert losses['task0.loss_heatmap'] >= 0
+                assert losses['task0.loss_bbox'] >= 0
+                assert losses['task1.loss_heatmap'] >= 0
+                assert losses['task1.loss_bbox'] >= 0
+                assert losses['task2.loss_heatmap'] >= 0
+                assert losses['task2.loss_bbox'] >= 0
+                assert losses['task3.loss_heatmap'] >= 0
+                assert losses['task3.loss_bbox'] >= 0
+                assert losses['task4.loss_heatmap'] >= 0
+                assert losses['task4.loss_bbox'] >= 0
+                assert losses['task5.loss_heatmap'] >= 0
+                assert losses['task5.loss_bbox'] >= 0
+
+            with torch.no_grad():
+                results = model.forward(**data, mode='predict')
+            self.assertEqual(len(results), 1)
+            self.assertIn('bboxes_3d', results[0].pred_instances_3d)
+            self.assertIn('scores_3d', results[0].pred_instances_3d)
+            self.assertIn('labels_3d', results[0].pred_instances_3d)
+
# TODO test_aug_test diff --git a/tests/test_models/test_detectors/test_fcaf3d.py b/tests/test_models/test_detectors/test_fcaf3d.py new file mode 100755 index 0000000..ce98515 --- /dev/null +++ b/tests/test_models/test_detectors/test_fcaf3d.py @@ -0,0 +1,48 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestFCAF3d(unittest.TestCase): + + def test_fcaf3d(self): + try: + import MinkowskiEngine # noqa: F401 + except ImportError: + return + + import mmdet3d.models + assert hasattr(mmdet3d.models, 'MinkSingleStage3DDetector') + DefaultScope.get_instance('test_fcaf3d', scope_name='mmdet3d') + setup_seed(0) + fcaf3d_net_cfg = get_detector_cfg( + 'fcaf3d/fcaf3d_2xb8_scannet-3d-18class.py') + model = MODELS.build(fcaf3d_net_cfg) + num_gt_instance = 3 + packed_inputs = create_detector_inputs( + num_gt_instance=num_gt_instance, + num_classes=1, + points_feat_dim=6, + gt_bboxes_dim=6) + + if torch.cuda.is_available(): + model = model.cuda() + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, False) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + losses = model.forward(**data, mode='loss') + + self.assertGreater(losses['center_loss'], 0) + self.assertGreater(losses['bbox_loss'], 0) + self.assertGreater(losses['cls_loss'], 0) diff --git a/tests/test_models/test_detectors/test_groupfree3d.py b/tests/test_models/test_detectors/test_groupfree3d.py new file mode 100755 index 0000000..784de7a --- /dev/null +++ b/tests/test_models/test_detectors/test_groupfree3d.py @@ -0,0 +1,49 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestGroupfree3d(unittest.TestCase): + + def test_groupfree3d(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'GroupFree3DNet') + DefaultScope.get_instance('test_groupfree3d', scope_name='mmdet3d') + setup_seed(0) + voxel_net_cfg = get_detector_cfg( + 'groupfree3d/groupfree3d_head-L6-O256_4xb8_scannet-seg.py') + model = MODELS.build(voxel_net_cfg) + num_gt_instance = 5 + packed_inputs = create_detector_inputs( + num_gt_instance=num_gt_instance, + points_feat_dim=3, + with_pts_semantic_mask=True, + with_pts_instance_mask=True) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # save the memory + with torch.no_grad(): + losses = model.forward(**data, mode='loss') + + self.assertGreater(losses['sampling_objectness_loss'], 0) + self.assertGreater(losses['proposal.objectness_loss'], 0) + self.assertGreater(losses['s0.objectness_loss'], 0) + self.assertGreater(losses['s1.size_res_loss'], 0) + self.assertGreater(losses['s4.size_class_loss'], 0) diff --git a/tests/test_models/test_detectors/test_h3dnet.py 
b/tests/test_models/test_detectors/test_h3dnet.py new file mode 100755 index 0000000..09507c3 --- /dev/null +++ b/tests/test_models/test_detectors/test_h3dnet.py @@ -0,0 +1,46 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestH3D(unittest.TestCase): + + def test_h3dnet(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'H3DNet') + DefaultScope.get_instance('test_H3DNet', scope_name='mmdet3d') + setup_seed(0) + voxel_net_cfg = get_detector_cfg('h3dnet/h3dnet_8xb3_scannet-seg.py') + model = MODELS.build(voxel_net_cfg) + num_gt_instance = 5 + packed_inputs = create_detector_inputs( + num_gt_instance=num_gt_instance, + points_feat_dim=4, + bboxes_3d_type='depth', + with_pts_semantic_mask=True, + with_pts_instance_mask=True) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # save the memory + with torch.no_grad(): + losses = model.forward(**data, mode='loss') + + self.assertGreater(losses['vote_loss'], 0) + self.assertGreater(losses['objectness_loss'], 0) + self.assertGreater(losses['center_loss'], 0) diff --git a/tests/test_models/test_detectors/test_imvotenet.py b/tests/test_models/test_detectors/test_imvotenet.py new file mode 100755 index 0000000..ac0fb9a --- /dev/null +++ b/tests/test_models/test_detectors/test_imvotenet.py @@ -0,0 +1,80 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestImvoteNet(unittest.TestCase): + + def test_imvotenet_only_img(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'ImVoteNet') + DefaultScope.get_instance('test_imvotenet_img', scope_name='mmdet3d') + setup_seed(0) + votenet_net_cfg = get_detector_cfg( + 'imvotenet/imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py') + model = MODELS.build(votenet_net_cfg) + + packed_inputs = create_detector_inputs( + with_points=False, with_img=True, img_size=128) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes', results[0].pred_instances) + self.assertIn('scores', results[0].pred_instances) + self.assertIn('labels', results[0].pred_instances) + + # save the memory + with torch.no_grad(): + torch.cuda.empty_cache() + losses = model.forward(**data, mode='loss') + + self.assertGreater(sum(losses['loss_rpn_cls']), 0) + + self.assertGreater(losses['loss_cls'], 0) + self.assertGreater(losses['loss_bbox'], 0) + + def test_imvotenet(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'ImVoteNet') + DefaultScope.get_instance('test_imvotenet', scope_name='mmdet3d') + setup_seed(0) + votenet_net_cfg = get_detector_cfg( + 'imvotenet/imvotenet_stage2_8xb16_sunrgbd-3d.py') + model = MODELS.build(votenet_net_cfg) + + packed_inputs = create_detector_inputs( + with_points=True, + with_img=True, + 
img_size=128, + bboxes_3d_type='depth') + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # save the memory + with torch.no_grad(): + losses = model.forward(**data, mode='loss') + + self.assertGreater(losses['vote_loss'], 0) + self.assertGreater(losses['objectness_loss'], 0) + self.assertGreater(losses['semantic_loss'], 0) diff --git a/tests/test_models/test_detectors/test_imvoxelnet.py b/tests/test_models/test_detectors/test_imvoxelnet.py new file mode 100755 index 0000000..4158406 --- /dev/null +++ b/tests/test_models/test_detectors/test_imvoxelnet.py @@ -0,0 +1,89 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestImVoxelNet(unittest.TestCase): + + def test_imvoxelnet_kitti(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'ImVoxelNet') + DefaultScope.get_instance( + 'test_imvoxelnet_kitti', scope_name='mmdet3d') + setup_seed(0) + imvoxel_net_cfg = get_detector_cfg( + 'imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py') + model = MODELS.build(imvoxel_net_cfg) + num_gt_instance = 1 + packed_inputs = create_detector_inputs( + with_points=False, + with_img=True, + img_size=(128, 128), + num_gt_instance=num_gt_instance, + with_pts_semantic_mask=False, + with_pts_instance_mask=False) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # save the memory + with torch.no_grad(): + losses = model.forward(**data, mode='loss') + + self.assertGreaterEqual(losses['loss_cls'][0], 0) + self.assertGreaterEqual(losses['loss_bbox'][0], 0) + self.assertGreaterEqual(losses['loss_dir'][0], 0) + + def test_imvoxelnet_sunrgbd(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'ImVoxelNet') + DefaultScope.get_instance( + 'test_imvoxelnet_sunrgbd', scope_name='mmdet3d') + setup_seed(0) + imvoxel_net_cfg = get_detector_cfg( + 'imvoxelnet/imvoxelnet_2xb4_sunrgbd-3d-10class.py') + model = MODELS.build(imvoxel_net_cfg) + num_gt_instance = 1 + packed_inputs = create_detector_inputs( + with_points=False, + with_img=True, + img_size=(128, 128), + num_gt_instance=num_gt_instance, + with_pts_semantic_mask=False, + with_pts_instance_mask=False) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # save the memory + with torch.no_grad(): + losses = model.forward(**data, mode='loss') + + 
self.assertGreaterEqual(losses['center_loss'], 0) + self.assertGreaterEqual(losses['bbox_loss'], 0) + self.assertGreaterEqual(losses['cls_loss'], 0) diff --git a/tests/test_models/test_detectors/test_mvxnet.py b/tests/test_models/test_detectors/test_mvxnet.py new file mode 100755 index 0000000..b0463d3 --- /dev/null +++ b/tests/test_models/test_detectors/test_mvxnet.py @@ -0,0 +1,47 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestMVXNet(unittest.TestCase): + + def test_mvxnet(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'DynamicMVXFasterRCNN') + + setup_seed(0) + DefaultScope.get_instance('test_mvxnet', scope_name='mmdet3d') + mvx_net_cfg = get_detector_cfg( + 'mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py' # noqa + ) + model = MODELS.build(mvx_net_cfg) + num_gt_instance = 1 + packed_inputs = create_detector_inputs( + with_img=False, num_gt_instance=num_gt_instance, points_feat_dim=4) + + if torch.cuda.is_available(): + + model = model.cuda() + # test simple_test + data = model.data_preprocessor(packed_inputs, True) + # save the memory when do the unitest + with torch.no_grad(): + torch.cuda.empty_cache() + losses = model.forward(**data, mode='loss') + assert losses['loss_cls'][0] >= 0 + assert losses['loss_bbox'][0] >= 0 + assert losses['loss_dir'][0] >= 0 + + with torch.no_grad(): + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + # TODO test_aug_test diff --git a/tests/test_models/test_detectors/test_parta2.py b/tests/test_models/test_detectors/test_parta2.py new file mode 100755 index 0000000..0409e97 --- /dev/null +++ b/tests/test_models/test_detectors/test_parta2.py @@ -0,0 +1,61 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestPartA2(unittest.TestCase): + + def test_parta2(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'PartA2') + DefaultScope.get_instance('test_parta2', scope_name='mmdet3d') + setup_seed(0) + parta2_cfg = get_detector_cfg( + 'parta2/parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py') + model = MODELS.build(parta2_cfg) + num_gt_instance = 2 + packed_inputs = create_detector_inputs(num_gt_instance=num_gt_instance) + + # TODO: Support aug data test + # aug_packed_inputs = [ + # create_detector_inputs(num_gt_instance=num_gt_instance), + # create_detector_inputs(num_gt_instance=num_gt_instance + 1) + # ] + # test_aug_test + # metainfo = { + # 'pcd_scale_factor': 1, + # 'pcd_horizontal_flip': 1, + # 'pcd_vertical_flip': 1, + # 'box_type_3d': LiDARInstance3DBoxes + # } + # for item in aug_packed_inputs: + # for batch_id in len(item['data_samples']): + # item['data_samples'][batch_id].set_metainfo(metainfo) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) 
+ self.assertIn('labels_3d', results[0].pred_instances_3d) + + # save the memory + with torch.no_grad(): + losses = model.forward(**data, mode='loss') + torch.cuda.empty_cache() + self.assertGreater(losses['loss_rpn_cls'][0], 0) + self.assertGreaterEqual(losses['loss_rpn_bbox'][0], 0) + self.assertGreater(losses['loss_seg'], 0) + self.assertGreater(losses['loss_part'], 0) + self.assertGreater(losses['loss_cls'], 0) diff --git a/tests/test_models/test_detectors/test_pointrcnn.py b/tests/test_models/test_detectors/test_pointrcnn.py new file mode 100755 index 0000000..cb03e28 --- /dev/null +++ b/tests/test_models/test_detectors/test_pointrcnn.py @@ -0,0 +1,46 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestPointRCNN(unittest.TestCase): + + def test_pointrcnn(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'PointRCNN') + DefaultScope.get_instance('test_pointrcnn', scope_name='mmdet3d') + setup_seed(0) + pointrcnn_cfg = get_detector_cfg( + 'point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py') + model = MODELS.build(pointrcnn_cfg) + num_gt_instance = 2 + packed_inputs = create_detector_inputs( + num_points=10101, num_gt_instance=num_gt_instance) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # save the memory + with torch.no_grad(): + losses = model.forward(**data, mode='loss') + torch.cuda.empty_cache() + self.assertGreaterEqual(losses['rpn_bbox_loss'], 0) + self.assertGreaterEqual(losses['rpn_semantic_loss'], 0) + self.assertGreaterEqual(losses['loss_cls'], 0) + self.assertGreaterEqual(losses['loss_bbox'], 0) + self.assertGreaterEqual(losses['loss_corner'], 0) diff --git a/tests/test_models/test_detectors/test_pvrcnn.py b/tests/test_models/test_detectors/test_pvrcnn.py new file mode 100755 index 0000000..05d1801 --- /dev/null +++ b/tests/test_models/test_detectors/test_pvrcnn.py @@ -0,0 +1,63 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestPVRCNN(unittest.TestCase): + + def test_pvrcnn(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'PointVoxelRCNN') + DefaultScope.get_instance('test_pvrcnn', scope_name='mmdet3d') + setup_seed(0) + pvrcnn_cfg = get_detector_cfg( + 'pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py') + model = MODELS.build(pvrcnn_cfg) + num_gt_instance = 2 + packed_inputs = create_detector_inputs(num_gt_instance=num_gt_instance) + + # TODO: Support aug data test + # aug_packed_inputs = [ + # create_detector_inputs(num_gt_instance=num_gt_instance), + # create_detector_inputs(num_gt_instance=num_gt_instance + 1) + # ] + # test_aug_test + # metainfo = { + # 'pcd_scale_factor': 1, + # 'pcd_horizontal_flip': 1, + # 'pcd_vertical_flip': 1, + # 'box_type_3d': LiDARInstance3DBoxes + # } + # for item in aug_packed_inputs: + # for batch_id in len(item['data_samples']): + # item['data_samples'][batch_id].set_metainfo(metainfo) + + if 
torch.cuda.is_available():
+            model = model.cuda()
+            # test simple_test
+            with torch.no_grad():
+                data = model.data_preprocessor(packed_inputs, True)
+                torch.cuda.empty_cache()
+                results = model.forward(**data, mode='predict')
+            self.assertEqual(len(results), 1)
+            self.assertIn('bboxes_3d', results[0].pred_instances_3d)
+            self.assertIn('scores_3d', results[0].pred_instances_3d)
+            self.assertIn('labels_3d', results[0].pred_instances_3d)
+
+            # save the memory
+            with torch.no_grad():
+                losses = model.forward(**data, mode='loss')
+                torch.cuda.empty_cache()
+            self.assertGreater(losses['loss_rpn_cls'][0], 0)
+            self.assertGreaterEqual(losses['loss_rpn_bbox'][0], 0)
+            self.assertGreaterEqual(losses['loss_rpn_dir'][0], 0)
+            self.assertGreater(losses['loss_semantic'], 0)
+            self.assertGreaterEqual(losses['loss_bbox'], 0)
+            self.assertGreaterEqual(losses['loss_cls'], 0)
+            self.assertGreaterEqual(losses['loss_corner'], 0)
diff --git a/tests/test_models/test_detectors/test_sassd.py b/tests/test_models/test_detectors/test_sassd.py
new file mode 100755
index 0000000..c9cc12f
--- /dev/null
+++ b/tests/test_models/test_detectors/test_sassd.py
@@ -0,0 +1,43 @@
+import unittest
+
+import torch
+from mmengine import DefaultScope
+
+from mmdet3d.registry import MODELS
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)
+
+
+class TestSASSD(unittest.TestCase):
+
+    def test_sassd(self):
+        import mmdet3d.models
+
+        assert hasattr(mmdet3d.models, 'SASSD')
+        DefaultScope.get_instance('test_sassd', scope_name='mmdet3d')
+        setup_seed(0)
+        voxel_net_cfg = get_detector_cfg(
+            'sassd/sassd_8xb6-80e_kitti-3d-3class.py')
+        model = MODELS.build(voxel_net_cfg)
+        num_gt_instance = 3
+        packed_inputs = create_detector_inputs(
+            num_gt_instance=num_gt_instance, num_classes=1)
+
+        if torch.cuda.is_available():
+            model = model.cuda()
+            # test simple_test
+            with torch.no_grad():
+                data = model.data_preprocessor(packed_inputs, True)
+                torch.cuda.empty_cache()
+                results = model.forward(**data, mode='predict')
+            self.assertEqual(len(results), 1)
+            self.assertIn('bboxes_3d', results[0].pred_instances_3d)
+            self.assertIn('scores_3d', results[0].pred_instances_3d)
+            self.assertIn('labels_3d', results[0].pred_instances_3d)
+
+            losses = model.forward(**data, mode='loss')
+            self.assertGreaterEqual(losses['loss_dir'][0], 0)
+            self.assertGreaterEqual(losses['loss_bbox'][0], 0)
+            self.assertGreaterEqual(losses['loss_cls'][0], 0)
+            self.assertGreater(losses['aux_loss_cls'][0], 0)
+            self.assertGreater(losses['aux_loss_reg'][0], 0)
diff --git a/tests/test_models/test_detectors/test_votenet.py b/tests/test_models/test_detectors/test_votenet.py
new file mode 100755
index 0000000..456db1b
--- /dev/null
+++ b/tests/test_models/test_detectors/test_votenet.py
@@ -0,0 +1,72 @@
+import unittest
+
+import torch
+from mmengine import DefaultScope
+
+from mmdet3d.registry import MODELS
+from mmdet3d.testing import (create_detector_inputs, get_detector_cfg,
+                             setup_seed)
+
+
+class TestVotenet(unittest.TestCase):
+
+    def test_votenet(self):
+        import mmdet3d.models
+
+        assert hasattr(mmdet3d.models, 'VoteNet')
+        DefaultScope.get_instance('test_vote_net', scope_name='mmdet3d')
+        setup_seed(0)
+        voxel_net_cfg = get_detector_cfg('votenet/votenet_8xb16_sunrgbd-3d.py')
+        model = MODELS.build(voxel_net_cfg)
+        num_gt_instance = 50
+        packed_inputs = create_detector_inputs(num_gt_instance=num_gt_instance)
+
+        # TODO: Support aug test
+        # aug_data = [
+        #     create_detector_inputs(num_gt_instance=num_gt_instance + 1)
+        # ]
+        # test_aug_test
+        # metainfo = {
+        #     'pcd_scale_factor': 1,
+        #     'pcd_horizontal_flip': 1,
+        #     'pcd_vertical_flip': 1,
+        #     'box_type_3d': LiDARInstance3DBoxes
+        # }
+        # for item in aug_data:
+        #     item['data_sample'].set_metainfo(metainfo)
+
+        if torch.cuda.is_available():
+            model = model.cuda()
+            # test simple_test
+            with torch.no_grad():
+                data = model.data_preprocessor(packed_inputs, True)
+                results = model.forward(**data, mode='predict')
+            self.assertEqual(len(results), 1)
+            self.assertIn('bboxes_3d', results[0].pred_instances_3d)
+            self.assertIn('scores_3d', results[0].pred_instances_3d)
+            self.assertIn('labels_3d', results[0].pred_instances_3d)
+
+            # TODO: Support aug_test
+            # batch_inputs, data_samples = model.data_preprocessor(
+            #     aug_data, True)
+            # aug_results = model.forward(
+            #     batch_inputs, data_samples, mode='predict')
+
+            # self.assertIn('bboxes_3d', aug_results[0].pred_instances_3d)
+            # self.assertIn('scores_3d', aug_results[0].pred_instances_3d)
+            # self.assertIn('labels_3d', aug_results[0].pred_instances_3d)
+
+            # save the memory
+            with torch.no_grad():
+                losses = model.forward(**data, mode='loss')
+
+            self.assertGreater(losses['vote_loss'], 0)
+            self.assertGreater(losses['objectness_loss'], 0)
+            self.assertGreater(losses['semantic_loss'], 0)
+            self.assertGreater(losses['dir_res_loss'], 0)
+            self.assertGreater(losses['size_class_loss'], 0)
+            self.assertGreater(losses['size_res_loss'], 0)
+
+        # TODO test_aug_test
diff --git a/tests/test_models/test_detectors/test_voxelnet.py b/tests/test_models/test_detectors/test_voxelnet.py
new file mode 100755
index 0000000..eaccb22
--- /dev/null
+++ b/tests/test_models/test_detectors/test_voxelnet.py
@@ -0,0 +1,73 @@
+# Copyright (c) OpenMMLab. All rights reserved.
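+# The detector tests in this directory share one pattern, in sketch form:
+#     cfg = get_detector_cfg('pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py')
+#     model = MODELS.build(cfg)
+#     data = model.data_preprocessor(create_detector_inputs(num_gt_instance=2), True)
+#     model.forward(**data, mode='loss')  # or mode='predict'
+# Everything past model construction sits behind torch.cuda.is_available(),
+# so on a CPU-only machine only the config build and input packing are covered.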
+import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestVoxelNet(unittest.TestCase): + + def test_voxelnet(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'VoxelNet') + DefaultScope.get_instance('test_voxelnet', scope_name='mmdet3d') + setup_seed(0) + pointpillars_cfg = get_detector_cfg( + 'pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py') + model = MODELS.build(pointpillars_cfg) + num_gt_instance = 2 + packed_inputs = create_detector_inputs(num_gt_instance=num_gt_instance) + + # TODO: Support aug_test + # aug_data = [ + # create_detector_inputs(num_gt_instance=num_gt_instance), + # create_detector_inputs(num_gt_instance=num_gt_instance + 1) + # ] + # # test_aug_test + # metainfo = { + # 'pcd_scale_factor': 1, + # 'pcd_horizontal_flip': 1, + # 'pcd_vertical_flip': 1, + # 'box_type_3d': LiDARInstance3DBoxes + # } + # for item in aug_data: + # item['data_sample'].set_metainfo(metainfo) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('bboxes_3d', results[0].pred_instances_3d) + self.assertIn('scores_3d', results[0].pred_instances_3d) + self.assertIn('labels_3d', results[0].pred_instances_3d) + + # TODO: Support aug_test + # batch_inputs, data_samples = model.data_preprocessor( + # aug_data, True) + # aug_results = model.forward( + # batch_inputs, data_samples, mode='predict') + # self.assertEqual(len(results), len(data)) + # self.assertIn('bboxes_3d', aug_results[0].pred_instances_3d) + # self.assertIn('scores_3d', aug_results[0].pred_instances_3d) + # self.assertIn('labels_3d', aug_results[0].pred_instances_3d) + # self.assertIn('bboxes_3d', aug_results[1].pred_instances_3d) + # self.assertIn('scores_3d', aug_results[1].pred_instances_3d) + # self.assertIn('labels_3d', aug_results[1].pred_instances_3d) + + # save the memory + + with torch.no_grad(): + losses = model.forward(**data, mode='loss') + torch.cuda.empty_cache() + self.assertGreaterEqual(losses['loss_dir'][0], 0) + self.assertGreaterEqual(losses['loss_bbox'][0], 0) + self.assertGreaterEqual(losses['loss_cls'][0], 0) diff --git a/tests/test_models/test_layers/test_box3d_nms.py b/tests/test_models/test_layers/test_box3d_nms.py new file mode 100755 index 0000000..d0b8752 --- /dev/null +++ b/tests/test_models/test_layers/test_box3d_nms.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
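+# The aligned_3d_nms and circle_nms checks below run on CPU; boxes are given
+# as axis-aligned [x1, y1, z1, x2, y2, z2] rows. The two BEV NMS tests are
+# CUDA-only and are skipped via pytest.mark.skipif when no GPU is present.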
+import numpy as np +import pytest +import torch + + +def test_aligned_3d_nms(): + from mmdet3d.models.layers import aligned_3d_nms + + boxes = torch.tensor([[1.2261, 0.6679, -1.2678, 2.6547, 1.0428, 0.1000], + [5.0919, 0.6512, 0.7238, 5.4821, 1.2451, 2.1095], + [6.8392, -1.2205, 0.8570, 7.6920, 0.3220, 3.2223], + [3.6900, -0.4235, -1.0380, 4.4415, 0.2671, -0.1442], + [4.8071, -1.4311, 0.7004, 5.5788, -0.6837, 1.2487], + [2.1807, -1.5811, -1.1289, 3.0151, -0.1346, -0.5351], + [4.4631, -4.2588, -1.1403, 5.3012, -3.4463, -0.3212], + [4.7607, -3.3311, 0.5993, 5.2976, -2.7874, 1.2273], + [3.1265, 0.7113, -0.0296, 3.8944, 1.3532, 0.9785], + [5.5828, -3.5350, 1.0105, 8.2841, -0.0405, 3.3614], + [3.0003, -2.1099, -1.0608, 5.3423, 0.0328, 0.6252], + [2.7148, 0.6082, -1.1738, 3.6995, 1.2375, -0.0209], + [4.9263, -0.2152, 0.2889, 5.6963, 0.3416, 1.3471], + [5.0713, 1.3459, -0.2598, 5.6278, 1.9300, 1.2835], + [4.5985, -2.3996, -0.3393, 5.2705, -1.7306, 0.5698], + [4.1386, 0.5658, 0.0422, 4.8937, 1.1983, 0.9911], + [2.7694, -1.9822, -1.0637, 4.0691, 0.3575, -0.1393], + [4.6464, -3.0123, -1.0694, 5.1421, -2.4450, -0.3758], + [3.4754, 0.4443, -1.1282, 4.6727, 1.3786, 0.2550], + [2.5905, -0.3504, -1.1202, 3.1599, 0.1153, -0.3036], + [4.1336, -3.4813, 1.1477, 6.2091, -0.8776, 2.6757], + [3.9966, 0.2069, -1.1148, 5.0841, 1.0525, -0.0648], + [4.3216, -1.8647, 0.4733, 6.2069, 0.6671, 3.3363], + [4.7683, 0.4286, -0.0500, 5.5642, 1.2906, 0.8902], + [1.7337, 0.7625, -1.0058, 3.0675, 1.3617, 0.3849], + [4.7193, -3.3687, -0.9635, 5.1633, -2.7656, 1.1001], + [4.4704, -2.7744, -1.1127, 5.0971, -2.0228, -0.3150], + [2.7027, 0.6122, -0.9169, 3.3083, 1.2117, 0.6129], + [4.8789, -2.0025, 0.8385, 5.5214, -1.3668, 1.3552], + [3.7856, -1.7582, -0.1738, 5.3373, -0.6300, 0.5558]]) + + scores = torch.tensor([ + 3.6414e-03, 2.2901e-02, 2.7576e-04, 1.2238e-02, 5.9310e-04, 1.2659e-01, + 2.4104e-02, 5.0742e-03, 2.3581e-03, 2.0946e-07, 8.8039e-01, 1.9127e-01, + 5.0469e-05, 9.3638e-03, 3.0663e-03, 9.4350e-03, 5.3380e-02, 1.7895e-01, + 2.0048e-01, 1.1294e-03, 3.0304e-08, 2.0237e-01, 1.0894e-08, 6.7972e-02, + 6.7156e-01, 9.3986e-04, 7.9470e-01, 3.9736e-01, 1.8000e-04, 7.9151e-04 + ]) + + cls = torch.tensor([ + 8, 8, 8, 3, 3, 1, 3, 3, 7, 8, 0, 6, 7, 8, 3, 7, 2, 7, 6, 3, 8, 6, 6, 7, + 6, 8, 7, 6, 3, 1 + ]) + + pick = aligned_3d_nms(boxes, scores, cls, 0.25) + expected_pick = torch.tensor([ + 10, 26, 24, 27, 21, 18, 17, 5, 23, 16, 6, 1, 3, 15, 13, 7, 0, 14, 8, + 19, 25, 29, 4, 2, 28, 12, 9, 20, 22 + ]) + + assert torch.all(pick == expected_pick) + + +def test_circle_nms(): + from mmdet3d.models.layers import circle_nms + boxes = torch.tensor([[-11.1100, 2.1300, 0.8823], + [-11.2810, 2.2422, 0.8914], + [-10.3966, -0.3198, 0.8643], + [-10.2906, -13.3159, + 0.8401], [5.6518, 9.9791, 0.8271], + [-11.2652, 13.3637, 0.8267], + [4.7768, -13.0409, 0.7810], [5.6621, 9.0422, 0.7753], + [-10.5561, 18.9627, 0.7518], + [-10.5643, 13.2293, 0.7200]]) + keep = circle_nms(boxes.numpy(), 0.175) + expected_keep = [1, 2, 3, 4, 5, 6, 7, 8, 9] + assert np.all(keep == expected_keep) + + +# copied from tests/test_ops/test_iou3d.py from mmcv<=1.5 +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='requires CUDA support') +def test_nms_bev(): + from mmdet3d.models.layers import nms_bev + + np_boxes = np.array( + [[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0], + [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]], + dtype=np.float32) + np_scores = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32) + np_inds = np.array([1, 0, 3]) + boxes = 
torch.from_numpy(np_boxes) + scores = torch.from_numpy(np_scores) + inds = nms_bev(boxes.cuda(), scores.cuda(), thresh=0.3) + + assert np.allclose(inds.cpu().numpy(), np_inds) + + +# copied from tests/test_ops/test_iou3d.py from mmcv<=1.5 +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='requires CUDA support') +def test_nms_normal_bev(): + from mmdet3d.models.layers import nms_normal_bev + + np_boxes = np.array( + [[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0], + [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]], + dtype=np.float32) + np_scores = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32) + np_inds = np.array([1, 0, 3]) + boxes = torch.from_numpy(np_boxes) + scores = torch.from_numpy(np_scores) + inds = nms_normal_bev(boxes.cuda(), scores.cuda(), thresh=0.3) + + assert np.allclose(inds.cpu().numpy(), np_inds) diff --git a/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_fa_module.py b/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_fa_module.py new file mode 100755 index 0000000..cb1f86c --- /dev/null +++ b/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_fa_module.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + + +def test_dgcnn_fa_module(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import DGCNNFAModule + + self = DGCNNFAModule(mlp_channels=[24, 16]).cuda() + assert self.mlps.layer0.conv.in_channels == 24 + assert self.mlps.layer0.conv.out_channels == 16 + + points = [torch.rand(1, 200, 12).float().cuda() for _ in range(3)] + + fa_points = self(points) + assert fa_points.shape == torch.Size([1, 200, 40]) diff --git a/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_fp_module.py b/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_fp_module.py new file mode 100755 index 0000000..ec57db6 --- /dev/null +++ b/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_fp_module.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch + + +def test_dgcnn_fp_module(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import DGCNNFPModule + + self = DGCNNFPModule(mlp_channels=[24, 16]).cuda() + assert self.mlps.layer0.conv.in_channels == 24 + assert self.mlps.layer0.conv.out_channels == 16 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', + np.float32).reshape((-1, 6)) + + # (B, N, 3) + xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda() + points = xyz.repeat([1, 1, 8]).cuda() + + fp_points = self(points) + assert fp_points.shape == torch.Size([1, 200, 16]) diff --git a/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_gf_module.py b/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_gf_module.py new file mode 100755 index 0000000..ddc14a4 --- /dev/null +++ b/tests/test_models/test_layers/test_dgcnn_modules/test_dgcnn_gf_module.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
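+# Note: the DGCNN module tests skip themselves when CUDA is unavailable, and
+# this one (like the FP-module test) loads the small sample point cloud at
+# tests/data/sunrgbd/points/000001.bin, which is assumed to be present in the
+# checkout.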
+import numpy as np +import pytest +import torch + + +def test_dgcnn_gf_module(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import DGCNNGFModule + + self = DGCNNGFModule( + mlp_channels=[18, 64, 64], + num_sample=20, + knn_mode='D-KNN', + radius=None, + norm_cfg=dict(type='BN2d'), + act_cfg=dict(type='ReLU'), + pool_mode='max').cuda() + + assert self.mlps[0].layer0.conv.in_channels == 18 + assert self.mlps[0].layer0.conv.out_channels == 64 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + # (B, N, C) + xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda() + points = xyz.repeat([1, 1, 3]) + + # test forward + new_points = self(points) + + assert new_points.shape == torch.Size([1, 200, 64]) + + # test F-KNN mod + self = DGCNNGFModule( + mlp_channels=[6, 64, 64], + num_sample=20, + knn_mode='F-KNN', + radius=None, + norm_cfg=dict(type='BN2d'), + act_cfg=dict(type='ReLU'), + pool_mode='max').cuda() + + # test forward + new_points = self(xyz) + assert new_points.shape == torch.Size([1, 200, 64]) + + # test ball query + self = DGCNNGFModule( + mlp_channels=[6, 64, 64], + num_sample=20, + knn_mode='F-KNN', + radius=0.2, + norm_cfg=dict(type='BN2d'), + act_cfg=dict(type='ReLU'), + pool_mode='max').cuda() diff --git a/tests/test_models/test_layers/test_fusion_layers/test_fusion_coord_trans.py b/tests/test_models/test_layers/test_fusion_layers/test_fusion_coord_trans.py new file mode 100755 index 0000000..7fbd34c --- /dev/null +++ b/tests/test_models/test_layers/test_fusion_layers/test_fusion_coord_trans.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Tests coords transformation in fusion modules. + +CommandLine: + pytest tests/test_models/test_fusion/test_fusion_coord_trans.py +""" + +import torch + +from mmdet3d.models.layers.fusion_layers import apply_3d_transformation + + +def test_coords_transformation(): + """Test the transformation of 3d coords.""" + + # H+R+S+T, not reverse, depth + img_meta = { + 'pcd_scale_factor': + 1.2311e+00, + 'pcd_rotation': [[8.660254e-01, 0.5, 0], [-0.5, 8.660254e-01, 0], + [0, 0, 1.0e+00]], + 'pcd_trans': [1.111e-02, -8.88e-03, 0.0], + 'pcd_horizontal_flip': + True, + 'transformation_3d_flow': ['HF', 'R', 'S', 'T'] + } + + pcd = torch.tensor([[-5.2422e+00, -2.9757e-01, 4.0021e+01], + [-9.1435e-01, 2.6675e+01, -5.5950e+00], + [2.0089e-01, 5.8098e+00, -3.5409e+01], + [-1.9461e-01, 3.1309e+01, -1.0901e+00]]) + + pcd_transformed = apply_3d_transformation( + pcd, 'DEPTH', img_meta, reverse=False) + + expected_tensor = torch.tensor( + [[5.78332345e+00, 2.900697e+00, 4.92698531e+01], + [-1.5433839e+01, 2.8993850e+01, -6.8880045e+00], + [-3.77929405e+00, 6.061661e+00, -4.35920199e+01], + [-1.9053658e+01, 3.3491436e+01, -1.34202211e+00]]) + + assert torch.allclose(expected_tensor, pcd_transformed, 1e-4) + + # H+R+S+T, reverse, depth + img_meta = { + 'pcd_scale_factor': + 7.07106781e-01, + 'pcd_rotation': [[7.07106781e-01, 7.07106781e-01, 0.0], + [-7.07106781e-01, 7.07106781e-01, 0.0], + [0.0, 0.0, 1.0e+00]], + 'pcd_trans': [0.0, 0.0, 0.0], + 'pcd_horizontal_flip': + False, + 'transformation_3d_flow': ['HF', 'R', 'S', 'T'] + } + + pcd = torch.tensor([[-5.2422e+00, -2.9757e-01, 4.0021e+01], + [-9.1435e+01, 2.6675e+01, -5.5950e+00], + [6.061661e+00, -0.0, -1.0e+02]]) + + pcd_transformed = apply_3d_transformation( + pcd, 'DEPTH', img_meta, reverse=True) + + expected_tensor = torch.tensor( + [[-5.53977e+00, 4.94463e+00, 5.65982409e+01], + [-6.476e+01, 1.1811e+02, -7.91252488e+00], + 
[6.061661e+00, -6.061661e+00, -1.41421356e+02]]) + assert torch.allclose(expected_tensor, pcd_transformed, 1e-4) + + # H+R+S+T, not reverse, camera + img_meta = { + 'pcd_scale_factor': + 1.0 / 7.07106781e-01, + 'pcd_rotation': [[7.07106781e-01, 0.0, 7.07106781e-01], + [0.0, 1.0e+00, 0.0], + [-7.07106781e-01, 0.0, 7.07106781e-01]], + 'pcd_trans': [1.0e+00, -1.0e+00, 0.0], + 'pcd_horizontal_flip': + True, + 'transformation_3d_flow': ['HF', 'S', 'R', 'T'] + } + + pcd = torch.tensor([[-5.2422e+00, 4.0021e+01, -2.9757e-01], + [-9.1435e+01, -5.5950e+00, 2.6675e+01], + [6.061661e+00, -1.0e+02, -0.0]]) + + pcd_transformed = apply_3d_transformation( + pcd, 'CAMERA', img_meta, reverse=False) + + expected_tensor = torch.tensor( + [[6.53977e+00, 5.55982409e+01, 4.94463e+00], + [6.576e+01, -8.91252488e+00, 1.1811e+02], + [-5.061661e+00, -1.42421356e+02, -6.061661e+00]]) + + assert torch.allclose(expected_tensor, pcd_transformed, 1e-4) + + # V, reverse, camera + img_meta = {'pcd_vertical_flip': True, 'transformation_3d_flow': ['VF']} + + pcd_transformed = apply_3d_transformation( + pcd, 'CAMERA', img_meta, reverse=True) + + expected_tensor = torch.tensor([[-5.2422e+00, 4.0021e+01, 2.9757e-01], + [-9.1435e+01, -5.5950e+00, -2.6675e+01], + [6.061661e+00, -1.0e+02, 0.0]]) + + assert torch.allclose(expected_tensor, pcd_transformed, 1e-4) + + # V+H, not reverse, depth + img_meta = { + 'pcd_vertical_flip': True, + 'pcd_horizontal_flip': True, + 'transformation_3d_flow': ['VF', 'HF'] + } + + pcd_transformed = apply_3d_transformation( + pcd, 'DEPTH', img_meta, reverse=False) + + expected_tensor = torch.tensor([[5.2422e+00, -4.0021e+01, -2.9757e-01], + [9.1435e+01, 5.5950e+00, 2.6675e+01], + [-6.061661e+00, 1.0e+02, 0.0]]) + assert torch.allclose(expected_tensor, pcd_transformed, 1e-4) + + # V+H, reverse, lidar + img_meta = { + 'pcd_vertical_flip': True, + 'pcd_horizontal_flip': True, + 'transformation_3d_flow': ['VF', 'HF'] + } + + pcd_transformed = apply_3d_transformation( + pcd, 'LIDAR', img_meta, reverse=True) + + expected_tensor = torch.tensor([[5.2422e+00, -4.0021e+01, -2.9757e-01], + [9.1435e+01, 5.5950e+00, 2.6675e+01], + [-6.061661e+00, 1.0e+02, 0.0]]) + assert torch.allclose(expected_tensor, pcd_transformed, 1e-4) diff --git a/tests/test_models/test_layers/test_fusion_layers/test_point_fusion.py b/tests/test_models/test_layers/test_fusion_layers/test_point_fusion.py new file mode 100755 index 0000000..916c404 --- /dev/null +++ b/tests/test_models/test_layers/test_fusion_layers/test_point_fusion.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Tests the core function of point fusion. 
+ +CommandLine: + pytest tests/test_models/test_fusion/test_point_fusion.py +""" + +import torch + +from mmdet3d.models.layers.fusion_layers import PointFusion + + +def test_sample_single(): + # this function makes sure the rewriting of 3d coords transformation + # in point fusion does not change the original behaviour + lidar2img = torch.tensor( + [[6.0294e+02, -7.0791e+02, -1.2275e+01, -1.7094e+02], + [1.7678e+02, 8.8088e+00, -7.0794e+02, -1.0257e+02], + [9.9998e-01, -1.5283e-03, -5.2907e-03, -3.2757e-01], + [0.0000e+00, 0.0000e+00, 0.0000e+00, 1.0000e+00]]) + + # all use default + img_meta = { + 'transformation_3d_flow': ['R', 'S', 'T', 'HF'], + 'input_shape': [370, 1224], + 'img_shape': [370, 1224], + 'lidar2img': lidar2img, + } + + # dummy parameters + fuse = PointFusion(1, 1, 1, 1) + img_feat = torch.arange(370 * 1224)[None, ...].view( + 370, 1224)[None, None, ...].float() / (370 * 1224) + pts = torch.tensor([[8.356, -4.312, -0.445], [11.777, -6.724, -0.564], + [6.453, 2.53, -1.612], [6.227, -3.839, -0.563]]) + out = fuse.sample_single(img_feat, pts, img_meta) + + expected_tensor = torch.tensor( + [0.5560822, 0.5476625, 0.9687978, 0.6241757]) + assert torch.allclose(expected_tensor, out, 1e-4) + + pcd_rotation = torch.tensor([[8.660254e-01, 0.5, 0], + [-0.5, 8.660254e-01, 0], [0, 0, 1.0e+00]]) + pcd_scale_factor = 1.111 + pcd_trans = torch.tensor([1.0, -1.0, 0.5]) + pts = pts @ pcd_rotation + pts *= pcd_scale_factor + pts += pcd_trans + pts[:, 1] = -pts[:, 1] + + # not use default + img_meta.update({ + 'pcd_scale_factor': pcd_scale_factor, + 'pcd_rotation': pcd_rotation, + 'pcd_trans': pcd_trans, + 'pcd_horizontal_flip': True + }) + out = fuse.sample_single(img_feat, pts, img_meta) + expected_tensor = torch.tensor( + [0.5560822, 0.5476625, 0.9687978, 0.6241757]) + assert torch.allclose(expected_tensor, out, 1e-4) diff --git a/tests/test_models/test_layers/test_fusion_layers/test_vote_fusion.py b/tests/test_models/test_layers/test_fusion_layers/test_vote_fusion.py new file mode 100755 index 0000000..9f8bed0 --- /dev/null +++ b/tests/test_models/test_layers/test_fusion_layers/test_vote_fusion.py @@ -0,0 +1,322 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Tests the core function of vote fusion. 
+ +CommandLine: + pytest tests/test_models/test_fusion/test_vote_fusion.py +""" + +import torch + +from mmdet3d.models.layers.fusion_layers import VoteFusion + + +def test_vote_fusion(): + img_meta = { + 'ori_shape': (530, 730), + 'img_shape': (600, 826), + 'pad_shape': (608, 832), + 'scale_factor': + torch.tensor([1.1315, 1.1321, 1.1315, 1.1321]), + 'flip': + False, + 'pcd_horizontal_flip': + False, + 'pcd_vertical_flip': + False, + 'pcd_trans': + torch.tensor([0., 0., 0.]), + 'pcd_scale_factor': + 1.0308290128214932, + 'pcd_rotation': + torch.tensor([[0.9747, 0.2234, 0.0000], [-0.2234, 0.9747, 0.0000], + [0.0000, 0.0000, 1.0000]]), + 'transformation_3d_flow': ['HF', 'R', 'S', 'T'] + } + + rt_mat = torch.tensor([[0.979570, 0.047954, -0.195330], + [0.047954, 0.887470, 0.458370], + [0.195330, -0.458370, 0.867030]]) + k_mat = torch.tensor([[529.5000, 0.0000, 365.0000], + [0.0000, 529.5000, 265.0000], + [0.0000, 0.0000, 1.0000]]) + rt_mat = rt_mat.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0] + ]) @ rt_mat.transpose(1, 0) + depth2img = k_mat @ rt_mat + img_meta['depth2img'] = depth2img + + bboxes = torch.tensor([[[ + 5.4286e+02, 9.8283e+01, 6.1700e+02, 1.6742e+02, 9.7922e-01, 3.0000e+00 + ], [ + 4.2613e+02, 8.4646e+01, 4.9091e+02, 1.6237e+02, 9.7848e-01, 3.0000e+00 + ], [ + 2.5606e+02, 7.3244e+01, 3.7883e+02, 1.8471e+02, 9.7317e-01, 3.0000e+00 + ], [ + 6.0104e+02, 1.0648e+02, 6.6757e+02, 1.9216e+02, 8.4607e-01, 3.0000e+00 + ], [ + 2.2923e+02, 1.4984e+02, 7.0163e+02, 4.6537e+02, 3.5719e-01, 0.0000e+00 + ], [ + 2.5614e+02, 7.4965e+01, 3.3275e+02, 1.5908e+02, 2.8688e-01, 3.0000e+00 + ], [ + 9.8718e+00, 1.4142e+02, 2.0213e+02, 3.3878e+02, 1.0935e-01, 3.0000e+00 + ], [ + 6.1930e+02, 1.1768e+02, 6.8505e+02, 2.0318e+02, 1.0720e-01, 3.0000e+00 + ]]]) + + seeds_3d = torch.tensor([[[0.044544, 1.675476, -1.531831], + [2.500625, 7.238662, -0.737675], + [-0.600003, 4.827733, -0.084022], + [1.396212, 3.994484, -1.551180], + [-2.054746, 2.012759, -0.357472], + [-0.582477, 6.580470, -1.466052], + [1.313331, 5.722039, 0.123904], + [-1.107057, 3.450359, -1.043422], + [1.759746, 5.655951, -1.519564], + [-0.203003, 6.453243, 0.137703], + [-0.910429, 0.904407, -0.512307], + [0.434049, 3.032374, -0.763842], + [1.438146, 2.289263, -1.546332], + [0.575622, 5.041906, -0.891143], + [-1.675931, 1.417597, -1.588347]]]) + + imgs = torch.linspace( + -1, 1, steps=608 * 832).reshape(1, 608, 832).repeat(3, 1, 1)[None] + + expected_tensor1 = torch.tensor( + [[[ + 0.000000e+00, -0.000000e+00, 0.000000e+00, -0.000000e+00, + 0.000000e+00, 1.193706e-01, -0.000000e+00, -2.879214e-01, + -0.000000e+00, 0.000000e+00, 1.422463e-01, -6.474612e-01, + -0.000000e+00, 1.490057e-02, 0.000000e+00 + ], + [ + 0.000000e+00, -0.000000e+00, -0.000000e+00, 0.000000e+00, + 0.000000e+00, -1.873745e+00, -0.000000e+00, 1.576240e-01, + 0.000000e+00, -0.000000e+00, -3.646177e-02, -7.751858e-01, + 0.000000e+00, 9.593642e-02, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, -6.263277e-02, 0.000000e+00, -3.646387e-01, + 0.000000e+00, 0.000000e+00, -5.875812e-01, -6.263450e-02, + 0.000000e+00, 1.149264e-01, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 8.899736e-01, 0.000000e+00, 9.019017e-01, + 0.000000e+00, 0.000000e+00, 6.917775e-01, 8.899733e-01, + 0.000000e+00, 9.812444e-01, 0.000000e+00 + ], + [ + -0.000000e+00, -0.000000e+00, -0.000000e+00, -0.000000e+00, + -0.000000e+00, -4.516903e-01, -0.000000e+00, -2.315422e-01, + -0.000000e+00, -0.000000e+00, 
-4.197519e-01, -4.516906e-01, + -0.000000e+00, -1.547615e-01, -0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 3.571937e-01, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 3.571937e-01, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 9.731653e-01, + 0.000000e+00, 0.000000e+00, 1.093455e-01, 0.000000e+00, + 0.000000e+00, 8.460656e-01, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 2.316288e-03, -1.948284e-03, -3.694394e-03, 2.176163e-04, + -3.882605e-03, -1.901490e-03, -3.355042e-03, -1.774631e-03, + -6.981542e-04, -3.886823e-03, -1.302233e-03, -1.189933e-03, + 2.540967e-03, -1.834944e-03, 1.032048e-03 + ], + [ + 2.316288e-03, -1.948284e-03, -3.694394e-03, 2.176163e-04, + -3.882605e-03, -1.901490e-03, -3.355042e-03, -1.774631e-03, + -6.981542e-04, -3.886823e-03, -1.302233e-03, -1.189933e-03, + 2.540967e-03, -1.834944e-03, 1.032048e-03 + ], + [ + 2.316288e-03, -1.948284e-03, -3.694394e-03, 2.176163e-04, + -3.882605e-03, -1.901490e-03, -3.355042e-03, -1.774631e-03, + -6.981542e-04, -3.886823e-03, -1.302233e-03, -1.189933e-03, + 2.540967e-03, -1.834944e-03, 1.032048e-03 + ]]]) + + expected_tensor2 = torch.tensor([[ + False, False, False, False, False, True, False, True, False, False, + True, True, False, True, False, False, False, False, False, False, + False, False, True, False, False, False, False, False, True, False, + False, False, False, False, False, False, False, False, False, False, + False, False, False, True, False + ]]) + + expected_tensor3 = torch.tensor( + [[[ + -0.000000e+00, -0.000000e+00, -0.000000e+00, -0.000000e+00, + 0.000000e+00, -0.000000e+00, 
-0.000000e+00, 0.000000e+00, + -0.000000e+00, -0.000000e+00, 0.000000e+00, -0.000000e+00, + -0.000000e+00, 1.720988e-01, 0.000000e+00 + ], + [ + 0.000000e+00, -0.000000e+00, -0.000000e+00, 0.000000e+00, + -0.000000e+00, 0.000000e+00, -0.000000e+00, 0.000000e+00, + 0.000000e+00, -0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 4.824460e-02, 0.000000e+00 + ], + [ + -0.000000e+00, -0.000000e+00, -0.000000e+00, -0.000000e+00, + -0.000000e+00, -0.000000e+00, -0.000000e+00, 0.000000e+00, + -0.000000e+00, -0.000000e+00, -0.000000e+00, -0.000000e+00, + -0.000000e+00, 1.447314e-01, -0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 9.759269e-01, 0.000000e+00 + ], + [ + -0.000000e+00, -0.000000e+00, -0.000000e+00, -0.000000e+00, + -0.000000e+00, -0.000000e+00, -0.000000e+00, -0.000000e+00, + -0.000000e+00, -0.000000e+00, -0.000000e+00, -0.000000e+00, + -0.000000e+00, -1.631542e-01, -0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 1.072001e-01, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, + 0.000000e+00, 0.000000e+00, 0.000000e+00 + ], + [ + 2.316288e-03, -1.948284e-03, -3.694394e-03, 2.176163e-04, + -3.882605e-03, -1.901490e-03, -3.355042e-03, -1.774631e-03, + -6.981542e-04, -3.886823e-03, -1.302233e-03, -1.189933e-03, + 2.540967e-03, -1.834944e-03, 
1.032048e-03 + ], + [ + 2.316288e-03, -1.948284e-03, -3.694394e-03, 2.176163e-04, + -3.882605e-03, -1.901490e-03, -3.355042e-03, -1.774631e-03, + -6.981542e-04, -3.886823e-03, -1.302233e-03, -1.189933e-03, + 2.540967e-03, -1.834944e-03, 1.032048e-03 + ], + [ + 2.316288e-03, -1.948284e-03, -3.694394e-03, 2.176163e-04, + -3.882605e-03, -1.901490e-03, -3.355042e-03, -1.774631e-03, + -6.981542e-04, -3.886823e-03, -1.302233e-03, -1.189933e-03, + 2.540967e-03, -1.834944e-03, 1.032048e-03 + ]]]) + + fusion = VoteFusion() + out1, out2 = fusion(imgs, bboxes, seeds_3d, [img_meta]) + assert torch.allclose(expected_tensor1, out1[:, :, :15], 1e-3) + assert torch.allclose(expected_tensor2.float(), out2.float(), 1e-3) + assert torch.allclose(expected_tensor3, out1[:, :, 30:45], 1e-3) + + out1, out2 = fusion(imgs, bboxes[:, :2], seeds_3d, [img_meta]) + out1 = out1[:, :15, 30:45] + out2 = out2[:, 30:45].float() + assert torch.allclose(torch.zeros_like(out1), out1, 1e-3) + assert torch.allclose(torch.zeros_like(out2), out2, 1e-3) diff --git a/tests/test_models/test_layers/test_paconv/test_paconv_modules.py b/tests/test_models/test_layers/test_paconv/test_paconv_modules.py new file mode 100755 index 0000000..9c278d4 --- /dev/null +++ b/tests/test_models/test_layers/test_paconv/test_paconv_modules.py @@ -0,0 +1,300 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch + + +def test_paconv_sa_module_msg(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import PAConvSAModuleMSG + + # paconv_num_kernels should have same length as mlp_channels + with pytest.raises(AssertionError): + self = PAConvSAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + paconv_num_kernels=[[4]]).cuda() + + # paconv_num_kernels inner num should match as mlp_channels + with pytest.raises(AssertionError): + self = PAConvSAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + paconv_num_kernels=[[4, 4], [8, 8]]).cuda() + + self = PAConvSAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + paconv_num_kernels=[[4], [8]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + paconv_kernel_input='w_neighbor').cuda() + + assert self.mlps[0].layer0.in_channels == 12 * 2 + assert self.mlps[0].layer0.out_channels == 16 + assert self.mlps[1].layer0.in_channels == 12 * 2 + assert self.mlps[1].layer0.out_channels == 32 + assert self.mlps[0].layer0.bn.num_features == 16 + assert self.mlps[1].layer0.bn.num_features == 32 + + assert self.mlps[0].layer0.scorenet.mlps.layer0.conv.in_channels == 7 + assert self.mlps[0].layer0.scorenet.mlps.layer3.conv.out_channels == 4 + assert self.mlps[1].layer0.scorenet.mlps.layer0.conv.in_channels == 7 + assert self.mlps[1].layer0.scorenet.mlps.layer3.conv.out_channels == 8 + + # last conv in ScoreNet has neither bn nor relu + with pytest.raises(AttributeError): + _ = self.mlps[0].layer0.scorenet.mlps.layer3.bn + with pytest.raises(AttributeError): + _ = self.mlps[0].layer0.scorenet.mlps.layer3.activate + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + # (B, N, 3) + xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda() + # (B, C, N) + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert 
new_features.shape == torch.Size([1, 48, 16]) + assert inds.shape == torch.Size([1, 16]) + + # test with identity kernel input + self = PAConvSAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + paconv_num_kernels=[[4], [8]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + paconv_kernel_input='identity').cuda() + + assert self.mlps[0].layer0.in_channels == 12 * 1 + assert self.mlps[0].layer0.out_channels == 16 + assert self.mlps[0].layer0.num_kernels == 4 + assert self.mlps[1].layer0.in_channels == 12 * 1 + assert self.mlps[1].layer0.out_channels == 32 + assert self.mlps[1].layer0.num_kernels == 8 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + # (B, N, 3) + xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda() + # (B, C, N) + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 48, 16]) + assert inds.shape == torch.Size([1, 16]) + + +def test_paconv_sa_module(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import build_sa_module + sa_cfg = dict( + type='PAConvSAModule', + num_point=16, + radius=0.2, + num_sample=8, + mlp_channels=[12, 32], + paconv_num_kernels=[8], + norm_cfg=dict(type='BN2d'), + use_xyz=True, + pool_mod='max', + paconv_kernel_input='w_neighbor') + self = build_sa_module(sa_cfg).cuda() + + assert self.mlps[0].layer0.in_channels == 15 * 2 + assert self.mlps[0].layer0.out_channels == 32 + assert self.mlps[0].layer0.num_kernels == 8 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + # (B, N, 3) + xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda() + # (B, C, N) + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 32, 16]) + assert inds.shape == torch.Size([1, 16]) + + # test kNN sampling when radius is None + sa_cfg = dict( + type='PAConvSAModule', + num_point=16, + radius=None, + num_sample=8, + mlp_channels=[12, 32], + paconv_num_kernels=[8], + norm_cfg=dict(type='BN2d'), + use_xyz=True, + pool_mod='max', + paconv_kernel_input='identity') + self = build_sa_module(sa_cfg).cuda() + assert self.mlps[0].layer0.in_channels == 15 * 1 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda() + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 32, 16]) + assert inds.shape == torch.Size([1, 16]) + + +def test_paconv_cuda_sa_module_msg(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import PAConvCUDASAModuleMSG + + # paconv_num_kernels should have same length as mlp_channels + with pytest.raises(AssertionError): + self = PAConvCUDASAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + paconv_num_kernels=[[4]]).cuda() + + # paconv_num_kernels inner num should match as mlp_channels + with pytest.raises(AssertionError): + self = PAConvCUDASAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 
16], [12, 32]], + paconv_num_kernels=[[4, 4], [8, 8]]).cuda() + + self = PAConvCUDASAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + paconv_num_kernels=[[4], [8]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + paconv_kernel_input='w_neighbor').cuda() + + assert self.mlps[0][0].in_channels == 12 * 2 + assert self.mlps[0][0].out_channels == 16 + assert self.mlps[0][0].num_kernels == 4 + assert self.mlps[0][0].bn.num_features == 16 + assert self.mlps[1][0].in_channels == 12 * 2 + assert self.mlps[1][0].out_channels == 32 + assert self.mlps[1][0].num_kernels == 8 + assert self.mlps[1][0].bn.num_features == 32 + + assert self.mlps[0][0].scorenet.mlps.layer0.conv.in_channels == 7 + assert self.mlps[0][0].scorenet.mlps.layer3.conv.out_channels == 4 + assert self.mlps[1][0].scorenet.mlps.layer0.conv.in_channels == 7 + assert self.mlps[1][0].scorenet.mlps.layer3.conv.out_channels == 8 + + # last conv in ScoreNet has neither bn nor relu + with pytest.raises(AttributeError): + _ = self.mlps[0][0].scorenet.mlps.layer3.bn + with pytest.raises(AttributeError): + _ = self.mlps[0][0].scorenet.mlps.layer3.activate + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + # (B, N, 3) + xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda() + # (B, C, N) + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 48, 16]) + assert inds.shape == torch.Size([1, 16]) + + # CUDA PAConv only supports w_neighbor kernel_input + with pytest.raises(AssertionError): + self = PAConvCUDASAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + paconv_num_kernels=[[4], [8]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + paconv_kernel_input='identity').cuda() + + +def test_paconv_cuda_sa_module(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import build_sa_module + sa_cfg = dict( + type='PAConvCUDASAModule', + num_point=16, + radius=0.2, + num_sample=8, + mlp_channels=[12, 32], + paconv_num_kernels=[8], + norm_cfg=dict(type='BN2d'), + use_xyz=True, + pool_mod='max', + paconv_kernel_input='w_neighbor') + self = build_sa_module(sa_cfg).cuda() + + assert self.mlps[0][0].in_channels == 15 * 2 + assert self.mlps[0][0].out_channels == 32 + assert self.mlps[0][0].num_kernels == 8 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + # (B, N, 3) + xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda() + # (B, C, N) + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 32, 16]) + assert inds.shape == torch.Size([1, 16]) + + # test kNN sampling when radius is None + sa_cfg = dict( + type='PAConvCUDASAModule', + num_point=16, + radius=None, + num_sample=8, + mlp_channels=[12, 32], + paconv_num_kernels=[8], + norm_cfg=dict(type='BN2d'), + use_xyz=True, + pool_mod='max', + paconv_kernel_input='w_neighbor') + self = build_sa_module(sa_cfg).cuda() + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda() + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + 
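+    # test forward with kNN sampling (radius=None)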
new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 32, 16]) + assert inds.shape == torch.Size([1, 16]) diff --git a/tests/test_models/test_layers/test_paconv/test_paconv_ops.py b/tests/test_models/test_layers/test_paconv/test_paconv_ops.py new file mode 100755 index 0000000..9f3f9cd --- /dev/null +++ b/tests/test_models/test_layers/test_paconv/test_paconv_ops.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmdet3d.models.layers import PAConv, PAConvCUDA + + +def test_paconv(): + B = 2 + in_channels = 6 + out_channels = 12 + npoint = 4 + K = 3 + num_kernels = 4 + points_xyz = torch.randn(B, 3, npoint, K) + features = torch.randn(B, in_channels, npoint, K) + + paconv = PAConv(in_channels, out_channels, num_kernels) + assert paconv.weight_bank.shape == torch.Size( + [in_channels * 2, out_channels * num_kernels]) + + with torch.no_grad(): + new_features, _ = paconv((features, points_xyz)) + + assert new_features.shape == torch.Size([B, out_channels, npoint, K]) + + +def test_paconv_cuda(): + if not torch.cuda.is_available(): + pytest.skip() + B = 2 + in_channels = 6 + out_channels = 12 + N = 32 + npoint = 4 + K = 3 + num_kernels = 4 + points_xyz = torch.randn(B, 3, npoint, K).float().cuda() + features = torch.randn(B, in_channels, N).float().cuda() + points_idx = torch.randint(0, N, (B, npoint, K)).long().cuda() + + paconv = PAConvCUDA(in_channels, out_channels, num_kernels).cuda() + assert paconv.weight_bank.shape == torch.Size( + [in_channels * 2, out_channels * num_kernels]) + + with torch.no_grad(): + new_features, _, _ = paconv((features, points_xyz, points_idx)) + + assert new_features.shape == torch.Size([B, out_channels, npoint, K]) diff --git a/tests/test_models/test_layers/test_pointnet_modules/test_point_fp_module.py b/tests/test_models/test_layers/test_pointnet_modules/test_point_fp_module.py new file mode 100755 index 0000000..c413f27 --- /dev/null +++ b/tests/test_models/test_layers/test_pointnet_modules/test_point_fp_module.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch + + +def test_pointnet_fp_module(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import PointFPModule + + self = PointFPModule(mlp_channels=[24, 16]).cuda() + assert self.mlps.layer0.conv.in_channels == 24 + assert self.mlps.layer0.conv.out_channels == 16 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', + np.float32).reshape((-1, 6)) + + # (B, N, 3) + xyz1 = torch.from_numpy(xyz[0::2, :3]).view(1, -1, 3).cuda() + # (B, C1, N) + features1 = xyz1.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + # (B, M, 3) + xyz2 = torch.from_numpy(xyz[1::3, :3]).view(1, -1, 3).cuda() + # (B, C2, N) + features2 = xyz2.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + fp_features = self(xyz1, xyz2, features1, features2) + assert fp_features.shape == torch.Size([1, 16, 50]) diff --git a/tests/test_models/test_layers/test_pointnet_modules/test_point_sa_module.py b/tests/test_models/test_layers/test_pointnet_modules/test_point_sa_module.py new file mode 100755 index 0000000..2b3e678 --- /dev/null +++ b/tests/test_models/test_layers/test_pointnet_modules/test_point_sa_module.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
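+# In the MSG tests below the two branches output 16 and 32 channels, so the
+# aggregated features concatenate to 48 channels; with use_xyz=True the
+# single-branch module sees 12 + 3 (xyz) = 15 input channels.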
+import numpy as np +import pytest +import torch + + +def test_pointnet_sa_module_msg(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import PointSAModuleMSG + + self = PointSAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max').cuda() + + assert self.mlps[0].layer0.conv.in_channels == 12 + assert self.mlps[0].layer0.conv.out_channels == 16 + assert self.mlps[1].layer0.conv.in_channels == 12 + assert self.mlps[1].layer0.conv.out_channels == 32 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + # (B, N, 3) + xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda() + # (B, C, N) + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 48, 16]) + assert inds.shape == torch.Size([1, 16]) + + # test D-FPS mod + self = PointSAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + fps_mod=['D-FPS'], + fps_sample_range_list=[-1]).cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 48, 16]) + assert inds.shape == torch.Size([1, 16]) + + # test F-FPS mod + self = PointSAModuleMSG( + num_point=16, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + fps_mod=['F-FPS'], + fps_sample_range_list=[-1]).cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 48, 16]) + assert inds.shape == torch.Size([1, 16]) + + # test FS mod + self = PointSAModuleMSG( + num_point=8, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + fps_mod=['FS'], + fps_sample_range_list=[-1]).cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 48, 16]) + assert inds.shape == torch.Size([1, 16]) + + # test using F-FPS mod and D-FPS mod simultaneously + self = PointSAModuleMSG( + num_point=[8, 12], + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + fps_mod=['F-FPS', 'D-FPS'], + fps_sample_range_list=[64, -1]).cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 20, 3]) + assert new_features.shape == torch.Size([1, 48, 20]) + assert inds.shape == torch.Size([1, 20]) + + # test num_points = None + self = PointSAModuleMSG( + num_point=None, + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max').cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_features.shape == torch.Size([1, 48, 1]) + + # length of 'fps_mod' should be same as 'fps_sample_range_list' + with pytest.raises(AssertionError): + PointSAModuleMSG( + num_point=8, + radii=[0.2, 0.4], + 
sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + fps_mod=['F-FPS', 'D-FPS'], + fps_sample_range_list=[-1]).cuda() + + # length of 'num_point' should be same as 'fps_sample_range_list' + with pytest.raises(AssertionError): + PointSAModuleMSG( + num_point=[8, 8], + radii=[0.2, 0.4], + sample_nums=[4, 8], + mlp_channels=[[12, 16], [12, 32]], + norm_cfg=dict(type='BN2d'), + use_xyz=False, + pool_mod='max', + fps_mod=['F-FPS'], + fps_sample_range_list=[-1]).cuda() + + +def test_pointnet_sa_module(): + if not torch.cuda.is_available(): + pytest.skip() + from mmdet3d.models.layers import build_sa_module + sa_cfg = dict( + type='PointSAModule', + num_point=16, + radius=0.2, + num_sample=8, + mlp_channels=[12, 32], + norm_cfg=dict(type='BN2d'), + use_xyz=True, + pool_mod='max') + self = build_sa_module(sa_cfg).cuda() + + assert self.mlps[0].layer0.conv.in_channels == 15 + assert self.mlps[0].layer0.conv.out_channels == 32 + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + # (B, N, 3) + xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda() + # (B, C, N) + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + + # test forward + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 32, 16]) + assert inds.shape == torch.Size([1, 16]) + + # can't set normalize_xyz when radius is None + with pytest.raises(AssertionError): + sa_cfg = dict( + type='PointSAModule', + num_point=16, + radius=None, + num_sample=8, + mlp_channels=[12, 32], + norm_cfg=dict(type='BN2d'), + use_xyz=True, + pool_mod='max', + normalize_xyz=True) + self = build_sa_module(sa_cfg) + + # test kNN sampling when radius is None + sa_cfg['normalize_xyz'] = False + self = build_sa_module(sa_cfg).cuda() + + xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32) + + xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda() + features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda() + new_xyz, new_features, inds = self(xyz, features) + assert new_xyz.shape == torch.Size([1, 16, 3]) + assert new_features.shape == torch.Size([1, 32, 16]) + assert inds.shape == torch.Size([1, 16]) diff --git a/tests/test_models/test_layers/test_spconv/test_spconv_module.py b/tests/test_models/test_layers/test_spconv/test_spconv_module.py new file mode 100755 index 0000000..1ae7691 --- /dev/null +++ b/tests/test_models/test_layers/test_spconv/test_spconv_module.py @@ -0,0 +1,105 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
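+# The imports below fall back from spconv 2.x to the mmcv sparse ops when
+# spconv is not installed; both provide the same SparseConvTensor /
+# SubMConv3d / SparseInverseConv3d interface exercised in these tests.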
+import pytest +import torch + +from mmdet3d.models.layers import SparseBasicBlock +from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE + +if IS_SPCONV2_AVAILABLE: + from spconv.pytorch import (SparseConvTensor, SparseInverseConv3d, + SubMConv3d) +else: + from mmcv.ops import SparseConvTensor, SparseInverseConv3d, SubMConv3d + + +def test_SparseBasicBlock(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + voxel_features = torch.tensor( + [[6.56126, 0.9648336, -1.7339306, 0.315], + [6.8162713, -2.480431, -1.3616394, 0.36], + [11.643568, -4.744306, -1.3580885, 0.16], + [23.482342, 6.5036807, 0.5806964, 0.35]], + dtype=torch.float32).cuda() # n, point_features + coordinates = torch.tensor( + [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232], + [1, 35, 930, 469]], + dtype=torch.int32).cuda() # n, 4(batch, ind_x, ind_y, ind_z) + + # test + input_sp_tensor = SparseConvTensor(voxel_features, coordinates, + [41, 1600, 1408], 2) + self = SparseBasicBlock( + 4, + 4, + conv_cfg=dict(type='SubMConv3d', indice_key='subm1'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01)).cuda() + # test conv and bn layer + assert isinstance(self.conv1, SubMConv3d) + assert self.conv1.in_channels == 4 + assert self.conv1.out_channels == 4 + assert isinstance(self.conv2, SubMConv3d) + assert self.conv2.out_channels == 4 + assert self.conv2.out_channels == 4 + assert self.bn1.eps == 1e-3 + assert self.bn1.momentum == 0.01 + + out_features = self(input_sp_tensor) + assert out_features.features.shape == torch.Size([4, 4]) + + +def test_make_sparse_convmodule(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + from mmdet3d.models.layers import make_sparse_convmodule + + voxel_features = torch.tensor( + [[6.56126, 0.9648336, -1.7339306, 0.315], + [6.8162713, -2.480431, -1.3616394, 0.36], + [11.643568, -4.744306, -1.3580885, 0.16], + [23.482342, 6.5036807, 0.5806964, 0.35]], + dtype=torch.float32).cuda() # n, point_features + coordinates = torch.tensor( + [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232], + [1, 35, 930, 469]], + dtype=torch.int32).cuda() # n, 4(batch, ind_x, ind_y, ind_z) + + # test + input_sp_tensor = SparseConvTensor(voxel_features, coordinates, + [41, 1600, 1408], 2) + + sparse_block0 = make_sparse_convmodule( + 4, + 16, + 3, + 'test0', + stride=1, + padding=0, + conv_type='SubMConv3d', + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + order=('conv', 'norm', 'act')).cuda() + assert isinstance(sparse_block0[0], SubMConv3d) + assert sparse_block0[0].in_channels == 4 + assert sparse_block0[0].out_channels == 16 + assert isinstance(sparse_block0[1], torch.nn.BatchNorm1d) + assert sparse_block0[1].eps == 0.001 + assert sparse_block0[1].momentum == 0.01 + assert isinstance(sparse_block0[2], torch.nn.ReLU) + + # test forward + out_features = sparse_block0(input_sp_tensor) + assert out_features.features.shape == torch.Size([4, 16]) + + sparse_block1 = make_sparse_convmodule( + 4, + 16, + 3, + 'test1', + stride=1, + padding=0, + conv_type='SparseInverseConv3d', + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + order=('norm', 'act', 'conv')) + assert isinstance(sparse_block1[0], torch.nn.BatchNorm1d) + assert isinstance(sparse_block1[1], torch.nn.ReLU) + assert isinstance(sparse_block1[2], SparseInverseConv3d) diff --git a/tests/test_models/test_layers/test_torchsparse/test_torchsparse_module.py b/tests/test_models/test_layers/test_torchsparse/test_torchsparse_module.py new file mode 100755 
index 0000000..70f844d --- /dev/null +++ b/tests/test_models/test_layers/test_torchsparse/test_torchsparse_module.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmdet3d.models.layers.torchsparse import IS_TORCHSPARSE_AVAILABLE + +if IS_TORCHSPARSE_AVAILABLE: + from torchsparse import SparseTensor + + from mmdet3d.models.layers.torchsparse_block import ( + TorchSparseConvModule, TorchSparseResidualBlock) +else: + pytest.skip('test requires Torchsparse', allow_module_level=True) + + +def test_TorchsparseConvModule(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + voxel_features = torch.tensor( + [[6.56126, 0.9648336, -1.7339306, 0.315], + [6.8162713, -2.480431, -1.3616394, 0.36], + [11.643568, -4.744306, -1.3580885, 0.16], + [23.482342, 6.5036807, 0.5806964, 0.35]], + dtype=torch.float32).cuda() # n, point_features + coordinates = torch.tensor( + [[12, 819, 131, 0], [16, 750, 136, 0], [16, 705, 232, 1], + [35, 930, 469, 1]], + dtype=torch.int32).cuda() # n, 4(ind_x, ind_y, ind_z, batch) + + # test + input_sp_tensor = SparseTensor(voxel_features, coordinates) + + self = TorchSparseConvModule(4, 4, kernel_size=2, stride=2).cuda() + + out_features = self(input_sp_tensor) + assert out_features.F.shape == torch.Size([4, 4]) + + +def test_TorchsparseResidualBlock(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + voxel_features = torch.tensor( + [[6.56126, 0.9648336, -1.7339306, 0.315], + [6.8162713, -2.480431, -1.3616394, 0.36], + [11.643568, -4.744306, -1.3580885, 0.16], + [23.482342, 6.5036807, 0.5806964, 0.35]], + dtype=torch.float32).cuda() # n, point_features + coordinates = torch.tensor( + [[12, 819, 131, 0], [16, 750, 136, 0], [16, 705, 232, 1], + [35, 930, 469, 1]], + dtype=torch.int32).cuda() # n, 4(ind_x, ind_y, ind_z, batch) + + # test + input_sp_tensor = SparseTensor(voxel_features, coordinates) + + sparse_block0 = TorchSparseResidualBlock(4, 16, kernel_size=3).cuda() + + # test forward + out_features = sparse_block0(input_sp_tensor) + assert out_features.F.shape == torch.Size([4, 16]) diff --git a/tests/test_models/test_layers/test_vote_module.py b/tests/test_models/test_layers/test_vote_module.py new file mode 100755 index 0000000..31a5944 --- /dev/null +++ b/tests/test_models/test_layers/test_vote_module.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
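+# VoteModule with vote_per_seed=3 turns the 64 input seeds into 192 votes; in
+# the second case below (with_res_feat=False, num_points=32) the vote features
+# are expected to equal the first 32 seed features and the offsets are clipped
+# to vote_xyz_range.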
+import torch + + +def test_vote_module(): + from mmdet3d.models.layers import VoteModule + + vote_loss = dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0) + self = VoteModule(vote_per_seed=3, in_channels=8, vote_loss=vote_loss) + + seed_xyz = torch.rand([2, 64, 3], dtype=torch.float32) # (b, npoints, 3) + seed_features = torch.rand( + [2, 8, 64], dtype=torch.float32) # (b, in_channels, npoints) + + # test forward + vote_xyz, vote_features, vote_offset = self(seed_xyz, seed_features) + assert vote_xyz.shape == torch.Size([2, 192, 3]) + assert vote_features.shape == torch.Size([2, 8, 192]) + assert vote_offset.shape == torch.Size([2, 3, 192]) + + # test clip offset and without feature residual + self = VoteModule( + vote_per_seed=1, + in_channels=8, + num_points=32, + with_res_feat=False, + vote_xyz_range=(2.0, 2.0, 2.0)) + + vote_xyz, vote_features, vote_offset = self(seed_xyz, seed_features) + assert vote_xyz.shape == torch.Size([2, 32, 3]) + assert vote_features.shape == torch.Size([2, 8, 32]) + assert vote_offset.shape == torch.Size([2, 3, 32]) + assert torch.allclose(seed_features[..., :32], vote_features) + assert vote_offset.max() <= 2.0 + assert vote_offset.min() >= -2.0 diff --git a/tests/test_models/test_losses/test_chamfer_disrance.py b/tests/test_models/test_losses/test_chamfer_disrance.py new file mode 100755 index 0000000..3aaff2f --- /dev/null +++ b/tests/test_models/test_losses/test_chamfer_disrance.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + + +def test_chamfer_disrance(): + from mmdet3d.models.losses import ChamferDistance, chamfer_distance + + with pytest.raises(AssertionError): + # test invalid mode + ChamferDistance(mode='smoothl1') + # test invalid type of reduction + ChamferDistance(mode='l2', reduction=None) + + self = ChamferDistance( + mode='l2', reduction='sum', loss_src_weight=1.0, loss_dst_weight=1.0) + source = torch.tensor([[[-0.9888, 0.9683, -0.8494], + [-6.4536, 4.5146, + 1.6861], [2.0482, 5.6936, -1.4701], + [-0.5173, 5.6472, 2.1748], + [-2.8010, 5.4423, -1.2158], + [2.4018, 2.4389, -0.2403], + [-2.8811, 3.8486, 1.4750], + [-0.2031, 3.8969, + -1.5245], [1.3827, 4.9295, 1.1537], + [-2.6961, 2.2621, -1.0976]], + [[0.3692, 1.8409, + -1.4983], [1.9995, 6.3602, 0.1798], + [-2.1317, 4.6011, + -0.7028], [2.4158, 3.1482, 0.3169], + [-0.5836, 3.6250, -1.2650], + [-1.9862, 1.6182, -1.4901], + [2.5992, 1.2847, -0.8471], + [-0.3467, 5.3681, -1.4755], + [-0.8576, 3.3400, -1.7399], + [2.7447, 4.6349, 0.1994]]]) + + target = torch.tensor([[[-0.4758, 1.0094, -0.8645], + [-0.3130, 0.8564, -0.9061], + [-0.1560, 2.0394, -0.8936], + [-0.3685, 1.6467, -0.8271], + [-0.2740, 2.2212, -0.7980]], + [[1.4856, 2.5299, + -1.0047], [2.3262, 3.3065, -0.9475], + [2.4593, 2.5870, + -0.9423], [0.0000, 0.0000, 0.0000], + [0.0000, 0.0000, 0.0000]]]) + + loss_source, loss_target, indices1, indices2 = self( + source, target, return_indices=True) + + assert torch.allclose(loss_source, torch.tensor(219.5936)) + assert torch.allclose(loss_target, torch.tensor(22.3705)) + + expected_inds1 = [[0, 4, 4, 4, 4, 2, 4, 4, 4, 3], + [0, 1, 0, 1, 0, 4, 2, 0, 0, 1]] + expected_inds2 = [[0, 4, 4, 4, 4, 2, 4, 4, 4, 3], + [0, 1, 0, 1, 0, 3, 2, 0, 0, 1]] + assert (torch.equal(indices1, indices1.new_tensor(expected_inds1)) + or torch.equal(indices1, indices1.new_tensor(expected_inds2))) + assert torch.equal(indices2, + indices2.new_tensor([[0, 0, 0, 0, 0], [0, 3, 6, 0, 0]])) + + loss_source, loss_target, indices1, 
indices2 = chamfer_distance( + source, target, reduction='sum') + + assert torch.allclose(loss_source, torch.tensor(219.5936)) + assert torch.allclose(loss_target, torch.tensor(22.3705)) + assert (torch.equal(indices1, indices1.new_tensor(expected_inds1)) + or torch.equal(indices1, indices1.new_tensor(expected_inds2))) + assert (indices2 == indices2.new_tensor([[0, 0, 0, 0, 0], [0, 3, 6, 0, + 0]])).all() diff --git a/tests/test_models/test_losses/test_multibin_loss.py b/tests/test_models/test_losses/test_multibin_loss.py new file mode 100755 index 0000000..28ed133 --- /dev/null +++ b/tests/test_models/test_losses/test_multibin_loss.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_multibin_loss(): + from mmdet3d.models.losses import MultiBinLoss + + # reduction should be in ['none', 'mean', 'sum'] + with pytest.raises(AssertionError): + multibin_loss = MultiBinLoss(reduction='l2') + + pred = torch.tensor([[ + 0.81, 0.32, 0.78, 0.52, 0.24, 0.12, 0.32, 0.11, 1.20, 1.30, 0.20, 0.11, + 0.12, 0.11, 0.23, 0.31 + ], + [ + 0.02, 0.19, 0.78, 0.22, 0.31, 0.12, 0.22, 0.11, + 1.20, 1.30, 0.45, 0.51, 0.12, 0.11, 0.13, 0.61 + ]]) + target = torch.tensor([[1, 1, 0, 0, 2.14, 3.12, 0.68, -2.15], + [1, 1, 0, 0, 3.12, 3.12, 2.34, 1.23]]) + multibin_loss_cfg = dict( + type='MultiBinLoss', reduction='none', loss_weight=1.0) + multibin_loss = MODELS.build(multibin_loss_cfg) + output_multibin_loss = multibin_loss(pred, target, num_dir_bins=4) + expected_multibin_loss = torch.tensor(2.1120) + assert torch.allclose( + output_multibin_loss, expected_multibin_loss, atol=1e-4) diff --git a/tests/test_models/test_losses/test_paconv_regularization_loss.py b/tests/test_models/test_losses/test_paconv_regularization_loss.py new file mode 100755 index 0000000..754fa54 --- /dev/null +++ b/tests/test_models/test_losses/test_paconv_regularization_loss.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random + +import numpy as np +import pytest +import torch +from torch import nn as nn + + +def set_random_seed(seed, deterministic=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. 
+ """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def test_paconv_regularization_loss(): + from mmdet3d.models.layers import PAConv, PAConvCUDA + from mmdet3d.models.losses import PAConvRegularizationLoss + + class ToyModel(nn.Module): + + def __init__(self): + super(ToyModel, self).__init__() + + self.paconvs = nn.ModuleList() + self.paconvs.append(PAConv(8, 16, 8)) + self.paconvs.append(PAConv(8, 16, 8, kernel_input='identity')) + self.paconvs.append(PAConvCUDA(8, 16, 8)) + + self.conv1 = nn.Conv1d(3, 8, 1) + + set_random_seed(0, True) + model = ToyModel() + + # reduction should be in ['none', 'mean', 'sum'] + with pytest.raises(AssertionError): + paconv_corr_loss = PAConvRegularizationLoss(reduction='l2') + + paconv_corr_loss = PAConvRegularizationLoss(reduction='mean') + mean_corr_loss = paconv_corr_loss(model.modules()) + assert mean_corr_loss >= 0 + assert mean_corr_loss.requires_grad + + sum_corr_loss = paconv_corr_loss(model.modules(), reduction_override='sum') + assert torch.allclose(sum_corr_loss, mean_corr_loss * 3) + + none_corr_loss = paconv_corr_loss( + model.modules(), reduction_override='none') + assert none_corr_loss.shape[0] == 3 + assert torch.allclose(none_corr_loss.mean(), mean_corr_loss) diff --git a/tests/test_models/test_losses/test_rotated_iou_loss.py b/tests/test_models/test_losses/test_rotated_iou_loss.py new file mode 100755 index 0000000..1f29352 --- /dev/null +++ b/tests/test_models/test_losses/test_rotated_iou_loss.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import numpy as np +import torch + +from mmdet3d.models.losses import RotatedIoU3DLoss + + +def test_rotated_iou_3d_loss(): + + if not torch.cuda.is_available(): + return + + boxes1 = torch.tensor([[.5, .5, .5, 1., 1., 1., .0], + [.5, .5, .5, 1., 1., 1., .0], + [.5, .5, .5, 1., 1., 1., .0], + [.5, .5, .5, 1., 1., 1., .0], + [.5, .5, .5, 1., 1., 1., .0]]).cuda() + boxes2 = torch.tensor([[.5, .5, .5, 1., 1., 1., .0], + [.5, .5, .5, 1., 1., 2., np.pi / 2], + [.5, .5, .5, 1., 1., 1., np.pi / 4], + [1., 1., 1., 1., 1., 1., .0], + [-1.5, -1.5, -1.5, 2.5, 2.5, 2.5, .0]]).cuda() + + expect_ious = 1 - torch.tensor([[1., .5, .7071, 1 / 15, .0]]).cuda() + ious = RotatedIoU3DLoss(reduction='none')(boxes1, boxes2) + assert torch.allclose(ious, expect_ious, atol=1e-4) diff --git a/tests/test_models/test_losses/test_uncertain_smooth_l1_loss.py b/tests/test_models/test_losses/test_uncertain_smooth_l1_loss.py new file mode 100755 index 0000000..9d6f2b4 --- /dev/null +++ b/tests/test_models/test_losses/test_uncertain_smooth_l1_loss.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_uncertain_smooth_l1_loss(): + from mmdet3d.models.losses import UncertainL1Loss, UncertainSmoothL1Loss + + # reduction should be in ['none', 'mean', 'sum'] + with pytest.raises(AssertionError): + uncertain_l1_loss = UncertainL1Loss(reduction='l2') + with pytest.raises(AssertionError): + uncertain_smooth_l1_loss = UncertainSmoothL1Loss(reduction='l2') + + pred = torch.tensor([1.5783, 0.5972, 1.4821, 0.9488]) + target = torch.tensor([1.0813, -0.3466, -1.1404, -0.9665]) + sigma = torch.tensor([-1.0053, 0.4710, -1.7784, -0.8603]) + + # test uncertain l1 loss + uncertain_l1_loss_cfg = dict( + type='UncertainL1Loss', alpha=1.0, reduction='mean', loss_weight=1.0) + uncertain_l1_loss = MODELS.build(uncertain_l1_loss_cfg) + mean_l1_loss = uncertain_l1_loss(pred, target, sigma) + expected_l1_loss = torch.tensor(4.7069) + assert torch.allclose(mean_l1_loss, expected_l1_loss, atol=1e-4) + + # test uncertain smooth l1 loss + uncertain_smooth_l1_loss_cfg = dict( + type='UncertainSmoothL1Loss', + alpha=1.0, + beta=0.5, + reduction='mean', + loss_weight=1.0) + uncertain_smooth_l1_loss = MODELS.build(uncertain_smooth_l1_loss_cfg) + mean_smooth_l1_loss = uncertain_smooth_l1_loss(pred, target, sigma) + expected_smooth_l1_loss = torch.tensor(3.9795) + assert torch.allclose( + mean_smooth_l1_loss, expected_smooth_l1_loss, atol=1e-4) diff --git a/tests/test_models/test_middle_encoders/test_sparse_encoders.py b/tests/test_models/test_middle_encoders/test_sparse_encoders.py new file mode 100755 index 0000000..6982823 --- /dev/null +++ b/tests/test_models/test_middle_encoders/test_sparse_encoders.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_sparse_encoder(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + sparse_encoder_cfg = dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[40, 1024, 1024], + order=('conv', 'norm', 'act'), + encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128, + 128)), + encoder_paddings=((1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, + 1)), + block_type='basicblock') + + sparse_encoder = MODELS.build(sparse_encoder_cfg).cuda() + voxel_features = torch.rand([207842, 5]).cuda() + coors = torch.randint(0, 4, [207842, 4]).cuda() + + ret = sparse_encoder(voxel_features, coors, 4) + assert ret.shape == torch.Size([4, 256, 128, 128]) + + +def test_sparse_encoder_for_ssd(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + sparse_encoder_for_ssd_cfg = dict( + type='SparseEncoderSASSD', + in_channels=5, + sparse_shape=[40, 1024, 1024], + order=('conv', 'norm', 'act'), + encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128, + 128)), + encoder_paddings=((1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, + 1)), + block_type='basicblock') + + sparse_encoder = MODELS.build(sparse_encoder_for_ssd_cfg).cuda() + voxel_features = torch.rand([207842, 5]).cuda() + coors = torch.randint(0, 4, [207842, 4]).cuda() + + ret, _ = sparse_encoder(voxel_features, coors, 4, True) + assert ret.shape == torch.Size([4, 256, 128, 128]) diff --git a/tests/test_models/test_middle_encoders/test_sparse_unet.py b/tests/test_models/test_middle_encoders/test_sparse_unet.py new file mode 100755 index 0000000..6fe1e5b --- /dev/null +++ b/tests/test_models/test_middle_encoders/test_sparse_unet.py @@ -0,0 +1,56 @@ +# Copyright (c) 
OpenMMLab. All rights reserved. +import pytest +import torch + +from mmdet3d.models.layers import SparseBasicBlock +from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE + +if IS_SPCONV2_AVAILABLE: + from spconv.pytorch import SparseConv3d, SparseInverseConv3d, SubMConv3d +else: + from mmcv.ops import SparseConv3d, SparseInverseConv3d, SubMConv3d + + +def test_SparseUNet(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + from mmdet3d.models.middle_encoders.sparse_unet import SparseUNet + self = SparseUNet(in_channels=4, sparse_shape=[41, 1600, 1408]).cuda() + + # test encoder layers + assert len(self.encoder_layers) == 4 + assert self.encoder_layers.encoder_layer1[0][0].in_channels == 16 + assert self.encoder_layers.encoder_layer1[0][0].out_channels == 16 + assert isinstance(self.encoder_layers.encoder_layer1[0][0], SubMConv3d) + assert isinstance(self.encoder_layers.encoder_layer1[0][1], + torch.nn.modules.batchnorm.BatchNorm1d) + assert isinstance(self.encoder_layers.encoder_layer1[0][2], + torch.nn.modules.activation.ReLU) + assert self.encoder_layers.encoder_layer4[0][0].in_channels == 64 + assert self.encoder_layers.encoder_layer4[0][0].out_channels == 64 + assert isinstance(self.encoder_layers.encoder_layer4[0][0], SparseConv3d) + assert isinstance(self.encoder_layers.encoder_layer4[2][0], SubMConv3d) + + # test decoder layers + assert isinstance(self.lateral_layer1, SparseBasicBlock) + assert isinstance(self.merge_layer1[0], SubMConv3d) + assert isinstance(self.upsample_layer1[0], SubMConv3d) + assert isinstance(self.upsample_layer2[0], SparseInverseConv3d) + + voxel_features = torch.tensor( + [[6.56126, 0.9648336, -1.7339306, 0.315], + [6.8162713, -2.480431, -1.3616394, 0.36], + [11.643568, -4.744306, -1.3580885, 0.16], + [23.482342, 6.5036807, 0.5806964, 0.35]], + dtype=torch.float32).cuda() # n, point_features + coordinates = torch.tensor( + [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232], + [1, 35, 930, 469]], + dtype=torch.int32).cuda() # n, 4(batch, ind_x, ind_y, ind_z) + + unet_ret_dict = self.forward(voxel_features, coordinates, 2) + seg_features = unet_ret_dict['seg_features'] + spatial_features = unet_ret_dict['spatial_features'] + + assert seg_features.shape == torch.Size([4, 16]) + assert spatial_features.shape == torch.Size([2, 256, 200, 176]) diff --git a/tests/test_models/test_necks/test_dla_neck.py b/tests/test_models/test_necks/test_dla_neck.py new file mode 100755 index 0000000..3bde3e1 --- /dev/null +++ b/tests/test_models/test_necks/test_dla_neck.py @@ -0,0 +1,47 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
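+# With s = 32 the six input maps are 32/16/8/4/2/1 pixels wide; start_level=2
+# and end_level=5 select the 8x8 level as output, hence the (4, 64, 8, 8)
+# assertion in both the GPU (DCNv2) and CPU branches.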
+ +import torch + +from mmdet3d.registry import MODELS + + +def test_dla_neck(): + + s = 32 + in_channels = [16, 32, 64, 128, 256, 512] + feat_sizes = [s // 2**i for i in range(6)] # [32, 16, 8, 4, 2, 1] + + if torch.cuda.is_available(): + # Test DLA Neck with DCNv2 on GPU + neck_cfg = dict( + type='DLANeck', + in_channels=[16, 32, 64, 128, 256, 512], + start_level=2, + end_level=5, + norm_cfg=dict(type='GN', num_groups=32)) + neck = MODELS.build(neck_cfg) + neck.init_weights() + neck.cuda() + feats = [ + torch.rand(4, in_channels[i], feat_sizes[i], feat_sizes[i]).cuda() + for i in range(len(in_channels)) + ] + outputs = neck(feats) + assert outputs[0].shape == (4, 64, 8, 8) + else: + # Test DLA Neck without DCNv2 on CPU + neck_cfg = dict( + type='DLANeck', + in_channels=[16, 32, 64, 128, 256, 512], + start_level=2, + end_level=5, + norm_cfg=dict(type='GN', num_groups=32), + use_dcn=False) + neck = MODELS.build(neck_cfg) + neck.init_weights() + feats = [ + torch.rand(4, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + outputs = neck(feats) + assert outputs[0].shape == (4, 64, 8, 8) diff --git a/tests/test_models/test_necks/test_imvoxel_neck.py b/tests/test_models/test_necks/test_imvoxel_neck.py new file mode 100755 index 0000000..9d3a071 --- /dev/null +++ b/tests/test_models/test_necks/test_imvoxel_neck.py @@ -0,0 +1,16 @@ +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_imvoxel_neck(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + + neck_cfg = dict( + type='OutdoorImVoxelNeck', in_channels=64, out_channels=256) + neck = MODELS.build(neck_cfg).cuda() + inputs = torch.rand([1, 64, 216, 248, 12], device='cuda') + outputs = neck(inputs) + assert outputs[0].shape == (1, 256, 248, 216) diff --git a/tests/test_models/test_necks/test_pointnet2_fp_neck.py b/tests/test_models/test_necks/test_pointnet2_fp_neck.py new file mode 100755 index 0000000..cf5df0b --- /dev/null +++ b/tests/test_models/test_necks/test_pointnet2_fp_neck.py @@ -0,0 +1,37 @@ +import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_pointnet2_fp_neck(): + if not torch.cuda.is_available(): + pytest.skip() + + xyzs = [16384, 4096, 1024, 256, 64] + feat_channels = [1, 96, 256, 512, 1024] + channel_num = 5 + + sa_xyz = [torch.rand(3, xyzs[i], 3) for i in range(channel_num)] + sa_features = [ + torch.rand(3, feat_channels[i], xyzs[i]) for i in range(channel_num) + ] + + neck_cfg = dict( + type='PointNetFPNeck', + fp_channels=((1536, 512, 512), (768, 512, 512), (608, 256, 256), + (257, 128, 128))) + + neck = MODELS.build(neck_cfg) + neck.init_weights() + + if torch.cuda.is_available(): + sa_xyz = [x.cuda() for x in sa_xyz] + sa_features = [x.cuda() for x in sa_features] + neck.cuda() + + feats_sa = {'sa_xyz': sa_xyz, 'sa_features': sa_features} + outputs = neck(feats_sa) + assert outputs['fp_xyz'].cpu().numpy().shape == (3, 16384, 3) + assert outputs['fp_features'].detach().cpu().numpy().shape == (3, 128, + 16384) diff --git a/tests/test_models/test_necks/test_second_fpn.py b/tests/test_models/test_necks/test_second_fpn.py new file mode 100755 index 0000000..b0edf7d --- /dev/null +++ b/tests/test_models/test_necks/test_second_fpn.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
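+# For the 32x32 input, SECOND with layer_strides=[2, 2, 2] yields 16/8/4 maps;
+# the CenterPoint-style FPN (upsample_strides=[0.5, 1, 2]) brings all of them
+# to 8x8 while the original FPN (strides [1, 2, 4]) yields 16x16, and both
+# concatenate 3 x 2 = 6 output channels.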
+import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_secfpn(): + neck_cfg = dict( + type='SECONDFPN', + in_channels=[2, 3], + upsample_strides=[1, 2], + out_channels=[4, 6], + ) + neck = MODELS.build(neck_cfg) + assert neck.deblocks[0][0].in_channels == 2 + assert neck.deblocks[1][0].in_channels == 3 + assert neck.deblocks[0][0].out_channels == 4 + assert neck.deblocks[1][0].out_channels == 6 + assert neck.deblocks[0][0].stride == (1, 1) + assert neck.deblocks[1][0].stride == (2, 2) + assert neck is not None + + neck_cfg = dict( + type='SECONDFPN', + in_channels=[2, 2], + upsample_strides=[1, 2, 4], + out_channels=[2, 2], + ) + with pytest.raises(AssertionError): + MODELS.build(neck_cfg) + + neck_cfg = dict( + type='SECONDFPN', + in_channels=[2, 2, 4], + upsample_strides=[1, 2, 4], + out_channels=[2, 2], + ) + with pytest.raises(AssertionError): + MODELS.build(neck_cfg) + + +def test_centerpoint_fpn(): + + second_cfg = dict( + type='SECOND', + in_channels=2, + out_channels=[2, 2, 2], + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)) + + second = MODELS.build(second_cfg) + + # centerpoint usage of fpn + centerpoint_fpn_cfg = dict( + type='SECONDFPN', + in_channels=[2, 2, 2], + out_channels=[2, 2, 2], + upsample_strides=[0.5, 1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True) + + # original usage of fpn + fpn_cfg = dict( + type='SECONDFPN', + in_channels=[2, 2, 2], + upsample_strides=[1, 2, 4], + out_channels=[2, 2, 2]) + + second_fpn = MODELS.build(fpn_cfg) + + centerpoint_second_fpn = MODELS.build(centerpoint_fpn_cfg) + + input = torch.rand([2, 2, 32, 32]) + sec_output = second(input) + centerpoint_output = centerpoint_second_fpn(sec_output) + second_output = second_fpn(sec_output) + assert centerpoint_output[0].shape == torch.Size([2, 6, 8, 8]) + assert second_output[0].shape == torch.Size([2, 6, 16, 16]) diff --git a/tests/test_models/test_segmentor/test_minkunet.py b/tests/test_models/test_segmentor/test_minkunet.py new file mode 100755 index 0000000..16312c2 --- /dev/null +++ b/tests/test_models/test_segmentor/test_minkunet.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
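+# MinkUNet depends on torchsparse, so the try/except below skips the whole
+# test when torchsparse is missing; num_classes=19 corresponds to the
+# SemanticKITTI class count targeted by the minkunet base config.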
+import unittest + +import pytest +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestMinkUNet(unittest.TestCase): + + def test_minkunet(self): + try: + import torchsparse # noqa + except ImportError: + pytest.skip('test requires Torchsparse installation') + + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'MinkUNet') + DefaultScope.get_instance('test_minkunet', scope_name='mmdet3d') + setup_seed(0) + model_cfg = get_detector_cfg('_base_/models/minkunet.py') + model = MODELS.build(model_cfg) + num_gt_instance = 3 + packed_inputs = create_detector_inputs( + num_gt_instance=num_gt_instance, + num_classes=19, + with_pts_semantic_mask=True) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('pts_semantic_mask', results[0].pred_pts_seg) + + losses = model.forward(**data, mode='loss') + + self.assertGreater(losses['loss_sem_seg'], 0) diff --git a/tests/test_models/test_segmentors/test_cylinder3d.py b/tests/test_models/test_segmentors/test_cylinder3d.py new file mode 100755 index 0000000..084918f --- /dev/null +++ b/tests/test_models/test_segmentors/test_cylinder3d.py @@ -0,0 +1,42 @@ +import unittest + +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import MODELS +from mmdet3d.testing import (create_detector_inputs, get_detector_cfg, + setup_seed) + + +class TestCylinder3D(unittest.TestCase): + + def test_cylinder3d(self): + import mmdet3d.models + + assert hasattr(mmdet3d.models, 'Cylinder3D') + DefaultScope.get_instance('test_cylinder3d', scope_name='mmdet3d') + setup_seed(0) + cylinder3d_cfg = get_detector_cfg( + 'cylinder3d/cylinder3d_4xb4_3x_semantickitti.py') + cylinder3d_cfg.decode_head['ignore_index'] = 1 + model = MODELS.build(cylinder3d_cfg) + num_gt_instance = 3 + packed_inputs = create_detector_inputs( + num_gt_instance=num_gt_instance, + num_classes=1, + with_pts_semantic_mask=True) + + if torch.cuda.is_available(): + model = model.cuda() + # test simple_test + with torch.no_grad(): + data = model.data_preprocessor(packed_inputs, True) + torch.cuda.empty_cache() + results = model.forward(**data, mode='predict') + self.assertEqual(len(results), 1) + self.assertIn('pts_semantic_mask', results[0].pred_pts_seg) + + losses = model.forward(**data, mode='loss') + + self.assertGreater(losses['decode.loss_ce'], 0) + self.assertGreater(losses['decode.loss_lovasz'], 0) diff --git a/tests/test_models/test_task_modules/test_anchor/test_anchor_3d_generator.py b/tests/test_models/test_task_modules/test_anchor/test_anchor_3d_generator.py new file mode 100755 index 0000000..294aa93 --- /dev/null +++ b/tests/test_models/test_task_modules/test_anchor/test_anchor_3d_generator.py @@ -0,0 +1,263 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
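+# In the AlignedAnchor3DRangeGenerator tests below, 4 sizes x 2 rotations give
+# 8 base anchors, so the grid shapes are H * W * 8 (e.g. 16 * 16 * 8 = 2048),
+# and each anchor carries 7 box values plus the 2 custom_values, i.e. 9 dims.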
+""" +CommandLine: + pytest tests/test_utils/test_anchor.py + xdoctest tests/test_utils/test_anchor.py zero + +""" +import torch +from mmengine import DefaultScope + +from mmdet3d.registry import TASK_UTILS + + +def test_anchor_3d_range_generator(): + + import mmdet3d.models.task_modules + + assert hasattr(mmdet3d.models.task_modules, 'Anchor3DRangeGenerator') + DefaultScope.get_instance( + 'test_ancho3drange_generator', scope_name='mmdet3d') + + if torch.cuda.is_available(): + device = 'cuda' + else: + device = 'cpu' + anchor_generator_cfg = dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -39.68, -0.6, 70.4, 39.68, -0.6], + [0, -39.68, -0.6, 70.4, 39.68, -0.6], + [0, -39.68, -1.78, 70.4, 39.68, -1.78], + ], + sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]], + rotations=[0, 1.57], + reshape_out=False) + + anchor_generator = TASK_UTILS.build(anchor_generator_cfg) + repr_str = repr(anchor_generator) + expected_repr_str = 'Anchor3DRangeGenerator(anchor_range=' \ + '[[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \ + '[0, -39.68, -0.6, 70.4, 39.68, -0.6], ' \ + '[0, -39.68, -1.78, 70.4, 39.68, -1.78]],' \ + '\nscales=[1],\nsizes=[[0.8, 0.6, 1.73], ' \ + '[1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],' \ + '\nrotations=[0, 1.57],\nreshape_out=False,' \ + '\nsize_per_range=True)' + assert repr_str == expected_repr_str + featmap_size = (8, 8) + mr_anchors = anchor_generator.single_level_grid_anchors( + featmap_size, 1.1, device=device) + assert mr_anchors.shape == torch.Size([1, 8, 8, 3, 2, 7]) + + +def test_aligned_anchor_generator(): + + import mmdet3d.models.task_modules + + assert hasattr(mmdet3d.models.task_modules, + 'AlignedAnchor3DRangeGenerator') + DefaultScope.get_instance( + 'test_aligned_ancho3drange_generator', scope_name='mmdet3d') + + if torch.cuda.is_available(): + device = 'cuda' + else: + device = 'cpu' + + anchor_generator_cfg = dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-51.2, -51.2, -1.80, 51.2, 51.2, -1.80]], + scales=[1, 2, 4], + sizes=[ + [2.5981, 0.8660, 1.], # 1.5/sqrt(3) + [1.7321, 0.5774, 1.], # 1/sqrt(3) + [1., 1., 1.], + [0.4, 0.4, 1], + ], + custom_values=[0, 0], + rotations=[0, 1.57], + size_per_range=False, + reshape_out=True) + + featmap_sizes = [(16, 16), (8, 8), (4, 4)] + anchor_generator = TASK_UTILS.build(anchor_generator_cfg) + assert anchor_generator.num_base_anchors == 8 + + # check base anchors + expected_grid_anchors = [ + torch.tensor([[ + -48.0000, -48.0000, -1.8000, 2.5981, 0.8660, 1.0000, 0.0000, + 0.0000, 0.0000 + ], + [ + -48.0000, -48.0000, -1.8000, 0.4000, 0.4000, 1.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + -41.6000, -48.0000, -1.8000, 0.4000, 0.4000, 1.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + -35.2000, -48.0000, -1.8000, 1.0000, 1.0000, 1.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + -28.8000, -48.0000, -1.8000, 1.0000, 1.0000, 1.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + -22.4000, -48.0000, -1.8000, 1.7321, 0.5774, 1.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + -16.0000, -48.0000, -1.8000, 1.7321, 0.5774, 1.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + -9.6000, -48.0000, -1.8000, 2.5981, 0.8660, 1.0000, + 1.5700, 0.0000, 0.0000 + ]], + device=device), + torch.tensor([[ + -44.8000, -44.8000, -1.8000, 5.1962, 1.7320, 2.0000, 0.0000, + 0.0000, 0.0000 + ], + [ + -44.8000, -44.8000, -1.8000, 0.8000, 0.8000, 2.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + -32.0000, -44.8000, -1.8000, 0.8000, 0.8000, 2.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + -19.2000, -44.8000, -1.8000, 2.0000, 2.0000, 2.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + -6.4000, 
-44.8000, -1.8000, 2.0000, 2.0000, 2.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + 6.4000, -44.8000, -1.8000, 3.4642, 1.1548, 2.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + 19.2000, -44.8000, -1.8000, 3.4642, 1.1548, 2.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + 32.0000, -44.8000, -1.8000, 5.1962, 1.7320, 2.0000, + 1.5700, 0.0000, 0.0000 + ]], + device=device), + torch.tensor([[ + -38.4000, -38.4000, -1.8000, 10.3924, 3.4640, 4.0000, 0.0000, + 0.0000, 0.0000 + ], + [ + -38.4000, -38.4000, -1.8000, 1.6000, 1.6000, 4.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + -12.8000, -38.4000, -1.8000, 1.6000, 1.6000, 4.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + 12.8000, -38.4000, -1.8000, 4.0000, 4.0000, 4.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + 38.4000, -38.4000, -1.8000, 4.0000, 4.0000, 4.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + -38.4000, -12.8000, -1.8000, 6.9284, 2.3096, 4.0000, + 1.5700, 0.0000, 0.0000 + ], + [ + -12.8000, -12.8000, -1.8000, 6.9284, 2.3096, 4.0000, + 0.0000, 0.0000, 0.0000 + ], + [ + 12.8000, -12.8000, -1.8000, 10.3924, 3.4640, 4.0000, + 1.5700, 0.0000, 0.0000 + ]], + device=device) + ] + multi_level_anchors = anchor_generator.grid_anchors( + featmap_sizes, device=device) + expected_multi_level_shapes = [ + torch.Size([2048, 9]), + torch.Size([512, 9]), + torch.Size([128, 9]) + ] + for i, single_level_anchor in enumerate(multi_level_anchors): + assert single_level_anchor.shape == expected_multi_level_shapes[i] + # set [:56:7] thus it could cover 8 (len(size) * len(rotations)) + # anchors on 8 location + assert single_level_anchor[:56:7].allclose(expected_grid_anchors[i]) + + +def test_aligned_anchor_generator_per_cls(): + + import mmdet3d.models.task_modules + + assert hasattr(mmdet3d.models.task_modules, + 'AlignedAnchor3DRangeGeneratorPerCls') + DefaultScope.get_instance( + 'test_ancho3drange_generator_percls', scope_name='mmdet3d') + + if torch.cuda.is_available(): + device = 'cuda' + else: + device = 'cpu' + + anchor_generator_cfg = dict( + type='AlignedAnchor3DRangeGeneratorPerCls', + ranges=[[-100, -100, -1.80, 100, 100, -1.80], + [-100, -100, -1.30, 100, 100, -1.30]], + sizes=[[1.76, 0.63, 1.44], [2.35, 0.96, 1.59]], + custom_values=[0, 0], + rotations=[0, 1.57], + reshape_out=False) + + featmap_sizes = [(100, 100), (50, 50)] + anchor_generator = TASK_UTILS.build(anchor_generator_cfg) + + # check base anchors + expected_grid_anchors = [[ + torch.tensor([[ + -99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400, 0.0000, + 0.0000, 0.0000 + ], + [ + -99.0000, -99.0000, -1.8000, 1.7600, 0.6300, 1.4400, + 1.5700, 0.0000, 0.0000 + ]], + device=device), + torch.tensor([[ + -98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900, 0.0000, + 0.0000, 0.0000 + ], + [ + -98.0000, -98.0000, -1.3000, 2.3500, 0.9600, 1.5900, + 1.5700, 0.0000, 0.0000 + ]], + device=device) + ]] + multi_level_anchors = anchor_generator.grid_anchors( + featmap_sizes, device=device) + expected_multi_level_shapes = [[ + torch.Size([20000, 9]), torch.Size([5000, 9]) + ]] + for i, single_level_anchor in enumerate(multi_level_anchors): + assert len(single_level_anchor) == len(expected_multi_level_shapes[i]) + # set [:2*interval:interval] thus it could cover + # 2 (len(size) * len(rotations)) anchors on 2 location + # Note that len(size) for each class is always 1 in this case + for j in range(len(single_level_anchor)): + interval = int(expected_multi_level_shapes[i][j][0] / 2) + assert single_level_anchor[j][:2 * interval:interval].allclose( + expected_grid_anchors[i][j]) diff --git 
a/tests/test_models/test_task_modules/test_coders/test_anchor_free_box_coder.py b/tests/test_models/test_task_modules/test_coders/test_anchor_free_box_coder.py new file mode 100755 index 0000000..30858fc --- /dev/null +++ b/tests/test_models/test_task_modules/test_coders/test_anchor_free_box_coder.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet3d.registry import TASK_UTILS +from mmdet3d.structures import LiDARInstance3DBoxes + + +def test_anchor_free_box_coder(): + box_coder_cfg = dict( + type='AnchorFreeBBoxCoder', num_dir_bins=12, with_rot=True) + box_coder = TASK_UTILS.build(box_coder_cfg) + + # test encode + gt_bboxes = LiDARInstance3DBoxes([[ + 2.1227e+00, 5.7951e+00, -9.9900e-01, 1.6736e+00, 4.2419e+00, + 1.5473e+00, -1.5501e+00 + ], + [ + 1.1791e+01, 9.0276e+00, -8.5772e-01, + 1.6210e+00, 3.5367e+00, 1.4841e+00, + -1.7369e+00 + ], + [ + 2.3638e+01, 9.6997e+00, -5.6713e-01, + 1.7578e+00, 4.6103e+00, 1.5999e+00, + -1.4556e+00 + ]]) + gt_labels = torch.tensor([0, 0, 0]) + + (center_targets, size_targets, dir_class_targets, + dir_res_targets) = box_coder.encode(gt_bboxes, gt_labels) + + expected_center_target = torch.tensor([[2.1227, 5.7951, -0.2253], + [11.7908, 9.0276, -0.1156], + [23.6380, 9.6997, 0.2328]]) + expected_size_targets = torch.tensor([[0.8368, 2.1210, 0.7736], + [0.8105, 1.7683, 0.7421], + [0.8789, 2.3052, 0.8000]]) + expected_dir_class_target = torch.tensor([9, 9, 9]) + expected_dir_res_target = torch.tensor([0.0394, -0.3172, 0.2199]) + assert torch.allclose(center_targets, expected_center_target, atol=1e-4) + assert torch.allclose(size_targets, expected_size_targets, atol=1e-4) + assert torch.all(dir_class_targets == expected_dir_class_target) + assert torch.allclose(dir_res_targets, expected_dir_res_target, atol=1e-3) + + # test decode + center = torch.tensor([[[14.5954, 6.3312, 0.7671], + [67.5245, 22.4422, 1.5610], + [47.7693, -6.7980, 1.4395]]]) + + size_res = torch.tensor([[[-1.0752, 1.8760, 0.7715], + [-0.8016, 1.1754, 0.0102], + [-1.2789, 0.5948, 0.4728]]]) + + dir_class = torch.tensor([[[ + 0.1512, 1.7914, -1.7658, 2.1572, -0.9215, 1.2139, 0.1749, 0.8606, + 1.1743, -0.7679, -1.6005, 0.4623 + ], + [ + -0.3957, 1.2026, -1.2677, 1.3863, -0.5754, + 1.7083, 0.2601, 0.1129, 0.7146, -0.1367, + -1.2892, -0.0083 + ], + [ + -0.8862, 1.2050, -1.3881, 1.6604, -0.9087, + 1.1907, -0.0280, 0.2027, 1.0644, -0.7205, + -1.0738, 0.4748 + ]]]) + + dir_res = torch.tensor([[[ + 1.1151, 0.5535, -0.2053, -0.6582, -0.1616, -0.1821, 0.4675, 0.6621, + 0.8146, -0.0448, -0.7253, -0.7171 + ], + [ + 0.7888, 0.2478, -0.1962, -0.7267, 0.0573, + -0.2398, 0.6984, 0.5859, 0.7507, -0.1980, + -0.6538, -0.6602 + ], + [ + 0.9039, 0.6109, 0.1960, -0.5016, 0.0551, + -0.4086, 0.3398, 0.2759, 0.7247, -0.0655, + -0.5052, -0.9026 + ]]]) + bbox_out = dict( + center=center, size=size_res, dir_class=dir_class, dir_res=dir_res) + + bbox3d = box_coder.decode(bbox_out) + expected_bbox3d = torch.tensor( + [[[14.5954, 6.3312, 0.7671, 0.1000, 3.7521, 1.5429, 0.9126], + [67.5245, 22.4422, 1.5610, 0.1000, 2.3508, 0.1000, 2.3782], + [47.7693, -6.7980, 1.4395, 0.1000, 1.1897, 0.9456, 1.0692]]]) + assert torch.allclose(bbox3d, expected_bbox3d, atol=1e-4) + + # test split_pred + cls_preds = torch.rand(2, 1, 256) + reg_preds = torch.rand(2, 30, 256) + base_xyz = torch.rand(2, 256, 3) + results = box_coder.split_pred(cls_preds, reg_preds, base_xyz) + obj_scores = results['obj_scores'] + center = results['center'] + center_offset = results['center_offset'] + dir_class = 
results['dir_class'] + dir_res_norm = results['dir_res_norm'] + dir_res = results['dir_res'] + size = results['size'] + assert obj_scores.shape == torch.Size([2, 1, 256]) + assert center.shape == torch.Size([2, 256, 3]) + assert center_offset.shape == torch.Size([2, 256, 3]) + assert dir_class.shape == torch.Size([2, 256, 12]) + assert dir_res_norm.shape == torch.Size([2, 256, 12]) + assert dir_res.shape == torch.Size([2, 256, 12]) + assert size.shape == torch.Size([2, 256, 3]) diff --git a/tests/test_models/test_task_modules/test_coders/test_centerpoint_bbox_coder.py b/tests/test_models/test_task_modules/test_coders/test_centerpoint_bbox_coder.py new file mode 100755 index 0000000..e1764fd --- /dev/null +++ b/tests/test_models/test_task_modules/test_coders/test_centerpoint_bbox_coder.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet3d.registry import TASK_UTILS + + +def test_centerpoint_bbox_coder(): + bbox_coder_cfg = dict( + type='CenterPointBBoxCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_num=500, + score_threshold=0.1, + pc_range=[-51.2, -51.2], + out_size_factor=4, + voxel_size=[0.2, 0.2]) + + bbox_coder = TASK_UTILS.build(bbox_coder_cfg) + + batch_dim = torch.rand([2, 3, 128, 128]) + batch_hei = torch.rand([2, 1, 128, 128]) + batch_hm = torch.rand([2, 2, 128, 128]) + batch_reg = torch.rand([2, 2, 128, 128]) + batch_rotc = torch.rand([2, 1, 128, 128]) + batch_rots = torch.rand([2, 1, 128, 128]) + batch_vel = torch.rand([2, 2, 128, 128]) + + temp = bbox_coder.decode(batch_hm, batch_rots, batch_rotc, batch_hei, + batch_dim, batch_vel, batch_reg, 5) + for i in range(len(temp)): + assert temp[i]['bboxes'].shape == torch.Size([500, 9]) + assert temp[i]['scores'].shape == torch.Size([500]) + assert temp[i]['labels'].shape == torch.Size([500]) diff --git a/tests/test_models/test_task_modules/test_coders/test_fcos3d_bbox_coder.py b/tests/test_models/test_task_modules/test_coders/test_fcos3d_bbox_coder.py new file mode 100755 index 0000000..d1f53f3 --- /dev/null +++ b/tests/test_models/test_task_modules/test_coders/test_fcos3d_bbox_coder.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
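The CenterPointBBoxCoder config above fixes the metric extent of the 128x128 head. Assuming the usual CenterPoint decode convention x = col * out_size_factor * voxel_size[0] + pc_range[0] (an assumption about the convention, not something the test asserts), the numbers are consistent with a roughly [-51.2, 51.2] m range:

    out_size_factor, voxel = 4, 0.2
    cell = out_size_factor * voxel               # 0.8 m per heatmap cell
    print(128 * cell)                            # 102.4 m spanned by 128 cells
    print(-51.2 + 0 * cell, -51.2 + 127 * cell)  # -51.2 ... 50.4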
+import torch +from mmcv.cnn import Scale +from torch import nn as nn + +from mmdet3d.registry import TASK_UTILS + + +def test_fcos3d_bbox_coder(): + # test a config without priors + bbox_coder_cfg = dict( + type='FCOS3DBBoxCoder', + base_depths=None, + base_dims=None, + code_size=7, + norm_on_bbox=True) + bbox_coder = TASK_UTILS.build(bbox_coder_cfg) + + # test decode + # [2, 7, 1, 1] + batch_bbox = torch.tensor([[[[0.3130]], [[0.7094]], [[0.8743]], [[0.0570]], + [[0.5579]], [[0.1593]], [[0.4553]]], + [[[0.7758]], [[0.2298]], [[0.3925]], [[0.6307]], + [[0.4377]], [[0.3339]], [[0.1966]]]]) + batch_scale = nn.ModuleList([Scale(1.0) for _ in range(3)]) + stride = 2 + training = False + cls_score = torch.randn([2, 2, 1, 1]).sigmoid() + decode_bbox = bbox_coder.decode(batch_bbox, batch_scale, stride, training, + cls_score) + + expected_bbox = torch.tensor([[[[0.6261]], [[1.4188]], [[2.3971]], + [[1.0586]], [[1.7470]], [[1.1727]], + [[0.4553]]], + [[[1.5516]], [[0.4596]], [[1.4806]], + [[1.8790]], [[1.5492]], [[1.3965]], + [[0.1966]]]]) + assert torch.allclose(decode_bbox, expected_bbox, atol=1e-3) + + # test a config with priors + prior_bbox_coder_cfg = dict( + type='FCOS3DBBoxCoder', + base_depths=((28., 13.), (25., 12.)), + base_dims=((2., 3., 1.), (1., 2., 3.)), + code_size=7, + norm_on_bbox=True) + prior_bbox_coder = TASK_UTILS.build(prior_bbox_coder_cfg) + + # test decode + batch_bbox = torch.tensor([[[[0.3130]], [[0.7094]], [[0.8743]], [[0.0570]], + [[0.5579]], [[0.1593]], [[0.4553]]], + [[[0.7758]], [[0.2298]], [[0.3925]], [[0.6307]], + [[0.4377]], [[0.3339]], [[0.1966]]]]) + batch_scale = nn.ModuleList([Scale(1.0) for _ in range(3)]) + stride = 2 + training = False + cls_score = torch.tensor([[[[0.5811]], [[0.6198]]], [[[0.4889]], + [[0.8142]]]]) + decode_bbox = prior_bbox_coder.decode(batch_bbox, batch_scale, stride, + training, cls_score) + expected_bbox = torch.tensor([[[[0.6260]], [[1.4188]], [[35.4916]], + [[1.0587]], [[3.4940]], [[3.5181]], + [[0.4553]]], + [[[1.5516]], [[0.4596]], [[29.7100]], + [[1.8789]], [[3.0983]], [[4.1892]], + [[0.1966]]]]) + assert torch.allclose(decode_bbox, expected_bbox, atol=1e-3) + + # test decode_yaw + decode_bbox = decode_bbox.permute(0, 2, 3, 1).view(-1, 7) + batch_centers2d = torch.tensor([[100., 150.], [200., 100.]]) + batch_dir_cls = torch.tensor([0., 1.]) + dir_offset = 0.7854 + cam2img = torch.tensor([[700., 0., 450., 0.], [0., 700., 200., 0.], + [0., 0., 1., 0.], [0., 0., 0., 1.]]) + decode_bbox = prior_bbox_coder.decode_yaw(decode_bbox, batch_centers2d, + batch_dir_cls, dir_offset, + cam2img) + expected_bbox = torch.tensor( + [[0.6260, 1.4188, 35.4916, 1.0587, 3.4940, 3.5181, 3.1332], + [1.5516, 0.4596, 29.7100, 1.8789, 3.0983, 4.1892, 6.1368]]) + assert torch.allclose(decode_bbox, expected_bbox, atol=1e-3) diff --git a/tests/test_models/test_task_modules/test_coders/test_monoflex_bbox_coder.py b/tests/test_models/test_task_modules/test_coders/test_monoflex_bbox_coder.py new file mode 100755 index 0000000..d379ef4 --- /dev/null +++ b/tests/test_models/test_task_modules/test_coders/test_monoflex_bbox_coder.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
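The decode_yaw step above reflects the common monocular-3D convention of adding the viewing-ray angle of the projected 2D centre to the locally regressed yaw. The helper below only illustrates that geometry; its name and its omission of the dir_cls/dir_offset handling are simplifications, not the mmdet3d implementation:

    import torch

    def local_to_global_yaw(local_yaw, centers2d, cam2img):
        # viewing-ray angle of each 2D centre: atan2(u - cx, fx)
        ray = torch.atan2(centers2d[:, 0] - cam2img[0, 2], cam2img[0, 0])
        return local_yaw + ray

    cam2img = torch.tensor([[700., 0., 450., 0.], [0., 700., 200., 0.],
                            [0., 0., 1., 0.], [0., 0., 0., 1.]])
    centers2d = torch.tensor([[100., 150.], [200., 100.]])
    print(local_to_global_yaw(torch.tensor([0.4553, 0.1966]), centers2d, cam2img))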
+import numpy as np +import torch + +from mmdet3d.registry import TASK_UTILS +from mmdet3d.structures import CameraInstance3DBoxes + + +def test_monoflex_bbox_coder(): + bbox_coder_cfg = dict( + type='MonoFlexCoder', + depth_mode='exp', + base_depth=(26.494627, 16.05988), + depth_range=[0.1, 100], + combine_depth=True, + uncertainty_range=[-10, 10], + base_dims=((3.8840, 1.5261, 1.6286, 0.4259, 0.1367, + 0.1022), (0.8423, 1.7607, 0.6602, 0.2349, 0.1133, 0.1427), + (1.7635, 1.7372, 0.5968, 0.1766, 0.0948, 0.1242)), + dims_mode='linear', + multibin=True, + num_dir_bins=4, + bin_centers=[0, np.pi / 2, np.pi, -np.pi / 2], + bin_margin=np.pi / 6, + code_size=7) + bbox_coder = TASK_UTILS.build(bbox_coder_cfg) + gt_bboxes_3d = CameraInstance3DBoxes(torch.rand([6, 7])) + orientation_target = bbox_coder.encode(gt_bboxes_3d) + assert orientation_target.shape == torch.Size([6, 8]) + + regression = torch.rand([100, 50]) + base_centers2d = torch.rand([100, 2]) + labels = torch.ones([100]) + downsample_ratio = 4 + cam2imgs = torch.rand([100, 4, 4]) + + preds = bbox_coder.decode(regression, base_centers2d, labels, + downsample_ratio, cam2imgs) + + assert preds['bboxes2d'].shape == torch.Size([100, 4]) + assert preds['dimensions'].shape == torch.Size([100, 3]) + assert preds['offsets2d'].shape == torch.Size([100, 2]) + assert preds['keypoints2d'].shape == torch.Size([100, 10, 2]) + assert preds['orientations'].shape == torch.Size([100, 16]) + assert preds['direct_depth'].shape == torch.Size([ + 100, + ]) + assert preds['keypoints_depth'].shape == torch.Size([100, 3]) + assert preds['combined_depth'].shape == torch.Size([ + 100, + ]) + assert preds['direct_depth_uncertainty'].shape == torch.Size([ + 100, + ]) + assert preds['keypoints_depth_uncertainty'].shape == torch.Size([100, 3]) + + offsets_2d = torch.randn([100, 2]) + depths = torch.randn([ + 100, + ]) + locations = bbox_coder.decode_location(base_centers2d, offsets_2d, depths, + cam2imgs, downsample_ratio) + assert locations.shape == torch.Size([100, 3]) + + orientations = torch.randn([100, 16]) + yaws, local_yaws = bbox_coder.decode_orientation(orientations, locations) + assert yaws.shape == torch.Size([ + 100, + ]) + assert local_yaws.shape == torch.Size([ + 100, + ]) diff --git a/tests/test_models/test_task_modules/test_coders/test_partial_bin_based_box_coder.py b/tests/test_models/test_task_modules/test_coders/test_partial_bin_based_box_coder.py new file mode 100755 index 0000000..ad62e62 --- /dev/null +++ b/tests/test_models/test_task_modules/test_coders/test_partial_bin_based_box_coder.py @@ -0,0 +1,219 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
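Both the AnchorFreeBBoxCoder above and the PartialBinBasedBBoxCoder tested below encode yaw as a bin class over 2*pi plus a small residual. A self-contained sketch of that scheme (helper names are illustrative, not the library's own) reproduces dir class 3 and residual 0.0778 for the first ground-truth yaw (1.6486) of the test below:

    import numpy as np

    def angle_to_bin(angle, num_bins=12):
        bin_size = 2 * np.pi / num_bins
        shifted = (angle + bin_size / 2) % (2 * np.pi)
        cls = int(shifted // bin_size)
        res = shifted - (cls * bin_size + bin_size / 2)
        return cls, res

    def bin_to_angle(cls, res, num_bins=12):
        bin_size = 2 * np.pi / num_bins
        return (cls * bin_size + res) % (2 * np.pi)

    cls, res = angle_to_bin(1.6486)
    print(cls, round(res, 4))                  # 3 0.0778
    print(round(bin_to_angle(cls, res), 4))    # 1.6486 (round trip)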
+import torch + +from mmdet3d.registry import TASK_UTILS +from mmdet3d.structures import DepthInstance3DBoxes + + +def test_partial_bin_based_box_coder(): + box_coder_cfg = dict( + type='PartialBinBasedBBoxCoder', + num_sizes=10, + num_dir_bins=12, + with_rot=True, + mean_sizes=[[2.114256, 1.620300, 0.927272], + [0.791118, 1.279516, 0.718182], + [0.923508, 1.867419, 0.845495], + [0.591958, 0.552978, 0.827272], + [0.699104, 0.454178, 0.75625], + [0.69519, 1.346299, 0.736364], + [0.528526, 1.002642, 1.172878], + [0.500618, 0.632163, 0.683424], + [0.404671, 1.071108, 1.688889], + [0.76584, 1.398258, 0.472728]]) + box_coder = TASK_UTILS.build(box_coder_cfg) + + # test eocode + gt_bboxes = DepthInstance3DBoxes( + [[0.8308, 4.1168, -1.2035, 2.2493, 1.8444, 1.9245, 1.6486], + [2.3002, 4.8149, -1.2442, 0.5718, 0.8629, 0.9510, 1.6030], + [-1.1477, 1.8090, -1.1725, 0.6965, 1.5273, 2.0563, 0.0552]]) + + gt_labels = torch.tensor([0, 1, 2]) + center_target, size_class_target, size_res_target, dir_class_target, \ + dir_res_target = box_coder.encode(gt_bboxes, gt_labels) + expected_center_target = torch.tensor([[0.8308, 4.1168, -0.2413], + [2.3002, 4.8149, -0.7687], + [-1.1477, 1.8090, -0.1444]]) + expected_size_class_target = torch.tensor([0, 1, 2]) + expected_size_res_target = torch.tensor([[0.1350, 0.2241, 0.9972], + [-0.2193, -0.4166, 0.2328], + [-0.2270, -0.3401, 1.2108]]) + expected_dir_class_target = torch.tensor([3, 3, 0]) + expected_dir_res_target = torch.tensor([0.0778, 0.0322, 0.0552]) + assert torch.allclose(center_target, expected_center_target, atol=1e-4) + assert torch.all(size_class_target == expected_size_class_target) + assert torch.allclose(size_res_target, expected_size_res_target, atol=1e-4) + assert torch.all(dir_class_target == expected_dir_class_target) + assert torch.allclose(dir_res_target, expected_dir_res_target, atol=1e-4) + + # test decode + center = torch.tensor([[[0.8014, 3.4134, + -0.6133], [2.6375, 8.4191, 2.0438], + [4.2017, 5.2504, + -0.7851], [-1.0088, 5.4107, 1.6293], + [1.4837, 4.0268, 0.6222]]]) + + size_class = torch.tensor([[[ + -1.0061, -2.2788, 1.1322, -4.4380, -11.0526, -2.8113, -2.0642, -7.5886, + -4.8627, -5.0437 + ], + [ + -2.2058, -0.3527, -1.9976, 0.8815, -2.7980, + -1.9053, -0.5097, -2.0232, -1.4242, -4.1192 + ], + [ + -1.4783, -0.1009, -1.1537, 0.3052, -4.3147, + -2.6529, 0.2729, -0.3755, -2.6479, -3.7548 + ], + [ + -6.1809, -3.5024, -8.3273, 1.1252, -4.3315, + -7.8288, -4.6091, -5.8153, 0.7480, -10.1396 + ], + [ + -9.0424, -3.7883, -6.0788, -1.8855, + -10.2493, -9.7164, -1.0658, -4.1713, + 1.1173, -10.6204 + ]]]) + + size_res = torch.tensor([[[[-9.8976e-02, -5.2152e-01, -7.6421e-02], + [1.4593e-01, 5.6099e-01, 8.9421e-02], + [5.1481e-02, 3.9280e-01, 1.2705e-01], + [3.6869e-01, 7.0558e-01, 1.4647e-01], + [4.7683e-01, 3.3644e-01, 2.3481e-01], + [8.7346e-02, 8.4987e-01, 3.3265e-01], + [2.1393e-01, 8.5585e-01, 9.8948e-02], + [7.8530e-02, 5.9694e-02, -8.7211e-02], + [1.8551e-01, 1.1308e+00, -5.1864e-01], + [3.6485e-01, 7.3757e-01, 1.5264e-01]], + [[-9.5593e-01, -5.0455e-01, 1.9554e-01], + [-1.0870e-01, 1.8025e-01, 1.0228e-01], + [-8.2882e-02, -4.3771e-01, 9.2135e-02], + [-4.0840e-02, -5.9841e-02, 1.1982e-01], + [7.3448e-02, 5.2045e-02, 1.7301e-01], + [-4.0440e-02, 4.9532e-02, 1.1266e-01], + [3.5857e-02, 1.3564e-02, 1.0212e-01], + [-1.0407e-01, -5.9321e-02, 9.2622e-02], + [7.4691e-03, 9.3080e-02, -4.4077e-01], + [-6.0121e-02, -1.3381e-01, -6.8083e-02]], + [[-9.3970e-01, -9.7823e-01, -5.1075e-02], + [-1.2843e-01, -1.8381e-01, 7.1327e-02], + [-1.2247e-01, 
-8.1115e-01, 3.6495e-02], + [4.9154e-02, -4.5440e-02, 8.9520e-02], + [1.5653e-01, 3.5990e-02, 1.6414e-01], + [-5.9621e-02, 4.9357e-03, 1.4264e-01], + [8.5235e-04, -1.0030e-01, -3.0712e-02], + [-3.7255e-02, 2.8996e-02, 5.5545e-02], + [3.9298e-02, -4.7420e-02, -4.9147e-01], + [-1.1548e-01, -1.5895e-01, -3.9155e-02]], + [[-1.8725e+00, -7.4102e-01, 1.0524e+00], + [-3.3210e-01, 4.7828e-02, -3.2666e-02], + [-2.7949e-01, 5.5541e-02, -1.0059e-01], + [-8.5533e-02, 1.4870e-01, -1.6709e-01], + [3.8283e-01, 2.6609e-01, 2.1361e-01], + [-4.2156e-01, 3.2455e-01, 6.7309e-01], + [-2.4336e-02, -8.3366e-02, 3.9913e-01], + [8.2142e-03, 4.8323e-02, -1.5247e-01], + [-4.8142e-02, -3.0074e-01, -1.6829e-01], + [1.3274e-01, -2.3825e-01, -1.8127e-01]], + [[-1.2576e+00, -6.1550e-01, 7.9430e-01], + [-4.7222e-01, 1.5634e+00, -5.9460e-02], + [-3.5367e-01, 1.3616e+00, -1.6421e-01], + [-1.6611e-02, 2.4231e-01, -9.6188e-02], + [5.4486e-01, 4.6833e-01, 5.1151e-01], + [-6.1755e-01, 1.0292e+00, 1.2458e+00], + [-6.8152e-02, 2.4786e-01, 9.5088e-01], + [-4.8745e-02, 1.5134e-01, -9.9962e-02], + [2.4485e-03, -7.5991e-02, 1.3545e-01], + [4.1608e-01, -1.2093e-01, -3.1643e-01]]]]) + + dir_class = torch.tensor([[[ + -1.0230, -5.1965, -5.2195, 2.4030, -2.7661, -7.3399, -1.1640, -4.0630, + -5.2940, 0.8245, -3.1869, -6.1743 + ], + [ + -1.9503, -1.6940, -0.8716, -1.1494, -0.8196, + 0.2862, -0.2921, -0.7894, -0.2481, -0.9916, + -1.4304, -1.2466 + ], + [ + -1.7435, -1.2043, -0.1265, 0.5083, -0.0717, + -0.9560, -1.6171, -2.6463, -2.3863, -2.1358, + -1.8812, -2.3117 + ], + [ + -1.9282, 0.3792, -1.8426, -1.4587, -0.8582, + -3.4639, -3.2133, -3.7867, -7.6781, -6.4459, + -6.2455, -5.4797 + ], + [ + -3.1869, 0.4456, -0.5824, 0.9994, -1.0554, + -8.4232, -7.7019, -7.1382, -10.2724, + -7.8229, -8.1860, -8.6194 + ]]]) + + dir_res = torch.tensor( + [[[ + 1.1022e-01, -2.3750e-01, 2.0381e-01, 1.2177e-01, -2.8501e-01, + 1.5351e-01, 1.2218e-01, -2.0677e-01, 1.4468e-01, 1.1593e-01, + -2.6864e-01, 1.1290e-01 + ], + [ + -1.5788e-02, 4.1538e-02, -2.2857e-04, -1.4011e-02, 4.2560e-02, + -3.1186e-03, -5.0343e-02, 6.8110e-03, -2.6728e-02, -3.2781e-02, + 3.6889e-02, -1.5609e-03 + ], + [ + 1.9004e-02, 5.7105e-03, 6.0329e-02, 1.3074e-02, -2.5546e-02, + -1.1456e-02, -3.2484e-02, -3.3487e-02, 1.6609e-03, 1.7095e-02, + 1.2647e-05, 2.4814e-02 + ], + [ + 1.4482e-01, -6.3083e-02, 5.8307e-02, 9.1396e-02, -8.4571e-02, + 4.5890e-02, 5.6243e-02, -1.2448e-01, -9.5244e-02, 4.5746e-02, + -1.7390e-02, 9.0267e-02 + ], + [ + 1.8065e-01, -2.0078e-02, 8.5401e-02, 1.0784e-01, -1.2495e-01, + 2.2796e-02, 1.1310e-01, -8.4364e-02, -1.1904e-01, 6.1180e-02, + -1.8109e-02, 1.1229e-01 + ]]]) + bbox_out = dict( + center=center, + size_class=size_class, + size_res=size_res, + dir_class=dir_class, + dir_res=dir_res) + + bbox3d = box_coder.decode(bbox_out) + expected_bbox3d = torch.tensor( + [[[0.8014, 3.4134, -0.6133, 0.9750, 2.2602, 0.9725, 1.6926], + [2.6375, 8.4191, 2.0438, 0.5511, 0.4931, 0.9471, 2.6149], + [4.2017, 5.2504, -0.7851, 0.6411, 0.5075, 0.9168, 1.5839], + [-1.0088, 5.4107, 1.6293, 0.5064, 0.7017, 0.6602, 0.4605], + [1.4837, 4.0268, 0.6222, 0.4071, 0.9951, 1.8243, 1.6786]]]) + assert torch.allclose(bbox3d, expected_bbox3d, atol=1e-4) + + # test split_pred + cls_preds = torch.rand(2, 12, 256) + reg_preds = torch.rand(2, 67, 256) + base_xyz = torch.rand(2, 256, 3) + results = box_coder.split_pred(cls_preds, reg_preds, base_xyz) + obj_scores = results['obj_scores'] + center = results['center'] + dir_class = results['dir_class'] + dir_res_norm = results['dir_res_norm'] + dir_res = 
results['dir_res'] + size_class = results['size_class'] + size_res_norm = results['size_res_norm'] + size_res = results['size_res'] + sem_scores = results['sem_scores'] + assert obj_scores.shape == torch.Size([2, 256, 2]) + assert center.shape == torch.Size([2, 256, 3]) + assert dir_class.shape == torch.Size([2, 256, 12]) + assert dir_res_norm.shape == torch.Size([2, 256, 12]) + assert dir_res.shape == torch.Size([2, 256, 12]) + assert size_class.shape == torch.Size([2, 256, 10]) + assert size_res_norm.shape == torch.Size([2, 256, 10, 3]) + assert size_res.shape == torch.Size([2, 256, 10, 3]) + assert sem_scores.shape == torch.Size([2, 256, 10]) diff --git a/tests/test_models/test_task_modules/test_coders/test_pgd_bbox_coder.py b/tests/test_models/test_task_modules/test_coders/test_pgd_bbox_coder.py new file mode 100755 index 0000000..6a3a998 --- /dev/null +++ b/tests/test_models/test_task_modules/test_coders/test_pgd_bbox_coder.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import Scale +from torch import nn as nn + +from mmdet3d.registry import TASK_UTILS + + +def test_pgd_bbox_coder(): + # test a config without priors + bbox_coder_cfg = dict( + type='PGDBBoxCoder', + base_depths=None, + base_dims=None, + code_size=7, + norm_on_bbox=True) + bbox_coder = TASK_UTILS.build(bbox_coder_cfg) + + # test decode_2d + # [2, 27, 1, 1] + batch_bbox = torch.tensor([[[[0.0103]], [[0.7394]], [[0.3296]], [[0.4708]], + [[0.1439]], [[0.0778]], [[0.9399]], [[0.8366]], + [[0.1264]], [[0.3030]], [[0.1898]], [[0.0714]], + [[0.4144]], [[0.4341]], [[0.6442]], [[0.2951]], + [[0.2890]], [[0.4486]], [[0.2848]], [[0.1071]], + [[0.9530]], [[0.9460]], [[0.3822]], [[0.9320]], + [[0.2611]], [[0.5580]], [[0.0397]]], + [[[0.8612]], [[0.1680]], [[0.5167]], [[0.8502]], + [[0.0377]], [[0.3615]], [[0.9550]], [[0.5219]], + [[0.1402]], [[0.6843]], [[0.2121]], [[0.9468]], + [[0.6238]], [[0.7918]], [[0.1646]], [[0.0500]], + [[0.6290]], [[0.3956]], [[0.2901]], [[0.4612]], + [[0.7333]], [[0.1194]], [[0.6999]], [[0.3980]], + [[0.3262]], [[0.7185]], [[0.4474]]]]) + batch_scale = nn.ModuleList([Scale(1.0) for _ in range(5)]) + stride = 2 + training = False + cls_score = torch.randn([2, 2, 1, 1]).sigmoid() + decode_bbox = bbox_coder.decode(batch_bbox, batch_scale, stride, training, + cls_score) + max_regress_range = 16 + pred_keypoints = True + pred_bbox2d = True + decode_bbox_w2d = bbox_coder.decode_2d(decode_bbox, batch_scale, stride, + max_regress_range, training, + pred_keypoints, pred_bbox2d) + expected_decode_bbox_w2d = torch.tensor( + [[[[0.0206]], [[1.4788]], + [[1.3904]], [[1.6013]], [[1.1548]], [[1.0809]], [[0.9399]], + [[10.9441]], [[2.0117]], [[4.7049]], [[3.0009]], [[1.1405]], + [[6.2752]], [[6.5399]], [[9.0840]], [[4.5892]], [[4.4994]], + [[6.7320]], [[4.4375]], [[1.7071]], [[11.8582]], [[11.8075]], + [[5.8339]], [[1.8640]], [[0.5222]], [[1.1160]], [[0.0794]]], + [[[1.7224]], [[0.3360]], [[1.6765]], [[2.3401]], [[1.0384]], + [[1.4355]], [[0.9550]], [[7.6666]], [[2.2286]], [[9.5089]], + [[3.3436]], [[11.8133]], [[8.8603]], [[10.5508]], [[2.6101]], + [[0.7993]], [[8.9178]], [[6.0188]], [[4.5156]], [[6.8970]], + [[10.0013]], [[1.9014]], [[9.6689]], [[0.7960]], [[0.6524]], + [[1.4370]], [[0.8948]]]]) + assert torch.allclose(expected_decode_bbox_w2d, decode_bbox_w2d, atol=1e-3) + + # test decode_prob_depth + # [10, 8] + depth_cls_preds = torch.tensor([ + [-0.4383, 0.7207, -0.4092, 0.4649, 0.8526, 0.6186, -1.4312, -0.7150], + [0.0621, 0.2369, 0.5170, 0.8484, -0.1099, 0.1829, 
-0.0072, 1.0618], + [-1.6114, -0.1057, 0.5721, -0.5986, -2.0471, 0.8140, -0.8385, -0.4822], + [0.0742, -0.3261, 0.4607, 1.8155, -0.3571, -0.0234, 0.3787, 2.3251], + [1.0492, -0.6881, -0.0136, -1.8291, 0.8460, -1.0171, 2.5691, -0.8114], + [0.0968, -0.5601, 1.0458, 0.2560, 1.3018, 0.1635, 0.0680, -1.0263], + [-0.0765, 0.1498, -2.7321, 1.0047, -0.2505, 0.0871, -0.4820, -0.3003], + [-0.4123, 0.2298, -0.1330, -0.6008, 0.6526, 0.7118, 0.9728, -0.7793], + [1.6940, 0.3355, 1.4661, 0.5477, 0.8667, 0.0527, -0.9975, -0.0689], + [0.4724, -0.3632, -0.0654, 0.4034, -0.3494, -0.7548, 0.7297, 1.2754] + ]) + depth_range = (0, 70) + depth_unit = 10 + num_depth_cls = 8 + uniform_prob_depth_preds = bbox_coder.decode_prob_depth( + depth_cls_preds, depth_range, depth_unit, 'uniform', num_depth_cls) + expected_preds = torch.tensor([ + 32.0441, 38.4689, 36.1831, 48.2096, 46.1560, 32.7973, 33.2155, 39.9822, + 21.9905, 43.0161 + ]) + assert torch.allclose(uniform_prob_depth_preds, expected_preds, atol=1e-3) + + linear_prob_depth_preds = bbox_coder.decode_prob_depth( + depth_cls_preds, depth_range, depth_unit, 'linear', num_depth_cls) + expected_preds = torch.tensor([ + 21.1431, 30.2421, 25.8964, 41.6116, 38.6234, 21.4582, 23.2993, 30.1111, + 13.9273, 36.8419 + ]) + assert torch.allclose(linear_prob_depth_preds, expected_preds, atol=1e-3) + + log_prob_depth_preds = bbox_coder.decode_prob_depth( + depth_cls_preds, depth_range, depth_unit, 'log', num_depth_cls) + expected_preds = torch.tensor([ + 12.6458, 24.2487, 17.4015, 36.9375, 27.5982, 12.5510, 15.6635, 19.8408, + 9.1605, 31.3765 + ]) + assert torch.allclose(log_prob_depth_preds, expected_preds, atol=1e-3) + + loguniform_prob_depth_preds = bbox_coder.decode_prob_depth( + depth_cls_preds, depth_range, depth_unit, 'loguniform', num_depth_cls) + expected_preds = torch.tensor([ + 6.9925, 10.3273, 8.9895, 18.6524, 16.4667, 7.3196, 7.5078, 11.3207, + 3.7987, 13.6095 + ]) + assert torch.allclose( + loguniform_prob_depth_preds, expected_preds, atol=1e-3) diff --git a/tests/test_models/test_task_modules/test_coders/test_point_xyzwhlr_bbox_coder.py b/tests/test_models/test_task_modules/test_coders/test_point_xyzwhlr_bbox_coder.py new file mode 100755 index 0000000..e29f740 --- /dev/null +++ b/tests/test_models/test_task_modules/test_coders/test_point_xyzwhlr_bbox_coder.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
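The 'uniform' branch of decode_prob_depth above behaves like an expectation over evenly spaced bin centres (depth_unit * arange(num_depth_cls)): softmax the eight logits, then take the weighted sum. Re-deriving the first expected value, 32.0441, in plain numpy (an illustration consistent with the test, not the library code):

    import numpy as np

    logits = np.array([-0.4383, 0.7207, -0.4092, 0.4649,
                        0.8526, 0.6186, -1.4312, -0.7150])
    centres = 10.0 * np.arange(8)               # 0, 10, ..., 70 m
    probs = np.exp(logits) / np.exp(logits).sum()
    print(probs @ centres)                      # ~32.04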
+ +import torch + +from mmdet3d.registry import TASK_UTILS + + +def test_point_xyzwhlr_bbox_coder(): + bbox_coder_cfg = dict( + type='PointXYZWHLRBBoxCoder', + use_mean_size=True, + mean_size=[[3.9, 1.6, 1.56], [0.8, 0.6, 1.73], [1.76, 0.6, 1.73]]) + boxcoder = TASK_UTILS.build(bbox_coder_cfg) + + # test encode + gt_bboxes_3d = torch.tensor( + [[13.3329, 2.3514, -0.7004, 1.7508, 0.4702, 1.7909, -3.0522], + [2.2068, -2.6994, -0.3277, 3.8703, 1.6602, 1.6913, -1.9057], + [5.5269, 2.5085, -1.0129, 1.1496, 0.8006, 1.8887, 2.1756]]) + + points = torch.tensor([[13.70, 2.40, 0.12], [3.20, -3.00, 0.2], + [5.70, 2.20, -0.4]]) + + gt_labels_3d = torch.tensor([2, 0, 1]) + + bbox_target = boxcoder.encode(gt_bboxes_3d, points, gt_labels_3d) + expected_bbox_target = torch.tensor([[ + -0.1974, -0.0261, -0.4742, -0.0052, -0.2438, 0.0346, -0.9960, -0.0893 + ], [-0.2356, 0.0713, -0.3383, -0.0076, 0.0369, 0.0808, -0.3287, -0.9444 + ], [-0.1731, 0.3085, -0.3543, 0.3626, 0.2884, 0.0878, -0.5686, + 0.8226]]) + assert torch.allclose(expected_bbox_target, bbox_target, atol=1e-4) + # test decode + bbox3d_out = boxcoder.decode(bbox_target, points, gt_labels_3d) + assert torch.allclose(bbox3d_out, gt_bboxes_3d, atol=1e-4) diff --git a/tests/test_models/test_task_modules/test_coders/test_smoke_bbox_coder.py b/tests/test_models/test_task_modules/test_coders/test_smoke_bbox_coder.py new file mode 100755 index 0000000..a027961 --- /dev/null +++ b/tests/test_models/test_task_modules/test_coders/test_smoke_bbox_coder.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet3d.registry import TASK_UTILS +from mmdet3d.structures import CameraInstance3DBoxes + + +def test_smoke_bbox_coder(): + bbox_coder_cfg = dict( + type='SMOKECoder', + base_depth=(28.01, 16.32), + base_dims=((3.88, 1.63, 1.53), (1.78, 1.70, 0.58), (0.88, 1.73, 0.67)), + code_size=7) + + bbox_coder = TASK_UTILS.build(bbox_coder_cfg) + regression = torch.rand([200, 8]) + points = torch.rand([200, 2]) + labels = torch.ones([2, 100]) + cam2imgs = torch.rand([2, 4, 4]) + trans_mats = torch.rand([2, 3, 3]) + + img_metas = [dict(box_type_3d=CameraInstance3DBoxes) for i in range(2)] + locations, dimensions, orientations = bbox_coder.decode( + regression, points, labels, cam2imgs, trans_mats) + assert locations.shape == torch.Size([200, 3]) + assert dimensions.shape == torch.Size([200, 3]) + assert orientations.shape == torch.Size([200, 1]) + bboxes = bbox_coder.encode(locations, dimensions, orientations, img_metas) + assert bboxes.tensor.shape == torch.Size([200, 7]) + + # specically designed to test orientation decode function's + # special cases. + ori_vector = torch.tensor([[-0.9, -0.01], [-0.9, 0.01]]) + locations = torch.tensor([[15., 2., 1.], [15., 2., -1.]]) + orientations = bbox_coder._decode_orientation(ori_vector, locations) + assert orientations.shape == torch.Size([2, 1]) diff --git a/tests/test_models/test_task_modules/test_samplers/test_iou_piecewise_sampler.py b/tests/test_models/test_task_modules/test_samplers/test_iou_piecewise_sampler.py new file mode 100755 index 0000000..b996364 --- /dev/null +++ b/tests/test_models/test_task_modules/test_samplers/test_iou_piecewise_sampler.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
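The 8-dimensional target checked in the PointXYZWHLR test above decomposes, under the usual convention (an assumption, but it matches the expected values), into xy offsets normalised by the class mean-size diagonal, a z offset normalised by the mean height, log dimension ratios, and the cosine and sine of the yaw. Re-deriving the first row in numpy:

    import numpy as np

    gt = np.array([13.3329, 2.3514, -0.7004, 1.7508, 0.4702, 1.7909, -3.0522])
    pt = np.array([13.70, 2.40, 0.12])
    mean = np.array([1.76, 0.6, 1.73])            # mean_size for label 2
    diag = np.hypot(mean[0], mean[1])
    target = np.concatenate([
        (gt[:2] - pt[:2]) / diag,                 # -0.1974, -0.0261
        [(gt[2] - pt[2]) / mean[2]],              # -0.4742
        np.log(gt[3:6] / mean),                   # -0.0052, -0.2438, 0.0346
        [np.cos(gt[6]), np.sin(gt[6])],           # -0.9960, -0.0893
    ])
    print(np.round(target, 4))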
+import pytest +import torch +from mmengine.structures import InstanceData + +from mmdet3d.models.task_modules import IoUNegPiecewiseSampler +from mmdet3d.models.task_modules.assigners import Max3DIoUAssigner + + +def test_iou_piecewise_sampler(): + if not torch.cuda.is_available(): + pytest.skip() + assigner = Max3DIoUAssigner( + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1, + iou_calculator=dict(type='BboxOverlaps3D', coordinate='lidar')) + bboxes = torch.tensor( + [[32, 32, 16, 8, 38, 42, -0.3], [32, 32, 16, 8, 38, 42, -0.3], + [32, 32, 16, 8, 38, 42, -0.3], [32, 32, 16, 8, 38, 42, -0.3], + [0, 0, 0, 10, 10, 10, 0.2], [10, 10, 10, 20, 20, 15, 0.6], + [5, 5, 5, 15, 15, 15, 0.7], [5, 5, 5, 15, 15, 15, 0.7], + [5, 5, 5, 15, 15, 15, 0.7], [32, 32, 16, 8, 38, 42, -0.3], + [32, 32, 16, 8, 38, 42, -0.3], [32, 32, 16, 8, 38, 42, -0.3]], + dtype=torch.float32).cuda() + gt_bboxes = torch.tensor( + [[0, 0, 0, 10, 10, 9, 0.2], [5, 10, 10, 20, 20, 15, 0.6]], + dtype=torch.float32).cuda() + gt_labels = torch.tensor([1, 1], dtype=torch.int64).cuda() + gt_instanses = InstanceData() + gt_instanses.bboxes_3d = gt_bboxes + gt_instanses.labels_3d = gt_labels + pred_instaces = InstanceData() + pred_instaces.priors = bboxes + + assign_result = assigner.assign(pred_instaces, gt_instanses) + + sampler = IoUNegPiecewiseSampler( + num=10, + pos_fraction=0.55, + neg_piece_fractions=[0.8, 0.2], + neg_iou_piece_thrs=[0.55, 0.1], + neg_pos_ub=-1, + add_gt_as_proposals=False) + + sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) + + assert sample_result.pos_inds == 4 + assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) + assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) diff --git a/tests/test_models/test_task_modules/test_voxel/test_voxel_generator.py b/tests/test_models/test_task_modules/test_voxel/test_voxel_generator.py new file mode 100755 index 0000000..806fc78 --- /dev/null +++ b/tests/test_models/test_task_modules/test_voxel/test_voxel_generator.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np + +from mmdet3d.models.task_modules.voxel import VoxelGenerator + + +def test_voxel_generator(): + np.random.seed(0) + voxel_size = [5, 5, 1] + point_cloud_range = [0, 0, 0, 20, 40, 4] + max_num_points = 5 + self = VoxelGenerator(voxel_size, point_cloud_range, max_num_points) + points = np.random.uniform(0, 4, (20, 3)) + voxels = self.generate(points) + voxels, coors, num_points_per_voxel = voxels + expected_coors = np.array([[2, 0, 0], [3, 0, 0], [0, 0, 0], [1, 0, 0]]) + expected_num_points_per_voxel = np.array([5, 5, 5, 3]) + assert voxels.shape == (4, 5, 3) + assert np.all(coors == expected_coors) + assert np.all(num_points_per_voxel == expected_num_points_per_voxel) diff --git a/tests/test_models/test_utils/test_utils.py b/tests/test_models/test_utils/test_utils.py new file mode 100755 index 0000000..662f853 --- /dev/null +++ b/tests/test_models/test_utils/test_utils.py @@ -0,0 +1,289 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
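The expected voxel coordinates in the VoxelGenerator test above are floor((p - range_min) / voxel_size) stored in (z, y, x) order; with 5 x 5 x 1 voxels and points drawn from [0, 4), the x and y indices are always 0 and only z varies, which is why every expected coordinate has the form [z, 0, 0]. A quick numpy illustration (not the library code):

    import numpy as np

    voxel_size = np.array([5., 5., 1.])
    range_min = np.array([0., 0., 0.])
    pts = np.array([[1.0, 2.0, 2.5], [3.5, 0.5, 3.2], [0.2, 1.1, 0.4]])
    coors = ((pts - range_min) / voxel_size).astype(int)[:, ::-1]   # (z, y, x)
    print(coors)   # [[2 0 0] [3 0 0] [0 0 0]]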
+import numpy as np +import pytest +import torch + +from mmdet3d.models import draw_heatmap_gaussian +from mmdet3d.models.utils import (filter_outside_objs, get_edge_indices, + get_keypoints, handle_proj_objs) +from mmdet3d.structures import CameraInstance3DBoxes, points_img2cam +from mmdet3d.utils import array_converter + + +def test_gaussian(): + heatmap = torch.zeros((128, 128)) + ct_int = torch.tensor([64, 64], dtype=torch.int32) + radius = 2 + draw_heatmap_gaussian(heatmap, ct_int, radius) + assert torch.isclose(torch.sum(heatmap), torch.tensor(4.3505), atol=1e-3) + + +def test_array_converter(): + # to torch + @array_converter(to_torch=True, apply_to=('array_a', 'array_b')) + def test_func_1(array_a, array_b, container): + container.append(array_a) + container.append(array_b) + return array_a.clone(), array_b.clone() + + np_array_a = np.array([0.0]) + np_array_b = np.array([0.0]) + container = [] + new_array_a, new_array_b = test_func_1(np_array_a, np_array_b, container) + + assert isinstance(new_array_a, np.ndarray) + assert isinstance(new_array_b, np.ndarray) + assert isinstance(container[0], torch.Tensor) + assert isinstance(container[1], torch.Tensor) + + # one to torch and one not + @array_converter(to_torch=True, apply_to=('array_a', )) + def test_func_2(array_a, array_b): + return torch.cat([array_a, array_b]) + + with pytest.raises(TypeError): + _ = test_func_2(np_array_a, np_array_b) + + # wrong template_arg_name_ + @array_converter( + to_torch=True, apply_to=('array_a', ), template_arg_name_='array_c') + def test_func_3(array_a, array_b): + return torch.cat([array_a, array_b]) + + with pytest.raises(ValueError): + _ = test_func_3(np_array_a, np_array_b) + + # wrong apply_to + @array_converter(to_torch=True, apply_to=('array_a', 'array_c')) + def test_func_4(array_a, array_b): + return torch.cat([array_a, array_b]) + + with pytest.raises(ValueError): + _ = test_func_4(np_array_a, np_array_b) + + # to numpy + @array_converter(to_torch=False, apply_to=('array_a', 'array_b')) + def test_func_5(array_a, array_b, container): + container.append(array_a) + container.append(array_b) + return array_a.copy(), array_b.copy() + + pt_array_a = torch.tensor([0.0]) + pt_array_b = torch.tensor([0.0]) + container = [] + new_array_a, new_array_b = test_func_5(pt_array_a, pt_array_b, container) + + assert isinstance(container[0], np.ndarray) + assert isinstance(container[1], np.ndarray) + assert isinstance(new_array_a, torch.Tensor) + assert isinstance(new_array_b, torch.Tensor) + + # apply_to = None + @array_converter(to_torch=False) + def test_func_6(array_a, array_b, container): + container.append(array_a) + container.append(array_b) + return array_a.clone(), array_b.clone() + + container = [] + new_array_a, new_array_b = test_func_6(pt_array_a, pt_array_b, container) + + assert isinstance(container[0], torch.Tensor) + assert isinstance(container[1], torch.Tensor) + assert isinstance(new_array_a, torch.Tensor) + assert isinstance(new_array_b, torch.Tensor) + + # with default arg + @array_converter(to_torch=True, apply_to=('array_a', 'array_b')) + def test_func_7(array_a, container, array_b=np.array([2.])): + container.append(array_a) + container.append(array_b) + return array_a.clone(), array_b.clone() + + container = [] + new_array_a, new_array_b = test_func_7(np_array_a, container) + + assert isinstance(container[0], torch.Tensor) + assert isinstance(container[1], torch.Tensor) + assert isinstance(new_array_a, np.ndarray) + assert isinstance(new_array_b, np.ndarray) + assert 
np.allclose(new_array_b, np.array([2.]), 1e-3) + + # override default arg + + container = [] + new_array_a, new_array_b = test_func_7(np_array_a, container, + np.array([4.])) + + assert isinstance(container[0], torch.Tensor) + assert isinstance(container[1], torch.Tensor) + assert isinstance(new_array_a, np.ndarray) + assert np.allclose(new_array_b, np.array([4.]), 1e-3) + + # list arg + @array_converter(to_torch=True, apply_to=('array_a', 'array_b')) + def test_func_8(container, array_a, array_b=[2.]): + container.append(array_a) + container.append(array_b) + return array_a.clone(), array_b.clone() + + container = [] + new_array_a, new_array_b = test_func_8(container, [3.]) + + assert isinstance(container[0], torch.Tensor) + assert isinstance(container[1], torch.Tensor) + assert np.allclose(new_array_a, np.array([3.]), 1e-3) + assert np.allclose(new_array_b, np.array([2.]), 1e-3) + + # number arg + @array_converter(to_torch=True, apply_to=('array_a', 'array_b')) + def test_func_9(container, array_a, array_b=1): + container.append(array_a) + container.append(array_b) + return array_a.clone(), array_b.clone() + + container = [] + new_array_a, new_array_b = test_func_9(container, np_array_a) + + assert isinstance(container[0], torch.FloatTensor) + assert isinstance(container[1], torch.FloatTensor) + assert np.allclose(new_array_a, np_array_a, 1e-3) + assert np.allclose(new_array_b, np.array(1.0), 1e-3) + + # feed kwargs + container = [] + kwargs = {'array_a': [5.], 'array_b': [6.]} + new_array_a, new_array_b = test_func_8(container, **kwargs) + + assert isinstance(container[0], torch.Tensor) + assert isinstance(container[1], torch.Tensor) + assert np.allclose(new_array_a, np.array([5.]), 1e-3) + assert np.allclose(new_array_b, np.array([6.]), 1e-3) + + # feed args and kwargs + container = [] + kwargs = {'array_b': [7.]} + args = (container, [8.]) + new_array_a, new_array_b = test_func_8(*args, **kwargs) + + assert isinstance(container[0], torch.Tensor) + assert isinstance(container[1], torch.Tensor) + assert np.allclose(new_array_a, np.array([8.]), 1e-3) + assert np.allclose(new_array_b, np.array([7.]), 1e-3) + + # wrong template arg type + with pytest.raises(TypeError): + new_array_a, new_array_b = test_func_9(container, 3 + 4j) + + with pytest.raises(TypeError): + new_array_a, new_array_b = test_func_9(container, {}) + + # invalid template arg list + with pytest.raises(TypeError): + new_array_a, new_array_b = test_func_9(container, + [True, np.array([3.0])]) + + +def test_points_img2cam(): + points = torch.tensor([[0.5764, 0.9109, 0.7576], [0.6656, 0.5498, 0.9813]]) + cam2img = torch.tensor([[700., 0., 450., 0.], [0., 700., 200., 0.], + [0., 0., 1., 0.]]) + xyzs = points_img2cam(points, cam2img) + expected_xyzs = torch.tensor([[-0.4864, -0.2155, 0.7576], + [-0.6299, -0.2796, 0.9813]]) + assert torch.allclose(xyzs, expected_xyzs, atol=1e-3) + + +def test_generate_edge_indices(): + + input_metas = [ + dict(img_shape=(110, 110), pad_shape=(128, 128)), + dict(img_shape=(98, 110), pad_shape=(128, 128)) + ] + downsample_ratio = 4 + edge_indices_list = get_edge_indices(input_metas, downsample_ratio) + + assert edge_indices_list[0].shape[0] == 108 + assert edge_indices_list[1].shape[0] == 102 + + +def test_truncation_hanlde(): + + centers2d_list = [ + torch.tensor([[-99.86, 199.45], [499.50, 399.20], [201.20, 99.86]]) + ] + + gt_bboxes_list = [ + torch.tensor([[0.25, 99.8, 99.8, 199.6], [300.2, 250.1, 399.8, 299.6], + [100.2, 20.1, 300.8, 180.7]]) + ] + img_metas = [dict(img_shape=[300, 400])] + 
centers2d_target_list, offsets2d_list, trunc_mask_list = \ + handle_proj_objs(centers2d_list, gt_bboxes_list, img_metas) + + centers2d_target = torch.tensor([[0., 166.30435501], [379.03437877, 299.], + [201.2, 99.86]]) + + offsets2d = torch.tensor([[-99.86, 33.45], [120.5, 100.2], [0.2, -0.14]]) + trunc_mask = torch.tensor([True, True, False]) + + assert torch.allclose(centers2d_target_list[0], centers2d_target) + assert torch.allclose(offsets2d_list[0], offsets2d, atol=1e-4) + assert torch.all(trunc_mask_list[0] == trunc_mask) + assert torch.allclose( + centers2d_target_list[0].round().int() + offsets2d_list[0], + centers2d_list[0]) + + +def test_filter_outside_objs(): + + centers2d_list = [ + torch.tensor([[-99.86, 199.45], [499.50, 399.20], [201.20, 99.86]]), + torch.tensor([[-47.86, 199.45], [410.50, 399.20], [401.20, 349.86]]) + ] + gt_bboxes_list = [ + torch.rand([3, 4], dtype=torch.float32), + torch.rand([3, 4], dtype=torch.float32) + ] + gt_bboxes_3d_list = [ + CameraInstance3DBoxes(torch.rand([3, 7]), box_dim=7), + CameraInstance3DBoxes(torch.rand([3, 7]), box_dim=7) + ] + gt_labels_list = [torch.tensor([0, 1, 2]), torch.tensor([2, 0, 0])] + gt_labels_3d_list = [torch.tensor([0, 1, 2]), torch.tensor([2, 0, 0])] + img_metas = [dict(img_shape=[300, 400]), dict(img_shape=[500, 450])] + filter_outside_objs(gt_bboxes_list, gt_labels_list, gt_bboxes_3d_list, + gt_labels_3d_list, centers2d_list, img_metas) + + assert len(centers2d_list[0]) == len(gt_bboxes_3d_list[0]) == \ + len(gt_bboxes_list[0]) == len(gt_labels_3d_list[0]) == \ + len(gt_labels_list[0]) == 1 + + assert len(centers2d_list[1]) == len(gt_bboxes_3d_list[1]) == \ + len(gt_bboxes_list[1]) == len(gt_labels_3d_list[1]) == \ + len(gt_labels_list[1]) == 2 + + +def test_generate_keypoints(): + + centers2d_list = [ + torch.tensor([[-99.86, 199.45], [499.50, 399.20], [201.20, 99.86]]), + torch.tensor([[-47.86, 199.45], [410.50, 399.20], [401.20, 349.86]]) + ] + gt_bboxes_3d_list = [ + CameraInstance3DBoxes(torch.rand([3, 7])), + CameraInstance3DBoxes(torch.rand([3, 7])) + ] + img_metas = [ + dict( + cam2img=[[1260.8474446004698, 0.0, 807.968244525554, 40.1111], + [0.0, 1260.8474446004698, 495.3344268742088, 2.34422], + [0.0, 0.0, 1.0, 0.00333333], [0.0, 0.0, 0.0, 1.0]], + img_shape=(300, 400)) for i in range(2) + ] + + keypoints2d_list, keypoints_depth_mask_list = \ + get_keypoints(gt_bboxes_3d_list, centers2d_list, img_metas) + + assert keypoints2d_list[0].shape == (3, 10, 3) + assert keypoints_depth_mask_list[0].shape == (3, 3) diff --git a/tests/test_models/test_voxel_encoders/test_pillar_encoder.py b/tests/test_models/test_voxel_encoders/test_pillar_encoder.py new file mode 100755 index 0000000..8f1f619 --- /dev/null +++ b/tests/test_models/test_voxel_encoders/test_pillar_encoder.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
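The expected values in test_points_img2cam above follow from plain pinhole back-projection, x = (u - cx) * z / fx and y = (v - cy) * z / fy, with z taken directly from the third column. A minimal torch check of that geometry (an illustration, not the mmdet3d routine):

    import torch

    points = torch.tensor([[0.5764, 0.9109, 0.7576],
                           [0.6656, 0.5498, 0.9813]])   # (u, v, depth)
    fx, fy, cx, cy = 700., 700., 450., 200.
    x = (points[:, 0] - cx) * points[:, 2] / fx
    y = (points[:, 1] - cy) * points[:, 2] / fy
    print(torch.stack([x, y, points[:, 2]], dim=-1))
    # tensor([[-0.4864, -0.2155,  0.7576],
    #         [-0.6299, -0.2796,  0.9813]])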
+import pytest +import torch + +from mmdet3d.registry import MODELS + + +def test_pillar_feature_net(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + pillar_feature_net_cfg = dict( + type='PillarFeatureNet', + in_channels=5, + feat_channels=[64], + with_distance=False, + voxel_size=(0.2, 0.2, 8), + point_cloud_range=(-51.2, -51.2, -5.0, 51.2, 51.2, 3.0), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01)) + pillar_feature_net = MODELS.build(pillar_feature_net_cfg) + + features = torch.rand([97297, 20, 5]) + num_voxels = torch.randint(1, 100, [97297]) + coors = torch.randint(0, 100, [97297, 4]) + + features = pillar_feature_net(features, num_voxels, coors) + assert features.shape == torch.Size([97297, 64]) diff --git a/tests/test_models/test_voxel_encoders/test_voxel_encoders.py b/tests/test_models/test_voxel_encoders/test_voxel_encoders.py new file mode 100755 index 0000000..f5eb630 --- /dev/null +++ b/tests/test_models/test_voxel_encoders/test_voxel_encoders.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +import torch.nn.functional as F + +from mmdet3d.registry import MODELS + + +def test_hard_simple_VFE(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + hard_simple_VFE_cfg = dict(type='HardSimpleVFE', num_features=5) + hard_simple_VFE = MODELS.build(hard_simple_VFE_cfg) + features = torch.rand([240000, 10, 5]) + num_voxels = torch.randint(1, 10, [240000]) + + outputs = hard_simple_VFE(features, num_voxels, None) + assert outputs.shape == torch.Size([240000, 5]) + + +def test_seg_VFE(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + seg_VFE_cfg = dict( + type='SegVFE', + feat_channels=[64, 128, 256, 256], + grid_shape=[480, 360, 32], + with_voxel_center=True, + feat_compression=16, + return_point_feats=True) + seg_VFE = MODELS.build(seg_VFE_cfg) + seg_VFE = seg_VFE.cuda() + features = torch.rand([240000, 6]).cuda() + coors = [] + for i in range(4): + coor = torch.randint(0, 10, (60000, 3)) + coor = F.pad(coor, (1, 0), mode='constant', value=i) + coors.append(coor) + coors = torch.cat(coors, dim=0).cuda() + out_features, out_coors, out_point_features = seg_VFE(features, coors) + assert out_features.shape[0] == out_coors.shape[0] + assert len(out_point_features) == 4 + assert out_point_features[0].shape == torch.Size([240000, 64]) + assert out_point_features[1].shape == torch.Size([240000, 128]) + assert out_point_features[2].shape == torch.Size([240000, 256]) + assert out_point_features[3].shape == torch.Size([240000, 256]) diff --git a/tests/test_samples/parta2_roihead_inputs.npz b/tests/test_samples/parta2_roihead_inputs.npz new file mode 100755 index 0000000000000000000000000000000000000000..8dc8e548d9c6c800df200282a78a7e0a41204bef GIT binary patch literal 36522 zcmbrmb+pyh8^(!%v?ASbfm^4)vEvQ85eY#=q!Extx|N1DLqHOxm`xGVRZ}RJt6xmaB zZvJhf7R{S<>DsAz=V<%ydVU~}{-2-!-#_@@*U86(YPGA@sQW>RZYg>vHEZ7an@&j- zmsG5UmlTdmYSE!nmrhOEH|o%-S@ZurUZF|b&dufV&MljK*IfRdtQ{vD=bAt`u5aA` zw~q`Jux@09zF(w`xu%MG=iEXVr~6ZFy@*p48Ns_vnb32mo_cL-ax+fnoQk}nXFSl$ z6}jhipnlvVHp9+?v+#rejawW@UvM69yxN?cRc%+lMr!%x_#DlKly)?;onf-m9cpB* z1w-JP(NxOa2NQ5R4+{OwU%|NW4D%_UXN^n zkI=^OT~pj-H!Ynrv@M|#PR;rPSK=K#2qUmXKaRGdS~itE5Br^j@Dw$ar%(X4s`GSL z*Tw#DI`UJ(Q&lSbJG{^jO-;@L$5jFUic^-Kn$(oVRPeIlBxp`OtZRL~0Q>PIou-Ri zg72B8&>4cNG5^AqK3R=Bs2*CHaN)EoK zjrPB2Vd!o5`ze!)s%u_J`rZ~TQrz65NjL#hx)JCY?dn`LJ8`;c?9TJXMORV*_>W#k 
znDl4J^#M1~dvQU=7&~Q%wOwvPV&)t>g2{br2B$0%ZFlJMD> zETL5b3wTd8Ry5LnxD8MFfJq@|@x(Qw@1TE0sK3Dh8U`oIetyyJpq4@dKCUmq!~4#c zQwG5l*vFCWQclcc{Yn&v27QE2a@M%3kh$FjB1t9EPyU9!)v#~SSH=Mu!I$%^)_u?y zwyCtIvk$(BYMi?QPQEkj60Jec7*j<8$Uqge zfr`Bj@@+#)c?_aDqEb$sSgGkJAXrY$tO4n9548LIO#{o z+Q^8m^TAfyl|2(9eT+;R%w^YH1{$oyZUGXp#HbXJd7 zlm7;H=v`J@^2v&`O7r!+jQ-ay&Y!FMv~`T*%tadg7qwX<2jqOzw$Tnw{d(xiW(7wE zwbg@BC#ZLW%`q@ClWI>#+GF6e+QI9?Ls^I(72_f|$P?nLH>WQ~wjx^9*F(Hf!HOU` z{hRO(Oq-(kT8iqXqJ}ySRdJ$k7ku$|#YFg4wx|x)G}$?jn@&*At*_7%cln<4mv(j8 zL!U9u$dj=JWCQLOk7!%I3A5d2tUdJ0yE23SzIvv{*hAb#aP+m1Mf5aJpZMCA7gc8% z?-ae!%?zLZ18`j&>8B;tf3hGg0v}OreGOjZFuh6Lmu=vNn?vi0JuK9l9@<4SC~(uI zC#*aibrIqK3sGP2L?7p0xf9ojh6D#J2vykyaih_-1RIuM@ZzgLD=$(AkdjC#ZiH1=vNfOdi1# zyg7EMT~Ggwc}YLhANw!+PgLZ?=t$BlHi0AO7}J^0mlpgYDLT|Wrt*+>YMA$}*krEK z#Z?76MBP#8-RiVF$O!FXACbcHBilz>xt9JMcLTL(8?wZ1ERH$1j8Ez^lqXBqQ3!Up z+f{l?`18_*2{+W@=y9SH+&uSXJbXt3WjoJ6IuOj32V^q;1TW`b;*(gcmWpE{Ik1Y% zvd61`L<$&5{n>!drz-7wDZEBve8Y}hHtGEAp+KjcJS;B7vp zmAg`9i@RWRnvE|GY_k7E-hGNrAo0jO4G}Rc2XbvxM>sD@vW~|&HIn>>DfE8n+6J$n z4uE}hH&C9`=b^f^k&a)b&D>5t0rhp0obMZ`MjKlG73?G~k)h(T1^O8MK|uQpxrTp4 ze>kChY6AE~8a)TYWcyd}7aOQr?sb|-_mW}Y57bU*3^z|pb|YpJPT%igYvgjfqG%3Z zUvZktFN5jb%9uM%4NTR8L6|t8XV?{klhtmvjiy89Z-2n2L&RuyQeKd0d08>tuA+0g z$DqQkv^q(GE^>nok>`asFo%S(KXucn5Lw5$0adRQ%fager(+hn6G~N2r=XfFXOgEL zn=JAdl$v)2;jolC9$3oTilJ&a`-SyKE@_AT4?iM?vh*~;>KD98pMY)>)Xbv<~*Qj0ZC1M(FxD*Ez74a0=Il>&Ydd9`P}n2c1-MVH=_N?!Z9+b#kW8R2MVMQ| ztz%qJqv-FbQ^u>cq@Xxv{|xtDec8!u8q=Ci@_ckMh=(8^f}`V8R!;FY^0C~)!Z8p2 zB1!y>U~SQY6M9UaBk7TU-2n~z9XqCm`}boCpQ4_Nxw4lXu0s^{71cwaGe%RJXJ+l} zQ@pQn5_kJwpxeaa>FELuGd(%Ul63*~(Z~h2_B_1{K9+FQZm)D`V3iz!>iH;-fTIzf=8f;=VYrwY1Av6l|ETauQvDzbz7kQbk7zvUsMzmw#C;4MjRwO;j zWIytw#M9h4!m``BJjdN?sx#ZbKH)pI5~}QDVYv-SS>z8Jh-S8jU$CyiQ_xKQE7Ksc zI}kIMQId-W)@ySjZBO&jW~wz?hS_f-e55Pj7~LS+L-%MwP0%TB;cQ5w1o8-7gwnI! zb}EgG3p7>tcx{@Fj`6)xzsj*>9+-wjk=(uNjuCxXFZrEB;$1!rx6UPJfZp!Tbp7r= z@luv5a@&`R4~JUuhv|{sbaQnYDWvAkXSb4AB7E*x{>=I=GJ>1BOU#g|WwT6_@9uK+%;||z?v&b{mnLlwmt2ahJbak&)LAAqQ);upmRWf%XZ>lDs&z%$Z zxkQ?Hr?f;K>o>QT-Y+i0dpgBy>AEtJH&c^zGiZdVkwVL_t~>4NO}$i)_dQ}a;$R$9 zzp3@kN%@?fa+fndJPhTb`4ejgb&R(l(sTqtCziC0^?|{t?$9xjY5?F-jDn*Ice#J6 zxt_PR@0+9C;cA9FjQ-qe(MjlykE$ZEpXD-L$VoyiRz&_30v04+Z43bc zr7zq~mEj44>p^BkwsINr-;usmYL=cDc@|VDXyEYh#prrsx>*ggl0mGl?ky)cEdn!i z4}C;rcg?67@WpqOw*CW5l0C?ZG#2++J{_c!^*|%S{($6oqD&)S*+TBZsgg!(b&^)q z6Ra$DK_fDHI`rN4YQ0{F*=l+An$Kp#*kQ8B%*y}e0cR`sK#{6$J!Yk4J{2XW2B)h* z@+Y+yipyn?W{SF(!OVI9?P;A;R;;7VtikG?{grz4ZrPozQLVrQIAC`U5KJH=wCnpd z`W`ZGUeZl1K&QG+CaL-)4qap@Nde*Is9a_Afy?53a4LO*U;Wa_J) zJzYbGp`)C{a|Um+WKqJ3mhY?+NC&U7-kBS9MU_UJ(A$IS$rtyGb3yM1nK-R8%{c~s z!3ABEchymtVZT*lz(f5{`i;ES&#aUBUDlUFNqK7py1#qw*WhAO6F$-h^mn+BGn?Dx zYrR|FRJ}sa>QLWo*$sErQMV8(`rnNcIFTN)t$d286c|m9*j%3Bb&%Y?tiZDNZ&7c^ z1U3`>L_xU|lki-2q<(DP5_k9tJ5~%LNzfmZ@s(GCdeV;`3f6U|=@)n|BUCRW&+7Ya zoeRF{j`Sb5Eq^2S(B`VDiR?w}o+78|4Sm)R!icHF4dfq-$ev7U6T=o-ds(AgHKQanGYplZUYJWPE=hRQ2bSO)i@O4667-K6B$b;bU!pkyE>7m=fV=acKIj$^3-Q&RS3AiGQ4-Zf4Es5Hw#;E{ zvi2!IJuR>6@`l4>)lD$nhWI|V^wav|{krhoxLHYkl9j1+u z9UA1eva0byt)s!*Iz$*K@9?0Fbm zqb9}1n0~8(XS0!FtrNpYuOjtWKYOab9-Yl@HQ6bkO)uwsSHYndmTbP5eTuM9ftY zA}c8&${-neNX~Ui>RdPveq~cYml>e@gM!*J_=l_>|Av>)v*Cg~PIrpdMXs8NjjAZ2bE(h+&R0! 
zEdDHK(1WV2%1p~^E&RR>WRyJ$B>al1J-Cduq1~J%i#&74Ve5TtM!0$!p=<5zn!yUJ zh$!XEMUM7}yB6NgexOJcW83Ks`Vw^QP zk~#2&XF{T@tm-X`(Ri>?-@1d;ZN7&Z+|p4v6W@b@{1h(8ucQkvDp%Qs#b?m5$I?z_ zImto(zvnzbQHMHX=!|oopo(;&Pk!WSXLK3GA5<@MK{lTP`N&+ zmO7c_@PNCg@9>`wp_m&d4X(Q)zV_W-K@W(>9r>|j-#)4q!OdcMQ5 zD7Zzbjg_|2%hXBxPvqZsKtF2(szEtDfz3jepb4^xH1;%@W?78yVuAM-QaR7%K@cUn z5>QN3Rvn`6>2@+Cct{VDK9H^7(20C3DJU~gB~KZnoOimKQvkl_b+R74=qzWSMJaoh zy69w8$M|4X9CWHIybAfnN`)l&8FdB;v0BLQhN%Aj6|zE9jI)b9HVx?J*#c`tC%MB; z2Uk;^^Pb-ea4331K-=o1F6*3fv~QVbhOe&jlew&}jEJ8iDLk{0<|+KtzLO6TX}wcv zdwC7)(+;u;on{B*GyQ-(hqKmajb<}Yg;!(*yffE6LLRCuMn859HF-Xk1A8H^IZW5h zH%F&p4={m9<;Dlgg)-lp5g+N7b?pyxGZYh0_JI^&U`4zQCh#Qmi%p zlttJV{0^l+rGKg7z~V^li>J>()(Kd5o$P`3v^veN*V69z+(O)c)I2MZWdpP8clbIE zt7*PW)_T;nr&LWkG^~}iTTFsGs3T@_bHIKWCv9Y$%F6UKCn#ROs!});rem`6w`%SE zY=6Z2yV+fZWZ*S1$Bd)@^5XHMkS6;A232Ev9*OzMaCmkS8O%)5BGY|i_;Dwb+|0Sz z&f2f?(Nr|2uPI!$U%n)fL=JO4@)vxG$So7%cLT7 zygOgk7ta#5swFfPdt^JlNl=~hc$B!SzazWcQIuo<1#7@bk%Wx`1wAeGJ}^`a>~vVg zcOc2W3^Sz*x*W^tY;Z>4c3#QmTSvk8nft;x)!`dY20}9%h~i3(H(BiaMD9Qa5{=aRD6f&C4HRkmFY<~^r&OmCi**m zW(Bclwv**8G|I{`XZVSY{pXkNQ%uq1X$T#?1u70~^^`>uOufNEJ0Im5;3 zRNQ#oAN-y4WNhHI4p=FojyXkauve>~JQ!Rdy32;<7na$H5L6E|$0Ccc9-re7D+y0< zSJa7nolqJtR>`NLD7w7T*gtX_9{HJI1kLcy64#CBEGHwIOgcEC-XAQjJLoC-B&7MQEJ}orv@qMa0{o%g+ zj5o!rBdq?x*R+6(Bpaiczz+3jE=OjlC}E0js;0YzRY6uWNvE}QlhZ~m>kuuYDuEl{ z#d95*1L>A>AJcE*B-=%8cQFX8F&;lQyX4d}@GN$8-s*owUOo;_wd9sn4Aou}x|^go3Sf5sg7g7b zqzcOg64h+lAO5HAVxj*K@jA&kAhkGQ9xig)1^kgD5YGzt`m^{PQuW9OC3;WO>! zaH;IUR+gX`51#T}eM=vbm&JNL9ZJ@D>nj_BpZa$XA}@aBBDnzWh8Map?`e)=so)5xg5E!b#S%CX z%-Mm1s;E;7%0^K46RVLb2&%NGe@mMAE zj3FL!)zgsX)-wWAtbaIBnXNs=A^Ys1H!~| zahNxc28)1B@pYDCd6G2{j>H+FGPnfI?M!4k@=K%OT>VX!g~l&Ip&sP9t%R`@TQIH! zj;aI5B#hw=%m!9LXfF5M+bYy~if*L6JmwtZhohF0J-R1!_C4TV4A*Jp401zu5e1z~ z^nkmaTKE~Y*Siz?vFb9D&OuA77}`)Q#Re5*L}eS+M+}m^i0N`DffOu&x?+X>NMx{9 zLYMx>TR~q2SK>N9joi{DwMix8O??BEcrG~VgV0yKg?@8Mj)ULpZ#GG8P)~Js%!N10 z<90vezW9dp!c8@Vw1!H43^dQ}s+aFPW*eP!4d=azLDl?Gt-}1=UN}hi5*X%e?~0KUrNjcA!jU{ZH>CX=OT*h;H;}eM(n@Px7Wp6g#aL zWMXRTJW%{XX@ab4FXnz#9C~0JrcW8+9QX*@%N>0b$&4BFA)koZVrBbxuJtynE!6v_ z$buf19zPY5EMil+#vHhoJOtOe2h)x2np#_B>V$4&0<^1b;+6bMeUa3ust00UQ4zfV zH&jJ%N$x;Xt}pM(nt>$v+b+jlBm=}ptB121CqO%G>d9iN`zKsF)9`eZa4(S2{EWL* z4NxU%72~|S+q+mt@lZBGR>$U{JVrI~n@mlNBiRr!{)9|!pzOE6+WqEo< ztuu>>UqwyuhMTYwEX67c9`R>ekv5*g`q4glE6h8kdY>Vm(2fCMr zqN6<&6ARvDZBK2SxP6h~+sFIb`Nd(lPRoj7I2B5}iJ0GdWl21T(tnhdHWO@#@5w)O zfGErR(Y`-i+3d7f&f6K+%)WB4*z3&H^Rvwmy^$lU0cD^ZuPbuFEq6j^K$161{uFZ* zY1a>Yn3b8|*0rGFe#X;T(VBp+sf96}Hm5tl^;qguwF|;A^@ES|cW@7;$_LB}a87mM zZ*?cU>05LQOj;I#95_>7@(<9CI)MA}rC1)=i^M@i&}wT18xaQ|Gl#qgb%lxiVm>B9 zi=65p+Mke3ajUfubJ;VALS9#rC2SLH%9YyhhOip?118sdL@MP+PP30V1+{s3)Ls4x z%Im~C)1GQc#A!9zrkDgPwYSNT}`%^5TvWkBx8lO zS;Qfyvl;WW*QiNfTd%M$qyjp^Rp6&)a~cQc(!&J3AC%6faInr2^H^3HE;kytK?a>5 z?mCI$XLE`B4(@}7YBO~Hnktu?>DHzV+y_QsP*iR7UkbeLjd1F}(W~5}*j%tNI4E+x zrQ`skj@eZl4kYU_RI1@TlmC%>P4`#x;04|*S6Vrp#x$K%Qfv?|^viOhN$l2ycihlF zc^_gL{hIGrMOXvo)76k!NezbBB>5|CsCvb%g%#UHfS{D89>y6{^(YWJg-YIuhq<3%I>MB%U#Z7Xvn7n z(t2!v(haQT*njn0&BXl}bS(7-DZAeAHa5Wdy)ytpwiBWc(iiXp{l~`haMc-nCv;RZS6+lYB?eoSrlMf4?| zvQ4Bb^fdOTuPKtJYf0~5X=Ipo(M?D$RJ6MB1EM)-Rd;12(Vuq~oyFe;TJeXVN){o- z;MF>=kJ0wdC%0~>V5adE8dwXy+-S|eiRGdrnWN4-bK`Pjm&;sav`3O0s1Ag~)K#_A ztw3@FM##LPnC=xhldgtOZZFIvMW z2ja;&_m=fQM@t@8fiFdW(V3J~eQ^IbbxZItu?crL<$iVzl*;XJbRQNuVhiyd?hQ-@ zo4D6iOX|j!mE-C2gsgZ&>WEqBbf>@zy8<2lI8u*i6~8#;jRhn#-OGwQ7e$ZA{@6{{ zxnL5ux;-H`MLHA}U&Iu$QAB9m&p+ac(E^ z1zDSgu1AFn?RKCISOraWSyt96%|gt!gqnSQC&g2_vv7Oa06+6BEQzM5i8zld1umlQ z|Cbk$C&d{N3F>0rKmm6y{Jky3zv2((i22YLX1X~zJo@53eI2~a@7RAi)iLS!#9sv? 
zx;fIJ9Io9&omI7h5C57P=N%E>7(0`)>yNwzsih8yuJlc=t#%Eyks)J457Y9V=E2LP zIW`N~>?qj8GifgMNyb8fF0V=ld+MY12zvmSRxgpYs}66?T)iI~TsDAR*iUE?if+G^ z&fv6T75Opr_C2v{F)w=Ps&FYJ`+C96lSds>tDLT=JhrQAv;`7LUYbqB25Pa}@(pPp z9c5KhC)6i+;FjWc7|f2kCp~ZClXR!3yzzseHF~Vk#%#l9`m2?0 z6PXEIvcX;wcNDqjIM&KZG}r2(d=eQf)`EC@LC3`%wYI^1*i?6;B=`ceo%P6zF?e3r zJG41=H%XT+b@w-bgVGxQUx1E1Z-+7+Fsn#Fu)6WKCQ z4MMGFvLsthI>i5&o=zY&@O3Q^`}IEl5Duk}P{dYSpw)T@sPAfqWr&_?YVd_TKr*>$ z)h;`YeF|#PBU*qwfM>TXyu4ef3={+XKAGJ{5-W|GF6O|$edb3lLi9G5fVMeIw6Hd+ zecn~jz4wV%DkE|sJ5+i1qWc@XL|H@tRMn=sBP)$$Koii1ZnL#|DtgGf<{7y=&=uN` zA0KrX>gQTU4?R+6Qipge4v%+m0{L4k@gO21o1uz3BfjZhctM%bEkj>|;MGX(vfp9< z$IpyoCSJ;VEE+lW3Q(Q1&ptlZauzvDPq}FGgNIb{K#RPXW2rhp9PWpmt zk#v7R??7=o#5P0sE5RD&9*Qo-QW^L`de5q-*19!tzxy$PD(3EzE8)gc*n<^~CqGIo zp~u7?l_ILyZQP2)1YsdHA42N~rr7kPU}Q-i_z-^;O$Q z4sqY;A)1;s#SruwanSu;*$7qWb&(rv$`MEm6hK~Km)ePsa$EhOwVSngcg#54jRkx*5S8yegULw3h4W z8~PeMaY}>g-vJ#`jo3rdC%36zz^s@pG;gHN&>oo1-`5|^t7@k1FXseC8pA-!exWWY zB3lQ7c4}xNWq1*Hy?vUskQd@+p;in<8aJCg7H-1TT6%+QzQ0N^(zxk<(obZlF0Rj5 zTj>jXAw2N$ap7jH&ga{QXK4eAME)zQu9nbMpLedR)*u;_R3~*#UIt1-B|S^GvRlE` zUN3H|TMLw#?qnG>i?+DA|DyM$W-%bTr-Rb}&Uq&DvF>^rw1O#4e>o7}{W-)4B%w11 zV+HuM&_-4s=@P|!4dmWRCV{eueAkjC!IJ&`Xf2> zRHs$ltw~mQ@*PyH?K+AcU_aB&cK6_MYaB?_6*TtA>#i)1kj`sj27#uE+V|q17@n7Oi`DWb=-m&NUIIEpzESVz8$-kkT7PgLY zY!uVvxsNTUXb5IT=_!>sn6ET*o&ZPL&BmEfkt%=!b1Lwcq%>gnPMdr-|S z205mk=n=n+{j57<&*3&}j+pHa7k$igJSvcn^+8tdpe_ox0d~S9Je5mmb76t4zPFg zd&tFpcfQDNWSP2QhGB0|F5MlQinhf*qCIs4?tusPb?l8?j7@Vdo!+=v28rox3ii`x zLX|lfDa`rK6V}fChrjWT7vFL3-cq&YA@v4X0aJI8+O5qU_-o(sLM)&3242TUg4nr% z_XqdjnrfvV`Zfo$%l^TDXh}}f>+&7AJE<@)XpgtIIjGwCuo>zu4VV#n5vaQAl=K zNXopk8?%|{{njBD`UM2^yn2{$kaa%|6{WLysiugVbQxxiWs$+iFJ6&Kc7JRT>Sey9 z_4qEZbW2&qc^+LKA8Vf2rVls^NI0f#E6Ev5&t{_Qt!f2DKD!55i;s5}lk(d-kI|GD zh#O4D`%kM@qAPT;*--P|S}Wa)a3A|Y?0L^$f?^OF=&Zs-wD+(((dJ-aXNBthdwdif zsqzPB#8eZ1lZO5wjtzG}5BWj7jqL?}`~}Pe~@mr ziVdKEe&yHfE68xi${hMVy-E$;-^r`0$vYO;G4R%1;8(R@9Cd7x!*byYy@qZoMfQo? 
z3$^*A>MFCbTES?Kq0XwRF>TDkysc^`cJaBQ2D-prbcW1}%tp}oBimqfS{2oLbDr~z z6_bap4(1?v75z~UaB+@`4Qwa$#lxZ%UuG4vmK*ue_gqCj^s_EtK9FPSP?`v4ZEIdi zrpGx_OpFO^=MlkHa4Xf*cW5r8o8O8_`1t*09=vm5It}(;fg!8H*;$!^j1xyxMPoW9 z89T*I67Ef_#>=ACKP(K&L^*eZ9?Mc_V`RKcTFlcDw=xg3Hj)Cl=t)_KR0%e*_wsA* z4$!OuYCLB3^HFnTz*M+t;6flPX%oz+7x1i3inTC!0Vn1rwK{5*+JO82qNoY7>kd4r zLp77Zs7E>tHgNm>1#l8)QOoc%ZR82?=_*vVqxnFI?T_4c9r!Fps%pjB6V%_R9J@;`^05|fOZqF6q~_S` za2mT1J#G$P2)luM^PRhkUn#PL3XvQ;%Kk;4kjVy9$~_m%P3lw0KhOu*(!NBE43;p~ zK{c3ebd20?cVXZ75-@W{iRt`?Y6Q1!2vou&_8b+)J|J6I5#3~%y-+OHHF$mNcQ{x| z!VS_vW)=zPAu~W>`wE3EpS2KF&ws#IiB^+EZ(h-CczrkPNzA6b9j#A~p!EEL(BaL*HKqeJU3W%1d zT%a4%81ad|RK=}Bd=ye?EkG_jXXPcW)FpR`oI_XP^DQkNCHyAG%Z@yfe^dL|Upxd} zjaBlY@da7>&pN9$7L2?3bP`+y&sbNs1boB$bf%mOepEwjLzpf0>QrDbuhcC;i0z}` z0d{UWn9`xDUXQJx1w>)|bc(SMy}+8yLg*9J)f6X64RaLci-nNPjt1jtmpR<`n9ha6 zqz&DO9_+cSr_X~ZtE|IphFeH>Q46_G=fh6DP+EiOLQUA=sH%29aQfzgiyj`=2HYj| zf5>-M0mm;7vixt6vMehKLfPD{{;+RaNGsc+@+y>{@VGi|Wz5>6ky{@ln#Rn-E}qKf zZ}u%wmxhvhm~~y1;h5PRF`~t6x`R_0=1a2n%Q2Y1R>U^JH}sLd3n#@z(uo}B)oB&% zRcm6+B$4m|U$R;S^J98>Fz%~LhtS_f9E#_mz@ z^ls`C>Wm#G$MBKdBpj6SWAKTLV>6J%Or^8NwqQTr>CED)RZOLzi~GZ;NpZ4|{*5|4 z$!YJM3=&W#<6mCZ+6Jm)C%q3lM;+BiC1PjGX$j{I_T^Bpz)rXY#8Lh$rX91b*LDVa z!AybsHiGUaiS{2gmoRgwn>MDLThclV9(Nba zYi;-_TFam8EO2Ufuv6Q8;P~8)y?k4Ab@~K7$;x=DM3$y^@}w$!{vU=X%|JqC?wo!XDhj-627zXu7-!UnwVGAwSa81E^K9rj19Zz5SX z3e=@n13lCXRX@6hE+Lj66@SJE>hr;C)-tgUchP3Mm`DUs=M(*14R&_e7WTcoC98OIb^lZl8kbiI*7VtIyTcL z20G~dc7V@yD#}l62`h>`$QUCHHpEVc-rCYEV7~-qXp6Z(E${ry~@me7Jzq3mQQd=9@em%&%TP5sQm|z_e5&Uz^1LziS)GTP0AH+Izd9~=X$j*EM zAB4C3y|sm33=V@Dp993~)yU}S})u?KXeMzOJYPBy`v^$Je^ z;_`&+4b~^6e7|A?{6KLbus}b?mh{zXj{D51M2r8}@1S#;NI=40kpp?B!~Xm}4ZhZQ zDD}Uy>n>HFtdnX?q3mEPpH(&eiPj$~Hdq*+)iN<6u#hhXEB+B!*sJst&wNbko?=Jf zc$plv*jS2v2hE^@78mclS9y#MI2RS1t~3pHoqw>>A!T$~1oSj9xkxT@6&>anb z1p=3BYYvtRWYToT}9x$*U zx|z&Gs9x#Rd+Vg{b#NSsgX#*VF?NE5Aq8~93Dpz*gFyb<%kJ52MHyqYdS%@V?v=$L zPs9ZLajVo}V?4V0FQ6z**Hx`pY?7}N>;o3`BW#sAr#}SKLIIqN#D;>)<%3b!^I3d%@COUaW}YwRwtz=3iSLN4 zj2rwauX+=)V`UorZP!Si$T&G#PG(VJ90;5rH2j;GtFO?9Sw2iA*I<%1g6F~Bvthgn z*cjXVtvx?d)@688_Z8#Xi#pZz6fh<*ul|HvssKLc(z1<3++(@|()9OrWWXbqBk|lu z-B7Mb(ZHa_Sm^$pw8RxKva0wjHOYD8I z3*>OpLm!NXzZg!1$s)HMZHF0Aw7UyZiuGOgb%(%xw@3CMz1h#)(T@J+W|hCf%al(p zhI8$`LqG*Ngt^*ucvrOlr98{;+6`HC;Q^27u^vQs$n4m{SWC?i+4*{K)z6BGB7`-t z?>Y0$S=1pL;Q}v>Ow|c{lR9KH7g=$#bOZVPyxf9ae627m`zjqPyFitoeiF5?vwDqF zPUI%puvz#J(hSj<0M1edT?JxVGr1~wlyqhRHgSA`d);Zk5p&l zJx=+G>M^E#TkK3uq-c!2GG3=}_&N80-CCy=cZBU$qu2B`-9&8&2)O|4*zTynFQbEs z6`zp~?5%f@D(s86sfyU=ss5X<)i=}|zn%Yqm0v7(Ub*Mw z6t$T(^Z%&p3#u;8PM%+W){dTjQjX{unM+Mju8RxJnYt9t7h08^;EvY zemqv93X}6}DYi>0c zAz7A6J@y6VVs()CWo3}{pT?*4|7jaCy}J;Ek&R%!?#33>vbX_8TT6INsB2;JyR2jV zW>wM~v8(DLwm+xlUBxeCl6?^sMIPEz=5!62L)Def^>v*^ttQh%8S9fXKh8w9?Ikkq z^RSJv9N)je$UGR?;s^m#fU1q|U?d4m|7ML0g$IX1%{9UvlnG4;PnblvsvDyk! 
zTSwnGaC)ktgP)J>0B@}R{IKeV&;E@12o}Z}dc`}``Np&GWY1K7$WFw&_lHOURq&qJ zV@;9=kZYYHXUk$_fQ=n&ZcOl=DsEImUvVp7>xFPNo>t+;l;9rq!Ai?Y7XGd$2J6c~ zP)+}iX)KHChtxw7{G*_INBVo(b@(cO3sD5M<}IXef?$R!xbyFmT%xz`jf#JdoEeM+ zaq&FP#U$?rBsD46bi}v30-V<=Yzpei^VSt+KBZ0 z9a4Q`R1xEtJ`hY2XNjXySe(A#*$#rm5~#OcbbVB`r8Ur*ZRGxh2{ck|!_01o7_MfE z4=k;*Qk1Z}igEU@W<6d-zT#=z93Wrr)5pOHo^Rmy3%)UZ&QX3_j?}45hu6T~)_tJ6 zOi_I?HBSp>+7fn~lp#0iFdpvhCwHi2AOqcpH?p5I4bGss1-X8tW(Si9ibK+AID>^bp+!59U$X0H0?c=Oufpazf|q zhOXg`g}p=i7g^ct9Baw8ieTHZ>E`pk4;F-T@tCMB!Ku%Pi~D z&)6?k&X}kU(X7b64Z-Z)aEgOuGgi-IHN;2S7*9zHF<6%{&)8*<=xZzu{V1%o`wJ=R zFmDuSS%&$EH^4jcPL5XVe0QT;$tURM=jlD3?eTB*l)y>+u6y;+*igfdY-Nb3CyF}x zt@->{X0T;A71JQgb4&N<4MZbQ%x!X9b=9r0>pB7|%4(fj4gr%9ycPIa8i3)2?Ly`* zIQcJ-$#j_ef%Ha}%s{uhTMQ8i$or(oToGyAsk}E-!9DS}_a0qa{SWc(|NqthFB|X2ulV2p8JxNlK3>oNOUkQWIbHf6|0V=~^u(VF JkNfd={|hu=jYt3h literal 0 HcmV?d00001 diff --git a/tests/test_structures/test_bbox/test_box3d.py b/tests/test_structures/test_bbox/test_box3d.py new file mode 100755 index 0000000..eb6258e --- /dev/null +++ b/tests/test_structures/test_bbox/test_box3d.py @@ -0,0 +1,1796 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import unittest + +import numpy as np +import pytest +import torch + +from mmdet3d.structures import (BaseInstance3DBoxes, Box3DMode, + CameraInstance3DBoxes, Coord3DMode, + DepthInstance3DBoxes, LiDARInstance3DBoxes, + bbox3d2roi, bbox3d_mapping_back) +from mmdet3d.structures.bbox_3d.utils import (get_box_type, limit_period, + points_cam2img, + rotation_3d_in_axis, xywhr2xyxyr) +from mmdet3d.structures.points import CameraPoints, DepthPoints, LiDARPoints + + +def test_bbox3d_mapping_back(): + bboxes = BaseInstance3DBoxes( + [[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 2.06200000e+00, + 4.40900000e+00, 1.54800000e+00, -1.48801203e+00 + ], + [ + -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 3.43000000e-01, + 4.58000000e-01, 7.82000000e-01, -4.62759755e+00 + ], + [ + -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 2.39600000e+00, + 3.96900000e+00, 1.73200000e+00, -4.65203216e+00 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 1.94400000e+00, + 3.85700000e+00, 1.72300000e+00, -2.81427027e+00 + ]]) + new_bboxes = bbox3d_mapping_back(bboxes, 1.1, True, True) + expected_new_bboxes = torch.tensor( + [[-4.7657, 36.3827, 0.2705, 1.8745, 4.0082, 1.4073, -1.4880], + [-24.2501, 5.0864, -0.8312, 0.3118, 0.4164, 0.7109, -4.6276], + [-5.2816, 32.1902, 0.1826, 2.1782, 3.6082, 1.5745, -4.6520], + [-28.4624, 0.9910, -0.1769, 1.7673, 3.5064, 1.5664, -2.8143]]) + assert torch.allclose(new_bboxes.tensor, expected_new_bboxes, atol=1e-4) + + +def test_bbox3d2roi(): + bbox_0 = torch.tensor( + [[-5.2422, 4.0020, 2.9757, 2.0620, 4.4090, 1.5480, -1.4880], + [-5.8097, 3.5409, 2.0088, 2.3960, 3.9690, 1.7320, -4.6520]]) + bbox_1 = torch.tensor( + [[-2.6675, 5.5949, -9.1434, 3.4300, 4.5800, 7.8200, -4.6275], + [-3.1308, 1.0900, -1.9461, 1.9440, 3.8570, 1.7230, -2.8142]]) + bbox_list = [bbox_0, bbox_1] + rois = bbox3d2roi(bbox_list) + expected_rois = torch.tensor( + [[0.0000, -5.2422, 4.0020, 2.9757, 2.0620, 4.4090, 1.5480, -1.4880], + [0.0000, -5.8097, 3.5409, 2.0088, 2.3960, 3.9690, 1.7320, -4.6520], + [1.0000, -2.6675, 5.5949, -9.1434, 3.4300, 4.5800, 7.8200, -4.6275], + [1.0000, -3.1308, 1.0900, -1.9461, 1.9440, 3.8570, 1.7230, -2.8142]]) + assert torch.all(torch.eq(rois, expected_rois)) + + +def test_base_boxes3d(): + # test empty initialization + empty_boxes = [] + boxes = BaseInstance3DBoxes(empty_boxes) + assert boxes.tensor.shape[0] == 0 + assert 
boxes.tensor.shape[1] == 7 + + # Test init with origin + gravity_center_box = np.array( + [[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 2.06200000e+00, + 4.40900000e+00, 1.54800000e+00, -1.48801203e+00 + ], + [ + -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 3.43000000e-01, + 4.58000000e-01, 7.82000000e-01, -4.62759755e+00 + ], + [ + -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 2.39600000e+00, + 3.96900000e+00, 1.73200000e+00, -4.65203216e+00 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 1.94400000e+00, + 3.85700000e+00, 1.72300000e+00, -2.81427027e+00 + ]], + dtype=np.float32) + + bottom_center_box = BaseInstance3DBoxes( + gravity_center_box, origin=(0.5, 0.5, 0.5)) + + assert bottom_center_box.yaw.shape[0] == 4 + + +def test_lidar_boxes3d(): + # test empty initialization + empty_boxes = [] + boxes = LiDARInstance3DBoxes(empty_boxes) + assert boxes.tensor.shape[0] == 0 + assert boxes.tensor.shape[1] == 7 + + # Test init with origin + gravity_center_box = np.array( + [[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 2.06200000e+00, + 4.40900000e+00, 1.54800000e+00, -1.48801203e+00 + ], + [ + -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, 3.43000000e-01, + 4.58000000e-01, 7.82000000e-01, -4.62759755e+00 + ], + [ + -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, 2.39600000e+00, + 3.96900000e+00, 1.73200000e+00, -4.65203216e+00 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, 1.94400000e+00, + 3.85700000e+00, 1.72300000e+00, -2.81427027e+00 + ]], + dtype=np.float32) + bottom_center_box = LiDARInstance3DBoxes( + gravity_center_box, origin=(0.5, 0.5, 0.5)) + expected_tensor = torch.tensor( + [[ + -5.24223238e+00, 4.00209696e+01, -4.76429619e-01, 2.06200000e+00, + 4.40900000e+00, 1.54800000e+00, -1.48801203e+00 + ], + [ + -2.66751588e+01, 5.59499564e+00, -1.30534586e+00, 3.43000000e-01, + 4.58000000e-01, 7.82000000e-01, -4.62759755e+00 + ], + [ + -5.80979675e+00, 3.54092357e+01, -6.65110112e-01, 2.39600000e+00, + 3.96900000e+00, 1.73200000e+00, -4.65203216e+00 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.05611211e+00, 1.94400000e+00, + 3.85700000e+00, 1.72300000e+00, -2.81427027e+00 + ]]) + assert torch.allclose(expected_tensor, bottom_center_box.tensor) + + # Test init with numpy array + np_boxes = np.array([[ + 1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, + 1.48 - 0.13603681398218053 * 4 + ], + [ + 8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, + 1.62 - 0.13603681398218053 * 4 + ]], + dtype=np.float32) + boxes_1 = LiDARInstance3DBoxes(np_boxes) + assert torch.allclose(boxes_1.tensor, torch.from_numpy(np_boxes)) + + # test properties + assert boxes_1.volume.size(0) == 2 + assert (boxes_1.center == boxes_1.bottom_center).all() + assert repr(boxes) == ( + 'LiDARInstance3DBoxes(\n tensor([], size=(0, 7)))') + + # test init with torch.Tensor + th_boxes = torch.tensor( + [[ + 28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002, + 1.48000002, -1.57000005 - 0.13603681398218053 * 4 + ], + [ + 26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002, + 1.39999998, -1.69000006 - 0.13603681398218053 * 4 + ], + [ + 31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998, + 1.48000002, 2.78999996 - 0.13603681398218053 * 4 + ]], + dtype=torch.float32) + boxes_2 = LiDARInstance3DBoxes(th_boxes) + assert torch.allclose(boxes_2.tensor, th_boxes) + + # test clone/to/device + boxes_2 = boxes_2.clone() + boxes_1 = boxes_1.to(boxes_2.device) + + # test box concatenation + expected_tensor = torch.tensor([[ + 1.7802081, 2.516249, 
-1.7501148, 1.75, 3.39, 1.65, + 1.48 - 0.13603681398218053 * 4 + ], + [ + 8.959413, 2.4567227, -1.6357126, 1.54, + 4.01, 1.57, + 1.62 - 0.13603681398218053 * 4 + ], + [ + 28.2967, -0.5557558, -1.303325, 1.47, + 2.23, 1.48, + -1.57 - 0.13603681398218053 * 4 + ], + [ + 26.66902, 21.82302, -1.736057, 1.56, + 3.48, 1.4, + -1.69 - 0.13603681398218053 * 4 + ], + [ + 31.31978, 8.162144, -1.6217787, 1.74, + 3.77, 1.48, + 2.79 - 0.13603681398218053 * 4 + ]]) + boxes = LiDARInstance3DBoxes.cat([boxes_1, boxes_2]) + assert torch.allclose(boxes.tensor, expected_tensor) + # concatenate empty list + empty_boxes = LiDARInstance3DBoxes.cat([]) + assert empty_boxes.tensor.shape[0] == 0 + assert empty_boxes.tensor.shape[-1] == 7 + + # test box flip + points = torch.tensor([[1.2559, -0.6762, -1.4658], + [4.7814, -0.8784, + -1.3857], [6.7053, 0.2517, -0.9697], + [0.6533, -0.5520, -0.5265], + [4.5870, 0.5358, -1.4741]]) + expected_tensor = torch.tensor( + [[ + 1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65, + 1.6615927 - np.pi + 0.13603681398218053 * 4 + ], + [ + 8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57, + 1.5215927 - np.pi + 0.13603681398218053 * 4 + ], + [ + 28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48, + 4.7115927 - np.pi + 0.13603681398218053 * 4 + ], + [ + 26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4, + 4.8315926 - np.pi + 0.13603681398218053 * 4 + ], + [ + 31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48, + 0.35159278 - np.pi + 0.13603681398218053 * 4 + ]]) + expected_points = torch.tensor([[1.2559, 0.6762, -1.4658], + [4.7814, 0.8784, -1.3857], + [6.7053, -0.2517, -0.9697], + [0.6533, 0.5520, -0.5265], + [4.5870, -0.5358, -1.4741]]) + points = boxes.flip('horizontal', points) + assert torch.allclose(boxes.tensor, expected_tensor) + assert torch.allclose(points, expected_points, 1e-3) + + expected_tensor = torch.tensor( + [[ + -1.7802, -2.5162, -1.7501, 1.7500, 3.3900, 1.6500, + -1.6616 + np.pi * 2 - 0.13603681398218053 * 4 + ], + [ + -8.9594, -2.4567, -1.6357, 1.5400, 4.0100, 1.5700, + -1.5216 + np.pi * 2 - 0.13603681398218053 * 4 + ], + [ + -28.2967, 0.5558, -1.3033, 1.4700, 2.2300, 1.4800, + -4.7116 + np.pi * 2 - 0.13603681398218053 * 4 + ], + [ + -26.6690, -21.8230, -1.7361, 1.5600, 3.4800, 1.4000, + -4.8316 + np.pi * 2 - 0.13603681398218053 * 4 + ], + [ + -31.3198, -8.1621, -1.6218, 1.7400, 3.7700, 1.4800, + -0.3516 + np.pi * 2 - 0.13603681398218053 * 4 + ]]) + boxes_flip_vert = boxes.clone() + points = boxes_flip_vert.flip('vertical', points) + expected_points = torch.tensor([[-1.2559, 0.6762, -1.4658], + [-4.7814, 0.8784, -1.3857], + [-6.7053, -0.2517, -0.9697], + [-0.6533, 0.5520, -0.5265], + [-4.5870, -0.5358, -1.4741]]) + assert torch.allclose(boxes_flip_vert.tensor, expected_tensor, 1e-4) + assert torch.allclose(points, expected_points) + + # test box rotation + # with input torch.Tensor points and angle + expected_tensor = torch.tensor( + [[ + 1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500, + 1.7976 - np.pi + 0.13603681398218053 * 2 + ], + [ + 8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700, + 1.6576 - np.pi + 0.13603681398218053 * 2 + ], + [ + 28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800, + 4.8476 - np.pi + 0.13603681398218053 * 2 + ], + [ + 23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000, + 4.9676 - np.pi + 0.13603681398218053 * 2 + ], + [ + 29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800, + 0.4876 - np.pi + 0.13603681398218053 * 2 + ]]) + points, rot_mat_T = boxes.rotate(-0.13603681398218053, points) + expected_points = torch.tensor([[-1.1526, 0.8403, 
-1.4658], + [-4.6181, 1.5187, -1.3857], + [-6.6775, 0.6600, -0.9697], + [-0.5724, 0.6355, -0.5265], + [-4.6173, 0.0912, -1.4741]]) + expected_rot_mat_T = torch.tensor([[0.9908, -0.1356, 0.0000], + [0.1356, 0.9908, 0.0000], + [0.0000, 0.0000, 1.0000]]) + assert torch.allclose(boxes.tensor, expected_tensor, 1e-3) + assert torch.allclose(points, expected_points, 1e-3) + assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3) + + # with input torch.Tensor points and rotation matrix + points, rot_mat_T = boxes.rotate(0.13603681398218053, points) # back + rot_mat = np.array([[0.99076125, -0.13561762, 0.], + [0.13561762, 0.99076125, 0.], [0., 0., 1.]]) + points, rot_mat_T = boxes.rotate(rot_mat, points) + assert torch.allclose(boxes.tensor, expected_tensor, 1e-3) + assert torch.allclose(points, expected_points, 1e-3) + assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3) + + # with input np.ndarray points and angle + points_np = np.array([[-1.0280, 0.9888, + -1.4658], [-4.3695, 2.1310, -1.3857], + [-6.5263, 1.5595, + -0.9697], [-0.4809, 0.7073, -0.5265], + [-4.5623, 0.7166, -1.4741]]) + points_np, rot_mat_T_np = boxes.rotate(-0.13603681398218053, points_np) + expected_points_np = np.array([[-0.8844, 1.1191, -1.4658], + [-4.0401, 2.7039, -1.3857], + [-6.2545, 2.4302, -0.9697], + [-0.3805, 0.7660, -0.5265], + [-4.4230, 1.3287, -1.4741]]) + expected_rot_mat_T_np = np.array([[0.9908, -0.1356, 0.0000], + [0.1356, 0.9908, 0.0000], + [0.0000, 0.0000, 1.0000]]) + + assert np.allclose(points_np, expected_points_np, 1e-3) + assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3) + + # with input LiDARPoints and rotation matrix + points_np, rot_mat_T_np = boxes.rotate(0.13603681398218053, points_np) + lidar_points = LiDARPoints(points_np) + lidar_points, rot_mat_T_np = boxes.rotate(rot_mat, lidar_points) + points_np = lidar_points.tensor.numpy() + + assert np.allclose(points_np, expected_points_np, 1e-3) + assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3) + + # test box scaling + expected_tensor = torch.tensor([[ + 1.0443488, -2.9183323, -1.7599131, 1.7597977, 3.4089797, 1.6592377, + 1.9336663 - np.pi + ], + [ + 8.014273, -4.8007393, -1.6448704, + 1.5486219, 4.0324507, 1.57879, + 1.7936664 - np.pi + ], + [ + 27.558605, -7.1084175, -1.310622, + 1.4782301, 2.242485, 1.488286, + 4.9836664 - np.pi + ], + [ + 19.934517, -28.344835, -1.7457767, + 1.5687338, 3.4994833, 1.4078381, + 5.1036663 - np.pi + ], + [ + 28.130915, -16.369587, -1.6308585, + 1.7497417, 3.791107, 1.488286, + 0.6236664 - np.pi + ]]) + boxes.scale(1.00559866335275) + assert torch.allclose(boxes.tensor, expected_tensor) + + # test box translation + expected_tensor = torch.tensor([[ + 1.1281544, -3.0507944, -1.9169292, 1.7597977, 3.4089797, 1.6592377, + 1.9336663 - np.pi + ], + [ + 8.098079, -4.9332013, -1.8018866, + 1.5486219, 4.0324507, 1.57879, + 1.7936664 - np.pi + ], + [ + 27.64241, -7.2408795, -1.4676381, + 1.4782301, 2.242485, 1.488286, + 4.9836664 - np.pi + ], + [ + 20.018322, -28.477297, -1.9027928, + 1.5687338, 3.4994833, 1.4078381, + 5.1036663 - np.pi + ], + [ + 28.21472, -16.502048, -1.7878747, + 1.7497417, 3.791107, 1.488286, + 0.6236664 - np.pi + ]]) + boxes.translate([0.0838056, -0.13246193, -0.15701613]) + assert torch.allclose(boxes.tensor, expected_tensor) + + # test bbox in_range_bev + expected_tensor = torch.tensor( + [[1.1282, -3.0508, 1.7598, 3.4090, -1.2079], + [8.0981, -4.9332, 1.5486, 4.0325, -1.3479], + [27.6424, -7.2409, 1.4782, 2.2425, 1.8421], + [20.0183, -28.4773, 1.5687, 3.4995, 1.9621], + 
[28.2147, -16.5020, 1.7497, 3.7911, -2.5179]]) + assert torch.allclose(boxes.bev, expected_tensor, atol=1e-3) + expected_tensor = torch.tensor([1, 1, 1, 1, 1], dtype=torch.bool) + mask = boxes.in_range_bev([0., -40., 70.4, 40.]) + assert (mask == expected_tensor).all() + mask = boxes.nonempty() + assert (mask == expected_tensor).all() + + # test bbox in_range + expected_tensor = torch.tensor([1, 1, 0, 0, 0], dtype=torch.bool) + mask = boxes.in_range_3d([0, -20, -2, 22, 2, 5]) + assert (mask == expected_tensor).all() + + # test bbox indexing + index_boxes = boxes[2:5] + expected_tensor = torch.tensor([[ + 27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286, + 4.9836664 - np.pi + ], + [ + 20.018322, -28.477297, -1.9027928, + 1.5687338, 3.4994833, 1.4078381, + 5.1036663 - np.pi + ], + [ + 28.21472, -16.502048, -1.7878747, + 1.7497417, 3.791107, 1.488286, + 0.6236664 - np.pi + ]]) + assert len(index_boxes) == 3 + assert torch.allclose(index_boxes.tensor, expected_tensor) + + index_boxes = boxes[2] + expected_tensor = torch.tensor([[ + 27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286, + 4.9836664 - np.pi + ]]) + assert len(index_boxes) == 1 + assert torch.allclose(index_boxes.tensor, expected_tensor) + + index_boxes = boxes[[2, 4]] + expected_tensor = torch.tensor([[ + 27.64241, -7.2408795, -1.4676381, 1.4782301, 2.242485, 1.488286, + 4.9836664 - np.pi + ], + [ + 28.21472, -16.502048, -1.7878747, + 1.7497417, 3.791107, 1.488286, + 0.6236664 - np.pi + ]]) + assert len(index_boxes) == 2 + assert torch.allclose(index_boxes.tensor, expected_tensor) + + # test iteration + for i, box in enumerate(index_boxes): + torch.allclose(box, expected_tensor[i]) + + # test properties + assert torch.allclose(boxes.bottom_center, boxes.tensor[:, :3]) + expected_tensor = ( + boxes.tensor[:, :3] - boxes.tensor[:, 3:6] * + (torch.tensor([0.5, 0.5, 0]) - torch.tensor([0.5, 0.5, 0.5]))) + assert torch.allclose(boxes.gravity_center, expected_tensor) + + boxes.limit_yaw() + assert (boxes.tensor[:, 6] <= np.pi / 2).all() + assert (boxes.tensor[:, 6] >= -np.pi / 2).all() + + Box3DMode.convert(boxes, Box3DMode.LIDAR, Box3DMode.LIDAR) + expected_tensor = boxes.tensor.clone() + assert torch.allclose(expected_tensor, boxes.tensor) + + boxes.flip() + boxes.flip() + boxes.limit_yaw() + assert torch.allclose(expected_tensor, boxes.tensor) + + # test nearest_bev + expected_tensor = torch.tensor([[-0.5763, -3.9307, 2.8326, -2.1709], + [6.0819, -5.7075, 10.1143, -4.1589], + [26.5212, -7.9800, 28.7637, -6.5018], + [18.2686, -29.2617, 21.7681, -27.6929], + [27.3398, -18.3976, 29.0896, -14.6065]]) + assert torch.allclose( + boxes.nearest_bev, expected_tensor, rtol=1e-4, atol=1e-7) + + expected_tensor = torch.tensor([[[-7.7767e-01, -2.8332e+00, -1.9169e+00], + [-7.7767e-01, -2.8332e+00, -2.5769e-01], + [2.4093e+00, -1.6232e+00, -2.5769e-01], + [2.4093e+00, -1.6232e+00, -1.9169e+00], + [-1.5301e-01, -4.4784e+00, -1.9169e+00], + [-1.5301e-01, -4.4784e+00, -2.5769e-01], + [3.0340e+00, -3.2684e+00, -2.5769e-01], + [3.0340e+00, -3.2684e+00, -1.9169e+00]], + [[5.9606e+00, -4.6237e+00, -1.8019e+00], + [5.9606e+00, -4.6237e+00, -2.2310e-01], + [9.8933e+00, -3.7324e+00, -2.2310e-01], + [9.8933e+00, -3.7324e+00, -1.8019e+00], + [6.3029e+00, -6.1340e+00, -1.8019e+00], + [6.3029e+00, -6.1340e+00, -2.2310e-01], + [1.0236e+01, -5.2427e+00, -2.2310e-01], + [1.0236e+01, -5.2427e+00, -1.8019e+00]], + [[2.6364e+01, -6.8292e+00, -1.4676e+00], + [2.6364e+01, -6.8292e+00, 2.0648e-02], + [2.8525e+01, -6.2283e+00, 2.0648e-02], + 
[2.8525e+01, -6.2283e+00, -1.4676e+00], + [2.6760e+01, -8.2534e+00, -1.4676e+00], + [2.6760e+01, -8.2534e+00, 2.0648e-02], + [2.8921e+01, -7.6525e+00, 2.0648e-02], + [2.8921e+01, -7.6525e+00, -1.4676e+00]], + [[1.8102e+01, -2.8420e+01, -1.9028e+00], + [1.8102e+01, -2.8420e+01, -4.9495e-01], + [2.1337e+01, -2.7085e+01, -4.9495e-01], + [2.1337e+01, -2.7085e+01, -1.9028e+00], + [1.8700e+01, -2.9870e+01, -1.9028e+00], + [1.8700e+01, -2.9870e+01, -4.9495e-01], + [2.1935e+01, -2.8535e+01, -4.9495e-01], + [2.1935e+01, -2.8535e+01, -1.9028e+00]], + [[2.8612e+01, -1.8552e+01, -1.7879e+00], + [2.8612e+01, -1.8552e+01, -2.9959e-01], + [2.6398e+01, -1.5474e+01, -2.9959e-01], + [2.6398e+01, -1.5474e+01, -1.7879e+00], + [3.0032e+01, -1.7530e+01, -1.7879e+00], + [3.0032e+01, -1.7530e+01, -2.9959e-01], + [2.7818e+01, -1.4452e+01, -2.9959e-01], + [2.7818e+01, -1.4452e+01, -1.7879e+00]]]) + + assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-4, atol=1e-7) + + # test new_box + new_box1 = boxes.new_box([[1, 2, 3, 4, 5, 6, 7]]) + assert torch.allclose( + new_box1.tensor, + torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=boxes.tensor.dtype)) + assert new_box1.device == boxes.device + assert new_box1.with_yaw == boxes.with_yaw + assert new_box1.box_dim == boxes.box_dim + + new_box2 = boxes.new_box(np.array([[1, 2, 3, 4, 5, 6, 7]])) + assert torch.allclose( + new_box2.tensor, + torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=boxes.tensor.dtype)) + + new_box3 = boxes.new_box(torch.tensor([[1, 2, 3, 4, 5, 6, 7]])) + assert torch.allclose( + new_box3.tensor, + torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=boxes.tensor.dtype)) + + +def test_boxes_conversion(): + """Test the conversion of boxes between different modes. + + CommandLine: + xdoctest tests/test_box3d.py::test_boxes_conversion zero + """ + lidar_boxes = LiDARInstance3DBoxes( + [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48], + [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62], + [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57], + [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69], + [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]]) + cam_box_tensor = Box3DMode.convert(lidar_boxes.tensor, Box3DMode.LIDAR, + Box3DMode.CAM) + expected_box = lidar_boxes.convert_to(Box3DMode.CAM) + assert torch.equal(expected_box.tensor, cam_box_tensor) + + # Some properties should be the same + cam_boxes = CameraInstance3DBoxes(cam_box_tensor) + assert torch.equal(cam_boxes.height, lidar_boxes.height) + assert torch.equal(cam_boxes.top_height, -lidar_boxes.top_height) + assert torch.equal(cam_boxes.bottom_height, -lidar_boxes.bottom_height) + assert torch.allclose(cam_boxes.volume, lidar_boxes.volume) + + lidar_box_tensor = Box3DMode.convert(cam_box_tensor, Box3DMode.CAM, + Box3DMode.LIDAR) + expected_tensor = torch.tensor( + [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48], + [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62], + [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57], + [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69], + [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]]) + + assert torch.allclose(expected_tensor, lidar_box_tensor) + assert torch.allclose(lidar_boxes.tensor, lidar_box_tensor) + + depth_box_tensor = Box3DMode.convert(cam_box_tensor, Box3DMode.CAM, + Box3DMode.DEPTH) + depth_to_cam_box_tensor = Box3DMode.convert(depth_box_tensor, + Box3DMode.DEPTH, Box3DMode.CAM) + assert torch.allclose(cam_box_tensor, depth_to_cam_box_tensor) + + # test similar mode conversion + 
same_results = Box3DMode.convert(depth_box_tensor, Box3DMode.DEPTH, + Box3DMode.DEPTH) + assert torch.equal(same_results, depth_box_tensor) + + # test conversion with a given rt_mat + camera_boxes = CameraInstance3DBoxes( + [[0.06, 1.77, 21.4, 3.2, 1.61, 1.66, -1.54], + [6.59, 1.53, 6.76, 12.78, 3.66, 2.28, 1.55], + [6.71, 1.59, 22.18, 14.73, 3.64, 2.32, 1.59], + [7.11, 1.58, 34.54, 10.04, 3.61, 2.32, 1.61], + [7.78, 1.65, 45.95, 12.83, 3.63, 2.34, 1.64]]) + + rect = torch.tensor( + [[0.9999239, 0.00983776, -0.00744505, 0.], + [-0.0098698, 0.9999421, -0.00427846, 0.], + [0.00740253, 0.00435161, 0.9999631, 0.], [0., 0., 0., 1.]], + dtype=torch.float32) + + Trv2c = torch.tensor( + [[7.533745e-03, -9.999714e-01, -6.166020e-04, -4.069766e-03], + [1.480249e-02, 7.280733e-04, -9.998902e-01, -7.631618e-02], + [9.998621e-01, 7.523790e-03, 1.480755e-02, -2.717806e-01], + [0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]], + dtype=torch.float32) + + # coord sys refactor (reverse sign of yaw) + expected_tensor = torch.tensor( + [[ + 2.16902434e+01, -4.06038554e-02, -1.61906639e+00, 3.20000005e+00, + 1.65999997e+00, 1.61000001e+00, 1.53999996e+00 - np.pi / 2 + ], + [ + 7.05006905e+00, -6.57459601e+00, -1.60107949e+00, 1.27799997e+01, + 2.27999997e+00, 3.66000009e+00, -1.54999995e+00 - np.pi / 2 + ], + [ + 2.24698818e+01, -6.69203759e+00, -1.50118145e+00, 1.47299995e+01, + 2.31999993e+00, 3.64000010e+00, -1.59000003e+00 + 3 * np.pi / 2 + ], + [ + 3.48291965e+01, -7.09058388e+00, -1.36622983e+00, 1.00400000e+01, + 2.31999993e+00, 3.60999990e+00, -1.61000001e+00 + 3 * np.pi / 2 + ], + [ + 4.62394617e+01, -7.75838800e+00, -1.32405020e+00, 1.28299999e+01, + 2.33999991e+00, 3.63000011e+00, -1.63999999e+00 + 3 * np.pi / 2 + ]], + dtype=torch.float32) + + rt_mat = rect @ Trv2c + # test conversion with Box type + cam_to_lidar_box = Box3DMode.convert(camera_boxes, Box3DMode.CAM, + Box3DMode.LIDAR, rt_mat.inverse()) + assert torch.allclose(cam_to_lidar_box.tensor, expected_tensor) + + lidar_to_cam_box = Box3DMode.convert(cam_to_lidar_box.tensor, + Box3DMode.LIDAR, Box3DMode.CAM, + rt_mat) + assert torch.allclose(lidar_to_cam_box, camera_boxes.tensor) + + # test numpy convert + cam_to_lidar_box = Box3DMode.convert(camera_boxes.tensor.numpy(), + Box3DMode.CAM, Box3DMode.LIDAR, + rt_mat.inverse().numpy()) + assert np.allclose(cam_to_lidar_box, expected_tensor.numpy()) + + # test list convert + cam_to_lidar_box = Box3DMode.convert( + camera_boxes.tensor[0].numpy().tolist(), Box3DMode.CAM, + Box3DMode.LIDAR, + rt_mat.inverse().numpy()) + assert np.allclose(np.array(cam_to_lidar_box), expected_tensor[0].numpy()) + + # test convert from depth to lidar + depth_boxes = torch.tensor( + [[2.4593, 2.5870, -0.4321, 0.8597, 0.6193, 1.0204, 3.0693], + [1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601]], + dtype=torch.float32) + depth_boxes = DepthInstance3DBoxes(depth_boxes) + depth_to_lidar_box = depth_boxes.convert_to(Box3DMode.LIDAR) + expected_box = depth_to_lidar_box.convert_to(Box3DMode.DEPTH) + assert torch.equal(depth_boxes.tensor, expected_box.tensor) + + lidar_to_depth_box = Box3DMode.convert(depth_to_lidar_box, Box3DMode.LIDAR, + Box3DMode.DEPTH) + assert torch.allclose(depth_boxes.tensor, lidar_to_depth_box.tensor) + assert torch.allclose(depth_boxes.volume, lidar_to_depth_box.volume) + + # test convert from depth to camera + depth_to_cam_box = Box3DMode.convert(depth_boxes, Box3DMode.DEPTH, + Box3DMode.CAM) + cam_to_depth_box = Box3DMode.convert(depth_to_cam_box, Box3DMode.CAM, + Box3DMode.DEPTH) + 
expected_tensor = depth_to_cam_box.convert_to(Box3DMode.DEPTH) + assert torch.equal(expected_tensor.tensor, cam_to_depth_box.tensor) + assert torch.allclose(depth_boxes.tensor, cam_to_depth_box.tensor) + assert torch.allclose(depth_boxes.volume, cam_to_depth_box.volume) + + with pytest.raises(NotImplementedError): + # assert invalid convert mode + Box3DMode.convert(depth_boxes, Box3DMode.DEPTH, 3) + + +def test_camera_boxes3d(): + # Test init with numpy array + np_boxes = np.array([[ + 1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, + 1.48 - 0.13603681398218053 * 4 - 2 * np.pi + ], + [ + 8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, + 1.62 - 0.13603681398218053 * 4 - 2 * np.pi + ]], + dtype=np.float32) + + boxes_1 = Box3DMode.convert( + LiDARInstance3DBoxes(np_boxes), Box3DMode.LIDAR, Box3DMode.CAM) + assert isinstance(boxes_1, CameraInstance3DBoxes) + + cam_np_boxes = Box3DMode.convert(np_boxes, Box3DMode.LIDAR, Box3DMode.CAM) + assert torch.allclose(boxes_1.tensor, + boxes_1.tensor.new_tensor(cam_np_boxes)) + + # test init with torch.Tensor + th_boxes = torch.tensor( + [[ + 28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002, + 1.48000002, -1.57000005 - 0.13603681398218053 * 4 - 2 * np.pi + ], + [ + 26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002, + 1.39999998, -1.69000006 - 0.13603681398218053 * 4 - 2 * np.pi + ], + [ + 31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998, + 1.48000002, 2.78999996 - 0.13603681398218053 * 4 - 2 * np.pi + ]], + dtype=torch.float32) + cam_th_boxes = Box3DMode.convert(th_boxes, Box3DMode.LIDAR, Box3DMode.CAM) + boxes_2 = CameraInstance3DBoxes(cam_th_boxes) + assert torch.allclose(boxes_2.tensor, cam_th_boxes) + + # test clone/to/device + boxes_2 = boxes_2.clone() + boxes_1 = boxes_1.to(boxes_2.device) + + # test box concatenation + expected_tensor = Box3DMode.convert( + torch.tensor([[ + 1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, + 1.48 - 0.13603681398218053 * 4 - 2 * np.pi + ], + [ + 8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, + 1.62 - 0.13603681398218053 * 4 - 2 * np.pi + ], + [ + 28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, + -1.57 - 0.13603681398218053 * 4 - 2 * np.pi + ], + [ + 26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, + -1.69 - 0.13603681398218053 * 4 - 2 * np.pi + ], + [ + 31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, + 2.79 - 0.13603681398218053 * 4 - 2 * np.pi + ]]), Box3DMode.LIDAR, Box3DMode.CAM) + boxes = CameraInstance3DBoxes.cat([boxes_1, boxes_2]) + assert torch.allclose(boxes.tensor, expected_tensor) + + # test box flip + points = torch.tensor([[0.6762, 1.4658, 1.2559], [0.8784, 1.3857, 4.7814], + [-0.2517, 0.9697, 6.7053], [0.5520, 0.5265, 0.6533], + [-0.5358, 1.4741, 4.5870]]) + expected_tensor = Box3DMode.convert( + torch.tensor([[ + 1.7802081, -2.516249, -1.7501148, 1.75, 3.39, 1.65, + 1.6615927 + 0.13603681398218053 * 4 - np.pi + ], + [ + 8.959413, -2.4567227, -1.6357126, 1.54, 4.01, 1.57, + 1.5215927 + 0.13603681398218053 * 4 - np.pi + ], + [ + 28.2967, 0.5557558, -1.303325, 1.47, 2.23, 1.48, + 4.7115927 + 0.13603681398218053 * 4 - np.pi + ], + [ + 26.66902, -21.82302, -1.736057, 1.56, 3.48, 1.4, + 4.8315926 + 0.13603681398218053 * 4 - np.pi + ], + [ + 31.31978, -8.162144, -1.6217787, 1.74, 3.77, 1.48, + 0.35159278 + 0.13603681398218053 * 4 - np.pi + ]]), Box3DMode.LIDAR, Box3DMode.CAM) + points = boxes.flip('horizontal', points) + expected_points = torch.tensor([[-0.6762, 1.4658, 1.2559], + [-0.8784, 1.3857, 4.7814], + [0.2517, 0.9697, 6.7053], + [-0.5520, 0.5265, 
0.6533], + [0.5358, 1.4741, 4.5870]]) + + yaw_normalized_tensor = boxes.tensor.clone() + yaw_normalized_tensor[:, -1:] = limit_period( + yaw_normalized_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3) + assert torch.allclose(points, expected_points, 1e-3) + + expected_tensor = torch.tensor( + [[ + 2.5162, 1.7501, -1.7802, 1.7500, 1.6500, 3.3900, + 1.6616 + 0.13603681398218053 * 4 - np.pi / 2 + ], + [ + 2.4567, 1.6357, -8.9594, 1.5400, 1.5700, 4.0100, + 1.5216 + 0.13603681398218053 * 4 - np.pi / 2 + ], + [ + -0.5558, 1.3033, -28.2967, 1.4700, 1.4800, 2.2300, + 4.7116 + 0.13603681398218053 * 4 - np.pi / 2 + ], + [ + 21.8230, 1.7361, -26.6690, 1.5600, 1.4000, 3.4800, + 4.8316 + 0.13603681398218053 * 4 - np.pi / 2 + ], + [ + 8.1621, 1.6218, -31.3198, 1.7400, 1.4800, 3.7700, + 0.3516 + 0.13603681398218053 * 4 - np.pi / 2 + ]]) + boxes_flip_vert = boxes.clone() + points = boxes_flip_vert.flip('vertical', points) + expected_points = torch.tensor([[-0.6762, 1.4658, -1.2559], + [-0.8784, 1.3857, -4.7814], + [0.2517, 0.9697, -6.7053], + [-0.5520, 0.5265, -0.6533], + [0.5358, 1.4741, -4.5870]]) + + yaw_normalized_tensor = boxes_flip_vert.tensor.clone() + yaw_normalized_tensor[:, -1:] = limit_period( + yaw_normalized_tensor[:, -1:], period=np.pi * 2) + expected_tensor[:, -1:] = limit_period( + expected_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-4) + assert torch.allclose(points, expected_points) + + # test box rotation + # with input torch.Tensor points and angle + expected_tensor = Box3DMode.convert( + torch.tensor([[ + 1.4225, -2.7344, -1.7501, 1.7500, 3.3900, 1.6500, + 1.7976 + 0.13603681398218053 * 2 - np.pi + ], + [ + 8.5435, -3.6491, -1.6357, 1.5400, 4.0100, 1.5700, + 1.6576 + 0.13603681398218053 * 2 - np.pi + ], + [ + 28.1106, -3.2869, -1.3033, 1.4700, 2.2300, 1.4800, + 4.8476 + 0.13603681398218053 * 2 - np.pi + ], + [ + 23.4630, -25.2382, -1.7361, 1.5600, 3.4800, 1.4000, + 4.9676 + 0.13603681398218053 * 2 - np.pi + ], + [ + 29.9235, -12.3342, -1.6218, 1.7400, 3.7700, 1.4800, + 0.4876 + 0.13603681398218053 * 2 - np.pi + ]]), Box3DMode.LIDAR, Box3DMode.CAM) + points, rot_mat_T = boxes.rotate(torch.tensor(0.13603681398218053), points) + expected_points = torch.tensor([[-0.8403, 1.4658, -1.1526], + [-1.5187, 1.3857, -4.6181], + [-0.6600, 0.9697, -6.6775], + [-0.6355, 0.5265, -0.5724], + [-0.0912, 1.4741, -4.6173]]) + expected_rot_mat_T = torch.tensor([[0.9908, 0.0000, -0.1356], + [0.0000, 1.0000, 0.0000], + [0.1356, 0.0000, 0.9908]]) + yaw_normalized_tensor = boxes.tensor.clone() + yaw_normalized_tensor[:, -1:] = limit_period( + yaw_normalized_tensor[:, -1:], period=np.pi * 2) + expected_tensor[:, -1:] = limit_period( + expected_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3) + assert torch.allclose(points, expected_points, 1e-3) + assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3) + + # with input torch.Tensor points and rotation matrix + points, rot_mat_T = boxes.rotate( + torch.tensor(-0.13603681398218053), points) # back + rot_mat = np.array([[0.99076125, 0., -0.13561762], [0., 1., 0.], + [0.13561762, 0., 0.99076125]]) + points, rot_mat_T = boxes.rotate(rot_mat, points) + yaw_normalized_tensor = boxes.tensor.clone() + yaw_normalized_tensor[:, -1:] = limit_period( + yaw_normalized_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(yaw_normalized_tensor, expected_tensor, 1e-3) + assert torch.allclose(points, 
expected_points, 1e-3) + assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3) + + # with input np.ndarray points and angle + points_np = np.array([[0.6762, 1.2559, -1.4658, 2.5359], + [0.8784, 4.7814, -1.3857, 0.7167], + [-0.2517, 6.7053, -0.9697, 0.5599], + [0.5520, 0.6533, -0.5265, 1.0032], + [-0.5358, 4.5870, -1.4741, 0.0556]]) + points_np, rot_mat_T_np = boxes.rotate( + torch.tensor(0.13603681398218053), points_np) + expected_points_np = np.array([[0.4712, 1.2559, -1.5440, 2.5359], + [0.6824, 4.7814, -1.4920, 0.7167], + [-0.3809, 6.7053, -0.9266, 0.5599], + [0.4755, 0.6533, -0.5965, 1.0032], + [-0.7308, 4.5870, -1.3878, 0.0556]]) + expected_rot_mat_T_np = np.array([[0.9908, 0.0000, -0.1356], + [0.0000, 1.0000, 0.0000], + [0.1356, 0.0000, 0.9908]]) + + assert np.allclose(points_np, expected_points_np, 1e-3) + assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3) + + # with input CameraPoints and rotation matrix + points_np, rot_mat_T_np = boxes.rotate( + torch.tensor(-0.13603681398218053), points_np) + camera_points = CameraPoints(points_np, points_dim=4) + camera_points, rot_mat_T_np = boxes.rotate(rot_mat, camera_points) + points_np = camera_points.tensor.numpy() + assert np.allclose(points_np, expected_points_np, 1e-3) + assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3) + + # test box scaling + expected_tensor = Box3DMode.convert( + torch.tensor([[ + 1.0443488, -2.9183323, -1.7599131, 1.7597977, 3.4089797, 1.6592377, + 1.9336663 - np.pi + ], + [ + 8.014273, -4.8007393, -1.6448704, 1.5486219, + 4.0324507, 1.57879, 1.7936664 - np.pi + ], + [ + 27.558605, -7.1084175, -1.310622, 1.4782301, + 2.242485, 1.488286, 4.9836664 - np.pi + ], + [ + 19.934517, -28.344835, -1.7457767, 1.5687338, + 3.4994833, 1.4078381, 5.1036663 - np.pi + ], + [ + 28.130915, -16.369587, -1.6308585, 1.7497417, + 3.791107, 1.488286, 0.6236664 - np.pi + ]]), Box3DMode.LIDAR, Box3DMode.CAM) + boxes.scale(1.00559866335275) + yaw_normalized_tensor = boxes.tensor.clone() + yaw_normalized_tensor[:, -1:] = limit_period( + yaw_normalized_tensor[:, -1:], period=np.pi * 2) + expected_tensor[:, -1:] = limit_period( + expected_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(yaw_normalized_tensor, expected_tensor) + + # test box translation + expected_tensor = Box3DMode.convert( + torch.tensor([[ + 1.1281544, -3.0507944, -1.9169292, 1.7597977, 3.4089797, 1.6592377, + 1.9336663 - np.pi + ], + [ + 8.098079, -4.9332013, -1.8018866, 1.5486219, + 4.0324507, 1.57879, 1.7936664 - np.pi + ], + [ + 27.64241, -7.2408795, -1.4676381, 1.4782301, + 2.242485, 1.488286, 4.9836664 - np.pi + ], + [ + 20.018322, -28.477297, -1.9027928, 1.5687338, + 3.4994833, 1.4078381, 5.1036663 - np.pi + ], + [ + 28.21472, -16.502048, -1.7878747, 1.7497417, + 3.791107, 1.488286, 0.6236664 - np.pi + ]]), Box3DMode.LIDAR, Box3DMode.CAM) + boxes.translate(torch.tensor([0.13246193, 0.15701613, 0.0838056])) + yaw_normalized_tensor = boxes.tensor.clone() + yaw_normalized_tensor[:, -1:] = limit_period( + yaw_normalized_tensor[:, -1:], period=np.pi * 2) + expected_tensor[:, -1:] = limit_period( + expected_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(yaw_normalized_tensor, expected_tensor) + + # test bbox in_range_bev + expected_tensor = torch.tensor([1, 1, 1, 1, 1], dtype=torch.bool) + mask = boxes.in_range_bev([0., -40., 70.4, 40.]) + assert (mask == expected_tensor).all() + mask = boxes.nonempty() + assert (mask == expected_tensor).all() + + # test bbox in_range + expected_tensor = torch.tensor([1, 1, 0, 0, 0], 
dtype=torch.bool) + mask = boxes.in_range_3d([-2, -5, 0, 20, 2, 22]) + assert (mask == expected_tensor).all() + + expected_tensor = torch.tensor( + [[3.0508, 1.1282, 1.7598, 3.4090, -5.9203], + [4.9332, 8.0981, 1.5486, 4.0325, -6.0603], + [7.2409, 27.6424, 1.4782, 2.2425, -2.8703], + [28.4773, 20.0183, 1.5687, 3.4995, -2.7503], + [16.5020, 28.2147, 1.7497, 3.7911, -0.9471]]) + assert torch.allclose(boxes.bev, expected_tensor, atol=1e-3) + + # test properties + assert torch.allclose(boxes.bottom_center, boxes.tensor[:, :3]) + expected_tensor = ( + boxes.tensor[:, :3] - boxes.tensor[:, 3:6] * + (torch.tensor([0.5, 1.0, 0.5]) - torch.tensor([0.5, 0.5, 0.5]))) + assert torch.allclose(boxes.gravity_center, expected_tensor) + + boxes.limit_yaw() + assert (boxes.tensor[:, 6] <= np.pi / 2).all() + assert (boxes.tensor[:, 6] >= -np.pi / 2).all() + + Box3DMode.convert(boxes, Box3DMode.LIDAR, Box3DMode.LIDAR) + expected_tensor = boxes.tensor.clone() + assert torch.allclose(expected_tensor, boxes.tensor) + + boxes.flip() + boxes.flip() + boxes.limit_yaw() + assert torch.allclose(expected_tensor, boxes.tensor) + + # test nearest_bev + # BEV box in lidar coordinates (x, y) + lidar_expected_tensor = torch.tensor( + [[-0.5763, -3.9307, 2.8326, -2.1709], + [6.0819, -5.7075, 10.1143, -4.1589], + [26.5212, -7.9800, 28.7637, -6.5018], + [18.2686, -29.2617, 21.7681, -27.6929], + [27.3398, -18.3976, 29.0896, -14.6065]]) + # BEV box in camera coordinate (-y, x) + expected_tensor = lidar_expected_tensor.clone() + expected_tensor[:, 0::2] = -lidar_expected_tensor[:, [3, 1]] + expected_tensor[:, 1::2] = lidar_expected_tensor[:, 0::2] + assert torch.allclose( + boxes.nearest_bev, expected_tensor, rtol=1e-4, atol=1e-7) + + expected_tensor = torch.tensor([[[2.8332e+00, 2.5769e-01, -7.7767e-01], + [1.6232e+00, 2.5769e-01, 2.4093e+00], + [1.6232e+00, 1.9169e+00, 2.4093e+00], + [2.8332e+00, 1.9169e+00, -7.7767e-01], + [4.4784e+00, 2.5769e-01, -1.5302e-01], + [3.2684e+00, 2.5769e-01, 3.0340e+00], + [3.2684e+00, 1.9169e+00, 3.0340e+00], + [4.4784e+00, 1.9169e+00, -1.5302e-01]], + [[4.6237e+00, 2.2310e-01, 5.9606e+00], + [3.7324e+00, 2.2310e-01, 9.8933e+00], + [3.7324e+00, 1.8019e+00, 9.8933e+00], + [4.6237e+00, 1.8019e+00, 5.9606e+00], + [6.1340e+00, 2.2310e-01, 6.3029e+00], + [5.2427e+00, 2.2310e-01, 1.0236e+01], + [5.2427e+00, 1.8019e+00, 1.0236e+01], + [6.1340e+00, 1.8019e+00, 6.3029e+00]], + [[6.8292e+00, -2.0648e-02, 2.6364e+01], + [6.2283e+00, -2.0648e-02, 2.8525e+01], + [6.2283e+00, 1.4676e+00, 2.8525e+01], + [6.8292e+00, 1.4676e+00, 2.6364e+01], + [8.2534e+00, -2.0648e-02, 2.6760e+01], + [7.6525e+00, -2.0648e-02, 2.8921e+01], + [7.6525e+00, 1.4676e+00, 2.8921e+01], + [8.2534e+00, 1.4676e+00, 2.6760e+01]], + [[2.8420e+01, 4.9495e-01, 1.8102e+01], + [2.7085e+01, 4.9495e-01, 2.1337e+01], + [2.7085e+01, 1.9028e+00, 2.1337e+01], + [2.8420e+01, 1.9028e+00, 1.8102e+01], + [2.9870e+01, 4.9495e-01, 1.8700e+01], + [2.8535e+01, 4.9495e-01, 2.1935e+01], + [2.8535e+01, 1.9028e+00, 2.1935e+01], + [2.9870e+01, 1.9028e+00, 1.8700e+01]], + [[1.4452e+01, 2.9959e-01, 2.7818e+01], + [1.7530e+01, 2.9959e-01, 3.0032e+01], + [1.7530e+01, 1.7879e+00, 3.0032e+01], + [1.4452e+01, 1.7879e+00, 2.7818e+01], + [1.5474e+01, 2.9959e-01, 2.6398e+01], + [1.8552e+01, 2.9959e-01, 2.8612e+01], + [1.8552e+01, 1.7879e+00, 2.8612e+01], + [1.5474e+01, 1.7879e+00, 2.6398e+01]]]) + + assert torch.allclose(boxes.corners, expected_tensor, rtol=1e-3, atol=1e-4) + + th_boxes = torch.tensor( + [[ + 28.29669987, -0.5557558, -1.30332506, 1.47000003, 2.23000002, + 
1.48000002, -1.57000005
+        ],
+         [
+             26.66901946, 21.82302134, -1.73605708, 1.55999994, 3.48000002,
+             1.39999998, -1.69000006
+         ],
+         [
+             31.31977974, 8.16214412, -1.62177875, 1.74000001, 3.76999998,
+             1.48000002, 2.78999996
+         ]],
+        dtype=torch.float32)
+
+    # test init with a given origin
+    boxes_origin_given = CameraInstance3DBoxes(
+        th_boxes.clone(), box_dim=7, origin=(0.5, 0.5, 0.5))
+    expected_tensor = th_boxes.clone()
+    expected_tensor[:, :3] = th_boxes[:, :3] + th_boxes[:, 3:6] * (
+        th_boxes.new_tensor((0.5, 1.0, 0.5)) - th_boxes.new_tensor(
+            (0.5, 0.5, 0.5)))
+    assert torch.allclose(boxes_origin_given.tensor, expected_tensor)
+
+
+def test_boxes3d_overlaps():
+    """Test the iou calculation of boxes in different modes.
+
+    CommandLine:
+        xdoctest tests/test_box3d.py::test_boxes3d_overlaps zero
+    """
+    if not torch.cuda.is_available():
+        pytest.skip('test requires GPU and torch+cuda')
+
+    # Test LiDAR boxes 3D overlaps
+    boxes1_tensor = torch.tensor(
+        [[1.8, -2.5, -1.8, 1.75, 3.39, 1.65, -1.6615927],
+         [8.9, -2.5, -1.6, 1.54, 4.01, 1.57, -1.5215927],
+         [28.3, 0.5, -1.3, 1.47, 2.23, 1.48, -4.7115927],
+         [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, -0.35]],
+        device='cuda')
+    boxes1 = LiDARInstance3DBoxes(boxes1_tensor)
+
+    boxes2_tensor = torch.tensor([[1.2, -3.0, -1.9, 1.8, 3.4, 1.7, -1.9],
+                                  [8.1, -2.9, -1.8, 1.5, 4.1, 1.6, -1.8],
+                                  [31.3, -8.2, -1.6, 1.74, 3.77, 1.48, -0.35],
+                                  [20.1, -28.5, -1.9, 1.6, 3.5, 1.4, -5.1]],
+                                 device='cuda')
+    boxes2 = LiDARInstance3DBoxes(boxes2_tensor)
+
+    expected_iou_tensor = torch.tensor(
+        [[0.3710, 0.0000, 0.0000, 0.0000], [0.0000, 0.3322, 0.0000, 0.0000],
+         [0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 1.0000, 0.0000]],
+        device='cuda')
+    overlaps_3d_iou = boxes1.overlaps(boxes1, boxes2)
+    assert torch.allclose(
+        expected_iou_tensor, overlaps_3d_iou, rtol=1e-4, atol=1e-7)
+
+    expected_iof_tensor = torch.tensor(
+        [[0.5582, 0.0000, 0.0000, 0.0000], [0.0000, 0.5025, 0.0000, 0.0000],
+         [0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 1.0000, 0.0000]],
+        device='cuda')
+    overlaps_3d_iof = boxes1.overlaps(boxes1, boxes2, mode='iof')
+    assert torch.allclose(
+        expected_iof_tensor, overlaps_3d_iof, rtol=1e-4, atol=1e-7)
+
+    empty_boxes = []
+    boxes3 = LiDARInstance3DBoxes(empty_boxes)
+    overlaps_3d_empty = boxes1.overlaps(boxes3, boxes2)
+    assert overlaps_3d_empty.shape[0] == 0
+    assert overlaps_3d_empty.shape[1] == 4
+    # Test camera boxes 3D overlaps
+    cam_boxes1_tensor = Box3DMode.convert(boxes1_tensor, Box3DMode.LIDAR,
+                                          Box3DMode.CAM)
+    cam_boxes1 = CameraInstance3DBoxes(cam_boxes1_tensor)
+
+    cam_boxes2_tensor = Box3DMode.convert(boxes2_tensor, Box3DMode.LIDAR,
+                                          Box3DMode.CAM)
+    cam_boxes2 = CameraInstance3DBoxes(cam_boxes2_tensor)
+    cam_overlaps_3d = cam_boxes1.overlaps(cam_boxes1, cam_boxes2)
+
+    # same boxes under different coordinates should have the same iou
+    assert torch.allclose(
+        expected_iou_tensor, cam_overlaps_3d, rtol=1e-3, atol=1e-4)
+    assert torch.allclose(
+        cam_overlaps_3d, overlaps_3d_iou, rtol=1e-3, atol=1e-4)
+
+    with pytest.raises(AssertionError):
+        cam_boxes1.overlaps(cam_boxes1, boxes1)
+    with pytest.raises(AssertionError):
+        boxes1.overlaps(cam_boxes1, boxes1)
+
+
+def test_depth_boxes3d():
+    # test empty initialization
+    empty_boxes = []
+    boxes = DepthInstance3DBoxes(empty_boxes)
+    assert boxes.tensor.shape[0] == 0
+    assert boxes.tensor.shape[1] == 7
+
+    # Test init with numpy array
+    np_boxes = np.array(
+        [[1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601],
+         [2.3262, 3.3065, 0.44255, 0.8234, 0.5325, 1.0099,
2.9971]], + dtype=np.float32) + boxes_1 = DepthInstance3DBoxes(np_boxes) + assert torch.allclose(boxes_1.tensor, torch.from_numpy(np_boxes)) + + # test properties + + assert boxes_1.volume.size(0) == 2 + assert (boxes_1.center == boxes_1.bottom_center).all() + expected_tensor = torch.tensor([[1.4856, 2.5299, -0.1093], + [2.3262, 3.3065, 0.9475]]) + assert torch.allclose(boxes_1.gravity_center, expected_tensor) + expected_tensor = torch.tensor([[1.4856, 2.5299, 0.9385, 2.1404, 3.0601], + [2.3262, 3.3065, 0.8234, 0.5325, 2.9971]]) + assert torch.allclose(boxes_1.bev, expected_tensor) + expected_tensor = torch.tensor([[1.0164, 1.4597, 1.9548, 3.6001], + [1.9145, 3.0402, 2.7379, 3.5728]]) + assert torch.allclose(boxes_1.nearest_bev, expected_tensor, 1e-4) + assert repr(boxes) == ( + 'DepthInstance3DBoxes(\n tensor([], size=(0, 7)))') + + # test init with torch.Tensor + th_boxes = torch.tensor( + [[2.4593, 2.5870, -0.4321, 0.8597, 0.6193, 1.0204, 3.0693], + [1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601]], + dtype=torch.float32) + boxes_2 = DepthInstance3DBoxes(th_boxes) + assert torch.allclose(boxes_2.tensor, th_boxes) + + # test clone/to/device + boxes_2 = boxes_2.clone() + boxes_1 = boxes_1.to(boxes_2.device) + + # test box concatenation + expected_tensor = torch.tensor( + [[1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601], + [2.3262, 3.3065, 0.44255, 0.8234, 0.5325, 1.0099, 2.9971], + [2.4593, 2.5870, -0.4321, 0.8597, 0.6193, 1.0204, 3.0693], + [1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 3.0601]]) + boxes = DepthInstance3DBoxes.cat([boxes_1, boxes_2]) + assert torch.allclose(boxes.tensor, expected_tensor) + # concatenate empty list + empty_boxes = DepthInstance3DBoxes.cat([]) + assert empty_boxes.tensor.shape[0] == 0 + assert empty_boxes.tensor.shape[-1] == 7 + + # test box flip + points = torch.tensor([[0.6762, 1.2559, -1.4658, 2.5359], + [0.8784, 4.7814, -1.3857, 0.7167], + [-0.2517, 6.7053, -0.9697, 0.5599], + [0.5520, 0.6533, -0.5265, 1.0032], + [-0.5358, 4.5870, -1.4741, 0.0556]]) + expected_tensor = torch.tensor( + [[-1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 0.0815], + [-2.3262, 3.3065, 0.4426, 0.8234, 0.5325, 1.0099, 0.1445], + [-2.4593, 2.5870, -0.4321, 0.8597, 0.6193, 1.0204, 0.0723], + [-1.4856, 2.5299, -0.5570, 0.9385, 2.1404, 0.8954, 0.0815]]) + points = boxes.flip(bev_direction='horizontal', points=points) + expected_points = torch.tensor([[-0.6762, 1.2559, -1.4658, 2.5359], + [-0.8784, 4.7814, -1.3857, 0.7167], + [0.2517, 6.7053, -0.9697, 0.5599], + [-0.5520, 0.6533, -0.5265, 1.0032], + [0.5358, 4.5870, -1.4741, 0.0556]]) + assert torch.allclose(boxes.tensor, expected_tensor, 1e-3) + assert torch.allclose(points, expected_points) + expected_tensor = torch.tensor( + [[-1.4856, -2.5299, -0.5570, 0.9385, 2.1404, 0.8954, -0.0815], + [-2.3262, -3.3065, 0.4426, 0.8234, 0.5325, 1.0099, -0.1445], + [-2.4593, -2.5870, -0.4321, 0.8597, 0.6193, 1.0204, -0.0723], + [-1.4856, -2.5299, -0.5570, 0.9385, 2.1404, 0.8954, -0.0815]]) + points = boxes.flip(bev_direction='vertical', points=points) + expected_points = torch.tensor([[-0.6762, -1.2559, -1.4658, 2.5359], + [-0.8784, -4.7814, -1.3857, 0.7167], + [0.2517, -6.7053, -0.9697, 0.5599], + [-0.5520, -0.6533, -0.5265, 1.0032], + [0.5358, -4.5870, -1.4741, 0.0556]]) + assert torch.allclose(boxes.tensor, expected_tensor, 1e-3) + assert torch.allclose(points, expected_points) + + # test box rotation + # with input torch.Tensor points and angle + boxes_rot = boxes.clone() + expected_tensor = torch.tensor( + 
[[-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585], + [-2.4016, -3.2521, 0.4426, 0.8234, 0.5325, 1.0099, -0.1215], + [-2.5181, -2.5298, -0.4321, 0.8597, 0.6193, 1.0204, -0.0493], + [-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585]]) + expected_tensor[:, -1:] -= 0.022998953275003075 * 2 + points, rot_mat_T = boxes_rot.rotate(-0.022998953275003075, points) + expected_points = torch.tensor([[-0.7049, -1.2400, -1.4658, 2.5359], + [-0.9881, -4.7599, -1.3857, 0.7167], + [0.0974, -6.7093, -0.9697, 0.5599], + [-0.5669, -0.6404, -0.5265, 1.0032], + [0.4302, -4.5981, -1.4741, 0.0556]]) + expected_rot_mat_T = torch.tensor([[0.9997, -0.0230, 0.0000], + [0.0230, 0.9997, 0.0000], + [0.0000, 0.0000, 1.0000]]) + assert torch.allclose(boxes_rot.tensor, expected_tensor, 1e-3) + assert torch.allclose(points, expected_points, 1e-3) + assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3) + + # with input torch.Tensor points and rotation matrix + points, rot_mat_T = boxes.rotate(-0.022998953275003075, points) # back + rot_mat = np.array([[0.99973554, 0.02299693, 0.], + [-0.02299693, 0.99973554, 0.], [0., 0., 1.]]) + points, rot_mat_T = boxes.rotate(rot_mat, points) + expected_rot_mat_T = torch.tensor([[0.99973554, 0.02299693, 0.0000], + [-0.02299693, 0.99973554, 0.0000], + [0.0000, 0.0000, 1.0000]]) + assert torch.allclose(boxes_rot.tensor, expected_tensor, 1e-3) + assert torch.allclose(points, expected_points, 1e-3) + assert torch.allclose(rot_mat_T, expected_rot_mat_T, 1e-3) + + # with input np.ndarray points and angle + points_np = np.array([[0.6762, 1.2559, -1.4658, 2.5359], + [0.8784, 4.7814, -1.3857, 0.7167], + [-0.2517, 6.7053, -0.9697, 0.5599], + [0.5520, 0.6533, -0.5265, 1.0032], + [-0.5358, 4.5870, -1.4741, 0.0556]]) + points_np, rot_mat_T_np = boxes.rotate(-0.022998953275003075, points_np) + expected_points_np = np.array([[0.7049, 1.2400, -1.4658, 2.5359], + [0.9881, 4.7599, -1.3857, 0.7167], + [-0.0974, 6.7093, -0.9697, 0.5599], + [0.5669, 0.6404, -0.5265, 1.0032], + [-0.4302, 4.5981, -1.4741, 0.0556]]) + expected_rot_mat_T_np = np.array([[0.99973554, -0.02299693, 0.0000], + [0.02299693, 0.99973554, 0.0000], + [0.0000, 0.0000, 1.0000]]) + expected_tensor = torch.tensor( + [[-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585], + [-2.4016, -3.2521, 0.4426, 0.8234, 0.5325, 1.0099, -0.1215], + [-2.5181, -2.5298, -0.4321, 0.8597, 0.6193, 1.0204, -0.0493], + [-1.5434, -2.4951, -0.5570, 0.9385, 2.1404, 0.8954, -0.0585]]) + expected_tensor[:, -1:] -= 0.022998953275003075 * 2 + assert torch.allclose(boxes.tensor, expected_tensor, 1e-3) + assert np.allclose(points_np, expected_points_np, 1e-3) + assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3) + + # with input DepthPoints and rotation matrix + points_np, rot_mat_T_np = boxes.rotate(-0.022998953275003075, points_np) + depth_points = DepthPoints(points_np, points_dim=4) + depth_points, rot_mat_T_np = boxes.rotate(rot_mat, depth_points) + points_np = depth_points.tensor.numpy() + expected_rot_mat_T_np = expected_rot_mat_T_np.T + assert torch.allclose(boxes.tensor, expected_tensor, 1e-3) + assert np.allclose(points_np, expected_points_np, 1e-3) + assert np.allclose(rot_mat_T_np, expected_rot_mat_T_np, 1e-3) + + expected_tensor = torch.tensor([[[-2.1217, -3.5105, -0.5570], + [-2.1217, -3.5105, 0.3384], + [-1.8985, -1.3818, 0.3384], + [-1.8985, -1.3818, -0.5570], + [-1.1883, -3.6084, -0.5570], + [-1.1883, -3.6084, 0.3384], + [-0.9651, -1.4796, 0.3384], + [-0.9651, -1.4796, -0.5570]], + [[-2.8519, -3.4460, 0.4426], + 
[-2.8519, -3.4460, 1.4525], + [-2.7632, -2.9210, 1.4525], + [-2.7632, -2.9210, 0.4426], + [-2.0401, -3.5833, 0.4426], + [-2.0401, -3.5833, 1.4525], + [-1.9513, -3.0582, 1.4525], + [-1.9513, -3.0582, 0.4426]], + [[-2.9755, -2.7971, -0.4321], + [-2.9755, -2.7971, 0.5883], + [-2.9166, -2.1806, 0.5883], + [-2.9166, -2.1806, -0.4321], + [-2.1197, -2.8789, -0.4321], + [-2.1197, -2.8789, 0.5883], + [-2.0608, -2.2624, 0.5883], + [-2.0608, -2.2624, -0.4321]], + [[-2.1217, -3.5105, -0.5570], + [-2.1217, -3.5105, 0.3384], + [-1.8985, -1.3818, 0.3384], + [-1.8985, -1.3818, -0.5570], + [-1.1883, -3.6084, -0.5570], + [-1.1883, -3.6084, 0.3384], + [-0.9651, -1.4796, 0.3384], + [-0.9651, -1.4796, -0.5570]]]) + + assert torch.allclose(boxes.corners, expected_tensor, 1e-3) + + th_boxes = torch.tensor( + [[0.61211395, 0.8129094, 0.10563634, 1.497534, 0.16927195, 0.27956772], + [1.430009, 0.49797538, 0.9382923, 0.07694054, 0.9312509, 1.8919173]], + dtype=torch.float32) + boxes = DepthInstance3DBoxes(th_boxes, box_dim=6, with_yaw=False) + expected_tensor = torch.tensor([[ + 0.64884546, 0.78390356, 0.10563634, 1.50373348, 0.23795205, 0.27956772, + 0 + ], + [ + 1.45139421, 0.43169443, 0.93829232, + 0.11967964, 0.93380373, 1.89191735, 0 + ]]) + boxes_3 = boxes.clone() + boxes_3.rotate(-0.04599790655000615) + assert torch.allclose(boxes_3.tensor, expected_tensor) + boxes.rotate(torch.tensor(-0.04599790655000615)) + assert torch.allclose(boxes.tensor, expected_tensor) + + # test bbox in_range_bev + expected_tensor = torch.tensor([1, 1], dtype=torch.bool) + mask = boxes.in_range_bev([0., -40., 70.4, 40.]) + assert (mask == expected_tensor).all() + mask = boxes.nonempty() + assert (mask == expected_tensor).all() + + # test bbox in_range + expected_tensor = torch.tensor([0, 1], dtype=torch.bool) + mask = boxes.in_range_3d([1, 0, -2, 2, 1, 5]) + assert (mask == expected_tensor).all() + + expected_tensor = torch.tensor([[[-0.1030, 0.6649, 0.1056], + [-0.1030, 0.6649, 0.3852], + [-0.1030, 0.9029, 0.3852], + [-0.1030, 0.9029, 0.1056], + [1.4007, 0.6649, 0.1056], + [1.4007, 0.6649, 0.3852], + [1.4007, 0.9029, 0.3852], + [1.4007, 0.9029, 0.1056]], + [[1.3916, -0.0352, 0.9383], + [1.3916, -0.0352, 2.8302], + [1.3916, 0.8986, 2.8302], + [1.3916, 0.8986, 0.9383], + [1.5112, -0.0352, 0.9383], + [1.5112, -0.0352, 2.8302], + [1.5112, 0.8986, 2.8302], + [1.5112, 0.8986, 0.9383]]]) + assert torch.allclose(boxes.corners, expected_tensor, 1e-3) + + # test points in boxes + if torch.cuda.is_available(): + box_idxs_of_pts = boxes.points_in_boxes_all(points.cuda()) + expected_idxs_of_pts = torch.tensor( + [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]], + device='cuda:0', + dtype=torch.int32) + assert torch.all(box_idxs_of_pts == expected_idxs_of_pts) + + # test get_surface_line_center + boxes = torch.tensor( + [[0.3294, 1.0359, 0.1171, 1.0822, 1.1247, 1.3721, -0.4916], + [-2.4630, -2.6324, -0.1616, 0.9202, 1.7896, 0.1992, -0.3185]]) + boxes = DepthInstance3DBoxes( + boxes, box_dim=boxes.shape[-1], with_yaw=True, origin=(0.5, 0.5, 0.5)) + surface_center, line_center = boxes.get_surface_line_center() + + expected_surface_center = torch.tensor([[0.3294, 1.0359, 0.8031], + [0.3294, 1.0359, -0.5689], + [0.5949, 1.5317, 0.1171], + [0.1533, 0.5018, 0.1171], + [0.8064, 0.7805, 0.1171], + [-0.1845, 1.2053, 0.1171], + [-2.4630, -2.6324, -0.0620], + [-2.4630, -2.6324, -0.2612], + [-2.0406, -1.8436, -0.1616], + [-2.7432, -3.4822, -0.1616], + [-2.0574, -2.8496, -0.1616], + [-2.9000, -2.4883, -0.1616]]) + + expected_line_center = torch.tensor([[0.8064, 
0.7805, 0.8031], + [-0.1845, 1.2053, 0.8031], + [0.5949, 1.5317, 0.8031], + [0.1533, 0.5018, 0.8031], + [0.8064, 0.7805, -0.5689], + [-0.1845, 1.2053, -0.5689], + [0.5949, 1.5317, -0.5689], + [0.1533, 0.5018, -0.5689], + [1.0719, 1.2762, 0.1171], + [0.6672, 0.3324, 0.1171], + [0.1178, 1.7871, 0.1171], + [-0.3606, 0.6713, 0.1171], + [-2.0574, -2.8496, -0.0620], + [-2.9000, -2.4883, -0.0620], + [-2.0406, -1.8436, -0.0620], + [-2.7432, -3.4822, -0.0620], + [-2.0574, -2.8496, -0.2612], + [-2.9000, -2.4883, -0.2612], + [-2.0406, -1.8436, -0.2612], + [-2.7432, -3.4822, -0.2612], + [-1.6350, -2.0607, -0.1616], + [-2.3062, -3.6263, -0.1616], + [-2.4462, -1.6264, -0.1616], + [-3.1802, -3.3381, -0.1616]]) + + assert torch.allclose(surface_center, expected_surface_center, atol=1e-04) + assert torch.allclose(line_center, expected_line_center, atol=1e-04) + + +def test_rotation_3d_in_axis(): + # clockwise + points = torch.tensor([[[-0.4599, -0.0471, 0.0000], + [-0.4599, -0.0471, 1.8433], + [-0.4599, 0.0471, 1.8433]], + [[-0.2555, -0.2683, 0.0000], + [-0.2555, -0.2683, 0.9072], + [-0.2555, 0.2683, 0.9072]]]) + rotated = rotation_3d_in_axis( + points, + torch.tensor([-np.pi / 10, np.pi / 10]), + axis=0, + clockwise=True) + expected_rotated = torch.tensor( + [[[-0.4599, -0.0448, -0.0146], [-0.4599, -0.6144, 1.7385], + [-0.4599, -0.5248, 1.7676]], + [[-0.2555, -0.2552, 0.0829], [-0.2555, 0.0252, 0.9457], + [-0.2555, 0.5355, 0.7799]]], + dtype=torch.float32) + assert torch.allclose(rotated, expected_rotated, atol=1e-3) + + # anti-clockwise with return rotation mat + points = torch.tensor([[[-0.4599, -0.0471, 0.0000], + [-0.4599, -0.0471, 1.8433]]]) + rotated = rotation_3d_in_axis(points, torch.tensor([np.pi / 2]), axis=0) + expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471], + [-0.4599, -1.8433, -0.0471]]]) + assert torch.allclose(rotated, expected_rotated, 1e-3) + + points = torch.tensor([[[-0.4599, -0.0471, 0.0000], + [-0.4599, -0.0471, 1.8433]]]) + rotated, mat = rotation_3d_in_axis( + points, torch.tensor([np.pi / 2]), axis=0, return_mat=True) + expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471], + [-0.4599, -1.8433, -0.0471]]]) + expected_mat = torch.tensor([[[1, 0, 0], [0, 0, 1], [0, -1, 0]]]).float() + assert torch.allclose(rotated, expected_rotated, atol=1e-6) + assert torch.allclose(mat, expected_mat, atol=1e-6) + + points = torch.tensor([[[-0.4599, -0.0471, 0.0000], + [-0.4599, -0.0471, 1.8433]], + [[-0.2555, -0.2683, 0.0000], + [-0.2555, -0.2683, 0.9072]]]) + rotated = rotation_3d_in_axis(points, np.pi / 2, axis=0) + expected_rotated = torch.tensor([[[-0.4599, 0.0000, -0.0471], + [-0.4599, -1.8433, -0.0471]], + [[-0.2555, 0.0000, -0.2683], + [-0.2555, -0.9072, -0.2683]]]) + assert torch.allclose(rotated, expected_rotated, atol=1e-3) + + points = np.array([[[-0.4599, -0.0471, 0.0000], [-0.4599, -0.0471, + 1.8433]], + [[-0.2555, -0.2683, 0.0000], + [-0.2555, -0.2683, 0.9072]]]).astype(np.float32) + + rotated = rotation_3d_in_axis(points, np.pi / 2, axis=0) + expected_rotated = np.array([[[-0.4599, 0.0000, -0.0471], + [-0.4599, -1.8433, -0.0471]], + [[-0.2555, 0.0000, -0.2683], + [-0.2555, -0.9072, -0.2683]]]) + assert np.allclose(rotated, expected_rotated, atol=1e-3) + + points = torch.tensor([[[-0.4599, -0.0471, 0.0000], + [-0.4599, -0.0471, 1.8433]], + [[-0.2555, -0.2683, 0.0000], + [-0.2555, -0.2683, 0.9072]]]) + angles = [np.pi / 2, -np.pi / 2] + rotated = rotation_3d_in_axis(points, angles, axis=0).numpy() + expected_rotated = np.array([[[-0.4599, 0.0000, -0.0471], + 
[-0.4599, -1.8433, -0.0471]], + [[-0.2555, 0.0000, 0.2683], + [-0.2555, 0.9072, 0.2683]]]) + assert np.allclose(rotated, expected_rotated, atol=1e-3) + + points = torch.tensor([[[-0.4599, -0.0471, 0.0000], + [-0.4599, -0.0471, 1.8433]], + [[-0.2555, -0.2683, 0.0000], + [-0.2555, -0.2683, 0.9072]]]) + angles = [np.pi / 2, -np.pi / 2] + rotated = rotation_3d_in_axis(points, angles, axis=1).numpy() + expected_rotated = np.array([[[0.0000, -0.0471, 0.4599], + [1.8433, -0.0471, 0.4599]], + [[0.0000, -0.2683, -0.2555], + [-0.9072, -0.2683, -0.2555]]]) + assert np.allclose(rotated, expected_rotated, atol=1e-3) + + points = torch.tensor([[[-0.4599, -0.0471, 0.0000], + [-0.4599, 0.0471, 1.8433]], + [[-0.2555, -0.2683, 0.0000], + [0.2555, -0.2683, 0.9072]]]) + angles = [np.pi / 2, -np.pi / 2] + rotated = rotation_3d_in_axis(points, angles, axis=2).numpy() + expected_rotated = np.array([[[0.0471, -0.4599, 0.0000], + [-0.0471, -0.4599, 1.8433]], + [[-0.2683, 0.2555, 0.0000], + [-0.2683, -0.2555, 0.9072]]]) + assert np.allclose(rotated, expected_rotated, atol=1e-3) + + points = torch.tensor([[[-0.0471, 0.0000], [-0.0471, 1.8433]], + [[-0.2683, 0.0000], [-0.2683, 0.9072]]]) + angles = [np.pi / 2, -np.pi / 2] + rotated = rotation_3d_in_axis(points, angles) + expected_rotated = np.array([[[0.0000, -0.0471], [-1.8433, -0.0471]], + [[0.0000, 0.2683], [0.9072, 0.2683]]]) + assert np.allclose(rotated, expected_rotated, atol=1e-3) + + +def test_rotation_2d(): + angles = np.array([3.14]) + corners = np.array([[[-0.235, -0.49], [-0.235, 0.49], [0.235, 0.49], + [0.235, -0.49]]]) + corners_rotated = rotation_3d_in_axis(corners, angles) + expected_corners = np.array([[[0.2357801, 0.48962511], + [0.2342193, -0.49037365], + [-0.2357801, -0.48962511], + [-0.2342193, 0.49037365]]]) + assert np.allclose(corners_rotated, expected_corners) + + +def test_limit_period(): + torch.manual_seed(0) + val = torch.rand([5, 1]) + result = limit_period(val) + expected_result = torch.tensor([[0.4963], [0.7682], [0.0885], [0.1320], + [0.3074]]) + assert torch.allclose(result, expected_result, 1e-3) + + val = val.numpy() + result = limit_period(val) + expected_result = expected_result.numpy() + assert np.allclose(result, expected_result, 1e-3) + + +def test_xywhr2xyxyr(): + torch.manual_seed(0) + xywhr = torch.tensor([[1., 2., 3., 4., 5.], [0., 1., 2., 3., 4.]]) + xyxyr = xywhr2xyxyr(xywhr) + expected_xyxyr = torch.tensor([[-0.5000, 0.0000, 2.5000, 4.0000, 5.0000], + [-1.0000, -0.5000, 1.0000, 2.5000, 4.0000]]) + + assert torch.allclose(xyxyr, expected_xyxyr) + + +class test_get_box_type(unittest.TestCase): + + def test_get_box_type(self): + box_type_3d, box_mode_3d = get_box_type('camera') + assert box_type_3d == CameraInstance3DBoxes + assert box_mode_3d == Box3DMode.CAM + + box_type_3d, box_mode_3d = get_box_type('depth') + assert box_type_3d == DepthInstance3DBoxes + assert box_mode_3d == Box3DMode.DEPTH + + box_type_3d, box_mode_3d = get_box_type('lidar') + assert box_type_3d == LiDARInstance3DBoxes + assert box_mode_3d == Box3DMode.LIDAR + + def test_bad_box_type(self): + self.assertRaises(ValueError, get_box_type, 'test') + + +def test_points_cam2img(): + torch.manual_seed(0) + points = torch.rand([5, 3]) + proj_mat = torch.rand([4, 4]) + point_2d_res = points_cam2img(points, proj_mat) + expected_point_2d_res = torch.tensor([[0.5832, 0.6496], [0.6146, 0.7910], + [0.6994, 0.7782], [0.5623, 0.6303], + [0.4359, 0.6532]]) + assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3) + + points = points.numpy() + proj_mat = 
proj_mat.numpy() + point_2d_res = points_cam2img(points, proj_mat) + expected_point_2d_res = expected_point_2d_res.numpy() + assert np.allclose(point_2d_res, expected_point_2d_res, 1e-3) + + points = torch.from_numpy(points) + point_2d_res = points_cam2img(points, proj_mat) + expected_point_2d_res = torch.from_numpy(expected_point_2d_res) + assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3) + + point_2d_res = points_cam2img(points, proj_mat, with_depth=True) + expected_point_2d_res = torch.tensor([[0.5832, 0.6496, 1.7577], + [0.6146, 0.7910, 1.5477], + [0.6994, 0.7782, 2.0091], + [0.5623, 0.6303, 1.8739], + [0.4359, 0.6532, 1.2056]]) + assert torch.allclose(point_2d_res, expected_point_2d_res, 1e-3) + + +def test_points_in_boxes(): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + lidar_pts = torch.tensor([[1.0, 4.3, 0.1], [1.0, 4.4, + 0.1], [1.1, 4.3, 0.1], + [0.9, 4.3, 0.1], [1.0, -0.3, 0.1], + [1.0, -0.4, 0.1], [2.9, 0.1, 6.0], + [-0.9, 3.9, 6.0]]).cuda() + lidar_boxes = torch.tensor([[1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 6], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 2], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, 7 * np.pi / 6], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, -np.pi / 6]], + dtype=torch.float32).cuda() + lidar_boxes = LiDARInstance3DBoxes(lidar_boxes) + + point_indices = lidar_boxes.points_in_boxes_all(lidar_pts) + expected_point_indices = torch.tensor( + [[1, 0, 1, 1], [0, 0, 0, 0], [1, 0, 1, 0], [0, 0, 0, 1], [1, 0, 1, 1], + [0, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]], + dtype=torch.int32).cuda() + assert point_indices.shape == torch.Size([8, 4]) + assert (point_indices == expected_point_indices).all() + + lidar_pts = torch.tensor([[1.0, 4.3, 0.1], [1.0, 4.4, + 0.1], [1.1, 4.3, 0.1], + [0.9, 4.3, 0.1], [1.0, -0.3, 0.1], + [1.0, -0.4, 0.1], [2.9, 0.1, 6.0], + [-0.9, 3.9, 6.0]]).cuda() + lidar_boxes = torch.tensor([[1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 6], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 2], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, 7 * np.pi / 6], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, -np.pi / 6]], + dtype=torch.float32).cuda() + lidar_boxes = LiDARInstance3DBoxes(lidar_boxes) + + point_indices = lidar_boxes.points_in_boxes_part(lidar_pts) + expected_point_indices = torch.tensor([0, -1, 0, 3, 0, -1, 1, 1], + dtype=torch.int32).cuda() + assert point_indices.shape == torch.Size([8]) + assert (point_indices == expected_point_indices).all() + + depth_boxes = torch.tensor([[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3], + [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]], + dtype=torch.float32).cuda() + depth_boxes = DepthInstance3DBoxes(depth_boxes) + depth_pts = torch.tensor( + [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], + [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3], + [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [ + -16, -18, 9 + ], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]]], + dtype=torch.float32).cuda() + + point_indices = depth_boxes.points_in_boxes_all(depth_pts) + expected_point_indices = torch.tensor( + [[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0], + [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]], + dtype=torch.int32).cuda() + assert point_indices.shape == torch.Size([15, 2]) + assert (point_indices == expected_point_indices).all() + + point_indices = depth_boxes.points_in_boxes_part(depth_pts) + expected_point_indices = torch.tensor( + [0, 0, 0, 0, 0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + dtype=torch.int32).cuda() + assert point_indices.shape == torch.Size([15]) + assert 
(point_indices == expected_point_indices).all() + + depth_boxes = torch.tensor([[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3], + [-10.0, 23.0, 16.0, 10, 20, 20, 0.5], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 6], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, np.pi / 2], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, 7 * np.pi / 6], + [1.0, 2.0, 0.0, 4.0, 4.0, 6.0, -np.pi / 6]], + dtype=torch.float32).cuda() + cam_boxes = DepthInstance3DBoxes(depth_boxes).convert_to(Box3DMode.CAM) + depth_pts = torch.tensor( + [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], + [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3], + [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9], + [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4], [1.0, 4.3, 0.1], + [1.0, 4.4, 0.1], [1.1, 4.3, 0.1], [0.9, 4.3, 0.1], [1.0, -0.3, 0.1], + [1.0, -0.4, 0.1], [2.9, 0.1, 6.0], [-0.9, 3.9, 6.0]], + dtype=torch.float32).cuda() + + cam_pts = DepthPoints(depth_pts).convert_to(Coord3DMode.CAM).tensor + + point_indices = cam_boxes.points_in_boxes_all(cam_pts) + expected_point_indices = torch.tensor( + [[1, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1], + [1, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], + [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0], + [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]], + dtype=torch.int32).cuda() + assert point_indices.shape == torch.Size([23, 6]) + assert (point_indices == expected_point_indices).all() + + point_indices = cam_boxes.points_in_boxes_batch(cam_pts) + assert (point_indices == expected_point_indices).all() + + point_indices = cam_boxes.points_in_boxes_part(cam_pts) + expected_point_indices = torch.tensor([ + 0, 0, 0, 0, 0, 1, -1, -1, -1, -1, -1, -1, 3, -1, -1, 2, 3, 3, 2, 2, 3, + 0, 0 + ], + dtype=torch.int32).cuda() + assert point_indices.shape == torch.Size([23]) + assert (point_indices == expected_point_indices).all() + + point_indices = cam_boxes.points_in_boxes(cam_pts) + assert (point_indices == expected_point_indices).all() diff --git a/tests/test_structures/test_bbox/test_coord_3d_mode.py b/tests/test_structures/test_bbox/test_coord_3d_mode.py new file mode 100755 index 0000000..bffbe39 --- /dev/null +++ b/tests/test_structures/test_bbox/test_coord_3d_mode.py @@ -0,0 +1,351 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
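+"""Tests for point and box conversion between camera, LiDAR and depth modes.
+
+The expected tensors below boil down to fixed axis permutations between the
+three conventions used in mmdet3d (CAM: x right, y down, z front; LIDAR:
+x front, y left, z up; DEPTH: x right, y front, z up). A minimal sketch,
+read off the asserted values in this file rather than the library internals:
+
+    # CAM -> LIDAR: (x, y, z) -> ( z, -x, -y)
+    # CAM -> DEPTH: (x, y, z) -> ( x,  z, -y)
+
+Extra attribute columns (e.g. color, height) are carried through unchanged.
+"""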
+import numpy as np +import torch + +from mmdet3d.structures import (CameraInstance3DBoxes, Coord3DMode, + DepthInstance3DBoxes, LiDARInstance3DBoxes, + limit_period) +from mmdet3d.structures.points import CameraPoints, DepthPoints, LiDARPoints + + +def test_points_conversion(): + """Test the conversion of points between different modes.""" + points_np = np.array([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, + 0.1502, 0.3707, 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, + 0.6565, 0.6248, 0.6954, 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, + 0.2803, 0.0258, 0.4896, 0.3269 + ]], + dtype=np.float32) + + # test CAM to LIDAR and DEPTH + cam_points = CameraPoints( + points_np, + points_dim=7, + attribute_dims=dict(color=[3, 4, 5], height=6)) + + convert_lidar_points = cam_points.convert_to(Coord3DMode.LIDAR) + expected_tensor = torch.tensor([[ + 2.9757e-01, 5.2422e+00, -4.0021e+01, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -9.1435e-01, 2.6675e+01, -5.5950e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 2.0089e-01, 5.8098e+00, -3.5409e+01, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -1.9461e-01, 3.1309e+01, -1.0901e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + + lidar_point_tensor = Coord3DMode.convert_point(cam_points.tensor, + Coord3DMode.CAM, + Coord3DMode.LIDAR) + assert torch.allclose(expected_tensor, convert_lidar_points.tensor, 1e-4) + assert torch.allclose(lidar_point_tensor, convert_lidar_points.tensor, + 1e-4) + + convert_depth_points = cam_points.convert_to(Coord3DMode.DEPTH) + expected_tensor = torch.tensor([[ + -5.2422e+00, 2.9757e-01, -4.0021e+01, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.6675e+01, -9.1435e-01, -5.5950e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -5.8098e+00, 2.0089e-01, -3.5409e+01, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -3.1309e+01, -1.9461e-01, -1.0901e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + + depth_point_tensor = Coord3DMode.convert_point(cam_points.tensor, + Coord3DMode.CAM, + Coord3DMode.DEPTH) + assert torch.allclose(expected_tensor, convert_depth_points.tensor, 1e-4) + assert torch.allclose(depth_point_tensor, convert_depth_points.tensor, + 1e-4) + + # test LIDAR to CAM and DEPTH + lidar_points = LiDARPoints( + points_np, + points_dim=7, + attribute_dims=dict(color=[3, 4, 5], height=6)) + + convert_cam_points = lidar_points.convert_to(Coord3DMode.CAM) + expected_tensor = torch.tensor([[ + -4.0021e+01, -2.9757e-01, -5.2422e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -5.5950e+00, 9.1435e-01, -2.6675e+01, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -3.5409e+01, -2.0089e-01, -5.8098e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -1.0901e+00, 1.9461e-01, -3.1309e+01, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + + cam_point_tensor = Coord3DMode.convert_point(lidar_points.tensor, + Coord3DMode.LIDAR, + Coord3DMode.CAM) + assert torch.allclose(expected_tensor, convert_cam_points.tensor, 1e-4) + assert torch.allclose(cam_point_tensor, convert_cam_points.tensor, 1e-4) + + convert_depth_points = lidar_points.convert_to(Coord3DMode.DEPTH) + expected_tensor = torch.tensor([[ + -4.0021e+01, -5.2422e+00, 2.9757e-01, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -5.5950e+00, 
-2.6675e+01, -9.1435e-01, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -3.5409e+01, -5.8098e+00, 2.0089e-01, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -1.0901e+00, -3.1309e+01, -1.9461e-01, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + + depth_point_tensor = Coord3DMode.convert_point(lidar_points.tensor, + Coord3DMode.LIDAR, + Coord3DMode.DEPTH) + assert torch.allclose(expected_tensor, convert_depth_points.tensor, 1e-4) + assert torch.allclose(depth_point_tensor, convert_depth_points.tensor, + 1e-4) + + # test DEPTH to CAM and LIDAR + depth_points = DepthPoints( + points_np, + points_dim=7, + attribute_dims=dict(color=[3, 4, 5], height=6)) + + convert_cam_points = depth_points.convert_to(Coord3DMode.CAM) + expected_tensor = torch.tensor([[ + -5.2422e+00, -2.9757e-01, 4.0021e+01, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.6675e+01, 9.1435e-01, 5.5950e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -5.8098e+00, -2.0089e-01, 3.5409e+01, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -3.1309e+01, 1.9461e-01, 1.0901e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + + cam_point_tensor = Coord3DMode.convert_point(depth_points.tensor, + Coord3DMode.DEPTH, + Coord3DMode.CAM) + assert torch.allclose(expected_tensor, convert_cam_points.tensor, 1e-4) + assert torch.allclose(cam_point_tensor, convert_cam_points.tensor, 1e-4) + + rt_mat_provided = torch.tensor([[0.99789, -0.012698, -0.063678], + [-0.012698, 0.92359, -0.38316], + [0.063678, 0.38316, 0.92148]]) + + depth_points_new = torch.cat([ + depth_points.tensor[:, :3] @ rt_mat_provided.t(), + depth_points.tensor[:, 3:] + ], + dim=1) + mat = rt_mat_provided.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) + rt_mat_provided = mat @ rt_mat_provided.transpose(1, 0) + cam_point_tensor_new = Coord3DMode.convert_point( + depth_points_new, + Coord3DMode.DEPTH, + Coord3DMode.CAM, + rt_mat=rt_mat_provided) + assert torch.allclose(expected_tensor, cam_point_tensor_new, 1e-4) + + convert_lidar_points = depth_points.convert_to(Coord3DMode.LIDAR) + expected_tensor = torch.tensor([[ + 4.0021e+01, 5.2422e+00, 2.9757e-01, 6.6660e-01, 1.9560e-01, 4.9740e-01, + 9.4090e-01 + ], + [ + 5.5950e+00, 2.6675e+01, -9.1435e-01, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 3.5409e+01, 5.8098e+00, 2.0089e-01, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + 1.0901e+00, 3.1309e+01, -1.9461e-01, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + + lidar_point_tensor = Coord3DMode.convert_point(depth_points.tensor, + Coord3DMode.DEPTH, + Coord3DMode.LIDAR) + assert torch.allclose(lidar_point_tensor, convert_lidar_points.tensor, + 1e-4) + assert torch.allclose(lidar_point_tensor, convert_lidar_points.tensor, + 1e-4) + + +def test_boxes_conversion(): + # test CAM to LIDAR and DEPTH + cam_boxes = CameraInstance3DBoxes( + [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48], + [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62], + [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57], + [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69], + [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]]) + convert_lidar_boxes = Coord3DMode.convert(cam_boxes, Coord3DMode.CAM, + Coord3DMode.LIDAR) + + expected_tensor = torch.tensor([[ + -1.7501, -1.7802, -2.5162, 1.7500, 1.6500, 3.3900, -1.4800 - np.pi / 2 + ], [ + -1.6357, -8.9594, -2.4567, 1.5400, 1.5700, 4.0100, -1.6200 - np.pi / 2 + ], [-1.3033, -28.2967, 0.5558, 
1.4700, 1.4800, 2.2300, 1.5700 - np.pi / 2], + [ + -1.7361, -26.6690, -21.8230, 1.5600, + 1.4000, 3.4800, 1.6900 - np.pi / 2 + ], + [ + -1.6218, -31.3198, -8.1621, 1.7400, + 1.4800, 3.7700, -2.7900 - np.pi / 2 + ]]) + expected_tensor[:, -1:] = limit_period( + expected_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(expected_tensor, convert_lidar_boxes.tensor, 1e-3) + + convert_depth_boxes = Coord3DMode.convert(cam_boxes, Coord3DMode.CAM, + Coord3DMode.DEPTH) + expected_tensor = torch.tensor( + [[1.7802, -1.7501, -2.5162, 1.7500, 1.6500, 3.3900, -1.4800], + [8.9594, -1.6357, -2.4567, 1.5400, 1.5700, 4.0100, -1.6200], + [28.2967, -1.3033, 0.5558, 1.4700, 1.4800, 2.2300, 1.5700], + [26.6690, -1.7361, -21.8230, 1.5600, 1.4000, 3.4800, 1.6900], + [31.3198, -1.6218, -8.1621, 1.7400, 1.4800, 3.7700, -2.7900]]) + assert torch.allclose(expected_tensor, convert_depth_boxes.tensor, 1e-3) + + # test LIDAR to CAM and DEPTH + lidar_boxes = LiDARInstance3DBoxes( + [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48], + [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62], + [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57], + [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69], + [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]]) + convert_cam_boxes = Coord3DMode.convert(lidar_boxes, Coord3DMode.LIDAR, + Coord3DMode.CAM) + expected_tensor = torch.tensor([ + [-2.5162, 1.7501, 1.7802, 1.7500, 1.6500, 3.3900, -1.4800 - np.pi / 2], + [-2.4567, 1.6357, 8.9594, 1.5400, 1.5700, 4.0100, -1.6200 - np.pi / 2], + [0.5558, 1.3033, 28.2967, 1.4700, 1.4800, 2.2300, 1.5700 - np.pi / 2], + [ + -21.8230, 1.7361, 26.6690, 1.5600, 1.4000, 3.4800, + 1.6900 - np.pi / 2 + ], + [ + -8.1621, 1.6218, 31.3198, 1.7400, 1.4800, 3.7700, + -2.7900 - np.pi / 2 + ] + ]) + expected_tensor[:, -1:] = limit_period( + expected_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(expected_tensor, convert_cam_boxes.tensor, 1e-3) + + convert_depth_boxes = Coord3DMode.convert(lidar_boxes, Coord3DMode.LIDAR, + Coord3DMode.DEPTH) + expected_tensor = torch.tensor([[ + -2.5162, 1.7802, -1.7501, 1.7500, 3.3900, 1.6500, 1.4800 + np.pi / 2 + ], [-2.4567, 8.9594, -1.6357, 1.5400, 4.0100, 1.5700, 1.6200 + np.pi / 2], + [ + 0.5558, 28.2967, -1.3033, 1.4700, + 2.2300, 1.4800, -1.5700 + np.pi / 2 + ], + [ + -21.8230, 26.6690, -1.7361, 1.5600, + 3.4800, 1.4000, -1.6900 + np.pi / 2 + ], + [ + -8.1621, 31.3198, -1.6218, 1.7400, + 3.7700, 1.4800, 2.7900 + np.pi / 2 + ]]) + expected_tensor[:, -1:] = limit_period( + expected_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(expected_tensor, convert_depth_boxes.tensor, 1e-3) + + # test DEPTH to CAM and LIDAR + depth_boxes = DepthInstance3DBoxes( + [[1.7802081, 2.516249, -1.7501148, 1.75, 3.39, 1.65, 1.48], + [8.959413, 2.4567227, -1.6357126, 1.54, 4.01, 1.57, 1.62], + [28.2967, -0.5557558, -1.303325, 1.47, 2.23, 1.48, -1.57], + [26.66902, 21.82302, -1.736057, 1.56, 3.48, 1.4, -1.69], + [31.31978, 8.162144, -1.6217787, 1.74, 3.77, 1.48, 2.79]]) + convert_cam_boxes = Coord3DMode.convert(depth_boxes, Coord3DMode.DEPTH, + Coord3DMode.CAM) + expected_tensor = torch.tensor( + [[1.7802, 1.7501, 2.5162, 1.7500, 1.6500, 3.3900, -1.4800], + [8.9594, 1.6357, 2.4567, 1.5400, 1.5700, 4.0100, -1.6200], + [28.2967, 1.3033, -0.5558, 1.4700, 1.4800, 2.2300, 1.5700], + [26.6690, 1.7361, 21.8230, 1.5600, 1.4000, 3.4800, 1.6900], + [31.3198, 1.6218, 8.1621, 1.7400, 1.4800, 3.7700, -2.7900]]) + assert torch.allclose(expected_tensor, convert_cam_boxes.tensor, 1e-3) + + 
convert_lidar_boxes = Coord3DMode.convert(depth_boxes, Coord3DMode.DEPTH, + Coord3DMode.LIDAR) + expected_tensor = torch.tensor([[ + 2.5162, -1.7802, -1.7501, 1.7500, 3.3900, 1.6500, 1.4800 - np.pi / 2 + ], [ + 2.4567, -8.9594, -1.6357, 1.5400, 4.0100, 1.5700, 1.6200 - np.pi / 2 + ], [ + -0.5558, -28.2967, -1.3033, 1.4700, 2.2300, 1.4800, -1.5700 - np.pi / 2 + ], [ + 21.8230, -26.6690, -1.7361, 1.5600, 3.4800, 1.4000, -1.6900 - np.pi / 2 + ], [8.1621, -31.3198, -1.6218, 1.7400, 3.7700, 1.4800, + 2.7900 - np.pi / 2]]) + expected_tensor[:, -1:] = limit_period( + expected_tensor[:, -1:], period=np.pi * 2) + assert torch.allclose(expected_tensor, convert_lidar_boxes.tensor, 1e-3) diff --git a/tests/test_structures/test_det3d_data_sample.py b/tests/test_structures/test_det3d_data_sample.py new file mode 100755 index 0000000..5532036 --- /dev/null +++ b/tests/test_structures/test_det3d_data_sample.py @@ -0,0 +1,154 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np +import pytest +import torch +from mmengine.structures import InstanceData + +from mmdet3d.structures import Det3DDataSample, PointData + + +def _equal(a, b): + if isinstance(a, (torch.Tensor, np.ndarray)): + return (a == b).all() + else: + return a == b + + +class TestDet3DDataSample(TestCase): + + def test_init(self): + meta_info = dict( + img_size=[256, 256], + scale_factor=np.array([1.5, 1.5]), + img_shape=torch.rand(4)) + + det3d_data_sample = Det3DDataSample(metainfo=meta_info) + assert 'img_size' in det3d_data_sample + assert det3d_data_sample.img_size == [256, 256] + assert det3d_data_sample.get('img_size') == [256, 256] + + def test_setter(self): + det3d_data_sample = Det3DDataSample() + # test gt_instances_3d + gt_instances_3d_data = dict( + bboxes_3d=torch.rand(4, 7), labels_3d=torch.rand(4)) + gt_instances_3d = InstanceData(**gt_instances_3d_data) + det3d_data_sample.gt_instances_3d = gt_instances_3d + assert 'gt_instances_3d' in det3d_data_sample + assert _equal(det3d_data_sample.gt_instances_3d.bboxes_3d, + gt_instances_3d_data['bboxes_3d']) + assert _equal(det3d_data_sample.gt_instances_3d.labels_3d, + gt_instances_3d_data['labels_3d']) + + # test pred_instances_3d + pred_instances_3d_data = dict( + bboxes_3d=torch.rand(2, 7), + labels_3d=torch.rand(2), + scores_3d=torch.rand(2)) + pred_instances_3d = InstanceData(**pred_instances_3d_data) + det3d_data_sample.pred_instances_3d = pred_instances_3d + assert 'pred_instances_3d' in det3d_data_sample + assert _equal(det3d_data_sample.pred_instances_3d.bboxes_3d, + pred_instances_3d_data['bboxes_3d']) + assert _equal(det3d_data_sample.pred_instances_3d.labels_3d, + pred_instances_3d_data['labels_3d']) + assert _equal(det3d_data_sample.pred_instances_3d.scores_3d, + pred_instances_3d_data['scores_3d']) + + # test pts_pred_instances_3d + pts_pred_instances_3d_data = dict( + bboxes_3d=torch.rand(2, 7), + labels_3d=torch.rand(2), + scores_3d=torch.rand(2)) + pts_pred_instances_3d = InstanceData(**pts_pred_instances_3d_data) + det3d_data_sample.pts_pred_instances_3d = pts_pred_instances_3d + assert 'pts_pred_instances_3d' in det3d_data_sample + assert _equal(det3d_data_sample.pts_pred_instances_3d.bboxes_3d, + pts_pred_instances_3d_data['bboxes_3d']) + assert _equal(det3d_data_sample.pts_pred_instances_3d.labels_3d, + pts_pred_instances_3d_data['labels_3d']) + assert _equal(det3d_data_sample.pts_pred_instances_3d.scores_3d, + pts_pred_instances_3d_data['scores_3d']) + + # test img_pred_instances_3d + img_pred_instances_3d_data 
= dict( + bboxes_3d=torch.rand(2, 7), + labels_3d=torch.rand(2), + scores_3d=torch.rand(2)) + img_pred_instances_3d = InstanceData(**img_pred_instances_3d_data) + det3d_data_sample.img_pred_instances_3d = img_pred_instances_3d + assert 'img_pred_instances_3d' in det3d_data_sample + assert _equal(det3d_data_sample.img_pred_instances_3d.bboxes_3d, + img_pred_instances_3d_data['bboxes_3d']) + assert _equal(det3d_data_sample.img_pred_instances_3d.labels_3d, + img_pred_instances_3d_data['labels_3d']) + assert _equal(det3d_data_sample.img_pred_instances_3d.scores_3d, + img_pred_instances_3d_data['scores_3d']) + + # test gt_pts_seg + gt_pts_seg_data = dict( + pts_instance_mask=torch.rand(20), pts_semantic_mask=torch.rand(20)) + gt_pts_seg = PointData(**gt_pts_seg_data) + det3d_data_sample.gt_pts_seg = gt_pts_seg + assert 'gt_pts_seg' in det3d_data_sample + assert _equal(det3d_data_sample.gt_pts_seg.pts_instance_mask, + gt_pts_seg_data['pts_instance_mask']) + assert _equal(det3d_data_sample.gt_pts_seg.pts_semantic_mask, + gt_pts_seg_data['pts_semantic_mask']) + + # test pred_pts_seg + pred_pts_seg_data = dict( + pts_instance_mask=torch.rand(20), pts_semantic_mask=torch.rand(20)) + pred_pts_seg = PointData(**pred_pts_seg_data) + det3d_data_sample.pred_pts_seg = pred_pts_seg + assert 'pred_pts_seg' in det3d_data_sample + assert _equal(det3d_data_sample.pred_pts_seg.pts_instance_mask, + pred_pts_seg_data['pts_instance_mask']) + assert _equal(det3d_data_sample.pred_pts_seg.pts_semantic_mask, + pred_pts_seg_data['pts_semantic_mask']) + + # test type error + with pytest.raises(AssertionError): + det3d_data_sample.pred_instances_3d = torch.rand(2, 4) + + with pytest.raises(AssertionError): + det3d_data_sample.pred_pts_seg = torch.rand(20) + + def test_deleter(self): + tmp_instances_3d_data = dict( + bboxes_3d=torch.rand(4, 4), labels_3d=torch.rand(4)) + + det3d_data_sample = Det3DDataSample() + gt_instances_3d = InstanceData(data=tmp_instances_3d_data) + det3d_data_sample.gt_instances_3d = gt_instances_3d + assert 'gt_instances_3d' in det3d_data_sample + del det3d_data_sample.gt_instances_3d + assert 'gt_instances_3d' not in det3d_data_sample + + pred_instances_3d = InstanceData(data=tmp_instances_3d_data) + det3d_data_sample.pred_instances_3d = pred_instances_3d + assert 'pred_instances_3d' in det3d_data_sample + del det3d_data_sample.pred_instances_3d + assert 'pred_instances_3d' not in det3d_data_sample + + pts_pred_instances_3d = InstanceData(data=tmp_instances_3d_data) + det3d_data_sample.pts_pred_instances_3d = pts_pred_instances_3d + assert 'pts_pred_instances_3d' in det3d_data_sample + del det3d_data_sample.pts_pred_instances_3d + assert 'pts_pred_instances_3d' not in det3d_data_sample + + img_pred_instances_3d = InstanceData(data=tmp_instances_3d_data) + det3d_data_sample.img_pred_instances_3d = img_pred_instances_3d + assert 'img_pred_instances_3d' in det3d_data_sample + del det3d_data_sample.img_pred_instances_3d + assert 'img_pred_instances_3d' not in det3d_data_sample + + pred_pts_seg_data = dict( + pts_instance_mask=torch.rand(20), pts_semantic_mask=torch.rand(20)) + pred_pts_seg = PointData(**pred_pts_seg_data) + det3d_data_sample.pred_pts_seg = pred_pts_seg + assert 'pred_pts_seg' in det3d_data_sample + del det3d_data_sample.pred_pts_seg + assert 'pred_pts_seg' not in det3d_data_sample diff --git a/tests/test_structures/test_ops/test_box_np_ops.py b/tests/test_structures/test_ops/test_box_np_ops.py new file mode 100755 index 0000000..2ce6cf6 --- /dev/null +++ 
b/tests/test_structures/test_ops/test_box_np_ops.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np + + +def test_camera_to_lidar(): + from mmdet3d.structures.ops.box_np_ops import camera_to_lidar + points = np.array([[1.84, 1.47, 8.41]]) + rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.], + [-0.01012729, 0.9999406, -0.00403767, 0.], + [0.00847068, 0.00412352, 0.9999556, 0.], [0., 0., 0., + 1.]]) + Trv2c = np.array([[0.00692796, -0.9999722, -0.00275783, -0.02457729], + [-0.00116298, 0.00274984, -0.9999955, -0.06127237], + [0.9999753, 0.00693114, -0.0011439, -0.3321029], + [0., 0., 0., 1.]]) + points_lidar = camera_to_lidar(points, rect, Trv2c) + expected_points = np.array([[8.73138192, -1.85591746, -1.59969933]]) + assert np.allclose(points_lidar, expected_points) + + +def test_box_camera_to_lidar(): + from mmdet3d.structures.ops.box_np_ops import box_camera_to_lidar + box = np.array([[1.84, 1.47, 8.41, 1.2, 1.89, 0.48, -0.01]]) + rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.], + [-0.01012729, 0.9999406, -0.00403767, 0.], + [0.00847068, 0.00412352, 0.9999556, 0.], [0., 0., 0., + 1.]]) + Trv2c = np.array([[0.00692796, -0.9999722, -0.00275783, -0.02457729], + [-0.00116298, 0.00274984, -0.9999955, -0.06127237], + [0.9999753, 0.00693114, -0.0011439, -0.3321029], + [0., 0., 0., 1.]]) + box_lidar = box_camera_to_lidar(box, rect, Trv2c) + expected_box = np.array([[ + 8.73138192, -1.85591746, -1.59969933, 1.2, 0.48, 1.89, 0.01 - np.pi / 2 + ]]) + assert np.allclose(box_lidar, expected_box) + + +def test_corners_nd(): + from mmdet3d.structures.ops.box_np_ops import corners_nd + dims = np.array([[0.47, 0.98]]) + corners = corners_nd(dims) + expected_corners = np.array([[[-0.235, -0.49], [-0.235, 0.49], + [0.235, 0.49], [0.235, -0.49]]]) + assert np.allclose(corners, expected_corners) + + +def test_center_to_corner_box2d(): + from mmdet3d.structures.ops.box_np_ops import center_to_corner_box2d + center = np.array([[9.348705, -3.6271024]]) + dims = np.array([[0.47, 0.98]]) + angles = np.array([3.14]) + corner = center_to_corner_box2d(center, dims, angles) + expected_corner = np.array([[[9.584485, -3.1374772], [9.582925, -4.117476], + [9.112926, -4.1167274], + [9.114486, -3.1367288]]]) + assert np.allclose(corner, expected_corner) + + center = np.array([[-0.0, 0.0]]) + dims = np.array([[4.0, 8.0]]) + angles = np.array([-0.785398]) # -45 degrees + corner = center_to_corner_box2d(center, dims, angles) + expected_corner = np.array([[[-4.24264, -1.41421], [1.41421, 4.24264], + [4.24264, 1.41421], [-1.41421, -4.24264]]]) + assert np.allclose(corner, expected_corner) + + +def test_points_in_convex_polygon_jit(): + from mmdet3d.structures.ops.box_np_ops import points_in_convex_polygon_jit + points = np.array([[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]) + polygons = np.array([[[1.0, 0.0], [0.0, 1.0], [0.0, 0.5], [0.0, 0.0]], + [[1.0, 0.0], [1.0, 1.0], [0.5, 1.0], [0.0, 1.0]], + [[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]]]) + res = points_in_convex_polygon_jit(points, polygons) + expected_res = np.array([[1, 0, 1], [0, 0, 0], [0, 1, 0]]).astype(bool) + assert np.allclose(res, expected_res) + + polygons = np.array([[[0.0, 0.0], [0.0, 1.0], [0.5, 0.5], [1.0, 0.0]], + [[0.0, 1.0], [1.0, 1.0], [1.0, 0.5], [1.0, 0.0]], + [[1.0, 0.0], [0.0, -1.0], [-1.0, 0.0], [0.0, 1.1]]]) + res = points_in_convex_polygon_jit(points, polygons, clockwise=True) + expected_res = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 0]]).astype(bool) + assert np.allclose(res, expected_res) diff 
--git a/tests/test_structures/test_point_data.py b/tests/test_structures/test_point_data.py new file mode 100755 index 0000000..20a72a6 --- /dev/null +++ b/tests/test_structures/test_point_data.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +from unittest import TestCase + +import numpy as np +import pytest +import torch + +from mmdet3d.structures import PointData + + +class TestPointData(TestCase): + + def setup_data(self): + metainfo = dict(sample_idx=random.randint(0, 100)) + points = torch.rand((5, 3)) + point_data = PointData(metainfo=metainfo, points=points) + return point_data + + def test_set_data(self): + point_data = self.setup_data() + + # test set '_metainfo_fields' or '_data_fields' + with self.assertRaises(AttributeError): + point_data._metainfo_fields = 1 + with self.assertRaises(AttributeError): + point_data._data_fields = 1 + + point_data.keypoints = torch.rand((5, 2)) + assert 'keypoints' in point_data + + def test_getitem(self): + point_data = PointData() + # length must be greater than 0 + with self.assertRaises(IndexError): + point_data[1] + + point_data = self.setup_data() + assert len(point_data) == 5 + slice_point_data = point_data[:2] + assert len(slice_point_data) == 2 + slice_point_data = point_data[1] + assert len(slice_point_data) == 1 + # assert the index should in 0 ~ len(point_data) - 1 + with pytest.raises(IndexError): + point_data[5] + + # isinstance(str, slice, int, torch.LongTensor, torch.BoolTensor) + item = torch.Tensor([1, 2, 3, 4]) # float + with pytest.raises(AssertionError): + point_data[item] + + # when input is a bool tensor, The shape of + # the input at index 0 should equal to + # the value length in instance_data_field + with pytest.raises(AssertionError): + point_data[item.bool()] + + # test LongTensor + long_tensor = torch.randint(5, (2, )) + long_index_point_data = point_data[long_tensor] + assert len(long_index_point_data) == len(long_tensor) + + # test BoolTensor + bool_tensor = torch.rand(5) > 0.5 + bool_index_point_data = point_data[bool_tensor] + assert len(bool_index_point_data) == bool_tensor.sum() + bool_tensor = torch.rand(5) > 1 + empty_point_data = point_data[bool_tensor] + assert len(empty_point_data) == bool_tensor.sum() + + # test list index + list_index = [1, 2] + list_index_point_data = point_data[list_index] + assert len(list_index_point_data) == len(list_index) + + # test list bool + list_bool = [True, False, True, False, False] + list_bool_point_data = point_data[list_bool] + assert len(list_bool_point_data) == 2 + + # test numpy + long_numpy = np.random.randint(5, size=2) + long_numpy_point_data = point_data[long_numpy] + assert len(long_numpy_point_data) == len(long_numpy) + + bool_numpy = np.random.rand(5) > 0.5 + bool_numpy_point_data = point_data[bool_numpy] + assert len(bool_numpy_point_data) == bool_numpy.sum() + + def test_len(self): + point_data = self.setup_data() + assert len(point_data) == 5 + point_data = PointData() + assert len(point_data) == 0 diff --git a/tests/test_structures/test_points/test_base_points.py b/tests/test_structures/test_points/test_base_points.py new file mode 100755 index 0000000..7984fdb --- /dev/null +++ b/tests/test_structures/test_points/test_base_points.py @@ -0,0 +1,268 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
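+"""Tests for the BasePoints container.
+
+The arrays below follow the (N, points_dim) layout in which the first three
+columns are xyz and ``attribute_dims`` names any extra columns. A minimal
+sketch of the 7-dim layout used in this file (illustrative only, mirroring
+the slices asserted below rather than restating the implementation):
+
+    # attribute_dims=dict(color=[3, 4, 5], height=6)
+    # column:   0  1  2    3  4  5       6
+    #           x  y  z    r  g  b    height
+
+so ``coord`` is columns 0:3, ``color`` is columns 3:6 and ``height`` is
+column 6, as asserted in ``test_base_points``.
+"""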
+import numpy as np +import pytest +import torch + +from mmdet3d.structures.points import BasePoints + + +def test_base_points(): + # test empty initialization + empty_boxes = [] + points = BasePoints(empty_boxes) + assert points.tensor.shape[0] == 0 + assert points.tensor.shape[1] == 3 + + # Test init with origin + points_np = np.array([[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01], + [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01], + [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01], + [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01]], + dtype=np.float32) + base_points = BasePoints(points_np, points_dim=3) + assert base_points.tensor.shape[0] == 4 + + # Test init with color and height + points_np = np.array([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, + 0.1502, 0.3707, 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, + 0.6565, 0.6248, 0.6954, 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, + 0.2803, 0.0258, 0.4896, 0.3269 + ]], + dtype=np.float32) + base_points = BasePoints( + points_np, + points_dim=7, + attribute_dims=dict(color=[3, 4, 5], height=6)) + expected_tensor = torch.tensor([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, + -9.14345860e-01, 0.1502, 0.3707, + 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, + 2.00889888e-01, 0.6565, 0.6248, 0.6954, + 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, + -1.94612112e-01, 0.2803, 0.0258, + 0.4896, 0.3269 + ]]) + + assert torch.allclose(expected_tensor, base_points.tensor) + assert torch.allclose(expected_tensor[:, :2], base_points.bev) + assert torch.allclose(expected_tensor[:, :3], base_points.coord) + assert torch.allclose(expected_tensor[:, 3:6], base_points.color) + assert torch.allclose(expected_tensor[:, 6], base_points.height) + + # test points clone + new_base_points = base_points.clone() + assert torch.allclose(new_base_points.tensor, base_points.tensor) + + # test points shuffle + new_base_points.shuffle() + assert new_base_points.tensor.shape == torch.Size([4, 7]) + + # test points rotation + rot_mat = torch.tensor([[0.93629336, -0.27509585, 0.21835066], + [0.28962948, 0.95642509, -0.03695701], + [-0.19866933, 0.0978434, 0.97517033]]) + + base_points.rotate(rot_mat) + expected_tensor = torch.tensor([[ + 6.6239e+00, 3.9748e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.3174e+01, 1.2600e+01, -6.9230e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 4.7760e+00, 3.5484e+01, -2.3813e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.8960e+01, 9.6364e+00, -7.0663e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, base_points.tensor, 1e-3) + + new_base_points = base_points.clone() + new_base_points.rotate(0.1, axis=2) + expected_tensor = torch.tensor([[ + 2.6226e+00, 4.0211e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.4316e+01, 1.0224e+01, -6.9230e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 1.2096e+00, 3.5784e+01, -2.3813e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.9777e+01, 6.6971e+00, -7.0663e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, new_base_points.tensor, 1e-3) + + # test points translation + translation_vector = 
torch.tensor([0.93629336, -0.27509585, 0.21835066]) + base_points.translate(translation_vector) + expected_tensor = torch.tensor([[ + 7.5602e+00, 3.9473e+01, -2.1152e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.2237e+01, 1.2325e+01, -6.7046e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 5.7123e+00, 3.5209e+01, -2.1629e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.8023e+01, 9.3613e+00, -6.8480e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, base_points.tensor, 1e-4) + + # test points filter + point_range = [-10, -40, -10, 10, 40, 10] + in_range_flags = base_points.in_range_3d(point_range) + expected_flags = torch.tensor([True, False, True, False]) + assert torch.all(in_range_flags == expected_flags) + + # test points scale + base_points.scale(1.2) + expected_tensor = torch.tensor([[ + 9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.6685e+01, 1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 6.8547e+00, 4.2251e+01, -2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -3.3628e+01, 1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, base_points.tensor, 1e-3) + + # test get_item + expected_tensor = torch.tensor( + [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297]]) + assert torch.allclose(expected_tensor, base_points[1].tensor, 1e-4) + expected_tensor = torch.tensor( + [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297], + [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]]) + assert torch.allclose(expected_tensor, base_points[1:3].tensor, 1e-4) + mask = torch.tensor([True, False, True, False]) + expected_tensor = torch.tensor( + [[9.0722, 47.3678, -2.5382, 0.6666, 0.1956, 0.4974, 0.9409], + [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]]) + assert torch.allclose(expected_tensor, base_points[mask].tensor, 1e-4) + expected_tensor = torch.tensor([[0.6666], [0.1502], [0.6565], [0.2803]]) + assert torch.allclose(expected_tensor, base_points[:, 3].tensor, 1e-4) + + # test length + assert len(base_points) == 4 + + # test repr + expected_repr = 'BasePoints(\n '\ + 'tensor([[ 9.0722e+00, 4.7368e+01, -2.5382e+00, '\ + '6.6660e-01, 1.9560e-01,\n 4.9740e-01, '\ + '9.4090e-01],\n '\ + '[-2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, '\ + '3.7070e-01,\n '\ + '1.0860e-01, 6.2970e-01],\n '\ + '[ 6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, '\ + '6.2480e-01,\n '\ + '6.9540e-01, 2.5380e-01],\n '\ + '[-3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, '\ + '2.5800e-02,\n '\ + '4.8960e-01, 3.2690e-01]]))' + assert expected_repr == str(base_points) + + # test concatenate + base_points_clone = base_points.clone() + cat_points = BasePoints.cat([base_points, base_points_clone]) + assert torch.allclose(cat_points.tensor[:len(base_points)], + base_points.tensor) + + # test iteration + for i, point in enumerate(base_points): + assert torch.allclose(point, base_points.tensor[i]) + + # test new_point + new_points = base_points.new_point([[1, 2, 3, 4, 5, 6, 7]]) + assert torch.allclose( + new_points.tensor, + torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=base_points.tensor.dtype)) + + # test BasePoint indexing + base_points = BasePoints( + points_np, + points_dim=7, + attribute_dims=dict(height=3, color=[4, 5, 6])) + assert torch.all(base_points[:, 3:].tensor == 
torch.tensor(points_np[:, + 3:])) + + # test set and get function for BasePoint color and height + base_points = BasePoints(points_np[:, :3]) + assert base_points.attribute_dims is None + base_points.height = points_np[:, 3] + assert base_points.attribute_dims == dict(height=3) + base_points.color = points_np[:, 4:] + assert base_points.attribute_dims == dict(height=3, color=[4, 5, 6]) + assert torch.allclose(base_points.height, + torch.tensor([0.6666, 0.1502, 0.6565, 0.2803])) + assert torch.allclose( + base_points.color, + torch.tensor([[0.1956, 0.4974, 0.9409], [0.3707, 0.1086, 0.6297], + [0.6248, 0.6954, 0.2538], [0.0258, 0.4896, 0.3269]])) + # values to be set should have correct shape (e.g. number of points) + with pytest.raises(ValueError): + base_points.coord = np.random.rand(5, 3) + with pytest.raises(ValueError): + base_points.height = np.random.rand(3) + with pytest.raises(ValueError): + base_points.color = np.random.rand(4, 2) + base_points.coord = points_np[:, [1, 2, 3]] + base_points.height = points_np[:, 0] + base_points.color = points_np[:, [4, 5, 6]] + assert np.allclose(base_points.coord, points_np[:, 1:4]) + assert np.allclose(base_points.height, points_np[:, 0]) + assert np.allclose(base_points.color, points_np[:, 4:]) diff --git a/tests/test_structures/test_points/test_cam_points.py b/tests/test_structures/test_points/test_cam_points.py new file mode 100755 index 0000000..f9ead56 --- /dev/null +++ b/tests/test_structures/test_points/test_cam_points.py @@ -0,0 +1,559 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet3d.structures.points import CameraPoints, LiDARPoints + + +def test_cam_points(): + # test empty initialization + empty_boxes = [] + points = CameraPoints(empty_boxes) + assert points.tensor.shape[0] == 0 + assert points.tensor.shape[1] == 3 + + # Test init with origin + points_np = np.array([[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01], + [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01], + [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01], + [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01]], + dtype=np.float32) + cam_points = CameraPoints(points_np, points_dim=3) + assert cam_points.tensor.shape[0] == 4 + + # Test init with color and height + points_np = np.array([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, + 0.1502, 0.3707, 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, + 0.6565, 0.6248, 0.6954, 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, + 0.2803, 0.0258, 0.4896, 0.3269 + ]], + dtype=np.float32) + cam_points = CameraPoints( + points_np, + points_dim=7, + attribute_dims=dict(color=[3, 4, 5], height=6)) + expected_tensor = torch.tensor([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, + -9.14345860e-01, 0.1502, 0.3707, + 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, + 2.00889888e-01, 0.6565, 0.6248, 0.6954, + 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, + -1.94612112e-01, 0.2803, 0.0258, + 0.4896, 0.3269 + ]]) + + assert torch.allclose(expected_tensor, cam_points.tensor) + assert torch.allclose(expected_tensor[:, [0, 2]], cam_points.bev) + assert torch.allclose(expected_tensor[:, :3], cam_points.coord) + assert torch.allclose(expected_tensor[:, 3:6], cam_points.color) + assert torch.allclose(expected_tensor[:, 6], cam_points.height) + + # test points 
clone + new_cam_points = cam_points.clone() + assert torch.allclose(new_cam_points.tensor, cam_points.tensor) + + # test points shuffle + new_cam_points.shuffle() + assert new_cam_points.tensor.shape == torch.Size([4, 7]) + + # test points rotation + rot_mat = torch.tensor([[0.93629336, -0.27509585, 0.21835066], + [0.28962948, 0.95642509, -0.03695701], + [-0.19866933, 0.0978434, 0.97517033]]) + cam_points.rotate(rot_mat) + expected_tensor = torch.tensor([[ + 6.6239e+00, 3.9748e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.3174e+01, 1.2600e+01, -6.9230e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 4.7760e+00, 3.5484e+01, -2.3813e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.8960e+01, 9.6364e+00, -7.0663e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, cam_points.tensor, 1e-3) + + new_cam_points = cam_points.clone() + new_cam_points.rotate(0.1, axis=2) + expected_tensor = torch.tensor([[ + 2.6226e+00, 4.0211e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.4316e+01, 1.0224e+01, -6.9230e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 1.2096e+00, 3.5784e+01, -2.3813e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.9777e+01, 6.6971e+00, -7.0663e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, new_cam_points.tensor, 1e-3) + + # test points translation + translation_vector = torch.tensor([0.93629336, -0.27509585, 0.21835066]) + cam_points.translate(translation_vector) + expected_tensor = torch.tensor([[ + 7.5602e+00, 3.9473e+01, -2.1152e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.2237e+01, 1.2325e+01, -6.7046e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 5.7123e+00, 3.5209e+01, -2.1629e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.8023e+01, 9.3613e+00, -6.8480e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, cam_points.tensor, 1e-4) + + # test points filter + point_range = [-10, -40, -10, 10, 40, 10] + in_range_flags = cam_points.in_range_3d(point_range) + expected_flags = torch.tensor([True, False, True, False]) + assert torch.all(in_range_flags == expected_flags) + + # test points scale + cam_points.scale(1.2) + expected_tensor = torch.tensor([[ + 9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.6685e+01, 1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 6.8547e+00, 4.2251e+01, -2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -3.3628e+01, 1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, cam_points.tensor, 1e-3) + + # test get_item + expected_tensor = torch.tensor( + [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297]]) + assert torch.allclose(expected_tensor, cam_points[1].tensor, 1e-4) + expected_tensor = torch.tensor( + [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297], + [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]]) + assert torch.allclose(expected_tensor, cam_points[1:3].tensor, 1e-4) + mask = torch.tensor([True, False, True, False]) + expected_tensor = torch.tensor( + [[9.0722, 47.3678, -2.5382, 0.6666, 0.1956, 0.4974, 0.9409], + [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]]) + 
assert torch.allclose(expected_tensor, cam_points[mask].tensor, 1e-4) + expected_tensor = torch.tensor([[0.6666], [0.1502], [0.6565], [0.2803]]) + assert torch.allclose(expected_tensor, cam_points[:, 3].tensor, 1e-4) + + # test length + assert len(cam_points) == 4 + + # test repr + expected_repr = 'CameraPoints(\n '\ + 'tensor([[ 9.0722e+00, 4.7368e+01, -2.5382e+00, '\ + '6.6660e-01, 1.9560e-01,\n 4.9740e-01, '\ + '9.4090e-01],\n '\ + '[-2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, '\ + '3.7070e-01,\n '\ + '1.0860e-01, 6.2970e-01],\n '\ + '[ 6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, '\ + '6.2480e-01,\n '\ + '6.9540e-01, 2.5380e-01],\n '\ + '[-3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, '\ + '2.5800e-02,\n '\ + '4.8960e-01, 3.2690e-01]]))' + assert expected_repr == str(cam_points) + + # test concatenate + cam_points_clone = cam_points.clone() + cat_points = CameraPoints.cat([cam_points, cam_points_clone]) + assert torch.allclose(cat_points.tensor[:len(cam_points)], + cam_points.tensor) + + # test iteration + for i, point in enumerate(cam_points): + assert torch.allclose(point, cam_points.tensor[i]) + + # test new_point + new_points = cam_points.new_point([[1, 2, 3, 4, 5, 6, 7]]) + assert torch.allclose( + new_points.tensor, + torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=cam_points.tensor.dtype)) + + # test in_range_bev + point_bev_range = [-10, -10, 10, 10] + in_range_flags = cam_points.in_range_bev(point_bev_range) + expected_flags = torch.tensor([True, False, True, False]) + assert torch.all(in_range_flags == expected_flags) + + # test flip + cam_points.flip(bev_direction='horizontal') + expected_tensor = torch.tensor([[ + -9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + 2.6685e+01, 1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -6.8547e+00, 4.2251e+01, -2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + 3.3628e+01, 1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, cam_points.tensor, 1e-4) + + cam_points.flip(bev_direction='vertical') + expected_tensor = torch.tensor([[ + -9.0722e+00, 4.7368e+01, 2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + 2.6685e+01, 1.4790e+01, 8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -6.8547e+00, 4.2251e+01, 2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + 3.3628e+01, 1.1234e+01, 8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, cam_points.tensor, 1e-4) + + +def test_lidar_points(): + # test empty initialization + empty_boxes = [] + points = LiDARPoints(empty_boxes) + assert points.tensor.shape[0] == 0 + assert points.tensor.shape[1] == 3 + + # Test init with origin + points_np = np.array([[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01], + [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01], + [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01], + [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01]], + dtype=np.float32) + lidar_points = LiDARPoints(points_np, points_dim=3) + assert lidar_points.tensor.shape[0] == 4 + + # Test init with color and height + points_np = np.array([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, + 0.1502, 0.3707, 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, + 0.6565, 0.6248, 
0.6954, 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, + 0.2803, 0.0258, 0.4896, 0.3269 + ]], + dtype=np.float32) + lidar_points = LiDARPoints( + points_np, + points_dim=7, + attribute_dims=dict(color=[3, 4, 5], height=6)) + expected_tensor = torch.tensor([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, + -9.14345860e-01, 0.1502, 0.3707, + 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, + 2.00889888e-01, 0.6565, 0.6248, 0.6954, + 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, + -1.94612112e-01, 0.2803, 0.0258, + 0.4896, 0.3269 + ]]) + + assert torch.allclose(expected_tensor, lidar_points.tensor) + assert torch.allclose(expected_tensor[:, :2], lidar_points.bev) + assert torch.allclose(expected_tensor[:, :3], lidar_points.coord) + assert torch.allclose(expected_tensor[:, 3:6], lidar_points.color) + assert torch.allclose(expected_tensor[:, 6], lidar_points.height) + + # test points clone + new_lidar_points = lidar_points.clone() + assert torch.allclose(new_lidar_points.tensor, lidar_points.tensor) + + # test points shuffle + new_lidar_points.shuffle() + assert new_lidar_points.tensor.shape == torch.Size([4, 7]) + + # test points rotation + rot_mat = torch.tensor([[0.93629336, -0.27509585, 0.21835066], + [0.28962948, 0.95642509, -0.03695701], + [-0.19866933, 0.0978434, 0.97517033]]) + lidar_points.rotate(rot_mat) + expected_tensor = torch.tensor([[ + 6.6239e+00, 3.9748e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.3174e+01, 1.2600e+01, -6.9230e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 4.7760e+00, 3.5484e+01, -2.3813e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.8960e+01, 9.6364e+00, -7.0663e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, lidar_points.tensor, 1e-3) + + new_lidar_points = lidar_points.clone() + new_lidar_points.rotate(0.1, axis=2) + expected_tensor = torch.tensor([[ + 2.6226e+00, 4.0211e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.4316e+01, 1.0224e+01, -6.9230e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 1.2096e+00, 3.5784e+01, -2.3813e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.9777e+01, 6.6971e+00, -7.0663e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, new_lidar_points.tensor, 1e-3) + + # test points translation + translation_vector = torch.tensor([0.93629336, -0.27509585, 0.21835066]) + lidar_points.translate(translation_vector) + expected_tensor = torch.tensor([[ + 7.5602e+00, 3.9473e+01, -2.1152e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.2237e+01, 1.2325e+01, -6.7046e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 5.7123e+00, 3.5209e+01, -2.1629e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.8023e+01, 9.3613e+00, -6.8480e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, lidar_points.tensor, 1e-4) + + # test points filter + point_range = [-10, -40, -10, 10, 40, 10] + in_range_flags = lidar_points.in_range_3d(point_range) + expected_flags = torch.tensor([True, False, True, False]) + assert torch.all(in_range_flags == expected_flags) + + # test points scale + lidar_points.scale(1.2) + expected_tensor = torch.tensor([[ + 9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 
1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.6685e+01, 1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 6.8547e+00, 4.2251e+01, -2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -3.3628e+01, 1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, lidar_points.tensor, 1e-3) + + # test get_item + expected_tensor = torch.tensor( + [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297]]) + assert torch.allclose(expected_tensor, lidar_points[1].tensor, 1e-4) + expected_tensor = torch.tensor( + [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297], + [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]]) + assert torch.allclose(expected_tensor, lidar_points[1:3].tensor, 1e-4) + mask = torch.tensor([True, False, True, False]) + expected_tensor = torch.tensor( + [[9.0722, 47.3678, -2.5382, 0.6666, 0.1956, 0.4974, 0.9409], + [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]]) + assert torch.allclose(expected_tensor, lidar_points[mask].tensor, 1e-4) + expected_tensor = torch.tensor([[0.6666], [0.1502], [0.6565], [0.2803]]) + assert torch.allclose(expected_tensor, lidar_points[:, 3].tensor, 1e-4) + + # test length + assert len(lidar_points) == 4 + + # test repr + expected_repr = 'LiDARPoints(\n '\ + 'tensor([[ 9.0722e+00, 4.7368e+01, -2.5382e+00, '\ + '6.6660e-01, 1.9560e-01,\n 4.9740e-01, '\ + '9.4090e-01],\n '\ + '[-2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, '\ + '3.7070e-01,\n '\ + '1.0860e-01, 6.2970e-01],\n '\ + '[ 6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, '\ + '6.2480e-01,\n '\ + '6.9540e-01, 2.5380e-01],\n '\ + '[-3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, '\ + '2.5800e-02,\n '\ + '4.8960e-01, 3.2690e-01]]))' + assert expected_repr == str(lidar_points) + + # test concatenate + lidar_points_clone = lidar_points.clone() + cat_points = LiDARPoints.cat([lidar_points, lidar_points_clone]) + assert torch.allclose(cat_points.tensor[:len(lidar_points)], + lidar_points.tensor) + + # test iteration + for i, point in enumerate(lidar_points): + assert torch.allclose(point, lidar_points.tensor[i]) + + # test new_point + new_points = lidar_points.new_point([[1, 2, 3, 4, 5, 6, 7]]) + assert torch.allclose( + new_points.tensor, + torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=lidar_points.tensor.dtype)) + + # test in_range_bev + point_bev_range = [-30, -40, 30, 40] + in_range_flags = lidar_points.in_range_bev(point_bev_range) + expected_flags = torch.tensor([False, True, False, False]) + assert torch.all(in_range_flags == expected_flags) + + # test flip + lidar_points.flip(bev_direction='horizontal') + expected_tensor = torch.tensor([[ + 9.0722e+00, -4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.6685e+01, -1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 6.8547e+00, -4.2251e+01, -2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -3.3628e+01, -1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, lidar_points.tensor, 1e-4) + + lidar_points.flip(bev_direction='vertical') + expected_tensor = torch.tensor([[ + -9.0722e+00, -4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + 2.6685e+01, -1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -6.8547e+00, -4.2251e+01, -2.5955e+00, + 6.5650e-01, 
6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + 3.3628e+01, -1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, lidar_points.tensor, 1e-4) diff --git a/tests/test_structures/test_points/test_depth_points.py b/tests/test_structures/test_points/test_depth_points.py new file mode 100755 index 0000000..fc5f911 --- /dev/null +++ b/tests/test_structures/test_points/test_depth_points.py @@ -0,0 +1,282 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet3d.structures.points import DepthPoints + + +def test_depth_points(): + # test empty initialization + empty_boxes = [] + points = DepthPoints(empty_boxes) + assert points.tensor.shape[0] == 0 + assert points.tensor.shape[1] == 3 + + # Test init with origin + points_np = np.array([[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01], + [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01], + [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01], + [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01]], + dtype=np.float32) + depth_points = DepthPoints(points_np, points_dim=3) + assert depth_points.tensor.shape[0] == 4 + + # Test init with color and height + points_np = np.array([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, -9.14345860e-01, + 0.1502, 0.3707, 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, 2.00889888e-01, + 0.6565, 0.6248, 0.6954, 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, -1.94612112e-01, + 0.2803, 0.0258, 0.4896, 0.3269 + ]], + dtype=np.float32) + depth_points = DepthPoints( + points_np, + points_dim=7, + attribute_dims=dict(color=[3, 4, 5], height=6)) + expected_tensor = torch.tensor([[ + -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956, + 0.4974, 0.9409 + ], + [ + -2.66751588e+01, 5.59499564e+00, + -9.14345860e-01, 0.1502, 0.3707, + 0.1086, 0.6297 + ], + [ + -5.80979675e+00, 3.54092357e+01, + 2.00889888e-01, 0.6565, 0.6248, 0.6954, + 0.2538 + ], + [ + -3.13086877e+01, 1.09007628e+00, + -1.94612112e-01, 0.2803, 0.0258, + 0.4896, 0.3269 + ]]) + + assert torch.allclose(expected_tensor, depth_points.tensor) + assert torch.allclose(expected_tensor[:, :2], depth_points.bev) + assert torch.allclose(expected_tensor[:, :3], depth_points.coord) + assert torch.allclose(expected_tensor[:, 3:6], depth_points.color) + assert torch.allclose(expected_tensor[:, 6], depth_points.height) + + # test points clone + new_depth_points = depth_points.clone() + assert torch.allclose(new_depth_points.tensor, depth_points.tensor) + + # test points shuffle + new_depth_points.shuffle() + assert new_depth_points.tensor.shape == torch.Size([4, 7]) + + # test points rotation + rot_mat = torch.tensor([[0.93629336, -0.27509585, 0.21835066], + [0.28962948, 0.95642509, -0.03695701], + [-0.19866933, 0.0978434, 0.97517033]]) + depth_points.rotate(rot_mat) + expected_tensor = torch.tensor([[ + 6.6239e+00, 3.9748e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.3174e+01, 1.2600e+01, -6.9230e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 4.7760e+00, 3.5484e+01, -2.3813e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.8960e+01, 9.6364e+00, -7.0663e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, depth_points.tensor, 1e-3) + + new_depth_points = depth_points.clone() + new_depth_points.rotate(0.1, axis=2) + expected_tensor = 
torch.tensor([[ + 2.6226e+00, 4.0211e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.4316e+01, 1.0224e+01, -6.9230e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 1.2096e+00, 3.5784e+01, -2.3813e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.9777e+01, 6.6971e+00, -7.0663e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, new_depth_points.tensor, 1e-3) + + # test points translation + translation_vector = torch.tensor([0.93629336, -0.27509585, 0.21835066]) + depth_points.translate(translation_vector) + expected_tensor = torch.tensor([[ + 7.5602e+00, 3.9473e+01, -2.1152e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.2237e+01, 1.2325e+01, -6.7046e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 5.7123e+00, 3.5209e+01, -2.1629e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -2.8023e+01, 9.3613e+00, -6.8480e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4) + + # test points filter + point_range = [-10, -40, -10, 10, 40, 10] + in_range_flags = depth_points.in_range_3d(point_range) + expected_flags = torch.tensor([True, False, True, False]) + assert torch.all(in_range_flags == expected_flags) + + # test points scale + depth_points.scale(1.2) + expected_tensor = torch.tensor([[ + 9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + -2.6685e+01, 1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + 6.8547e+00, 4.2251e+01, -2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + -3.3628e+01, 1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, depth_points.tensor, 1e-3) + + # test get_item + expected_tensor = torch.tensor( + [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297]]) + assert torch.allclose(expected_tensor, depth_points[1].tensor, 1e-4) + expected_tensor = torch.tensor( + [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297], + [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]]) + assert torch.allclose(expected_tensor, depth_points[1:3].tensor, 1e-4) + mask = torch.tensor([True, False, True, False]) + expected_tensor = torch.tensor( + [[9.0722, 47.3678, -2.5382, 0.6666, 0.1956, 0.4974, 0.9409], + [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]]) + assert torch.allclose(expected_tensor, depth_points[mask].tensor, 1e-4) + expected_tensor = torch.tensor([[0.6666], [0.1502], [0.6565], [0.2803]]) + assert torch.allclose(expected_tensor, depth_points[:, 3].tensor, 1e-4) + + # test length + assert len(depth_points) == 4 + + # test repr + expected_repr = 'DepthPoints(\n '\ + 'tensor([[ 9.0722e+00, 4.7368e+01, -2.5382e+00, '\ + '6.6660e-01, 1.9560e-01,\n 4.9740e-01, '\ + '9.4090e-01],\n '\ + '[-2.6685e+01, 1.4790e+01, -8.0455e+00, 1.5020e-01, '\ + '3.7070e-01,\n '\ + '1.0860e-01, 6.2970e-01],\n '\ + '[ 6.8547e+00, 4.2251e+01, -2.5955e+00, 6.5650e-01, '\ + '6.2480e-01,\n '\ + '6.9540e-01, 2.5380e-01],\n '\ + '[-3.3628e+01, 1.1234e+01, -8.2176e+00, 2.8030e-01, '\ + '2.5800e-02,\n '\ + '4.8960e-01, 3.2690e-01]]))' + assert expected_repr == str(depth_points) + + # test concatenate + depth_points_clone = depth_points.clone() + cat_points = DepthPoints.cat([depth_points, depth_points_clone]) + assert 
torch.allclose(cat_points.tensor[:len(depth_points)], + depth_points.tensor) + + # test iteration + for i, point in enumerate(depth_points): + assert torch.allclose(point, depth_points.tensor[i]) + + # test new_point + new_points = depth_points.new_point([[1, 2, 3, 4, 5, 6, 7]]) + assert torch.allclose( + new_points.tensor, + torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=depth_points.tensor.dtype)) + + # test in_range_bev + point_bev_range = [-30, -40, 30, 40] + in_range_flags = depth_points.in_range_bev(point_bev_range) + expected_flags = torch.tensor([False, True, False, False]) + assert torch.all(in_range_flags == expected_flags) + + # test flip + depth_points.flip(bev_direction='horizontal') + expected_tensor = torch.tensor([[ + -9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + 2.6685e+01, 1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -6.8547e+00, 4.2251e+01, -2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + 3.3628e+01, 1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4) + + depth_points.flip(bev_direction='vertical') + expected_tensor = torch.tensor([[ + -9.0722e+00, -4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01, + 4.9740e-01, 9.4090e-01 + ], + [ + 2.6685e+01, -1.4790e+01, -8.0455e+00, + 1.5020e-01, 3.7070e-01, 1.0860e-01, + 6.2970e-01 + ], + [ + -6.8547e+00, -4.2251e+01, -2.5955e+00, + 6.5650e-01, 6.2480e-01, 6.9540e-01, + 2.5380e-01 + ], + [ + 3.3628e+01, -1.1234e+01, -8.2176e+00, + 2.8030e-01, 2.5800e-02, 4.8960e-01, + 3.2690e-01 + ]]) + assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4) diff --git a/tests/test_utils/test_compat_cfg.py b/tests/test_utils/test_compat_cfg.py new file mode 100755 index 0000000..4672f33 --- /dev/null +++ b/tests/test_utils/test_compat_cfg.py @@ -0,0 +1,113 @@ +import pytest +from mmengine import ConfigDict + +from mmdet3d.utils.compat_cfg import (compat_imgs_per_gpu, compat_loader_args, + compat_runner_args) + + +def test_compat_runner_args(): + cfg = ConfigDict(dict(total_epochs=12)) + with pytest.warns(None) as record: + cfg = compat_runner_args(cfg) + assert len(record) == 1 + assert 'runner' in record.list[0].message.args[0] + assert 'runner' in cfg + assert cfg.runner.type == 'EpochBasedRunner' + assert cfg.runner.max_epochs == cfg.total_epochs + + +def test_compat_loader_args(): + cfg = ConfigDict(dict(data=dict(val=dict(), test=dict(), train=dict()))) + cfg = compat_loader_args(cfg) + # auto fill loader args + assert 'val_dataloader' in cfg.data + assert 'train_dataloader' in cfg.data + assert 'test_dataloader' in cfg.data + cfg = ConfigDict( + dict( + data=dict( + samples_per_gpu=1, + persistent_workers=True, + workers_per_gpu=1, + val=dict(samples_per_gpu=3), + test=dict(samples_per_gpu=2), + train=dict()))) + cfg = compat_loader_args(cfg) + + assert cfg.data.train_dataloader.workers_per_gpu == 1 + assert cfg.data.train_dataloader.samples_per_gpu == 1 + assert cfg.data.train_dataloader.persistent_workers + assert cfg.data.val_dataloader.workers_per_gpu == 1 + assert cfg.data.val_dataloader.samples_per_gpu == 3 + assert cfg.data.test_dataloader.workers_per_gpu == 1 + assert cfg.data.test_dataloader.samples_per_gpu == 2 + + # test test is a list + cfg = ConfigDict( + dict( + data=dict( + samples_per_gpu=1, + persistent_workers=True, + workers_per_gpu=1, + val=dict(samples_per_gpu=3), + test=[dict(samples_per_gpu=2), + 
dict(samples_per_gpu=3)], + train=dict()))) + + cfg = compat_loader_args(cfg) + + # assert can not set args at the same time + cfg = ConfigDict( + dict( + data=dict( + samples_per_gpu=1, + persistent_workers=True, + workers_per_gpu=1, + val=dict(samples_per_gpu=3), + test=dict(samples_per_gpu=2), + train=dict(), + train_dataloader=dict(samples_per_gpu=2)))) + # samples_per_gpu can not be set in `train_dataloader` + # and data field at the same time + with pytest.raises(AssertionError): + compat_loader_args(cfg) + cfg = ConfigDict( + dict( + data=dict( + samples_per_gpu=1, + persistent_workers=True, + workers_per_gpu=1, + val=dict(samples_per_gpu=3), + test=dict(samples_per_gpu=2), + train=dict(), + val_dataloader=dict(samples_per_gpu=2)))) + # samples_per_gpu can not be set in `val_dataloader` + # and data field at the same time + with pytest.raises(AssertionError): + compat_loader_args(cfg) + cfg = ConfigDict( + dict( + data=dict( + samples_per_gpu=1, + persistent_workers=True, + workers_per_gpu=1, + val=dict(samples_per_gpu=3), + test=dict(samples_per_gpu=2), + test_dataloader=dict(samples_per_gpu=2)))) + # samples_per_gpu can not be set in `test_dataloader` + # and data field at the same time + with pytest.raises(AssertionError): + compat_loader_args(cfg) + + +def test_compat_imgs_per_gpu(): + cfg = ConfigDict( + dict( + data=dict( + imgs_per_gpu=1, + samples_per_gpu=2, + val=dict(), + test=dict(), + train=dict()))) + cfg = compat_imgs_per_gpu(cfg) + assert cfg.data.samples_per_gpu == cfg.data.imgs_per_gpu diff --git a/tests/test_utils/test_setup_env.py b/tests/test_utils/test_setup_env.py new file mode 100755 index 0000000..c7374b7 --- /dev/null +++ b/tests/test_utils/test_setup_env.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
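+# A minimal usage sketch of the registration flow that the first test below
+# verifies (names come from the imports in this file; treat it as illustrative):
+#
+#     from mmdet3d.registry import DATASETS
+#     from mmdet3d.utils import register_all_modules
+#
+#     register_all_modules(init_default_scope=True)
+#     kitti_dataset_cls = DATASETS.get('KittiDataset')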
+import multiprocessing as mp
+import os
+import platform
+import sys
+
+import cv2
+from mmengine import Config, DefaultScope
+
+from mmdet3d.utils import register_all_modules, setup_multi_processes
+
+
+def test_register_all_modules():
+    from mmdet3d.registry import DATASETS
+
+    sys.modules.pop('mmdet3d.datasets', None)
+    sys.modules.pop('mmdet3d.datasets.kitti_dataset', None)
+    DATASETS._module_dict.pop('KittiDataset', None)
+    assert 'KittiDataset' not in DATASETS.module_dict
+    register_all_modules(init_default_scope=True)
+    assert 'KittiDataset' in DATASETS.module_dict
+    assert DefaultScope.get_current_instance().scope_name == 'mmdet3d'
+
+
+def test_setup_multi_processes():
+    # temporarily save the system settings
+    sys_start_method = mp.get_start_method(allow_none=True)
+    sys_cv_threads = cv2.getNumThreads()
+    # pop and temporarily save the system env vars
+    sys_omp_threads = os.environ.pop('OMP_NUM_THREADS', default=None)
+    sys_mkl_threads = os.environ.pop('MKL_NUM_THREADS', default=None)
+
+    # test config without setting env
+    config = dict(data=dict(workers_per_gpu=2))
+    cfg = Config(config)
+    setup_multi_processes(cfg)
+    assert os.getenv('OMP_NUM_THREADS') == '1'
+    assert os.getenv('MKL_NUM_THREADS') == '1'
+    # when set to 0, the num threads will be 1
+    assert cv2.getNumThreads() == 1
+    if platform.system() != 'Windows':
+        assert mp.get_start_method() == 'fork'
+
+    # test num workers <= 1
+    os.environ.pop('OMP_NUM_THREADS')
+    os.environ.pop('MKL_NUM_THREADS')
+    config = dict(data=dict(workers_per_gpu=0))
+    cfg = Config(config)
+    setup_multi_processes(cfg)
+    assert 'OMP_NUM_THREADS' not in os.environ
+    assert 'MKL_NUM_THREADS' not in os.environ
+
+    # test manually set env var
+    os.environ['OMP_NUM_THREADS'] = '4'
+    config = dict(data=dict(workers_per_gpu=2))
+    cfg = Config(config)
+    setup_multi_processes(cfg)
+    assert os.getenv('OMP_NUM_THREADS') == '4'
+
+    # test manually set opencv threads and mp start method
+    config = dict(
+        data=dict(workers_per_gpu=2),
+        opencv_num_threads=4,
+        mp_start_method='spawn')
+    cfg = Config(config)
+    setup_multi_processes(cfg)
+    assert cv2.getNumThreads() == 4
+    assert mp.get_start_method() == 'spawn'
+
+    # revert settings to avoid affecting other programs
+    if sys_start_method:
+        mp.set_start_method(sys_start_method, force=True)
+    cv2.setNumThreads(sys_cv_threads)
+    if sys_omp_threads:
+        os.environ['OMP_NUM_THREADS'] = sys_omp_threads
+    else:
+        os.environ.pop('OMP_NUM_THREADS')
+    if sys_mkl_threads:
+        os.environ['MKL_NUM_THREADS'] = sys_mkl_threads
+    else:
+        os.environ.pop('MKL_NUM_THREADS')
diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py
new file mode 100755
index 0000000..cb7429b
--- /dev/null
+++ b/tools/analysis_tools/analyze_logs.py
@@ -0,0 +1,209 @@
+# Copyright (c) OpenMMLab. All rights reserved.
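+# The helpers below parse training logs that contain one JSON object per line.
+# An illustrative record (exact keys depend on the config and logging hooks)
+# might look like:
+#
+#     {"lr": 0.001, "time": 0.21, "loss": 1.73, "step": 50, "epoch": 1}
+#
+# Keys containing '/' (typically evaluation metrics) are collected under their
+# suffix for each epoch.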
+import argparse +import json +from collections import defaultdict + +import numpy as np +import seaborn as sns +from matplotlib import pyplot as plt + + +def cal_train_time(log_dicts, args): + for i, log_dict in enumerate(log_dicts): + print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') + all_times = [] + for epoch in log_dict.keys(): + if args.include_outliers: + all_times.append(log_dict[epoch]['time']) + else: + all_times.append(log_dict[epoch]['time'][1:]) + if not all_times: + raise KeyError( + 'Please reduce the log interval in the config so that ' + 'interval is less than iterations of one epoch.') + epoch_ave_time = np.array(list(map(lambda x: np.mean(x), all_times))) + slowest_epoch = epoch_ave_time.argmax() + fastest_epoch = epoch_ave_time.argmin() + std_over_epoch = epoch_ave_time.std() + print(f'slowest epoch {slowest_epoch + 1}, ' + f'average time is {epoch_ave_time[slowest_epoch]:.4f} s/iter') + print(f'fastest epoch {fastest_epoch + 1}, ' + f'average time is {epoch_ave_time[fastest_epoch]:.4f} s/iter') + print(f'time std over epochs is {std_over_epoch:.4f}') + print(f'average iter time: {np.mean(epoch_ave_time):.4f} s/iter\n') + + +def plot_curve(log_dicts, args): + if args.backend is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append(f'{json_log}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + if metric not in log_dict[epochs[int(args.eval_interval) - 1]]: + if args.eval: + raise KeyError( + f'{args.json_logs[i]} does not contain metric ' + f'{metric}. Please check if "--no-validate" is ' + 'specified when you trained the model. Or check ' + f'if the eval_interval {args.eval_interval} in args ' + 'is equal to the `eval_interval` during training.') + raise KeyError( + f'{args.json_logs[i]} does not contain metric {metric}. 
' + 'Please reduce the log interval in the config so that ' + 'interval is less than iterations of one epoch.') + + if args.eval: + xs = [] + ys = [] + for epoch in epochs: + ys += log_dict[epoch][metric] + if log_dict[epoch][metric]: + xs += [epoch] + plt.xlabel('epoch') + plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') + else: + xs = [] + ys = [] + for epoch in epochs: + iters = log_dict[epoch]['step'] + xs.append(np.array(iters)) + ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('iter') + plt.plot( + xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['mAP_0.25'], + help='the metric that you want to plot') + parser_plt.add_argument( + '--eval', + action='store_true', + help='whether to plot evaluation metric') + parser_plt.add_argument( + '--eval-interval', + type=str, + default='1', + help='the eval interval when training') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. 
memory, bbox_mAP + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + epoch = 1 + for i, line in enumerate(log_file): + log = json.loads(line.strip()) + val_flag = False + # skip lines only contains one key + if not len(log) > 1: + continue + + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + + for k, v in log.items(): + if '/' in k: + log_dict[epoch][k.split('/')[-1]].append(v) + val_flag = True + elif val_flag: + continue + else: + log_dict[epoch][k].append(v) + + if 'epoch' in log.keys(): + epoch = log['epoch'] + + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/benchmark.py b/tools/analysis_tools/benchmark.py new file mode 100755 index 0000000..5c2153f --- /dev/null +++ b/tools/analysis_tools/benchmark.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import time + +import torch +from mmcv import Config +from mmcv.parallel import MMDataParallel +from mmengine.runner import load_checkpoint + +from mmdet3d.registry import DATASETS, MODELS +from tools.misc.fuse_conv_bn import fuse_module + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMDet benchmark a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--samples', default=2000, help='samples to benchmark') + parser.add_argument( + '--log-interval', default=50, help='interval of logging') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + # build the dataloader + # TODO: support multiple images per gpu (only minor changes are needed) + dataset = DATASETS.build(cfg.data.test) + + # TODO fix this + def build_dataloader(): + pass + + data_loader = build_dataloader( + dataset, + samples_per_gpu=1, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=False, + shuffle=False) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = MODELS.build(cfg.model, test_cfg=cfg.get('test_cfg')) + load_checkpoint(model, args.checkpoint, map_location='cpu') + if args.fuse_conv_bn: + model = fuse_module(model) + + model = MMDataParallel(model, device_ids=[0]) + + model.eval() + + # the first several iterations may be very slow so skip them + num_warmup = 5 + pure_inf_time = 0 + + # benchmark with several samples and take the average + for i, data in enumerate(data_loader): + + torch.cuda.synchronize() + start_time = time.perf_counter() + + with torch.no_grad(): + model(return_loss=False, rescale=True, **data) + + torch.cuda.synchronize() + elapsed = time.perf_counter() - start_time + + if i >= num_warmup: + pure_inf_time += elapsed + if (i + 1) % args.log_interval == 0: + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Done image [{i + 1:<3}/ 
{args.samples}], ' + f'fps: {fps:.1f} img / s') + + if (i + 1) == args.samples: + pure_inf_time += elapsed + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Overall fps: {fps:.1f} img / s') + break + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/get_flops.py b/tools/analysis_tools/get_flops.py new file mode 100755 index 0000000..19b524d --- /dev/null +++ b/tools/analysis_tools/get_flops.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import torch +from mmengine import Config, DictAction +from mmengine.registry import init_default_scope + +from mmdet3d.registry import MODELS + +try: + from mmcv.cnn import get_model_complexity_info +except ImportError: + raise ImportError('Please upgrade mmcv to >0.6.2') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[40000, 4], + help='input point cloud size') + parser.add_argument( + '--modality', + type=str, + default='point', + choices=['point', 'image', 'multi'], + help='input data modality') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + if args.modality == 'point': + assert len(args.shape) == 2, 'invalid input shape' + input_shape = tuple(args.shape) + elif args.modality == 'image': + if len(args.shape) == 1: + input_shape = (3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = (3, ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + elif args.modality == 'multi': + raise NotImplementedError( + 'FLOPs counter is currently not supported for models with ' + 'multi-modality input') + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + init_default_scope(cfg.get('default_scope', 'mmdet3d')) + + model = MODELS.build(cfg.model) + if torch.cuda.is_available(): + model.cuda() + model.eval() + + flops, params = get_model_complexity_info(model, input_shape) + split_line = '=' * 30 + print(f'{split_line}\nInput shape: {input_shape}\n' + f'Flops: {flops}\nParams: {params}\n{split_line}') + print('!!!Please be cautious if you use the results in papers. ' + 'You may need to check if all ops are supported and verify that the ' + 'flops computation is correct.') + + +if __name__ == '__main__': + main() diff --git a/tools/create_data.py b/tools/create_data.py new file mode 100755 index 0000000..05fd98c --- /dev/null +++ b/tools/create_data.py @@ -0,0 +1,356 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
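+# Example invocation (paths are placeholders; all options are defined by the
+# argument parser at the bottom of this script):
+#
+#     python tools/create_data.py kitti --root-path ./data/kitti \
+#         --out-dir ./data/kitti --extra-tag kitti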
+import argparse +from os import path as osp + +from tools.dataset_converters import indoor_converter as indoor +from tools.dataset_converters import kitti_converter as kitti +from tools.dataset_converters import lyft_converter as lyft_converter +from tools.dataset_converters import nuscenes_converter as nuscenes_converter +from tools.dataset_converters import semantickitti_converter +from tools.dataset_converters.create_gt_database import ( + GTDatabaseCreater, create_groundtruth_database) +from tools.dataset_converters.update_infos_to_v2 import update_pkl_infos + + +def kitti_data_prep(root_path, + info_prefix, + version, + out_dir, + with_plane=False): + """Prepare data related to Kitti dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + out_dir (str): Output directory of the groundtruth database info. + with_plane (bool, optional): Whether to use plane information. + Default: False. + """ + kitti.create_kitti_info_file(root_path, info_prefix, with_plane) + kitti.create_reduced_point_cloud(root_path, info_prefix) + + info_train_path = osp.join(out_dir, f'{info_prefix}_infos_train.pkl') + info_val_path = osp.join(out_dir, f'{info_prefix}_infos_val.pkl') + info_trainval_path = osp.join(out_dir, f'{info_prefix}_infos_trainval.pkl') + info_test_path = osp.join(out_dir, f'{info_prefix}_infos_test.pkl') + update_pkl_infos('kitti', out_dir=out_dir, pkl_path=info_train_path) + update_pkl_infos('kitti', out_dir=out_dir, pkl_path=info_val_path) + update_pkl_infos('kitti', out_dir=out_dir, pkl_path=info_trainval_path) + update_pkl_infos('kitti', out_dir=out_dir, pkl_path=info_test_path) + create_groundtruth_database( + 'KittiDataset', + root_path, + info_prefix, + f'{info_prefix}_infos_train.pkl', + relative_path=False, + mask_anno_path='instances_train.json', + with_mask=(version == 'mask')) + + +def nuscenes_data_prep(root_path, + info_prefix, + version, + dataset_name, + out_dir, + max_sweeps=10): + """Prepare data related to nuScenes dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + dataset_name (str): The dataset class name. + out_dir (str): Output directory of the groundtruth database info. + max_sweeps (int, optional): Number of input consecutive frames. + Default: 10 + """ + nuscenes_converter.create_nuscenes_infos( + root_path, info_prefix, version=version, max_sweeps=max_sweeps) + + if version == 'v1.0-test': + info_test_path = osp.join(out_dir, f'{info_prefix}_infos_test.pkl') + update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_test_path) + return + + info_train_path = osp.join(out_dir, f'{info_prefix}_infos_train.pkl') + info_val_path = osp.join(out_dir, f'{info_prefix}_infos_val.pkl') + update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_train_path) + update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_val_path) + create_groundtruth_database(dataset_name, root_path, info_prefix, + f'{info_prefix}_infos_train.pkl') + + +def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10): + """Prepare data related to Lyft dataset. + + Related data consists of '.pkl' files recording basic infos. 
+    Although the ground truth database and 2D annotations are not used in
+    Lyft, they can also be generated like nuScenes.
+
+    Args:
+        root_path (str): Path of dataset root.
+        info_prefix (str): The prefix of info filenames.
+        version (str): Dataset version.
+        max_sweeps (int, optional): Number of input consecutive frames.
+            Defaults to 10.
+    """
+    lyft_converter.create_lyft_infos(
+        root_path, info_prefix, version=version, max_sweeps=max_sweeps)
+    if version == 'v1.01-test':
+        info_test_path = osp.join(root_path, f'{info_prefix}_infos_test.pkl')
+        update_pkl_infos('lyft', out_dir=root_path, pkl_path=info_test_path)
+    elif version == 'v1.01-train':
+        info_train_path = osp.join(root_path, f'{info_prefix}_infos_train.pkl')
+        info_val_path = osp.join(root_path, f'{info_prefix}_infos_val.pkl')
+        update_pkl_infos('lyft', out_dir=root_path, pkl_path=info_train_path)
+        update_pkl_infos('lyft', out_dir=root_path, pkl_path=info_val_path)
+
+
+def scannet_data_prep(root_path, info_prefix, out_dir, workers):
+    """Prepare the info file for scannet dataset.
+
+    Args:
+        root_path (str): Path of dataset root.
+        info_prefix (str): The prefix of info filenames.
+        out_dir (str): Output directory of the generated info file.
+        workers (int): Number of threads to be used.
+    """
+    indoor.create_indoor_info_file(
+        root_path, info_prefix, out_dir, workers=workers)
+    info_train_path = osp.join(out_dir, f'{info_prefix}_infos_train.pkl')
+    info_val_path = osp.join(out_dir, f'{info_prefix}_infos_val.pkl')
+    info_test_path = osp.join(out_dir, f'{info_prefix}_infos_test.pkl')
+    update_pkl_infos('scannet', out_dir=out_dir, pkl_path=info_train_path)
+    update_pkl_infos('scannet', out_dir=out_dir, pkl_path=info_val_path)
+    update_pkl_infos('scannet', out_dir=out_dir, pkl_path=info_test_path)
+
+
+def s3dis_data_prep(root_path, info_prefix, out_dir, workers):
+    """Prepare the info file for s3dis dataset.
+
+    Args:
+        root_path (str): Path of dataset root.
+        info_prefix (str): The prefix of info filenames.
+        out_dir (str): Output directory of the generated info file.
+        workers (int): Number of threads to be used.
+    """
+    indoor.create_indoor_info_file(
+        root_path, info_prefix, out_dir, workers=workers)
+    splits = [f'Area_{i}' for i in [1, 2, 3, 4, 5, 6]]
+    for split in splits:
+        filename = osp.join(out_dir, f'{info_prefix}_infos_{split}.pkl')
+        update_pkl_infos('s3dis', out_dir=out_dir, pkl_path=filename)
+
+
+def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers):
+    """Prepare the info file for sunrgbd dataset.
+
+    Args:
+        root_path (str): Path of dataset root.
+        info_prefix (str): The prefix of info filenames.
+        out_dir (str): Output directory of the generated info file.
+        workers (int): Number of threads to be used.
+    """
+    indoor.create_indoor_info_file(
+        root_path, info_prefix, out_dir, workers=workers)
+    info_train_path = osp.join(out_dir, f'{info_prefix}_infos_train.pkl')
+    info_val_path = osp.join(out_dir, f'{info_prefix}_infos_val.pkl')
+    update_pkl_infos('sunrgbd', out_dir=out_dir, pkl_path=info_train_path)
+    update_pkl_infos('sunrgbd', out_dir=out_dir, pkl_path=info_val_path)
+
+
+def waymo_data_prep(root_path,
+                    info_prefix,
+                    version,
+                    out_dir,
+                    workers,
+                    max_sweeps=5):
+    """Prepare the info file for waymo dataset.
+
+    Args:
+        root_path (str): Path of dataset root.
+        info_prefix (str): The prefix of info filenames.
+        out_dir (str): Output directory of the generated info file.
+        workers (int): Number of threads to be used.
+        max_sweeps (int, optional): Number of input consecutive frames.
+            Default: 5.
Here we store pose information of these frames + for later use. + """ + from tools.dataset_converters import waymo_converter as waymo + + splits = [ + 'training', 'validation', 'testing', 'testing_3d_camera_only_detection' + ] + for i, split in enumerate(splits): + load_dir = osp.join(root_path, 'waymo_format', split) + if split == 'validation': + save_dir = osp.join(out_dir, 'kitti_format', 'training') + else: + save_dir = osp.join(out_dir, 'kitti_format', split) + converter = waymo.Waymo2KITTI( + load_dir, + save_dir, + prefix=str(i), + workers=workers, + test_mode=(split + in ['testing', 'testing_3d_camera_only_detection'])) + converter.convert() + + from tools.dataset_converters.waymo_converter import \ + create_ImageSets_img_ids + create_ImageSets_img_ids(osp.join(out_dir, 'kitti_format'), splits) + # Generate waymo infos + out_dir = osp.join(out_dir, 'kitti_format') + kitti.create_waymo_info_file( + out_dir, info_prefix, max_sweeps=max_sweeps, workers=workers) + info_train_path = osp.join(out_dir, f'{info_prefix}_infos_train.pkl') + info_val_path = osp.join(out_dir, f'{info_prefix}_infos_val.pkl') + info_trainval_path = osp.join(out_dir, f'{info_prefix}_infos_trainval.pkl') + info_test_path = osp.join(out_dir, f'{info_prefix}_infos_test.pkl') + update_pkl_infos('waymo', out_dir=out_dir, pkl_path=info_train_path) + update_pkl_infos('waymo', out_dir=out_dir, pkl_path=info_val_path) + update_pkl_infos('waymo', out_dir=out_dir, pkl_path=info_trainval_path) + update_pkl_infos('waymo', out_dir=out_dir, pkl_path=info_test_path) + GTDatabaseCreater( + 'WaymoDataset', + out_dir, + info_prefix, + f'{info_prefix}_infos_train.pkl', + relative_path=False, + with_mask=False, + num_worker=workers).create() + + +def semantickitti_data_prep(info_prefix, out_dir): + """Prepare the info file for SemanticKITTI dataset. + + Args: + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. 
+ """ + semantickitti_converter.create_semantickitti_info_file( + info_prefix, out_dir) + + +parser = argparse.ArgumentParser(description='Data converter arg parser') +parser.add_argument('dataset', metavar='kitti', help='name of the dataset') +parser.add_argument( + '--root-path', + type=str, + default='./data/kitti', + help='specify the root path of dataset') +parser.add_argument( + '--version', + type=str, + default='v1.0', + required=False, + help='specify the dataset version, no need for kitti') +parser.add_argument( + '--max-sweeps', + type=int, + default=10, + required=False, + help='specify sweeps of lidar per example') +parser.add_argument( + '--with-plane', + action='store_true', + help='Whether to use plane information for kitti.') +parser.add_argument( + '--out-dir', + type=str, + default='./data/kitti', + required=False, + help='name of info pkl') +parser.add_argument('--extra-tag', type=str, default='kitti') +parser.add_argument( + '--workers', type=int, default=4, help='number of threads to be used') +args = parser.parse_args() + +if __name__ == '__main__': + from mmdet3d.utils import register_all_modules + register_all_modules() + + if args.dataset == 'kitti': + kitti_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=args.version, + out_dir=args.out_dir, + with_plane=args.with_plane) + elif args.dataset == 'nuscenes' and args.version != 'v1.0-mini': + train_version = f'{args.version}-trainval' + nuscenes_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + test_version = f'{args.version}-test' + nuscenes_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=test_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini': + train_version = f'{args.version}' + nuscenes_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + elif args.dataset == 'lyft': + train_version = f'{args.version}-train' + lyft_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=train_version, + max_sweeps=args.max_sweeps) + test_version = f'{args.version}-test' + lyft_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=test_version, + max_sweeps=args.max_sweeps) + elif args.dataset == 'waymo': + waymo_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=args.version, + out_dir=args.out_dir, + workers=args.workers, + max_sweeps=args.max_sweeps) + elif args.dataset == 'scannet': + scannet_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) + elif args.dataset == 's3dis': + s3dis_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) + elif args.dataset == 'sunrgbd': + sunrgbd_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) + elif args.dataset == 'semantickitti': + semantickitti_data_prep( + info_prefix=args.extra_tag, out_dir=args.out_dir) + else: + raise NotImplementedError(f'Don\'t support {args.dataset} dataset.') diff --git a/tools/create_data.sh b/tools/create_data.sh new file mode 100755 index 0000000..9a57852 --- 
/dev/null +++ b/tools/create_data.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x +export PYTHONPATH=`pwd`:$PYTHONPATH + +PARTITION=$1 +JOB_NAME=$2 +DATASET=$3 +GPUS=${GPUS:-1} +GPUS_PER_NODE=${GPUS_PER_NODE:-1} +SRUN_ARGS=${SRUN_ARGS:-""} +JOB_NAME=create_data + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/create_data.py ${DATASET} \ + --root-path ./data/${DATASET} \ + --out-dir ./data/${DATASET} \ + --extra-tag ${DATASET} diff --git a/tools/dataset_converters/create_gt_database.py b/tools/dataset_converters/create_gt_database.py new file mode 100755 index 0000000..e007395 --- /dev/null +++ b/tools/dataset_converters/create_gt_database.py @@ -0,0 +1,636 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pickle +from os import path as osp + +import mmcv +import mmengine +import numpy as np +from mmcv.ops import roi_align +from mmdet.evaluation import bbox_overlaps +from mmengine import track_iter_progress +from pycocotools import mask as maskUtils +from pycocotools.coco import COCO + +from mmdet3d.registry import DATASETS +from mmdet3d.structures.ops import box_np_ops as box_np_ops + + +def _poly2mask(mask_ann, img_h, img_w): + if isinstance(mask_ann, list): + # polygon -- a single object might consist of multiple parts + # we merge all parts into one mask rle code + rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) + rle = maskUtils.merge(rles) + elif isinstance(mask_ann['counts'], list): + # uncompressed RLE + rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) + else: + # rle + rle = mask_ann + mask = maskUtils.decode(rle) + return mask + + +def _parse_coco_ann_info(ann_info): + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + gt_masks_ann = [] + + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + if ann['area'] <= 0: + continue + bbox = [x1, y1, x1 + w, y1 + h] + if ann.get('iscrowd', False): + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_masks_ann.append(ann['segmentation']) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + ann = dict( + bboxes=gt_bboxes, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann) + + return ann + + +def crop_image_patch_v2(pos_proposals, pos_assigned_gt_inds, gt_masks): + import torch + from torch.nn.modules.utils import _pair + device = pos_proposals.device + num_pos = pos_proposals.size(0) + fake_inds = ( + torch.arange(num_pos, + device=device).to(dtype=pos_proposals.dtype)[:, None]) + rois = torch.cat([fake_inds, pos_proposals], dim=1) # Nx5 + mask_size = _pair(28) + rois = rois.to(device=device) + gt_masks_th = ( + torch.from_numpy(gt_masks).to(device).index_select( + 0, pos_assigned_gt_inds).to(dtype=rois.dtype)) + # Use RoIAlign could apparently accelerate the training (~0.1s/iter) + targets = ( + roi_align(gt_masks_th, rois, mask_size[::-1], 1.0, 0, True).squeeze(1)) + return targets + + +def crop_image_patch(pos_proposals, gt_masks, pos_assigned_gt_inds, org_img): + num_pos = pos_proposals.shape[0] + masks = [] + img_patches = [] + for i in range(num_pos): + gt_mask = 
gt_masks[pos_assigned_gt_inds[i]] + bbox = pos_proposals[i, :].astype(np.int32) + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1 + 1, 1) + h = np.maximum(y2 - y1 + 1, 1) + + mask_patch = gt_mask[y1:y1 + h, x1:x1 + w] + masked_img = gt_mask[..., None] * org_img + img_patch = masked_img[y1:y1 + h, x1:x1 + w] + + img_patches.append(img_patch) + masks.append(mask_patch) + return img_patches, masks + + +def create_groundtruth_database(dataset_class_name, + data_path, + info_prefix, + info_path=None, + mask_anno_path=None, + used_classes=None, + database_save_path=None, + db_info_save_path=None, + relative_path=True, + add_rgb=False, + lidar_only=False, + bev_only=False, + coors_range=None, + with_mask=False): + """Given the raw data, generate the ground truth database. + + Args: + dataset_class_name (str): Name of the input dataset. + data_path (str): Path of the data. + info_prefix (str): Prefix of the info file. + info_path (str, optional): Path of the info file. + Default: None. + mask_anno_path (str, optional): Path of the mask_anno. + Default: None. + used_classes (list[str], optional): Classes have been used. + Default: None. + database_save_path (str, optional): Path to save database. + Default: None. + db_info_save_path (str, optional): Path to save db_info. + Default: None. + relative_path (bool, optional): Whether to use relative path. + Default: True. + with_mask (bool, optional): Whether to use mask. + Default: False. + """ + print(f'Create GT Database of {dataset_class_name}') + dataset_cfg = dict( + type=dataset_class_name, data_root=data_path, ann_file=info_path) + if dataset_class_name == 'KittiDataset': + backend_args = None + dataset_cfg.update( + modality=dict( + use_lidar=True, + use_camera=with_mask, + ), + data_prefix=dict( + pts='training/velodyne_reduced', img='training/image_2'), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + backend_args=backend_args) + ]) + + elif dataset_class_name == 'NuScenesDataset': + dataset_cfg.update( + use_valid_flag=True, + data_prefix=dict( + pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP'), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True) + ]) + + elif dataset_class_name == 'WaymoDataset': + backend_args = None + dataset_cfg.update( + test_mode=False, + data_prefix=dict( + pts='training/velodyne', img='', sweeps='training/velodyne'), + modality=dict( + use_lidar=True, + use_depth=False, + use_lidar_intensity=True, + use_camera=False, + ), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=6, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + backend_args=backend_args) + ]) + + dataset = DATASETS.build(dataset_cfg) + + if database_save_path is None: + database_save_path = osp.join(data_path, f'{info_prefix}_gt_database') + if db_info_save_path is None: + db_info_save_path = osp.join(data_path, + f'{info_prefix}_dbinfos_train.pkl') + mmengine.mkdir_or_exist(database_save_path) + all_db_infos = dict() + if with_mask: + coco = COCO(osp.join(data_path, mask_anno_path)) + imgIds = coco.getImgIds() + file2id = 
dict() + for i in imgIds: + info = coco.loadImgs([i])[0] + file2id.update({info['file_name']: i}) + + group_counter = 0 + for j in track_iter_progress(list(range(len(dataset)))): + data_info = dataset.get_data_info(j) + example = dataset.pipeline(data_info) + annos = example['ann_info'] + image_idx = example['sample_idx'] + points = example['points'].tensor.numpy() + gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy() + names = [dataset.metainfo['classes'][i] for i in annos['gt_labels_3d']] + group_dict = dict() + if 'group_ids' in annos: + group_ids = annos['group_ids'] + else: + group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64) + difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32) + if 'difficulty' in annos: + difficulty = annos['difficulty'] + + num_obj = gt_boxes_3d.shape[0] + point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d) + + if with_mask: + # prepare masks + gt_boxes = annos['gt_bboxes'] + img_path = osp.split(example['img_info']['filename'])[-1] + if img_path not in file2id.keys(): + print(f'skip image {img_path} for empty mask') + continue + img_id = file2id[img_path] + kins_annIds = coco.getAnnIds(imgIds=img_id) + kins_raw_info = coco.loadAnns(kins_annIds) + kins_ann_info = _parse_coco_ann_info(kins_raw_info) + h, w = annos['img_shape'][:2] + gt_masks = [ + _poly2mask(mask, h, w) for mask in kins_ann_info['masks'] + ] + # get mask inds based on iou mapping + bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes) + mask_inds = bbox_iou.argmax(axis=0) + valid_inds = (bbox_iou.max(axis=0) > 0.5) + + # mask the image + # use more precise crop when it is ready + # object_img_patches = np.ascontiguousarray( + # np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2)) + # crop image patches using roi_align + # object_img_patches = crop_image_patch_v2( + # torch.Tensor(gt_boxes), + # torch.Tensor(mask_inds).long(), object_img_patches) + object_img_patches, object_masks = crop_image_patch( + gt_boxes, gt_masks, mask_inds, annos['img']) + + for i in range(num_obj): + filename = f'{image_idx}_{names[i]}_{i}.bin' + abs_filepath = osp.join(database_save_path, filename) + rel_filepath = osp.join(f'{info_prefix}_gt_database', filename) + + # save point clouds and image patches for each object + gt_points = points[point_indices[:, i]] + gt_points[:, :3] -= gt_boxes_3d[i, :3] + + if with_mask: + if object_masks[i].sum() == 0 or not valid_inds[i]: + # Skip object for empty or invalid mask + continue + img_patch_path = abs_filepath + '.png' + mask_patch_path = abs_filepath + '.mask.png' + mmcv.imwrite(object_img_patches[i], img_patch_path) + mmcv.imwrite(object_masks[i], mask_patch_path) + + with open(abs_filepath, 'w') as f: + gt_points.tofile(f) + + if (used_classes is None) or names[i] in used_classes: + db_info = { + 'name': names[i], + 'path': rel_filepath, + 'image_idx': image_idx, + 'gt_idx': i, + 'box3d_lidar': gt_boxes_3d[i], + 'num_points_in_gt': gt_points.shape[0], + 'difficulty': difficulty[i], + } + local_group_id = group_ids[i] + # if local_group_id >= 0: + if local_group_id not in group_dict: + group_dict[local_group_id] = group_counter + group_counter += 1 + db_info['group_id'] = group_dict[local_group_id] + if 'score' in annos: + db_info['score'] = annos['score'][i] + if with_mask: + db_info.update({'box2d_camera': gt_boxes[i]}) + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + + for k, v in all_db_infos.items(): + print(f'load {len(v)} {k} database infos') + + with 
open(db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) + + +class GTDatabaseCreater: + """Given the raw data, generate the ground truth database. This is the + parallel version. For serialized version, please refer to + `create_groundtruth_database` + + Args: + dataset_class_name (str): Name of the input dataset. + data_path (str): Path of the data. + info_prefix (str): Prefix of the info file. + info_path (str, optional): Path of the info file. + Default: None. + mask_anno_path (str, optional): Path of the mask_anno. + Default: None. + used_classes (list[str], optional): Classes have been used. + Default: None. + database_save_path (str, optional): Path to save database. + Default: None. + db_info_save_path (str, optional): Path to save db_info. + Default: None. + relative_path (bool, optional): Whether to use relative path. + Default: True. + with_mask (bool, optional): Whether to use mask. + Default: False. + num_worker (int, optional): the number of parallel workers to use. + Default: 8. + """ + + def __init__(self, + dataset_class_name, + data_path, + info_prefix, + info_path=None, + mask_anno_path=None, + used_classes=None, + database_save_path=None, + db_info_save_path=None, + relative_path=True, + add_rgb=False, + lidar_only=False, + bev_only=False, + coors_range=None, + with_mask=False, + num_worker=8) -> None: + self.dataset_class_name = dataset_class_name + self.data_path = data_path + self.info_prefix = info_prefix + self.info_path = info_path + self.mask_anno_path = mask_anno_path + self.used_classes = used_classes + self.database_save_path = database_save_path + self.db_info_save_path = db_info_save_path + self.relative_path = relative_path + self.add_rgb = add_rgb + self.lidar_only = lidar_only + self.bev_only = bev_only + self.coors_range = coors_range + self.with_mask = with_mask + self.num_worker = num_worker + self.pipeline = None + + def create_single(self, input_dict): + group_counter = 0 + single_db_infos = dict() + example = self.pipeline(input_dict) + annos = example['ann_info'] + image_idx = example['sample_idx'] + points = example['points'].tensor.numpy() + gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy() + names = [ + self.dataset.metainfo['classes'][i] for i in annos['gt_labels_3d'] + ] + group_dict = dict() + if 'group_ids' in annos: + group_ids = annos['group_ids'] + else: + group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64) + difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32) + if 'difficulty' in annos: + difficulty = annos['difficulty'] + + num_obj = gt_boxes_3d.shape[0] + point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d) + + if self.with_mask: + # prepare masks + gt_boxes = annos['gt_bboxes'] + img_path = osp.split(example['img_info']['filename'])[-1] + if img_path not in self.file2id.keys(): + print(f'skip image {img_path} for empty mask') + return single_db_infos + img_id = self.file2id[img_path] + kins_annIds = self.coco.getAnnIds(imgIds=img_id) + kins_raw_info = self.coco.loadAnns(kins_annIds) + kins_ann_info = _parse_coco_ann_info(kins_raw_info) + h, w = annos['img_shape'][:2] + gt_masks = [ + _poly2mask(mask, h, w) for mask in kins_ann_info['masks'] + ] + # get mask inds based on iou mapping + bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes) + mask_inds = bbox_iou.argmax(axis=0) + valid_inds = (bbox_iou.max(axis=0) > 0.5) + + # mask the image + # use more precise crop when it is ready + # object_img_patches = np.ascontiguousarray( + # np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2)) + 
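+ # The commented-out code here sketches a RoIAlign-based variant of this step
+ # (crop_image_patch_v2); the plain NumPy crop_image_patch call below is the
+ # one actually used.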
# crop image patches using roi_align + # object_img_patches = crop_image_patch_v2( + # torch.Tensor(gt_boxes), + # torch.Tensor(mask_inds).long(), object_img_patches) + object_img_patches, object_masks = crop_image_patch( + gt_boxes, gt_masks, mask_inds, annos['img']) + + for i in range(num_obj): + filename = f'{image_idx}_{names[i]}_{i}.bin' + abs_filepath = osp.join(self.database_save_path, filename) + rel_filepath = osp.join(f'{self.info_prefix}_gt_database', + filename) + + # save point clouds and image patches for each object + gt_points = points[point_indices[:, i]] + gt_points[:, :3] -= gt_boxes_3d[i, :3] + + if self.with_mask: + if object_masks[i].sum() == 0 or not valid_inds[i]: + # Skip object for empty or invalid mask + continue + img_patch_path = abs_filepath + '.png' + mask_patch_path = abs_filepath + '.mask.png' + mmcv.imwrite(object_img_patches[i], img_patch_path) + mmcv.imwrite(object_masks[i], mask_patch_path) + + with open(abs_filepath, 'w') as f: + gt_points.tofile(f) + + if (self.used_classes is None) or names[i] in self.used_classes: + db_info = { + 'name': names[i], + 'path': rel_filepath, + 'image_idx': image_idx, + 'gt_idx': i, + 'box3d_lidar': gt_boxes_3d[i], + 'num_points_in_gt': gt_points.shape[0], + 'difficulty': difficulty[i], + } + local_group_id = group_ids[i] + # if local_group_id >= 0: + if local_group_id not in group_dict: + group_dict[local_group_id] = group_counter + group_counter += 1 + db_info['group_id'] = group_dict[local_group_id] + if 'score' in annos: + db_info['score'] = annos['score'][i] + if self.with_mask: + db_info.update({'box2d_camera': gt_boxes[i]}) + if names[i] in single_db_infos: + single_db_infos[names[i]].append(db_info) + else: + single_db_infos[names[i]] = [db_info] + + return single_db_infos + + def create(self): + print(f'Create GT Database of {self.dataset_class_name}') + dataset_cfg = dict( + type=self.dataset_class_name, + data_root=self.data_path, + ann_file=self.info_path) + if self.dataset_class_name == 'KittiDataset': + backend_args = None + dataset_cfg.update( + test_mode=False, + data_prefix=dict( + pts='training/velodyne_reduced', img='training/image_2'), + modality=dict( + use_lidar=True, + use_depth=False, + use_lidar_intensity=True, + use_camera=self.with_mask, + ), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + backend_args=backend_args) + ]) + + elif self.dataset_class_name == 'NuScenesDataset': + dataset_cfg.update( + use_valid_flag=True, + data_prefix=dict( + pts='samples/LIDAR_TOP', img='', + sweeps='sweeps/LIDAR_TOP'), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True) + ]) + + elif self.dataset_class_name == 'WaymoDataset': + backend_args = None + dataset_cfg.update( + test_mode=False, + data_prefix=dict( + pts='training/velodyne', + img='', + sweeps='training/velodyne'), + modality=dict( + use_lidar=True, + use_depth=False, + use_lidar_intensity=True, + use_camera=False, + ), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=6, + backend_args=backend_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + 
backend_args=backend_args) + ]) + + self.dataset = DATASETS.build(dataset_cfg) + self.pipeline = self.dataset.pipeline + if self.database_save_path is None: + self.database_save_path = osp.join( + self.data_path, f'{self.info_prefix}_gt_database') + if self.db_info_save_path is None: + self.db_info_save_path = osp.join( + self.data_path, f'{self.info_prefix}_dbinfos_train.pkl') + mmengine.mkdir_or_exist(self.database_save_path) + if self.with_mask: + self.coco = COCO(osp.join(self.data_path, self.mask_anno_path)) + imgIds = self.coco.getImgIds() + self.file2id = dict() + for i in imgIds: + info = self.coco.loadImgs([i])[0] + self.file2id.update({info['file_name']: i}) + + def loop_dataset(i): + input_dict = self.dataset.get_data_info(i) + input_dict['box_type_3d'] = self.dataset.box_type_3d + input_dict['box_mode_3d'] = self.dataset.box_mode_3d + return input_dict + + multi_db_infos = mmengine.track_parallel_progress( + self.create_single, + ((loop_dataset(i) + for i in range(len(self.dataset))), len(self.dataset)), + self.num_worker) + print('Make global unique group id') + group_counter_offset = 0 + all_db_infos = dict() + for single_db_infos in track_iter_progress(multi_db_infos): + group_id = -1 + for name, name_db_infos in single_db_infos.items(): + for db_info in name_db_infos: + group_id = max(group_id, db_info['group_id']) + db_info['group_id'] += group_counter_offset + if name not in all_db_infos: + all_db_infos[name] = [] + all_db_infos[name].extend(name_db_infos) + group_counter_offset += (group_id + 1) + + for k, v in all_db_infos.items(): + print(f'load {len(v)} {k} database infos') + + with open(self.db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) diff --git a/tools/dataset_converters/indoor_converter.py b/tools/dataset_converters/indoor_converter.py new file mode 100755 index 0000000..9092285 --- /dev/null +++ b/tools/dataset_converters/indoor_converter.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os + +import mmengine +import numpy as np + +from tools.dataset_converters.s3dis_data_utils import S3DISData, S3DISSegData +from tools.dataset_converters.scannet_data_utils import (ScanNetData, + ScanNetSegData) +from tools.dataset_converters.sunrgbd_data_utils import SUNRGBDData + + +def create_indoor_info_file(data_path, + pkl_prefix='sunrgbd', + save_path=None, + use_v1=False, + workers=4): + """Create indoor information file. + + Get information of the raw data and save it to the pkl file. + + Args: + data_path (str): Path of the data. + pkl_prefix (str, optional): Prefix of the pkl to be saved. + Default: 'sunrgbd'. + save_path (str, optional): Path of the pkl to be saved. Default: None. + use_v1 (bool, optional): Whether to use v1. Default: False. + workers (int, optional): Number of threads to be used. Default: 4. 
+ """ + assert os.path.exists(data_path) + assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis'], \ + f'unsupported indoor dataset {pkl_prefix}' + save_path = data_path if save_path is None else save_path + assert os.path.exists(save_path) + + # generate infos for both detection and segmentation task + if pkl_prefix in ['sunrgbd', 'scannet']: + train_filename = os.path.join(save_path, + f'{pkl_prefix}_infos_train.pkl') + val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl') + if pkl_prefix == 'sunrgbd': + # SUN RGB-D has a train-val split + train_dataset = SUNRGBDData( + root_path=data_path, split='train', use_v1=use_v1) + val_dataset = SUNRGBDData( + root_path=data_path, split='val', use_v1=use_v1) + else: + # ScanNet has a train-val-test split + train_dataset = ScanNetData(root_path=data_path, split='train') + val_dataset = ScanNetData(root_path=data_path, split='val') + test_dataset = ScanNetData(root_path=data_path, split='test') + test_filename = os.path.join(save_path, + f'{pkl_prefix}_infos_test.pkl') + + infos_train = train_dataset.get_infos( + num_workers=workers, has_label=True) + mmengine.dump(infos_train, train_filename, 'pkl') + print(f'{pkl_prefix} info train file is saved to {train_filename}') + + infos_val = val_dataset.get_infos(num_workers=workers, has_label=True) + mmengine.dump(infos_val, val_filename, 'pkl') + print(f'{pkl_prefix} info val file is saved to {val_filename}') + + if pkl_prefix == 'scannet': + infos_test = test_dataset.get_infos( + num_workers=workers, has_label=False) + mmengine.dump(infos_test, test_filename, 'pkl') + print(f'{pkl_prefix} info test file is saved to {test_filename}') + + # generate infos for the semantic segmentation task + # e.g. re-sampled scene indexes and label weights + # scene indexes are used to re-sample rooms with different number of points + # label weights are used to balance classes with different number of points + if pkl_prefix == 'scannet': + # label weight computation function is adopted from + # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 + train_dataset = ScanNetSegData( + data_root=data_path, + ann_file=train_filename, + split='train', + num_points=8192, + label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + # TODO: do we need to generate on val set? 
+ val_dataset = ScanNetSegData( + data_root=data_path, + ann_file=val_filename, + split='val', + num_points=8192, + label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + # no need to generate for test set + train_dataset.get_seg_infos() + val_dataset.get_seg_infos() + elif pkl_prefix == 's3dis': + # S3DIS doesn't have a fixed train-val split + # it has 6 areas instead, so we generate info file for each of them + # in training, we will use dataset to wrap different areas + splits = [f'Area_{i}' for i in [1, 2, 3, 4, 5, 6]] + for split in splits: + dataset = S3DISData(root_path=data_path, split=split) + info = dataset.get_infos(num_workers=workers, has_label=True) + filename = os.path.join(save_path, + f'{pkl_prefix}_infos_{split}.pkl') + mmengine.dump(info, filename, 'pkl') + print(f'{pkl_prefix} info {split} file is saved to {filename}') + seg_dataset = S3DISSegData( + data_root=data_path, + ann_file=filename, + split=split, + num_points=4096, + label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + seg_dataset.get_seg_infos() diff --git a/tools/dataset_converters/kitti_converter.py b/tools/dataset_converters/kitti_converter.py new file mode 100755 index 0000000..367cfd7 --- /dev/null +++ b/tools/dataset_converters/kitti_converter.py @@ -0,0 +1,626 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict +from pathlib import Path + +import mmcv +import mmengine +import numpy as np +from nuscenes.utils.geometry_utils import view_points + +from mmdet3d.structures import points_cam2img +from mmdet3d.structures.ops import box_np_ops +from .kitti_data_utils import WaymoInfoGatherer, get_kitti_image_info +from .nuscenes_converter import post_process_coords + +kitti_categories = ('Pedestrian', 'Cyclist', 'Car') + + +def convert_to_kitti_info_version2(info): + """convert kitti info v1 to v2 if possible. + + Args: + info (dict): Info of the input kitti data. + - image (dict): image info + - calib (dict): calibration info + - point_cloud (dict): point cloud info + """ + if 'image' not in info or 'calib' not in info or 'point_cloud' not in info: + info['image'] = { + 'image_shape': info['img_shape'], + 'image_idx': info['image_idx'], + 'image_path': info['img_path'], + } + info['calib'] = { + 'R0_rect': info['calib/R0_rect'], + 'Tr_velo_to_cam': info['calib/Tr_velo_to_cam'], + 'P2': info['calib/P2'], + } + info['point_cloud'] = { + 'velodyne_path': info['velodyne_path'], + } + + +def _read_imageset_file(path): + with open(path, 'r') as f: + lines = f.readlines() + return [int(line) for line in lines] + + +class _NumPointsInGTCalculater: + """Calculate the number of points inside the ground truth box. This is the + parallel version. For the serialized version, please refer to + `_calculate_num_points_in_gt`. + + Args: + data_path (str): Path of the data. + relative_path (bool): Whether to use relative path. + remove_outside (bool, optional): Whether to remove points which are + outside of image. Default: True. + num_features (int, optional): Number of features per point. + Default: False. + num_worker (int, optional): the number of parallel workers to use. + Default: 8. 
+ """ + + def __init__(self, + data_path, + relative_path, + remove_outside=True, + num_features=4, + num_worker=8) -> None: + self.data_path = data_path + self.relative_path = relative_path + self.remove_outside = remove_outside + self.num_features = num_features + self.num_worker = num_worker + + def calculate_single(self, info): + pc_info = info['point_cloud'] + image_info = info['image'] + calib = info['calib'] + if self.relative_path: + v_path = str(Path(self.data_path) / pc_info['velodyne_path']) + else: + v_path = pc_info['velodyne_path'] + points_v = np.fromfile( + v_path, dtype=np.float32, + count=-1).reshape([-1, self.num_features]) + rect = calib['R0_rect'] + Trv2c = calib['Tr_velo_to_cam'] + P2 = calib['P2'] + if self.remove_outside: + points_v = box_np_ops.remove_outside_points( + points_v, rect, Trv2c, P2, image_info['image_shape']) + annos = info['annos'] + num_obj = len([n for n in annos['name'] if n != 'DontCare']) + dims = annos['dimensions'][:num_obj] + loc = annos['location'][:num_obj] + rots = annos['rotation_y'][:num_obj] + gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + gt_boxes_lidar = box_np_ops.box_camera_to_lidar( + gt_boxes_camera, rect, Trv2c) + indices = box_np_ops.points_in_rbbox(points_v[:, :3], gt_boxes_lidar) + num_points_in_gt = indices.sum(0) + num_ignored = len(annos['dimensions']) - num_obj + num_points_in_gt = np.concatenate( + [num_points_in_gt, -np.ones([num_ignored])]) + annos['num_points_in_gt'] = num_points_in_gt.astype(np.int32) + return info + + def calculate(self, infos): + ret_infos = mmengine.track_parallel_progress(self.calculate_single, + infos, self.num_worker) + for i, ret_info in enumerate(ret_infos): + infos[i] = ret_info + + +def _calculate_num_points_in_gt(data_path, + infos, + relative_path, + remove_outside=True, + num_features=4): + for info in mmengine.track_iter_progress(infos): + pc_info = info['point_cloud'] + image_info = info['image'] + calib = info['calib'] + if relative_path: + v_path = str(Path(data_path) / pc_info['velodyne_path']) + else: + v_path = pc_info['velodyne_path'] + points_v = np.fromfile( + v_path, dtype=np.float32, count=-1).reshape([-1, num_features]) + rect = calib['R0_rect'] + Trv2c = calib['Tr_velo_to_cam'] + P2 = calib['P2'] + if remove_outside: + points_v = box_np_ops.remove_outside_points( + points_v, rect, Trv2c, P2, image_info['image_shape']) + + # points_v = points_v[points_v[:, 0] > 0] + annos = info['annos'] + num_obj = len([n for n in annos['name'] if n != 'DontCare']) + # annos = kitti.filter_kitti_anno(annos, ['DontCare']) + dims = annos['dimensions'][:num_obj] + loc = annos['location'][:num_obj] + rots = annos['rotation_y'][:num_obj] + gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + gt_boxes_lidar = box_np_ops.box_camera_to_lidar( + gt_boxes_camera, rect, Trv2c) + indices = box_np_ops.points_in_rbbox(points_v[:, :3], gt_boxes_lidar) + num_points_in_gt = indices.sum(0) + num_ignored = len(annos['dimensions']) - num_obj + num_points_in_gt = np.concatenate( + [num_points_in_gt, -np.ones([num_ignored])]) + annos['num_points_in_gt'] = num_points_in_gt.astype(np.int32) + + +def create_kitti_info_file(data_path, + pkl_prefix='kitti', + with_plane=False, + save_path=None, + relative_path=True): + """Create info file of KITTI dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + data_path (str): Path of the data root. + pkl_prefix (str, optional): Prefix of the info file to be generated. 
+ Default: 'kitti'. + with_plane (bool, optional): Whether to use plane information. + Default: False. + save_path (str, optional): Path to save the info file. + Default: None. + relative_path (bool, optional): Whether to use relative path. + Default: True. + """ + imageset_folder = Path(data_path) / 'ImageSets' + train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt')) + val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt')) + test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt')) + + print('Generate info. this may take several minutes.') + if save_path is None: + save_path = Path(data_path) + else: + save_path = Path(save_path) + kitti_infos_train = get_kitti_image_info( + data_path, + training=True, + velodyne=True, + calib=True, + with_plane=with_plane, + image_ids=train_img_ids, + relative_path=relative_path) + _calculate_num_points_in_gt(data_path, kitti_infos_train, relative_path) + filename = save_path / f'{pkl_prefix}_infos_train.pkl' + print(f'Kitti info train file is saved to {filename}') + mmengine.dump(kitti_infos_train, filename) + kitti_infos_val = get_kitti_image_info( + data_path, + training=True, + velodyne=True, + calib=True, + with_plane=with_plane, + image_ids=val_img_ids, + relative_path=relative_path) + _calculate_num_points_in_gt(data_path, kitti_infos_val, relative_path) + filename = save_path / f'{pkl_prefix}_infos_val.pkl' + print(f'Kitti info val file is saved to {filename}') + mmengine.dump(kitti_infos_val, filename) + filename = save_path / f'{pkl_prefix}_infos_trainval.pkl' + print(f'Kitti info trainval file is saved to {filename}') + mmengine.dump(kitti_infos_train + kitti_infos_val, filename) + + kitti_infos_test = get_kitti_image_info( + data_path, + training=False, + label_info=False, + velodyne=True, + calib=True, + with_plane=False, + image_ids=test_img_ids, + relative_path=relative_path) + filename = save_path / f'{pkl_prefix}_infos_test.pkl' + print(f'Kitti info test file is saved to {filename}') + mmengine.dump(kitti_infos_test, filename) + + +def create_waymo_info_file(data_path, + pkl_prefix='waymo', + save_path=None, + relative_path=True, + max_sweeps=5, + workers=8): + """Create info file of waymo dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + data_path (str): Path of the data root. + pkl_prefix (str, optional): Prefix of the info file to be generated. + Default: 'waymo'. + save_path (str, optional): Path to save the info file. + Default: None. + relative_path (bool, optional): Whether to use relative path. + Default: True. + max_sweeps (int, optional): Max sweeps before the detection frame + to be used. Default: 5. + """ + imageset_folder = Path(data_path) / 'ImageSets' + train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt')) + val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt')) + test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt')) + + print('Generate info. 
this may take several minutes.') + if save_path is None: + save_path = Path(data_path) + else: + save_path = Path(save_path) + waymo_infos_gatherer_trainval = WaymoInfoGatherer( + data_path, + training=True, + velodyne=True, + calib=True, + pose=True, + relative_path=relative_path, + max_sweeps=max_sweeps, + num_worker=workers) + waymo_infos_gatherer_test = WaymoInfoGatherer( + data_path, + training=False, + label_info=False, + velodyne=True, + calib=True, + pose=True, + relative_path=relative_path, + max_sweeps=max_sweeps, + num_worker=workers) + num_points_in_gt_calculater = _NumPointsInGTCalculater( + data_path, + relative_path, + num_features=6, + remove_outside=False, + num_worker=workers) + + waymo_infos_train = waymo_infos_gatherer_trainval.gather(train_img_ids) + num_points_in_gt_calculater.calculate(waymo_infos_train) + filename = save_path / f'{pkl_prefix}_infos_train.pkl' + print(f'Waymo info train file is saved to {filename}') + mmengine.dump(waymo_infos_train, filename) + waymo_infos_val = waymo_infos_gatherer_trainval.gather(val_img_ids) + num_points_in_gt_calculater.calculate(waymo_infos_val) + filename = save_path / f'{pkl_prefix}_infos_val.pkl' + print(f'Waymo info val file is saved to {filename}') + mmengine.dump(waymo_infos_val, filename) + filename = save_path / f'{pkl_prefix}_infos_trainval.pkl' + print(f'Waymo info trainval file is saved to {filename}') + mmengine.dump(waymo_infos_train + waymo_infos_val, filename) + waymo_infos_test = waymo_infos_gatherer_test.gather(test_img_ids) + filename = save_path / f'{pkl_prefix}_infos_test.pkl' + print(f'Waymo info test file is saved to {filename}') + mmengine.dump(waymo_infos_test, filename) + + +def _create_reduced_point_cloud(data_path, + info_path, + save_path=None, + back=False, + num_features=4, + front_camera_id=2): + """Create reduced point clouds for given info. + + Args: + data_path (str): Path of original data. + info_path (str): Path of data info. + save_path (str, optional): Path to save reduced point cloud + data. Default: None. + back (bool, optional): Whether to flip the points to back. + Default: False. + num_features (int, optional): Number of point features. Default: 4. + front_camera_id (int, optional): The referenced/front camera ID. + Default: 2. + """ + kitti_infos = mmengine.load(info_path) + + for info in mmengine.track_iter_progress(kitti_infos): + pc_info = info['point_cloud'] + image_info = info['image'] + calib = info['calib'] + + v_path = pc_info['velodyne_path'] + v_path = Path(data_path) / v_path + points_v = np.fromfile( + str(v_path), dtype=np.float32, + count=-1).reshape([-1, num_features]) + rect = calib['R0_rect'] + if front_camera_id == 2: + P2 = calib['P2'] + else: + P2 = calib[f'P{str(front_camera_id)}'] + Trv2c = calib['Tr_velo_to_cam'] + # first remove z < 0 points + # keep = points_v[:, -1] > 0 + # points_v = points_v[keep] + # then remove outside. 
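+ # remove_outside_points keeps only the points whose projection through
+ # rect / Trv2c / P2 falls inside the front camera image, which is what makes
+ # the saved copy "reduced"; the back flag mirrors x first so the same frustum
+ # test can be applied to the points behind the vehicle.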
+ if back: + points_v[:, 0] = -points_v[:, 0] + points_v = box_np_ops.remove_outside_points(points_v, rect, Trv2c, P2, + image_info['image_shape']) + if save_path is None: + save_dir = v_path.parent.parent / (v_path.parent.stem + '_reduced') + if not save_dir.exists(): + save_dir.mkdir() + save_filename = save_dir / v_path.name + # save_filename = str(v_path) + '_reduced' + if back: + save_filename += '_back' + else: + save_filename = str(Path(save_path) / v_path.name) + if back: + save_filename += '_back' + with open(save_filename, 'w') as f: + points_v.tofile(f) + + +def create_reduced_point_cloud(data_path, + pkl_prefix, + train_info_path=None, + val_info_path=None, + test_info_path=None, + save_path=None, + with_back=False): + """Create reduced point clouds for training/validation/testing. + + Args: + data_path (str): Path of original data. + pkl_prefix (str): Prefix of info files. + train_info_path (str, optional): Path of training set info. + Default: None. + val_info_path (str, optional): Path of validation set info. + Default: None. + test_info_path (str, optional): Path of test set info. + Default: None. + save_path (str, optional): Path to save reduced point cloud data. + Default: None. + with_back (bool, optional): Whether to flip the points to back. + Default: False. + """ + if train_info_path is None: + train_info_path = Path(data_path) / f'{pkl_prefix}_infos_train.pkl' + if val_info_path is None: + val_info_path = Path(data_path) / f'{pkl_prefix}_infos_val.pkl' + if test_info_path is None: + test_info_path = Path(data_path) / f'{pkl_prefix}_infos_test.pkl' + + print('create reduced point cloud for training set') + _create_reduced_point_cloud(data_path, train_info_path, save_path) + print('create reduced point cloud for validation set') + _create_reduced_point_cloud(data_path, val_info_path, save_path) + print('create reduced point cloud for testing set') + _create_reduced_point_cloud(data_path, test_info_path, save_path) + if with_back: + _create_reduced_point_cloud( + data_path, train_info_path, save_path, back=True) + _create_reduced_point_cloud( + data_path, val_info_path, save_path, back=True) + _create_reduced_point_cloud( + data_path, test_info_path, save_path, back=True) + + +def export_2d_annotation(root_path, info_path, mono3d=True): + """Export 2d annotation from the info file and raw data. + + Args: + root_path (str): Root path of the raw data. + info_path (str): Path of the info file. + mono3d (bool, optional): Whether to export mono3d annotation. + Default: True. 
+ """ + # get bbox annotations for camera + kitti_infos = mmengine.load(info_path) + cat2Ids = [ + dict(id=kitti_categories.index(cat_name), name=cat_name) + for cat_name in kitti_categories + ] + coco_ann_id = 0 + coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) + from os import path as osp + for info in mmengine.track_iter_progress(kitti_infos): + coco_infos = get_2d_boxes(info, occluded=[0, 1, 2, 3], mono3d=mono3d) + (height, width, + _) = mmcv.imread(osp.join(root_path, + info['image']['image_path'])).shape + coco_2d_dict['images'].append( + dict( + file_name=info['image']['image_path'], + id=info['image']['image_idx'], + Tri2v=info['calib']['Tr_imu_to_velo'], + Trv2c=info['calib']['Tr_velo_to_cam'], + rect=info['calib']['R0_rect'], + cam_intrinsic=info['calib']['P2'], + width=width, + height=height)) + for coco_info in coco_infos: + if coco_info is None: + continue + # add an empty key for coco format + coco_info['segmentation'] = [] + coco_info['id'] = coco_ann_id + coco_2d_dict['annotations'].append(coco_info) + coco_ann_id += 1 + if mono3d: + json_prefix = f'{info_path[:-4]}_mono3d' + else: + json_prefix = f'{info_path[:-4]}' + mmengine.dump(coco_2d_dict, f'{json_prefix}.coco.json') + + +def get_2d_boxes(info, occluded, mono3d=True): + """Get the 2D annotation records for a given info. + + Args: + info: Information of the given sample data. + occluded: Integer (0, 1, 2, 3) indicating occlusion state: + 0 = fully visible, 1 = partly occluded, 2 = largely occluded, + 3 = unknown, -1 = DontCare + mono3d (bool): Whether to get boxes with mono3d annotation. + + Return: + list[dict]: List of 2D annotation record that belongs to the input + `sample_data_token`. + """ + # Get calibration information + P2 = info['calib']['P2'] + + repro_recs = [] + # if no annotations in info (test dataset), then return + if 'annos' not in info: + return repro_recs + + # Get all the annotation with the specified visibilties. + ann_dicts = info['annos'] + mask = [(ocld in occluded) for ocld in ann_dicts['occluded']] + for k in ann_dicts.keys(): + ann_dicts[k] = ann_dicts[k][mask] + + # convert dict of list to list of dict + ann_recs = [] + for i in range(len(ann_dicts['occluded'])): + ann_rec = {} + for k in ann_dicts.keys(): + ann_rec[k] = ann_dicts[k][i] + ann_recs.append(ann_rec) + + for ann_idx, ann_rec in enumerate(ann_recs): + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = \ + f"{info['image']['image_idx']}.{ann_idx}" + ann_rec['sample_data_token'] = info['image']['image_idx'] + sample_data_token = info['image']['image_idx'] + + loc = ann_rec['location'][np.newaxis, :] + dim = ann_rec['dimensions'][np.newaxis, :] + rot = ann_rec['rotation_y'][np.newaxis, np.newaxis] + # transform the center from [0.5, 1.0, 0.5] to [0.5, 0.5, 0.5] + dst = np.array([0.5, 0.5, 0.5]) + src = np.array([0.5, 1.0, 0.5]) + loc = loc + dim * (dst - src) + offset = (info['calib']['P2'][0, 3] - info['calib']['P0'][0, 3]) \ + / info['calib']['P2'][0, 0] + loc_3d = np.copy(loc) + loc_3d[0, 0] += offset + gt_bbox_3d = np.concatenate([loc, dim, rot], axis=1).astype(np.float32) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box_np_ops.center_to_corner_box3d( + gt_bbox_3d[:, :3], + gt_bbox_3d[:, 3:6], + gt_bbox_3d[:, 6], [0.5, 0.5, 0.5], + axis=1) + corners_3d = corners_3d[0].T # (1, 8, 3) -> (3, 8) + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. 
+ camera_intrinsic = P2 + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords(corner_coords) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. + repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + sample_data_token, + info['image']['image_path']) + + # If mono3d=True, add 3D annotations in camera coordinates + if mono3d and (repro_rec is not None): + repro_rec['bbox_cam3d'] = np.concatenate( + [loc_3d, dim, rot], + axis=1).astype(np.float32).squeeze().tolist() + repro_rec['velo_cam3d'] = -1 # no velocity in KITTI + + center3d = np.array(loc).reshape([1, 3]) + center2d = points_cam2img( + center3d, camera_intrinsic, with_depth=True) + repro_rec['center2d'] = center2d.squeeze().tolist() + # normalized center2D + depth + # samples with depth < 0 will be removed + if repro_rec['center2d'][2] <= 0: + continue + + repro_rec['attribute_name'] = -1 # no attribute in KITTI + repro_rec['attribute_id'] = -1 + + repro_recs.append(repro_rec) + + return repro_recs + + +def generate_record(ann_rec, x1, y1, x2, y2, sample_data_token, filename): + """Generate one 2D annotation record given various information on top of + the 2D bounding box coordinates. + + Args: + ann_rec (dict): Original 3d annotation record. + x1 (float): Minimum value of the x coordinate. + y1 (float): Minimum value of the y coordinate. + x2 (float): Maximum value of the x coordinate. + y2 (float): Maximum value of the y coordinate. + sample_data_token (str): Sample data token. + filename (str):The corresponding image file where the annotation + is present. + + Returns: + dict: A sample 2D annotation record. + - file_name (str): file name + - image_id (str): sample data token + - area (float): 2d box area + - category_name (str): category name + - category_id (int): category id + - bbox (list[float]): left x, top y, x_size, y_size of 2d box + - iscrowd (int): whether the area is crowd + """ + repro_rec = OrderedDict() + repro_rec['sample_data_token'] = sample_data_token + coco_rec = dict() + + key_mapping = { + 'name': 'category_name', + 'num_points_in_gt': 'num_lidar_pts', + 'sample_annotation_token': 'sample_annotation_token', + 'sample_data_token': 'sample_data_token', + } + + for key, value in ann_rec.items(): + if key in key_mapping.keys(): + repro_rec[key_mapping[key]] = value + + repro_rec['bbox_corners'] = [x1, y1, x2, y2] + repro_rec['filename'] = filename + + coco_rec['file_name'] = filename + coco_rec['image_id'] = sample_data_token + coco_rec['area'] = (y2 - y1) * (x2 - x1) + + if repro_rec['category_name'] not in kitti_categories: + return None + cat_name = repro_rec['category_name'] + coco_rec['category_name'] = cat_name + coco_rec['category_id'] = kitti_categories.index(cat_name) + coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] + coco_rec['iscrowd'] = 0 + + return coco_rec diff --git a/tools/dataset_converters/kitti_data_utils.py b/tools/dataset_converters/kitti_data_utils.py new file mode 100755 index 0000000..64c3bc4 --- /dev/null +++ b/tools/dataset_converters/kitti_data_utils.py @@ -0,0 +1,668 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
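Before the per-frame data utilities below, a rough sketch of how the kitti_converter.py functions defined above are typically chained for KITTI (an illustration only, under the assumption of the standard ./data/kitti layout with an ImageSets/ folder; the kitti_data_prep entry in tools/create_data.py is the actual driver in this patch):

from tools.dataset_converters.kitti_converter import (
    create_kitti_info_file, create_reduced_point_cloud, export_2d_annotation)

data_root = './data/kitti'  # assumed standard KITTI layout
create_kitti_info_file(data_root, pkl_prefix='kitti', with_plane=False)
create_reduced_point_cloud(data_root, pkl_prefix='kitti')
# Optionally dump COCO-style 2D / mono3D annotations for one split.
export_2d_annotation(data_root, f'{data_root}/kitti_infos_train.pkl')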
+from collections import OrderedDict +from concurrent import futures as futures +from os import path as osp +from pathlib import Path + +import mmengine +import numpy as np +from PIL import Image +from skimage import io + + +def get_image_index_str(img_idx, use_prefix_id=False): + if use_prefix_id: + return '{:07d}'.format(img_idx) + else: + return '{:06d}'.format(img_idx) + + +def get_kitti_info_path(idx, + prefix, + info_type='image_2', + file_tail='.png', + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + img_idx_str = get_image_index_str(idx, use_prefix_id) + img_idx_str += file_tail + prefix = Path(prefix) + if training: + file_path = Path('training') / info_type / img_idx_str + else: + file_path = Path('testing') / info_type / img_idx_str + if exist_check and not (prefix / file_path).exists(): + raise ValueError('file not exist: {}'.format(file_path)) + if relative_path: + return str(file_path) + else: + return str(prefix / file_path) + + +def get_image_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + info_type='image_2', + file_tail='.png', + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, info_type, file_tail, training, + relative_path, exist_check, use_prefix_id) + + +def get_label_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + info_type='label_2', + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, info_type, '.txt', training, + relative_path, exist_check, use_prefix_id) + + +def get_plane_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + info_type='planes', + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, info_type, '.txt', training, + relative_path, exist_check, use_prefix_id) + + +def get_velodyne_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training, + relative_path, exist_check, use_prefix_id) + + +def get_calib_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, 'calib', '.txt', training, + relative_path, exist_check, use_prefix_id) + + +def get_pose_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, 'pose', '.txt', training, + relative_path, exist_check, use_prefix_id) + + +def get_timestamp_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, 'timestamp', '.txt', training, + relative_path, exist_check, use_prefix_id) + + +def get_label_anno(label_path): + annotations = {} + annotations.update({ + 'name': [], + 'truncated': [], + 'occluded': [], + 'alpha': [], + 'bbox': [], + 'dimensions': [], + 'location': [], + 'rotation_y': [] + }) + with open(label_path, 'r') as f: + lines = f.readlines() + # if len(lines) == 0 or len(lines[0]) < 15: + # content = [] + # else: + content = [line.strip().split(' ') for line in lines] + num_objects = len([x[0] for x in content if x[0] != 'DontCare']) + annotations['name'] = np.array([x[0] for x in content]) + num_gt = len(annotations['name']) + annotations['truncated'] = np.array([float(x[1]) for x in content]) + annotations['occluded'] = np.array([int(x[2]) for x in content]) + annotations['alpha'] = np.array([float(x[3]) for x in content]) + annotations['bbox'] = 
np.array([[float(info) for info in x[4:8]] + for x in content]).reshape(-1, 4) + # dimensions will convert hwl format to standard lhw(camera) format. + annotations['dimensions'] = np.array([[float(info) for info in x[8:11]] + for x in content + ]).reshape(-1, 3)[:, [2, 0, 1]] + annotations['location'] = np.array([[float(info) for info in x[11:14]] + for x in content]).reshape(-1, 3) + annotations['rotation_y'] = np.array([float(x[14]) + for x in content]).reshape(-1) + if len(content) != 0 and len(content[0]) == 16: # have score + annotations['score'] = np.array([float(x[15]) for x in content]) + else: + annotations['score'] = np.zeros((annotations['bbox'].shape[0], )) + index = list(range(num_objects)) + [-1] * (num_gt - num_objects) + annotations['index'] = np.array(index, dtype=np.int32) + annotations['group_ids'] = np.arange(num_gt, dtype=np.int32) + return annotations + + +def _extend_matrix(mat): + mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0) + return mat + + +def get_kitti_image_info(path, + training=True, + label_info=True, + velodyne=False, + calib=False, + with_plane=False, + image_ids=7481, + extend_matrix=True, + num_worker=8, + relative_path=True, + with_imageshape=True): + """ + KITTI annotation format version 2: + { + [optional]points: [N, 3+] point cloud + [optional, for kitti]image: { + image_idx: ... + image_path: ... + image_shape: ... + } + point_cloud: { + num_features: 4 + velodyne_path: ... + } + [optional, for kitti]calib: { + R0_rect: ... + Tr_velo_to_cam: ... + P2: ... + } + annos: { + location: [num_gt, 3] array + dimensions: [num_gt, 3] array + rotation_y: [num_gt] angle array + name: [num_gt] ground truth name array + [optional]difficulty: kitti difficulty + [optional]group_ids: used for multi-part object + } + } + """ + root_path = Path(path) + if not isinstance(image_ids, list): + image_ids = list(range(image_ids)) + + def map_func(idx): + info = {} + pc_info = {'num_features': 4} + calib_info = {} + + image_info = {'image_idx': idx} + annotations = None + if velodyne: + pc_info['velodyne_path'] = get_velodyne_path( + idx, path, training, relative_path) + image_info['image_path'] = get_image_path(idx, path, training, + relative_path) + if with_imageshape: + img_path = image_info['image_path'] + if relative_path: + img_path = str(root_path / img_path) + image_info['image_shape'] = np.array( + io.imread(img_path).shape[:2], dtype=np.int32) + if label_info: + label_path = get_label_path(idx, path, training, relative_path) + if relative_path: + label_path = str(root_path / label_path) + annotations = get_label_anno(label_path) + info['image'] = image_info + info['point_cloud'] = pc_info + if calib: + calib_path = get_calib_path( + idx, path, training, relative_path=False) + with open(calib_path, 'r') as f: + lines = f.readlines() + P0 = np.array([float(info) for info in lines[0].split(' ')[1:13] + ]).reshape([3, 4]) + P1 = np.array([float(info) for info in lines[1].split(' ')[1:13] + ]).reshape([3, 4]) + P2 = np.array([float(info) for info in lines[2].split(' ')[1:13] + ]).reshape([3, 4]) + P3 = np.array([float(info) for info in lines[3].split(' ')[1:13] + ]).reshape([3, 4]) + if extend_matrix: + P0 = _extend_matrix(P0) + P1 = _extend_matrix(P1) + P2 = _extend_matrix(P2) + P3 = _extend_matrix(P3) + R0_rect = np.array([ + float(info) for info in lines[4].split(' ')[1:10] + ]).reshape([3, 3]) + if extend_matrix: + rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) + rect_4x4[3, 3] = 1. 
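+ # the 3x3 R0_rect is embedded into a 4x4 homogeneous matrix here so that it
+ # can be composed with the 4x4-extended projection and Tr_velo_to_cam
+ # matrices stored below.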
+ rect_4x4[:3, :3] = R0_rect + else: + rect_4x4 = R0_rect + + Tr_velo_to_cam = np.array([ + float(info) for info in lines[5].split(' ')[1:13] + ]).reshape([3, 4]) + Tr_imu_to_velo = np.array([ + float(info) for info in lines[6].split(' ')[1:13] + ]).reshape([3, 4]) + if extend_matrix: + Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam) + Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo) + calib_info['P0'] = P0 + calib_info['P1'] = P1 + calib_info['P2'] = P2 + calib_info['P3'] = P3 + calib_info['R0_rect'] = rect_4x4 + calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam + calib_info['Tr_imu_to_velo'] = Tr_imu_to_velo + info['calib'] = calib_info + + if with_plane: + plane_path = get_plane_path(idx, path, training, relative_path) + if relative_path: + plane_path = str(root_path / plane_path) + lines = mmengine.list_from_file(plane_path) + info['plane'] = np.array([float(i) for i in lines[3].split()]) + + if annotations is not None: + info['annos'] = annotations + add_difficulty_to_annos(info) + return info + + with futures.ThreadPoolExecutor(num_worker) as executor: + image_infos = executor.map(map_func, image_ids) + + return list(image_infos) + + +class WaymoInfoGatherer: + """ + Parallel version of waymo dataset information gathering. + Waymo annotation format version like KITTI: + { + [optional]points: [N, 3+] point cloud + [optional, for kitti]image: { + image_idx: ... + image_path: ... + image_shape: ... + } + point_cloud: { + num_features: 6 + velodyne_path: ... + } + [optional, for kitti]calib: { + R0_rect: ... + Tr_velo_to_cam0: ... + P0: ... + } + annos: { + location: [num_gt, 3] array + dimensions: [num_gt, 3] array + rotation_y: [num_gt] angle array + name: [num_gt] ground truth name array + [optional]difficulty: kitti difficulty + [optional]group_ids: used for multi-part object + } + } + """ + + def __init__(self, + path, + training=True, + label_info=True, + velodyne=False, + calib=False, + pose=False, + extend_matrix=True, + num_worker=8, + relative_path=True, + with_imageshape=True, + max_sweeps=5) -> None: + self.path = path + self.training = training + self.label_info = label_info + self.velodyne = velodyne + self.calib = calib + self.pose = pose + self.extend_matrix = extend_matrix + self.num_worker = num_worker + self.relative_path = relative_path + self.with_imageshape = with_imageshape + self.max_sweeps = max_sweeps + + def gather_single(self, idx): + root_path = Path(self.path) + info = {} + pc_info = {'num_features': 6} + calib_info = {} + + image_info = {'image_idx': idx} + annotations = None + if self.velodyne: + pc_info['velodyne_path'] = get_velodyne_path( + idx, + self.path, + self.training, + self.relative_path, + use_prefix_id=True) + with open( + get_timestamp_path( + idx, + self.path, + self.training, + relative_path=False, + use_prefix_id=True)) as f: + info['timestamp'] = np.int64(f.read()) + image_info['image_path'] = get_image_path( + idx, + self.path, + self.training, + self.relative_path, + info_type='image_0', + file_tail='.jpg', + use_prefix_id=True) + if self.with_imageshape: + img_path = image_info['image_path'] + if self.relative_path: + img_path = str(root_path / img_path) + # io using PIL is significantly faster than skimage + w, h = Image.open(img_path).size + image_info['image_shape'] = np.array((h, w), dtype=np.int32) + if self.label_info: + label_path = get_label_path( + idx, + self.path, + self.training, + self.relative_path, + info_type='label_all', + use_prefix_id=True) + cam_sync_label_path = get_label_path( + idx, + self.path, + self.training, + 
self.relative_path, + info_type='cam_sync_label_all', + use_prefix_id=True) + if self.relative_path: + label_path = str(root_path / label_path) + cam_sync_label_path = str(root_path / cam_sync_label_path) + annotations = get_label_anno(label_path) + cam_sync_annotations = get_label_anno(cam_sync_label_path) + info['image'] = image_info + info['point_cloud'] = pc_info + if self.calib: + calib_path = get_calib_path( + idx, + self.path, + self.training, + relative_path=False, + use_prefix_id=True) + with open(calib_path, 'r') as f: + lines = f.readlines() + P0 = np.array([float(info) for info in lines[0].split(' ')[1:13] + ]).reshape([3, 4]) + P1 = np.array([float(info) for info in lines[1].split(' ')[1:13] + ]).reshape([3, 4]) + P2 = np.array([float(info) for info in lines[2].split(' ')[1:13] + ]).reshape([3, 4]) + P3 = np.array([float(info) for info in lines[3].split(' ')[1:13] + ]).reshape([3, 4]) + P4 = np.array([float(info) for info in lines[4].split(' ')[1:13] + ]).reshape([3, 4]) + if self.extend_matrix: + P0 = _extend_matrix(P0) + P1 = _extend_matrix(P1) + P2 = _extend_matrix(P2) + P3 = _extend_matrix(P3) + P4 = _extend_matrix(P4) + R0_rect = np.array([ + float(info) for info in lines[5].split(' ')[1:10] + ]).reshape([3, 3]) + if self.extend_matrix: + rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) + rect_4x4[3, 3] = 1. + rect_4x4[:3, :3] = R0_rect + else: + rect_4x4 = R0_rect + + # TODO: naming Tr_velo_to_cam or Tr_velo_to_cam0 + Tr_velo_to_cam = np.array([ + float(info) for info in lines[6].split(' ')[1:13] + ]).reshape([3, 4]) + Tr_velo_to_cam1 = np.array([ + float(info) for info in lines[7].split(' ')[1:13] + ]).reshape([3, 4]) + Tr_velo_to_cam2 = np.array([ + float(info) for info in lines[8].split(' ')[1:13] + ]).reshape([3, 4]) + Tr_velo_to_cam3 = np.array([ + float(info) for info in lines[9].split(' ')[1:13] + ]).reshape([3, 4]) + Tr_velo_to_cam4 = np.array([ + float(info) for info in lines[10].split(' ')[1:13] + ]).reshape([3, 4]) + if self.extend_matrix: + Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam) + Tr_velo_to_cam1 = _extend_matrix(Tr_velo_to_cam1) + Tr_velo_to_cam2 = _extend_matrix(Tr_velo_to_cam2) + Tr_velo_to_cam3 = _extend_matrix(Tr_velo_to_cam3) + Tr_velo_to_cam4 = _extend_matrix(Tr_velo_to_cam4) + calib_info['P0'] = P0 + calib_info['P1'] = P1 + calib_info['P2'] = P2 + calib_info['P3'] = P3 + calib_info['P4'] = P4 + calib_info['R0_rect'] = rect_4x4 + calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam + calib_info['Tr_velo_to_cam1'] = Tr_velo_to_cam1 + calib_info['Tr_velo_to_cam2'] = Tr_velo_to_cam2 + calib_info['Tr_velo_to_cam3'] = Tr_velo_to_cam3 + calib_info['Tr_velo_to_cam4'] = Tr_velo_to_cam4 + info['calib'] = calib_info + + if self.pose: + pose_path = get_pose_path( + idx, + self.path, + self.training, + relative_path=False, + use_prefix_id=True) + info['pose'] = np.loadtxt(pose_path) + + if annotations is not None: + info['annos'] = annotations + info['annos']['camera_id'] = info['annos'].pop('score') + add_difficulty_to_annos(info) + info['cam_sync_annos'] = cam_sync_annotations + # NOTE: the 2D labels do not have strict correspondence with + # the projected 2D lidar labels + # e.g.: the projected 2D labels can be in camera 2 + # while the most_visible_camera can have id 4 + info['cam_sync_annos']['camera_id'] = info['cam_sync_annos'].pop( + 'score') + + sweeps = [] + prev_idx = idx + while len(sweeps) < self.max_sweeps: + prev_info = {} + prev_idx -= 1 + prev_info['velodyne_path'] = get_velodyne_path( + prev_idx, + self.path, + self.training, + 
self.relative_path, + exist_check=False, + use_prefix_id=True) + if_prev_exists = osp.exists( + Path(self.path) / prev_info['velodyne_path']) + if if_prev_exists: + with open( + get_timestamp_path( + prev_idx, + self.path, + self.training, + relative_path=False, + use_prefix_id=True)) as f: + prev_info['timestamp'] = np.int64(f.read()) + prev_info['image_path'] = get_image_path( + prev_idx, + self.path, + self.training, + self.relative_path, + info_type='image_0', + file_tail='.jpg', + use_prefix_id=True) + prev_pose_path = get_pose_path( + prev_idx, + self.path, + self.training, + relative_path=False, + use_prefix_id=True) + prev_info['pose'] = np.loadtxt(prev_pose_path) + sweeps.append(prev_info) + else: + break + info['sweeps'] = sweeps + + return info + + def gather(self, image_ids): + if not isinstance(image_ids, list): + image_ids = list(range(image_ids)) + image_infos = mmengine.track_parallel_progress(self.gather_single, + image_ids, + self.num_worker) + return list(image_infos) + + +def kitti_anno_to_label_file(annos, folder): + folder = Path(folder) + for anno in annos: + image_idx = anno['metadata']['image_idx'] + label_lines = [] + for j in range(anno['bbox'].shape[0]): + label_dict = { + 'name': anno['name'][j], + 'alpha': anno['alpha'][j], + 'bbox': anno['bbox'][j], + 'location': anno['location'][j], + 'dimensions': anno['dimensions'][j], + 'rotation_y': anno['rotation_y'][j], + 'score': anno['score'][j], + } + label_line = kitti_result_line(label_dict) + label_lines.append(label_line) + label_file = folder / f'{get_image_index_str(image_idx)}.txt' + label_str = '\n'.join(label_lines) + with open(label_file, 'w') as f: + f.write(label_str) + + +def add_difficulty_to_annos(info): + min_height = [40, 25, + 25] # minimum height for evaluated groundtruth/detections + max_occlusion = [ + 0, 1, 2 + ] # maximum occlusion level of the groundtruth used for evaluation + max_trunc = [ + 0.15, 0.3, 0.5 + ] # maximum truncation level of the groundtruth used for evaluation + annos = info['annos'] + dims = annos['dimensions'] # lhw format + bbox = annos['bbox'] + height = bbox[:, 3] - bbox[:, 1] + occlusion = annos['occluded'] + truncation = annos['truncated'] + diff = [] + easy_mask = np.ones((len(dims), ), dtype=bool) + moderate_mask = np.ones((len(dims), ), dtype=bool) + hard_mask = np.ones((len(dims), ), dtype=bool) + i = 0 + for h, o, t in zip(height, occlusion, truncation): + if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]: + easy_mask[i] = False + if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]: + moderate_mask[i] = False + if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]: + hard_mask[i] = False + i += 1 + is_easy = easy_mask + is_moderate = np.logical_xor(easy_mask, moderate_mask) + is_hard = np.logical_xor(hard_mask, moderate_mask) + + for i in range(len(dims)): + if is_easy[i]: + diff.append(0) + elif is_moderate[i]: + diff.append(1) + elif is_hard[i]: + diff.append(2) + else: + diff.append(-1) + annos['difficulty'] = np.array(diff, np.int32) + return diff + + +def kitti_result_line(result_dict, precision=4): + prec_float = '{' + ':.{}f'.format(precision) + '}' + res_line = [] + all_field_default = OrderedDict([ + ('name', None), + ('truncated', -1), + ('occluded', -1), + ('alpha', -10), + ('bbox', None), + ('dimensions', [-1, -1, -1]), + ('location', [-1000, -1000, -1000]), + ('rotation_y', -10), + ('score', 0.0), + ]) + res_dict = [(key, None) for key, val in all_field_default.items()] + res_dict = OrderedDict(res_dict) + for 
key, val in result_dict.items(): + if all_field_default[key] is None and val is None: + raise ValueError('you must specify a value for {}'.format(key)) + res_dict[key] = val + + for key, val in res_dict.items(): + if key == 'name': + res_line.append(val) + elif key in ['truncated', 'alpha', 'rotation_y', 'score']: + if val is None: + res_line.append(str(all_field_default[key])) + else: + res_line.append(prec_float.format(val)) + elif key == 'occluded': + if val is None: + res_line.append(str(all_field_default[key])) + else: + res_line.append('{}'.format(val)) + elif key in ['bbox', 'dimensions', 'location']: + if val is None: + res_line += [str(v) for v in all_field_default[key]] + else: + res_line += [prec_float.format(v) for v in val] + else: + raise ValueError('unknown key. supported key:{}'.format( + res_dict.keys())) + return ' '.join(res_line) diff --git a/tools/dataset_converters/lyft_converter.py b/tools/dataset_converters/lyft_converter.py new file mode 100755 index 0000000..e9fd98a --- /dev/null +++ b/tools/dataset_converters/lyft_converter.py @@ -0,0 +1,273 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +from logging import warning +from os import path as osp + +import mmcv +import mmengine +import numpy as np +from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft +from pyquaternion import Quaternion + +from mmdet3d.datasets.convert_utils import LyftNameMapping +from .nuscenes_converter import (get_2d_boxes, get_available_scenes, + obtain_sensor2top) + +lyft_categories = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', + 'motorcycle', 'bicycle', 'pedestrian', 'animal') + + +def create_lyft_infos(root_path, + info_prefix, + version='v1.01-train', + max_sweeps=10): + """Create info file of lyft dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + root_path (str): Path of the data root. + info_prefix (str): Prefix of the info file to be generated. + version (str, optional): Version of the data. + Default: 'v1.01-train'. + max_sweeps (int, optional): Max number of sweeps. + Default: 10. + """ + lyft = Lyft( + data_path=osp.join(root_path, version), + json_path=osp.join(root_path, version, version), + verbose=True) + available_vers = ['v1.01-train', 'v1.01-test'] + assert version in available_vers + if version == 'v1.01-train': + train_scenes = mmengine.list_from_file('data/lyft/train.txt') + val_scenes = mmengine.list_from_file('data/lyft/val.txt') + elif version == 'v1.01-test': + train_scenes = mmengine.list_from_file('data/lyft/test.txt') + val_scenes = [] + else: + raise ValueError('unknown') + + # filter existing scenes. 
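+    # (a scene is kept only when the lidar file of its first sample can be
+    # found on disk; see get_available_scenes, reused from the nuScenes
+    # converter)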
+ available_scenes = get_available_scenes(lyft) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list( + filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in train_scenes + ]) + val_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in val_scenes + ]) + + test = 'test' in version + if test: + print(f'test scene: {len(train_scenes)}') + else: + print(f'train scene: {len(train_scenes)}, \ + val scene: {len(val_scenes)}') + train_lyft_infos, val_lyft_infos = _fill_trainval_infos( + lyft, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + + metadata = dict(version=version) + if test: + print(f'test sample: {len(train_lyft_infos)}') + data = dict(infos=train_lyft_infos, metadata=metadata) + info_name = f'{info_prefix}_infos_test' + info_path = osp.join(root_path, f'{info_name}.pkl') + mmengine.dump(data, info_path) + else: + print(f'train sample: {len(train_lyft_infos)}, \ + val sample: {len(val_lyft_infos)}') + data = dict(infos=train_lyft_infos, metadata=metadata) + train_info_name = f'{info_prefix}_infos_train' + info_path = osp.join(root_path, f'{train_info_name}.pkl') + mmengine.dump(data, info_path) + data['infos'] = val_lyft_infos + val_info_name = f'{info_prefix}_infos_val' + info_val_path = osp.join(root_path, f'{val_info_name}.pkl') + mmengine.dump(data, info_val_path) + + +def _fill_trainval_infos(lyft, + train_scenes, + val_scenes, + test=False, + max_sweeps=10): + """Generate the train/val infos from the raw data. + + Args: + lyft (:obj:`LyftDataset`): Dataset class in the Lyft dataset. + train_scenes (list[str]): Basic information of training scenes. + val_scenes (list[str]): Basic information of validation scenes. + test (bool, optional): Whether use the test mode. In the test mode, no + annotations can be accessed. Default: False. + max_sweeps (int, optional): Max number of sweeps. Default: 10. + + Returns: + tuple[list[dict]]: Information of training set and + validation set that will be saved to the info file. 
+ """ + train_lyft_infos = [] + val_lyft_infos = [] + + for sample in mmengine.track_iter_progress(lyft.sample): + lidar_token = sample['data']['LIDAR_TOP'] + sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP']) + cs_record = lyft.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = lyft.get('ego_pose', sd_rec['ego_pose_token']) + abs_lidar_path, boxes, _ = lyft.get_sample_data(lidar_token) + # nuScenes devkit returns more convenient relative paths while + # lyft devkit returns absolute paths + abs_lidar_path = str(abs_lidar_path) # absolute path + lidar_path = abs_lidar_path.split(f'{os.getcwd()}/')[-1] + # relative path + + mmengine.check_file_exist(lidar_path) + + info = { + 'lidar_path': lidar_path, + 'num_features': 5, + 'token': sample['token'], + 'sweeps': [], + 'cams': dict(), + 'lidar2ego_translation': cs_record['translation'], + 'lidar2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sample['timestamp'], + } + + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain 6 image's information per frame + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + for cam in camera_types: + cam_token = sample['data'][cam] + cam_path, _, cam_intrinsic = lyft.get_sample_data(cam_token) + cam_info = obtain_sensor2top(lyft, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + info['cams'].update({cam: cam_info}) + + # obtain sweeps for a single key-frame + sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP']) + sweeps = [] + while len(sweeps) < max_sweeps: + if not sd_rec['prev'] == '': + sweep = obtain_sensor2top(lyft, sd_rec['prev'], l2e_t, + l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + sweeps.append(sweep) + sd_rec = lyft.get('sample_data', sd_rec['prev']) + else: + break + info['sweeps'] = sweeps + # obtain annotation + if not test: + annotations = [ + lyft.get('sample_annotation', token) + for token in sample['anns'] + ] + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) + rots = np.array([b.orientation.yaw_pitch_roll[0] + for b in boxes]).reshape(-1, 1) + + names = [b.name for b in boxes] + for i in range(len(names)): + if names[i] in LyftNameMapping: + names[i] = LyftNameMapping[names[i]] + names = np.array(names) + + # we need to convert box size to + # the format of our lidar coordinate system + # which is x_size, y_size, z_size (corresponding to l, w, h) + gt_boxes = np.concatenate([locs, dims[:, [1, 0, 2]], rots], axis=1) + assert len(gt_boxes) == len( + annotations), f'{len(gt_boxes)}, {len(annotations)}' + info['gt_boxes'] = gt_boxes + info['gt_names'] = names + info['num_lidar_pts'] = np.array( + [a['num_lidar_pts'] for a in annotations]) + info['num_radar_pts'] = np.array( + [a['num_radar_pts'] for a in annotations]) + + if sample['scene_token'] in train_scenes: + train_lyft_infos.append(info) + else: + val_lyft_infos.append(info) + + return train_lyft_infos, val_lyft_infos + + +def export_2d_annotation(root_path, info_path, version): + """Export 2d annotation from the info file and raw data. 
+
+    Args:
+        root_path (str): Root path of the raw data.
+        info_path (str): Path of the info file.
+        version (str): Dataset version.
+    """
+    warning('DeprecationWarning: 2D annotations are not used on the '
+            'Lyft dataset. The function export_2d_annotation will be '
+            'deprecated.')
+    # get bbox annotations for camera
+    camera_types = [
+        'CAM_FRONT',
+        'CAM_FRONT_RIGHT',
+        'CAM_FRONT_LEFT',
+        'CAM_BACK',
+        'CAM_BACK_LEFT',
+        'CAM_BACK_RIGHT',
+    ]
+    lyft_infos = mmengine.load(info_path)['infos']
+    lyft = Lyft(
+        data_path=osp.join(root_path, version),
+        json_path=osp.join(root_path, version, version),
+        verbose=True)
+    # info_2d_list = []
+    cat2Ids = [
+        dict(id=lyft_categories.index(cat_name), name=cat_name)
+        for cat_name in lyft_categories
+    ]
+    coco_ann_id = 0
+    coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
+    for info in mmengine.track_iter_progress(lyft_infos):
+        for cam in camera_types:
+            cam_info = info['cams'][cam]
+            coco_infos = get_2d_boxes(
+                lyft,
+                cam_info['sample_data_token'],
+                visibilities=['', '1', '2', '3', '4'])
+            (height, width, _) = mmcv.imread(cam_info['data_path']).shape
+            coco_2d_dict['images'].append(
+                dict(
+                    file_name=cam_info['data_path'],
+                    id=cam_info['sample_data_token'],
+                    width=width,
+                    height=height))
+            for coco_info in coco_infos:
+                if coco_info is None:
+                    continue
+                # add an empty key for coco format
+                coco_info['segmentation'] = []
+                coco_info['id'] = coco_ann_id
+                coco_2d_dict['annotations'].append(coco_info)
+                coco_ann_id += 1
+    mmengine.dump(coco_2d_dict, f'{info_path[:-4]}.coco.json')
diff --git a/tools/dataset_converters/lyft_data_fixer.py b/tools/dataset_converters/lyft_data_fixer.py
new file mode 100755
index 0000000..5510351
--- /dev/null
+++ b/tools/dataset_converters/lyft_data_fixer.py
@@ -0,0 +1,39 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import os
+
+import numpy as np
+
+
+def fix_lyft(root_folder='./data/lyft', version='v1.01'):
+    # refer to https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000  # noqa
+    lidar_path = 'lidar/host-a011_lidar1_1233090652702363606.bin'
+    root_folder = os.path.join(root_folder, f'{version}-train')
+    lidar_path = os.path.join(root_folder, lidar_path)
+    assert os.path.isfile(lidar_path), f'Please download the complete Lyft ' \
+        f'dataset and make sure {lidar_path} is present.'
+    points = np.fromfile(lidar_path, dtype=np.float32, count=-1)
+    try:
+        points.reshape([-1, 5])
+        print(f'This fix is not required for version {version}.')
+    except ValueError:
+        new_points = np.array(list(points) + [100.0, 1.0], dtype='float32')
+        new_points.tofile(lidar_path)
+        print(f'Appended 100.0 and 1.0 to the end of {lidar_path}.')
+
+
+parser = argparse.ArgumentParser(description='Lyft dataset fixer arg parser')
+parser.add_argument(
+    '--root-folder',
+    type=str,
+    default='./data/lyft',
+    help='specify the root path of Lyft dataset')
+parser.add_argument(
+    '--version',
+    type=str,
+    default='v1.01',
+    help='specify Lyft dataset version')
+args = parser.parse_args()
+
+if __name__ == '__main__':
+    fix_lyft(root_folder=args.root_folder, version=args.version)
diff --git a/tools/dataset_converters/nuimage_converter.py b/tools/dataset_converters/nuimage_converter.py
new file mode 100755
index 0000000..9c1c60c
--- /dev/null
+++ b/tools/dataset_converters/nuimage_converter.py
@@ -0,0 +1,227 @@
+# Copyright (c) OpenMMLab. All rights reserved.
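+# Converts nuImages annotations into COCO-style json files and per-image
+# semantic masks. A typical invocation (the flags mirror the argparse options
+# defined below; the paths are only examples):
+#   python tools/dataset_converters/nuimage_converter.py \
+#       --data-root ./data/nuimages --version v1.0-mini \
+#       --out-dir ./data/nuimages/annotations/ --nproc 4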
+import argparse +import base64 +from os import path as osp + +import mmcv +import mmengine +import numpy as np +from nuimages import NuImages +from nuimages.utils.utils import mask_decode, name_to_index_mapping + +nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', + 'barrier') + +NAME_MAPPING = { + 'movable_object.barrier': 'barrier', + 'vehicle.bicycle': 'bicycle', + 'vehicle.bus.bendy': 'bus', + 'vehicle.bus.rigid': 'bus', + 'vehicle.car': 'car', + 'vehicle.construction': 'construction_vehicle', + 'vehicle.motorcycle': 'motorcycle', + 'human.pedestrian.adult': 'pedestrian', + 'human.pedestrian.child': 'pedestrian', + 'human.pedestrian.construction_worker': 'pedestrian', + 'human.pedestrian.police_officer': 'pedestrian', + 'movable_object.trafficcone': 'traffic_cone', + 'vehicle.trailer': 'trailer', + 'vehicle.truck': 'truck', +} + + +def parse_args(): + parser = argparse.ArgumentParser(description='Data converter arg parser') + parser.add_argument( + '--data-root', + type=str, + default='./data/nuimages', + help='specify the root path of dataset') + parser.add_argument( + '--version', + type=str, + nargs='+', + default=['v1.0-mini'], + required=False, + help='specify the dataset version') + parser.add_argument( + '--out-dir', + type=str, + default='./data/nuimages/annotations/', + required=False, + help='path to save the exported json') + parser.add_argument( + '--nproc', + type=int, + default=4, + required=False, + help='workers to process semantic masks') + parser.add_argument('--extra-tag', type=str, default='nuimages') + args = parser.parse_args() + return args + + +def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root): + """Get semantic segmentation map for an image. + + Args: + nuim (obj:`NuImages`): NuImages dataset object + img_info (dict): Meta information of img + + Returns: + np.ndarray: Semantic segmentation map of the image + """ + sd_token = img_info['token'] + image_id = img_info['id'] + name_to_index = name_to_index_mapping(nuim.category) + + # Get image data. + width, height = img_info['width'], img_info['height'] + semseg_mask = np.zeros((height, width)).astype('uint8') + + # Load stuff / surface regions. + surface_anns = [ + o for o in nuim.surface_ann if o['sample_data_token'] == sd_token + ] + + # Draw stuff / surface regions. + for ann in surface_anns: + # Get color and mask. + category_token = ann['category_token'] + category_name = nuim.get('category', category_token)['name'] + if ann['mask'] is None: + continue + mask = mask_decode(ann['mask']) + + # Draw mask for semantic segmentation. + semseg_mask[mask == 1] = name_to_index[category_name] + + # Load object instances. + object_anns = [ + o for o in nuim.object_ann if o['sample_data_token'] == sd_token + ] + + # Sort by token to ensure that objects always appear in the + # instance mask in the same order. + object_anns = sorted(object_anns, key=lambda k: k['token']) + + # Draw object instances. + # The 0 index is reserved for background; thus, the instances + # should start from index 1. + annotations = [] + for i, ann in enumerate(object_anns, start=1): + # Get color, box, mask and name. + category_token = ann['category_token'] + category_name = nuim.get('category', category_token)['name'] + if ann['mask'] is None: + continue + mask = mask_decode(ann['mask']) + + # Draw masks for semantic segmentation and instance segmentation. 
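+        # (besides updating the semantic map here, the instance mask is also
+        # exported below as a COCO-style RLE dict, `mask_anno`)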
+ semseg_mask[mask == 1] = name_to_index[category_name] + + if category_name in NAME_MAPPING: + cat_name = NAME_MAPPING[category_name] + cat_id = cat2id[cat_name] + + x_min, y_min, x_max, y_max = ann['bbox'] + # encode calibrated instance mask + mask_anno = dict() + mask_anno['counts'] = base64.b64decode( + ann['mask']['counts']).decode() + mask_anno['size'] = ann['mask']['size'] + + data_anno = dict( + image_id=image_id, + category_id=cat_id, + bbox=[x_min, y_min, x_max - x_min, y_max - y_min], + area=(x_max - x_min) * (y_max - y_min), + segmentation=mask_anno, + iscrowd=0) + annotations.append(data_anno) + + # after process, save semantic masks + img_filename = img_info['file_name'] + seg_filename = img_filename.replace('jpg', 'png') + seg_filename = osp.join(seg_root, seg_filename) + mmcv.imwrite(semseg_mask, seg_filename) + return annotations, np.max(semseg_mask) + + +def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc): + print('Process category information') + categories = [] + categories = [ + dict(id=nus_categories.index(cat_name), name=cat_name) + for cat_name in nus_categories + ] + cat2id = {k_v['name']: k_v['id'] for k_v in categories} + + images = [] + print('Process image meta information...') + for sample_info in mmengine.track_iter_progress(nuim.sample_data): + if sample_info['is_key_frame']: + img_idx = len(images) + images.append( + dict( + id=img_idx, + token=sample_info['token'], + file_name=sample_info['filename'], + width=sample_info['width'], + height=sample_info['height'])) + + seg_root = f'{out_dir}semantic_masks' + mmengine.mkdir_or_exist(seg_root) + mmengine.mkdir_or_exist(osp.join(data_root, 'calibrated')) + + global process_img_anno + + def process_img_anno(img_info): + single_img_annos, max_cls_id = get_img_annos(nuim, img_info, cat2id, + out_dir, data_root, + seg_root) + return single_img_annos, max_cls_id + + print('Process img annotations...') + if nproc > 1: + outputs = mmengine.track_parallel_progress( + process_img_anno, images, nproc=nproc) + else: + outputs = [] + for img_info in mmengine.track_iter_progress(images): + outputs.append(process_img_anno(img_info)) + + # Determine the index of object annotation + print('Process annotation information...') + annotations = [] + max_cls_ids = [] + for single_img_annos, max_cls_id in outputs: + max_cls_ids.append(max_cls_id) + for img_anno in single_img_annos: + img_anno.update(id=len(annotations)) + annotations.append(img_anno) + + max_cls_id = max(max_cls_ids) + print(f'Max ID of class in the semantic map: {max_cls_id}') + + coco_format_json = dict( + images=images, annotations=annotations, categories=categories) + + mmengine.mkdir_or_exist(out_dir) + out_file = osp.join(out_dir, f'{extra_tag}_{version}.json') + print(f'Annotation dumped to {out_file}') + mmengine.dump(coco_format_json, out_file) + + +def main(): + args = parse_args() + for version in args.version: + nuim = NuImages( + dataroot=args.data_root, version=version, verbose=True, lazy=True) + export_nuim_to_coco(nuim, args.data_root, args.out_dir, args.extra_tag, + version, args.nproc) + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/nuscenes_converter.py b/tools/dataset_converters/nuscenes_converter.py new file mode 100755 index 0000000..5c76239 --- /dev/null +++ b/tools/dataset_converters/nuscenes_converter.py @@ -0,0 +1,630 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
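+# Builds the nuScenes info .pkl files (create_nuscenes_infos) and exports
+# COCO-style 2D / mono3D annotations (export_2d_annotation, get_2d_boxes).
+# This module is typically driven by a higher-level data preparation script
+# rather than executed on its own.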
+import os +from collections import OrderedDict +from os import path as osp +from typing import List, Tuple, Union + +import mmcv +import mmengine +import numpy as np +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from pyquaternion import Quaternion +from shapely.geometry import MultiPoint, box + +from mmdet3d.datasets.convert_utils import NuScenesNameMapping +from mmdet3d.structures import points_cam2img + +nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', + 'barrier') + +nus_attributes = ('cycle.with_rider', 'cycle.without_rider', + 'pedestrian.moving', 'pedestrian.standing', + 'pedestrian.sitting_lying_down', 'vehicle.moving', + 'vehicle.parked', 'vehicle.stopped', 'None') + + +def create_nuscenes_infos(root_path, + info_prefix, + version='v1.0-trainval', + max_sweeps=10): + """Create info file of nuscene dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + root_path (str): Path of the data root. + info_prefix (str): Prefix of the info file to be generated. + version (str, optional): Version of the data. + Default: 'v1.0-trainval'. + max_sweeps (int, optional): Max number of sweeps. + Default: 10. + """ + from nuscenes.nuscenes import NuScenes + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + from nuscenes.utils import splits + available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + assert version in available_vers + if version == 'v1.0-trainval': + train_scenes = splits.train + val_scenes = splits.val + elif version == 'v1.0-test': + train_scenes = splits.test + val_scenes = [] + elif version == 'v1.0-mini': + train_scenes = splits.mini_train + val_scenes = splits.mini_val + else: + raise ValueError('unknown') + + # filter existing scenes. + available_scenes = get_available_scenes(nusc) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list( + filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in train_scenes + ]) + val_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in val_scenes + ]) + + test = 'test' in version + if test: + print('test scene: {}'.format(len(train_scenes))) + else: + print('train scene: {}, val scene: {}'.format( + len(train_scenes), len(val_scenes))) + train_nusc_infos, val_nusc_infos = _fill_trainval_infos( + nusc, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + + metadata = dict(version=version) + if test: + print('test sample: {}'.format(len(train_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(root_path, + '{}_infos_test.pkl'.format(info_prefix)) + mmengine.dump(data, info_path) + else: + print('train sample: {}, val sample: {}'.format( + len(train_nusc_infos), len(val_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(root_path, + '{}_infos_train.pkl'.format(info_prefix)) + mmengine.dump(data, info_path) + data['infos'] = val_nusc_infos + info_val_path = osp.join(root_path, + '{}_infos_val.pkl'.format(info_prefix)) + mmengine.dump(data, info_val_path) + + +def get_available_scenes(nusc): + """Get available scenes from the input nuscenes class. 
+ + Given the raw data, get the information of available scenes for + further info generation. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + + Returns: + available_scenes (list[dict]): List of basic information for the + available scenes. + """ + available_scenes = [] + print('total scene num: {}'.format(len(nusc.scene))) + for scene in nusc.scene: + scene_token = scene['token'] + scene_rec = nusc.get('scene', scene_token) + sample_rec = nusc.get('sample', scene_rec['first_sample_token']) + sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + has_more_frames = True + scene_not_exist = False + while has_more_frames: + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path = str(lidar_path) + if os.getcwd() in lidar_path: + # path from lyftdataset is absolute path + lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + # relative path + if not mmengine.is_filepath(lidar_path): + scene_not_exist = True + break + else: + break + if scene_not_exist: + continue + available_scenes.append(scene) + print('exist scene num: {}'.format(len(available_scenes))) + return available_scenes + + +def _fill_trainval_infos(nusc, + train_scenes, + val_scenes, + test=False, + max_sweeps=10): + """Generate the train/val infos from the raw data. + + Args: + nusc (:obj:`NuScenes`): Dataset class in the nuScenes dataset. + train_scenes (list[str]): Basic information of training scenes. + val_scenes (list[str]): Basic information of validation scenes. + test (bool, optional): Whether use the test mode. In test mode, no + annotations can be accessed. Default: False. + max_sweeps (int, optional): Max number of sweeps. Default: 10. + + Returns: + tuple[list[dict]]: Information of training set and validation set + that will be saved to the info file. 
+ """ + train_nusc_infos = [] + val_nusc_infos = [] + + for sample in mmengine.track_iter_progress(nusc.sample): + lidar_token = sample['data']['LIDAR_TOP'] + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) + + mmengine.check_file_exist(lidar_path) + + info = { + 'lidar_path': lidar_path, + 'num_features': 5, + 'token': sample['token'], + 'sweeps': [], + 'cams': dict(), + 'lidar2ego_translation': cs_record['translation'], + 'lidar2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sample['timestamp'], + } + + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain 6 image's information per frame + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + for cam in camera_types: + cam_token = sample['data'][cam] + cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) + cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + info['cams'].update({cam: cam_info}) + + # obtain sweeps for a single key-frame + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sweeps = [] + while len(sweeps) < max_sweeps: + if not sd_rec['prev'] == '': + sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, + l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + sweeps.append(sweep) + sd_rec = nusc.get('sample_data', sd_rec['prev']) + else: + break + info['sweeps'] = sweeps + # obtain annotation + if not test: + annotations = [ + nusc.get('sample_annotation', token) + for token in sample['anns'] + ] + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) + rots = np.array([b.orientation.yaw_pitch_roll[0] + for b in boxes]).reshape(-1, 1) + velocity = np.array( + [nusc.box_velocity(token)[:2] for token in sample['anns']]) + valid_flag = np.array( + [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 + for anno in annotations], + dtype=bool).reshape(-1) + # convert velo from global to lidar + for i in range(len(boxes)): + velo = np.array([*velocity[i], 0.0]) + velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( + l2e_r_mat).T + velocity[i] = velo[:2] + + names = [b.name for b in boxes] + for i in range(len(names)): + if names[i] in NuScenesNameMapping: + names[i] = NuScenesNameMapping[names[i]] + names = np.array(names) + # we need to convert box size to + # the format of our lidar coordinate system + # which is x_size, y_size, z_size (corresponding to l, w, h) + gt_boxes = np.concatenate([locs, dims[:, [1, 0, 2]], rots], axis=1) + assert len(gt_boxes) == len( + annotations), f'{len(gt_boxes)}, {len(annotations)}' + info['gt_boxes'] = gt_boxes + info['gt_names'] = names + info['gt_velocity'] = velocity.reshape(-1, 2) + info['num_lidar_pts'] = np.array( + [a['num_lidar_pts'] for a in annotations]) + info['num_radar_pts'] = np.array( + [a['num_radar_pts'] for a in annotations]) + info['valid_flag'] = valid_flag + + if sample['scene_token'] in train_scenes: + 
train_nusc_infos.append(info) + else: + val_nusc_infos.append(info) + + return train_nusc_infos, val_nusc_infos + + +def obtain_sensor2top(nusc, + sensor_token, + l2e_t, + l2e_r_mat, + e2g_t, + e2g_r_mat, + sensor_type='lidar'): + """Obtain the info with RT matric from general sensor to Top LiDAR. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + sensor_token (str): Sample data token corresponding to the + specific sensor type. + l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3). + l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego + in shape (3, 3). + e2g_t (np.ndarray): Translation from ego to global in shape (1, 3). + e2g_r_mat (np.ndarray): Rotation matrix from ego to global + in shape (3, 3). + sensor_type (str, optional): Sensor to calibrate. Default: 'lidar'. + + Returns: + sweep (dict): Sweep information after transformation. + """ + sd_rec = nusc.get('sample_data', sensor_token) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + if os.getcwd() in data_path: # path from lyftdataset is absolute path + data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + sweep = { + 'data_path': data_path, + 'type': sensor_type, + 'sample_data_token': sd_rec['token'], + 'sensor2ego_translation': cs_record['translation'], + 'sensor2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sd_rec['timestamp'] + } + l2e_r_s = sweep['sensor2ego_rotation'] + l2e_t_s = sweep['sensor2ego_translation'] + e2g_r_s = sweep['ego2global_rotation'] + e2g_t_s = sweep['ego2global_translation'] + + # obtain the RT from sensor to Top LiDAR + # sweep->ego->global->ego'->lidar + l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix + e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix + R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + l2e_t @ np.linalg.inv(l2e_r_mat).T + sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T + sweep['sensor2lidar_translation'] = T + return sweep + + +def export_2d_annotation(root_path, info_path, version, mono3d=True): + """Export 2d annotation from the info file and raw data. + + Args: + root_path (str): Root path of the raw data. + info_path (str): Path of the info file. + version (str): Dataset version. + mono3d (bool, optional): Whether to export mono3d annotation. + Default: True. 
+ """ + # get bbox annotations for camera + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + nusc_infos = mmengine.load(info_path)['infos'] + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + # info_2d_list = [] + cat2Ids = [ + dict(id=nus_categories.index(cat_name), name=cat_name) + for cat_name in nus_categories + ] + coco_ann_id = 0 + coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) + for info in mmengine.track_iter_progress(nusc_infos): + for cam in camera_types: + cam_info = info['cams'][cam] + coco_infos = get_2d_boxes( + nusc, + cam_info['sample_data_token'], + visibilities=['', '1', '2', '3', '4'], + mono3d=mono3d) + (height, width, _) = mmcv.imread(cam_info['data_path']).shape + coco_2d_dict['images'].append( + dict( + file_name=cam_info['data_path'].split('data/nuscenes/') + [-1], + id=cam_info['sample_data_token'], + token=info['token'], + cam2ego_rotation=cam_info['sensor2ego_rotation'], + cam2ego_translation=cam_info['sensor2ego_translation'], + ego2global_rotation=info['ego2global_rotation'], + ego2global_translation=info['ego2global_translation'], + cam_intrinsic=cam_info['cam_intrinsic'], + width=width, + height=height)) + for coco_info in coco_infos: + if coco_info is None: + continue + # add an empty key for coco format + coco_info['segmentation'] = [] + coco_info['id'] = coco_ann_id + coco_2d_dict['annotations'].append(coco_info) + coco_ann_id += 1 + if mono3d: + json_prefix = f'{info_path[:-4]}_mono3d' + else: + json_prefix = f'{info_path[:-4]}' + mmengine.dump(coco_2d_dict, f'{json_prefix}.coco.json') + + +def get_2d_boxes(nusc, + sample_data_token: str, + visibilities: List[str], + mono3d=True): + """Get the 2D annotation records for a given `sample_data_token`. + + Args: + sample_data_token (str): Sample data token belonging to a camera + keyframe. + visibilities (list[str]): Visibility filter. + mono3d (bool): Whether to get boxes with mono3d annotation. + + Return: + list[dict]: List of 2D annotation record that belongs to the input + `sample_data_token`. + """ + + # Get the sample data and the sample corresponding to that sample data. + sd_rec = nusc.get('sample_data', sample_data_token) + + assert sd_rec[ + 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ + ' for camera sample_data!' + if not sd_rec['is_key_frame']: + raise ValueError( + 'The 2D re-projections are available only for keyframes.') + + s_rec = nusc.get('sample', sd_rec['sample_token']) + + # Get the calibrated sensor and ego pose + # record to get the transformation matrices. + cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) + pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) + camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + + # Get all the annotation with the specified visibilties. + ann_recs = [ + nusc.get('sample_annotation', token) for token in s_rec['anns'] + ] + ann_recs = [ + ann_rec for ann_rec in ann_recs + if (ann_rec['visibility_token'] in visibilities) + ] + + repro_recs = [] + + for ann_rec in ann_recs: + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = ann_rec['token'] + ann_rec['sample_data_token'] = sample_data_token + + # Get the box in global coordinates. + box = nusc.get_box(ann_rec['token']) + + # Move them to the ego-pose frame. 
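+        # (global -> ego: translate by the negative ego translation, then
+        # apply the inverse ego rotation; the next block repeats the same
+        # pattern to go from ego to the camera frame)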
+ box.translate(-np.array(pose_rec['translation'])) + box.rotate(Quaternion(pose_rec['rotation']).inverse) + + # Move them to the calibrated sensor frame. + box.translate(-np.array(cs_rec['translation'])) + box.rotate(Quaternion(cs_rec['rotation']).inverse) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box.corners() + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords(corner_coords) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. + repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + sample_data_token, sd_rec['filename']) + + # If mono3d=True, add 3D annotations in camera coordinates + if mono3d and (repro_rec is not None): + loc = box.center.tolist() + + dim = box.wlh + dim[[0, 1, 2]] = dim[[1, 2, 0]] # convert wlh to our lhw + dim = dim.tolist() + + rot = box.orientation.yaw_pitch_roll[0] + rot = [-rot] # convert the rot to our cam coordinate + + global_velo2d = nusc.box_velocity(box.token)[:2] + global_velo3d = np.array([*global_velo2d, 0.0]) + e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix + c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix + cam_velo3d = global_velo3d @ np.linalg.inv( + e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + velo = cam_velo3d[0::2].tolist() + + repro_rec['bbox_cam3d'] = loc + dim + rot + repro_rec['velo_cam3d'] = velo + + center3d = np.array(loc).reshape([1, 3]) + center2d = points_cam2img( + center3d, camera_intrinsic, with_depth=True) + repro_rec['center2d'] = center2d.squeeze().tolist() + # normalized center2D + depth + # if samples with depth < 0 will be removed + if repro_rec['center2d'][2] <= 0: + continue + + ann_token = nusc.get('sample_annotation', + box.token)['attribute_tokens'] + if len(ann_token) == 0: + attr_name = 'None' + else: + attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_id = nus_attributes.index(attr_name) + repro_rec['attribute_name'] = attr_name + repro_rec['attribute_id'] = attr_id + + repro_recs.append(repro_rec) + + return repro_recs + + +def post_process_coords( + corner_coords: List, imsize: Tuple[int, int] = (1600, 900) +) -> Union[Tuple[float, float, float, float], None]: + """Get the intersection of the convex hull of the reprojected bbox corners + and the image canvas, return None if no intersection. + + Args: + corner_coords (list[int]): Corner coordinates of reprojected + bounding box. + imsize (tuple[int]): Size of the image canvas. + + Return: + tuple [float]: Intersection of the convex hull of the 2D box + corners and the image canvas. 
+ """ + polygon_from_2d_box = MultiPoint(corner_coords).convex_hull + img_canvas = box(0, 0, imsize[0], imsize[1]) + + if polygon_from_2d_box.intersects(img_canvas): + img_intersection = polygon_from_2d_box.intersection(img_canvas) + intersection_coords = np.array( + [coord for coord in img_intersection.exterior.coords]) + + min_x = min(intersection_coords[:, 0]) + min_y = min(intersection_coords[:, 1]) + max_x = max(intersection_coords[:, 0]) + max_y = max(intersection_coords[:, 1]) + + return min_x, min_y, max_x, max_y + else: + return None + + +def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, + sample_data_token: str, filename: str) -> OrderedDict: + """Generate one 2D annotation record given various information on top of + the 2D bounding box coordinates. + + Args: + ann_rec (dict): Original 3d annotation record. + x1 (float): Minimum value of the x coordinate. + y1 (float): Minimum value of the y coordinate. + x2 (float): Maximum value of the x coordinate. + y2 (float): Maximum value of the y coordinate. + sample_data_token (str): Sample data token. + filename (str):The corresponding image file where the annotation + is present. + + Returns: + dict: A sample 2D annotation record. + - file_name (str): file name + - image_id (str): sample data token + - area (float): 2d box area + - category_name (str): category name + - category_id (int): category id + - bbox (list[float]): left x, top y, dx, dy of 2d box + - iscrowd (int): whether the area is crowd + """ + repro_rec = OrderedDict() + repro_rec['sample_data_token'] = sample_data_token + coco_rec = dict() + + relevant_keys = [ + 'attribute_tokens', + 'category_name', + 'instance_token', + 'next', + 'num_lidar_pts', + 'num_radar_pts', + 'prev', + 'sample_annotation_token', + 'sample_data_token', + 'visibility_token', + ] + + for key, value in ann_rec.items(): + if key in relevant_keys: + repro_rec[key] = value + + repro_rec['bbox_corners'] = [x1, y1, x2, y2] + repro_rec['filename'] = filename + + coco_rec['file_name'] = filename + coco_rec['image_id'] = sample_data_token + coco_rec['area'] = (y2 - y1) * (x2 - x1) + + if repro_rec['category_name'] not in NuScenesNameMapping: + return None + cat_name = NuScenesNameMapping[repro_rec['category_name']] + coco_rec['category_name'] = cat_name + coco_rec['category_id'] = nus_categories.index(cat_name) + coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] + coco_rec['iscrowd'] = 0 + + return coco_rec diff --git a/tools/dataset_converters/s3dis_data_utils.py b/tools/dataset_converters/s3dis_data_utils.py new file mode 100755 index 0000000..d7a76a9 --- /dev/null +++ b/tools/dataset_converters/s3dis_data_utils.py @@ -0,0 +1,247 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +from concurrent import futures as futures +from os import path as osp + +import mmengine +import numpy as np + + +class S3DISData(object): + """S3DIS data. + + Generate s3dis infos for s3dis_converter. + + Args: + root_path (str): Root path of the raw data. + split (str, optional): Set split type of the data. Default: 'Area_1'. + """ + + def __init__(self, root_path, split='Area_1'): + self.root_dir = root_path + self.split = split + self.data_dir = osp.join(root_path, + 'Stanford3dDataset_v1.2_Aligned_Version') + + # Following `GSDN `_, use 5 furniture + # classes for detection: table, chair, sofa, bookcase, board. 
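+        # (the ids index the 13-class S3DIS semantic label order, i.e.
+        #  7=table, 8=chair, 9=sofa, 10=bookcase, 11=board)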
+ self.cat_ids = np.array([7, 8, 9, 10, 11]) + self.cat_ids2class = { + cat_id: i + for i, cat_id in enumerate(list(self.cat_ids)) + } + + assert split in [ + 'Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6' + ] + self.sample_id_list = os.listdir(osp.join(self.data_dir, + split)) # conferenceRoom_1 + for sample_id in self.sample_id_list: + if os.path.isfile(osp.join(self.data_dir, split, sample_id)): + self.sample_id_list.remove(sample_id) + + def __len__(self): + return len(self.sample_id_list) + + def get_infos(self, num_workers=4, has_label=True, sample_id_list=None): + """Get data infos. + + This method gets information from the raw data. + + Args: + num_workers (int, optional): Number of threads to be used. + Default: 4. + has_label (bool, optional): Whether the data has label. + Default: True. + sample_id_list (list[int], optional): Index list of the sample. + Default: None. + + Returns: + infos (list[dict]): Information of the raw data. + """ + + def process_single_scene(sample_idx): + print(f'{self.split} sample_idx: {sample_idx}') + info = dict() + pc_info = { + 'num_features': 6, + 'lidar_idx': f'{self.split}_{sample_idx}' + } + info['point_cloud'] = pc_info + pts_filename = osp.join(self.root_dir, 's3dis_data', + f'{self.split}_{sample_idx}_point.npy') + pts_instance_mask_path = osp.join( + self.root_dir, 's3dis_data', + f'{self.split}_{sample_idx}_ins_label.npy') + pts_semantic_mask_path = osp.join( + self.root_dir, 's3dis_data', + f'{self.split}_{sample_idx}_sem_label.npy') + + points = np.load(pts_filename).astype(np.float32) + pts_instance_mask = np.load(pts_instance_mask_path).astype( + np.int64) + pts_semantic_mask = np.load(pts_semantic_mask_path).astype( + np.int64) + + mmengine.mkdir_or_exist(osp.join(self.root_dir, 'points')) + mmengine.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask')) + mmengine.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask')) + + points.tofile( + osp.join(self.root_dir, 'points', + f'{self.split}_{sample_idx}.bin')) + pts_instance_mask.tofile( + osp.join(self.root_dir, 'instance_mask', + f'{self.split}_{sample_idx}.bin')) + pts_semantic_mask.tofile( + osp.join(self.root_dir, 'semantic_mask', + f'{self.split}_{sample_idx}.bin')) + + info['pts_path'] = osp.join('points', + f'{self.split}_{sample_idx}.bin') + info['pts_instance_mask_path'] = osp.join( + 'instance_mask', f'{self.split}_{sample_idx}.bin') + info['pts_semantic_mask_path'] = osp.join( + 'semantic_mask', f'{self.split}_{sample_idx}.bin') + info['annos'] = self.get_bboxes(points, pts_instance_mask, + pts_semantic_mask) + + return info + + sample_id_list = sample_id_list if sample_id_list is not None \ + else self.sample_id_list + with futures.ThreadPoolExecutor(num_workers) as executor: + infos = executor.map(process_single_scene, sample_id_list) + return list(infos) + + def get_bboxes(self, points, pts_instance_mask, pts_semantic_mask): + """Convert instance masks to axis-aligned bounding boxes. + + Args: + points (np.array): Scene points of shape (n, 6). + pts_instance_mask (np.ndarray): Instance labels of shape (n,). + pts_semantic_mask (np.ndarray): Semantic labels of shape (n,). + + Returns: + dict: A dict containing detection infos with following keys: + + - gt_boxes_upright_depth (np.ndarray): Bounding boxes + of shape (n, 6) + - class (np.ndarray): Box labels of shape (n,) + - gt_num (int): Number of boxes. 
+ """ + bboxes, labels = [], [] + for i in range(1, pts_instance_mask.max() + 1): + ids = pts_instance_mask == i + # check if all instance points have same semantic label + assert pts_semantic_mask[ids].min() == pts_semantic_mask[ids].max() + label = pts_semantic_mask[ids][0] + # keep only furniture objects + if label in self.cat_ids2class: + labels.append(self.cat_ids2class[pts_semantic_mask[ids][0]]) + pts = points[:, :3][ids] + min_pts = pts.min(axis=0) + max_pts = pts.max(axis=0) + locations = (min_pts + max_pts) / 2 + dimensions = max_pts - min_pts + bboxes.append(np.concatenate((locations, dimensions))) + annotation = dict() + # follow ScanNet and SUN RGB-D keys + annotation['gt_boxes_upright_depth'] = np.array(bboxes) + annotation['class'] = np.array(labels) + annotation['gt_num'] = len(labels) + return annotation + + +class S3DISSegData(object): + """S3DIS dataset used to generate infos for semantic segmentation task. + + Args: + data_root (str): Root path of the raw data. + ann_file (str): The generated scannet infos. + split (str, optional): Set split type of the data. Default: 'train'. + num_points (int, optional): Number of points in each data input. + Default: 8192. + label_weight_func (function, optional): Function to compute the + label weight. Default: None. + """ + + def __init__(self, + data_root, + ann_file, + split='Area_1', + num_points=4096, + label_weight_func=None): + self.data_root = data_root + self.data_infos = mmengine.load(ann_file) + self.split = split + self.num_points = num_points + + self.all_ids = np.arange(13) # all possible ids + self.cat_ids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12]) # used for seg task + self.ignore_index = len(self.cat_ids) + + self.cat_id2class = np.ones( + (self.all_ids.shape[0], ), dtype=np.int64) * self.ignore_index + for i, cat_id in enumerate(self.cat_ids): + self.cat_id2class[cat_id] = i + + # label weighting function is taken from + # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 + self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \ + label_weight_func is None else label_weight_func + + def get_seg_infos(self): + scene_idxs, label_weight = self.get_scene_idxs_and_label_weight() + save_folder = osp.join(self.data_root, 'seg_info') + mmengine.mkdir_or_exist(save_folder) + np.save( + osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'), + scene_idxs) + np.save( + osp.join(save_folder, f'{self.split}_label_weight.npy'), + label_weight) + print(f'{self.split} resampled scene index and label weight saved') + + def _convert_to_label(self, mask): + """Convert class_id in loaded segmentation mask to label.""" + if isinstance(mask, str): + if mask.endswith('npy'): + mask = np.load(mask) + else: + mask = np.fromfile(mask, dtype=np.int64) + label = self.cat_id2class[mask] + return label + + def get_scene_idxs_and_label_weight(self): + """Compute scene_idxs for data sampling and label weight for loss + calculation. + + We sample more times for scenes with more points. Label_weight is + inversely proportional to number of class points. 
+ """ + num_classes = len(self.cat_ids) + num_point_all = [] + label_weight = np.zeros((num_classes + 1, )) # ignore_index + for data_info in self.data_infos: + label = self._convert_to_label( + osp.join(self.data_root, data_info['pts_semantic_mask_path'])) + num_point_all.append(label.shape[0]) + class_count, _ = np.histogram(label, range(num_classes + 2)) + label_weight += class_count + + # repeat scene_idx for num_scene_point // num_sample_point times + sample_prob = np.array(num_point_all) / float(np.sum(num_point_all)) + num_iter = int(np.sum(num_point_all) / float(self.num_points)) + scene_idxs = [] + for idx in range(len(self.data_infos)): + scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter))) + scene_idxs = np.array(scene_idxs).astype(np.int32) + + # calculate label weight, adopted from PointNet++ + label_weight = label_weight[:-1].astype(np.float32) + label_weight = label_weight / label_weight.sum() + label_weight = self.label_weight_func(label_weight).astype(np.float32) + + return scene_idxs, label_weight diff --git a/tools/dataset_converters/scannet_data_utils.py b/tools/dataset_converters/scannet_data_utils.py new file mode 100755 index 0000000..d03c220 --- /dev/null +++ b/tools/dataset_converters/scannet_data_utils.py @@ -0,0 +1,299 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +from concurrent import futures as futures +from os import path as osp + +import mmengine +import numpy as np + + +class ScanNetData(object): + """ScanNet data. + + Generate scannet infos for scannet_converter. + + Args: + root_path (str): Root path of the raw data. + split (str, optional): Set split type of the data. Default: 'train'. + """ + + def __init__(self, root_path, split='train'): + self.root_dir = root_path + self.split = split + self.split_dir = osp.join(root_path) + self.classes = [ + 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin' + ] + self.cat2label = {cat: self.classes.index(cat) for cat in self.classes} + self.label2cat = {self.cat2label[t]: t for t in self.cat2label} + self.cat_ids = np.array( + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) + self.cat_ids2class = { + nyu40id: i + for i, nyu40id in enumerate(list(self.cat_ids)) + } + assert split in ['train', 'val', 'test'] + split_file = osp.join(self.root_dir, 'meta_data', + f'scannetv2_{split}.txt') + mmengine.check_file_exist(split_file) + self.sample_id_list = mmengine.list_from_file(split_file) + self.test_mode = (split == 'test') + + def __len__(self): + return len(self.sample_id_list) + + def get_aligned_box_label(self, idx): + box_file = osp.join(self.root_dir, 'scannet_instance_data', + f'{idx}_aligned_bbox.npy') + mmengine.check_file_exist(box_file) + return np.load(box_file) + + def get_unaligned_box_label(self, idx): + box_file = osp.join(self.root_dir, 'scannet_instance_data', + f'{idx}_unaligned_bbox.npy') + mmengine.check_file_exist(box_file) + return np.load(box_file) + + def get_axis_align_matrix(self, idx): + matrix_file = osp.join(self.root_dir, 'scannet_instance_data', + f'{idx}_axis_align_matrix.npy') + mmengine.check_file_exist(matrix_file) + return np.load(matrix_file) + + def get_images(self, idx): + paths = [] + path = osp.join(self.root_dir, 'posed_images', idx) + for file in sorted(os.listdir(path)): + if file.endswith('.jpg'): + paths.append(osp.join('posed_images', idx, file)) + return paths + + def 
get_extrinsics(self, idx): + extrinsics = [] + path = osp.join(self.root_dir, 'posed_images', idx) + for file in sorted(os.listdir(path)): + if file.endswith('.txt') and not file == 'intrinsic.txt': + extrinsics.append(np.loadtxt(osp.join(path, file))) + return extrinsics + + def get_intrinsics(self, idx): + matrix_file = osp.join(self.root_dir, 'posed_images', idx, + 'intrinsic.txt') + mmengine.check_file_exist(matrix_file) + return np.loadtxt(matrix_file) + + def get_infos(self, num_workers=4, has_label=True, sample_id_list=None): + """Get data infos. + + This method gets information from the raw data. + + Args: + num_workers (int, optional): Number of threads to be used. + Default: 4. + has_label (bool, optional): Whether the data has label. + Default: True. + sample_id_list (list[int], optional): Index list of the sample. + Default: None. + + Returns: + infos (list[dict]): Information of the raw data. + """ + + def process_single_scene(sample_idx): + print(f'{self.split} sample_idx: {sample_idx}') + info = dict() + pc_info = {'num_features': 6, 'lidar_idx': sample_idx} + info['point_cloud'] = pc_info + pts_filename = osp.join(self.root_dir, 'scannet_instance_data', + f'{sample_idx}_vert.npy') + points = np.load(pts_filename) + mmengine.mkdir_or_exist(osp.join(self.root_dir, 'points')) + points.tofile( + osp.join(self.root_dir, 'points', f'{sample_idx}.bin')) + info['pts_path'] = osp.join('points', f'{sample_idx}.bin') + + # update with RGB image paths if exist + if os.path.exists(osp.join(self.root_dir, 'posed_images')): + info['intrinsics'] = self.get_intrinsics(sample_idx) + all_extrinsics = self.get_extrinsics(sample_idx) + all_img_paths = self.get_images(sample_idx) + # some poses in ScanNet are invalid + extrinsics, img_paths = [], [] + for extrinsic, img_path in zip(all_extrinsics, all_img_paths): + if np.all(np.isfinite(extrinsic)): + img_paths.append(img_path) + extrinsics.append(extrinsic) + info['extrinsics'] = extrinsics + info['img_paths'] = img_paths + + if not self.test_mode: + pts_instance_mask_path = osp.join( + self.root_dir, 'scannet_instance_data', + f'{sample_idx}_ins_label.npy') + pts_semantic_mask_path = osp.join( + self.root_dir, 'scannet_instance_data', + f'{sample_idx}_sem_label.npy') + + pts_instance_mask = np.load(pts_instance_mask_path).astype( + np.int64) + pts_semantic_mask = np.load(pts_semantic_mask_path).astype( + np.int64) + + mmengine.mkdir_or_exist( + osp.join(self.root_dir, 'instance_mask')) + mmengine.mkdir_or_exist( + osp.join(self.root_dir, 'semantic_mask')) + + pts_instance_mask.tofile( + osp.join(self.root_dir, 'instance_mask', + f'{sample_idx}.bin')) + pts_semantic_mask.tofile( + osp.join(self.root_dir, 'semantic_mask', + f'{sample_idx}.bin')) + + info['pts_instance_mask_path'] = osp.join( + 'instance_mask', f'{sample_idx}.bin') + info['pts_semantic_mask_path'] = osp.join( + 'semantic_mask', f'{sample_idx}.bin') + + if has_label: + annotations = {} + # box is of shape [k, 6 + class] + aligned_box_label = self.get_aligned_box_label(sample_idx) + unaligned_box_label = self.get_unaligned_box_label(sample_idx) + annotations['gt_num'] = aligned_box_label.shape[0] + if annotations['gt_num'] != 0: + aligned_box = aligned_box_label[:, :-1] # k, 6 + unaligned_box = unaligned_box_label[:, :-1] + classes = aligned_box_label[:, -1] # k + annotations['name'] = np.array([ + self.label2cat[self.cat_ids2class[classes[i]]] + for i in range(annotations['gt_num']) + ]) + # default names are given to aligned bbox for compatibility + # we also save unaligned bbox 
info with marked names + annotations['location'] = aligned_box[:, :3] + annotations['dimensions'] = aligned_box[:, 3:6] + annotations['gt_boxes_upright_depth'] = aligned_box + annotations['unaligned_location'] = unaligned_box[:, :3] + annotations['unaligned_dimensions'] = unaligned_box[:, 3:6] + annotations[ + 'unaligned_gt_boxes_upright_depth'] = unaligned_box + annotations['index'] = np.arange( + annotations['gt_num'], dtype=np.int32) + annotations['class'] = np.array([ + self.cat_ids2class[classes[i]] + for i in range(annotations['gt_num']) + ]) + axis_align_matrix = self.get_axis_align_matrix(sample_idx) + annotations['axis_align_matrix'] = axis_align_matrix # 4x4 + info['annos'] = annotations + return info + + sample_id_list = sample_id_list if sample_id_list is not None \ + else self.sample_id_list + with futures.ThreadPoolExecutor(num_workers) as executor: + infos = executor.map(process_single_scene, sample_id_list) + return list(infos) + + +class ScanNetSegData(object): + """ScanNet dataset used to generate infos for semantic segmentation task. + + Args: + data_root (str): Root path of the raw data. + ann_file (str): The generated scannet infos. + split (str, optional): Set split type of the data. Default: 'train'. + num_points (int, optional): Number of points in each data input. + Default: 8192. + label_weight_func (function, optional): Function to compute the + label weight. Default: None. + """ + + def __init__(self, + data_root, + ann_file, + split='train', + num_points=8192, + label_weight_func=None): + self.data_root = data_root + self.data_infos = mmengine.load(ann_file) + self.split = split + assert split in ['train', 'val', 'test'] + self.num_points = num_points + + self.all_ids = np.arange(41) # all possible ids + self.cat_ids = np.array([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, + 39 + ]) # used for seg task + self.ignore_index = len(self.cat_ids) + + self.cat_id2class = np.ones( + (self.all_ids.shape[0], ), dtype=np.int64) * self.ignore_index + for i, cat_id in enumerate(self.cat_ids): + self.cat_id2class[cat_id] = i + + # label weighting function is taken from + # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 + self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \ + label_weight_func is None else label_weight_func + + def get_seg_infos(self): + if self.split == 'test': + return + scene_idxs, label_weight = self.get_scene_idxs_and_label_weight() + save_folder = osp.join(self.data_root, 'seg_info') + mmengine.mkdir_or_exist(save_folder) + np.save( + osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'), + scene_idxs) + np.save( + osp.join(save_folder, f'{self.split}_label_weight.npy'), + label_weight) + print(f'{self.split} resampled scene index and label weight saved') + + def _convert_to_label(self, mask): + """Convert class_id in loaded segmentation mask to label.""" + if isinstance(mask, str): + if mask.endswith('npy'): + mask = np.load(mask) + else: + mask = np.fromfile(mask, dtype=np.int64) + label = self.cat_id2class[mask] + return label + + def get_scene_idxs_and_label_weight(self): + """Compute scene_idxs for data sampling and label weight for loss + calculation. + + We sample more times for scenes with more points. Label_weight is + inversely proportional to number of class points. 
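+
+        For example, with the default label_weight_func, a class whose
+        points make up a fraction x of all labelled points gets the weight
+        1 / log(1.2 + x), so rare classes receive larger weights in the
+        segmentation loss.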
+        """
+        num_classes = len(self.cat_ids)
+        num_point_all = []
+        label_weight = np.zeros((num_classes + 1, ))  # ignore_index
+        for data_info in self.data_infos:
+            label = self._convert_to_label(
+                osp.join(self.data_root, data_info['pts_semantic_mask_path']))
+            num_point_all.append(label.shape[0])
+            class_count, _ = np.histogram(label, range(num_classes + 2))
+            label_weight += class_count
+
+        # repeat scene_idx for num_scene_point // num_sample_point times
+        sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))
+        num_iter = int(np.sum(num_point_all) / float(self.num_points))
+        scene_idxs = []
+        for idx in range(len(self.data_infos)):
+            scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter)))
+        scene_idxs = np.array(scene_idxs).astype(np.int32)
+
+        # calculate label weight, adopted from PointNet++
+        label_weight = label_weight[:-1].astype(np.float32)
+        label_weight = label_weight / label_weight.sum()
+        label_weight = self.label_weight_func(label_weight).astype(np.float32)
+
+        return scene_idxs, label_weight
diff --git a/tools/dataset_converters/semantickitti_converter.py b/tools/dataset_converters/semantickitti_converter.py
new file mode 100755
index 0000000..2454eea
--- /dev/null
+++ b/tools/dataset_converters/semantickitti_converter.py
@@ -0,0 +1,103 @@
+from os import path as osp
+from pathlib import Path
+
+import mmengine
+
+total_num = {
+    0: 4541,
+    1: 1101,
+    2: 4661,
+    3: 801,
+    4: 271,
+    5: 2761,
+    6: 1101,
+    7: 1101,
+    8: 4071,
+    9: 1591,
+    10: 1201,
+    11: 921,
+    12: 1061,
+    13: 3281,
+    14: 631,
+    15: 1901,
+    16: 1731,
+    17: 491,
+    18: 1801,
+    19: 4981,
+    20: 831,
+    21: 2721,
+}
+fold_split = {
+    'train': [0, 1, 2, 3, 4, 5, 6, 7, 9, 10],
+    'val': [8],
+    'test': [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
+}
+split_list = ['train', 'valid', 'test']
+
+
+def get_semantickitti_info(split):
+    """Create info data in the form of
+    data_infos={
+        'metainfo': {'DATASET': 'SemanticKITTI'},
+        'data_list': [
+            {
+                'lidar_points': {
+                    'lidar_path': 'sequences/00/velodyne/000000.bin',
+                    'num_pts_feats': 4
+                },
+                'pts_semantic_mask_path':
+                    'sequences/00/labels/000000.label',
+                'sample_id': '00'
+            },
+            ...
+        ]
+    }
+    """
+    data_infos = dict()
+    data_infos['metainfo'] = dict(DATASET='SemanticKITTI')
+    data_list = []
+    for i_folder in fold_split[split]:
+        for j in range(0, total_num[i_folder]):
+            data_list.append({
+                'lidar_points': {
+                    'lidar_path':
+                    osp.join('sequences',
+                             str(i_folder).zfill(2), 'velodyne',
+                             str(j).zfill(6) + '.bin'),
+                    'num_pts_feats':
+                    4
+                },
+                'pts_semantic_mask_path':
+                osp.join('sequences',
+                         str(i_folder).zfill(2), 'labels',
+                         str(j).zfill(6) + '.label'),
+                'sample_id':
+                str(i_folder) + str(j)
+            })
+    data_infos.update(dict(data_list=data_list))
+    return data_infos
+
+
+def create_semantickitti_info_file(pkl_prefix, save_path):
+    """Create info file of SemanticKITTI dataset.
+
+    Directly generate info file without raw data.
+
+    Args:
+        pkl_prefix (str): Prefix of the info file to be generated.
+        save_path (str): Path to save the info file.
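+
+    Example:
+        With pkl_prefix='semantickitti', this writes
+        semantickitti_infos_train.pkl, semantickitti_infos_val.pkl and
+        semantickitti_infos_test.pkl under save_path.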
+    """
+    print('Generate info.')
+    save_path = Path(save_path)
+
+    semantickitti_infos_train = get_semantickitti_info(split='train')
+    filename = save_path / f'{pkl_prefix}_infos_train.pkl'
+    print(f'SemanticKITTI info train file is saved to {filename}')
+    mmengine.dump(semantickitti_infos_train, filename)
+    semantickitti_infos_val = get_semantickitti_info(split='val')
+    filename = save_path / f'{pkl_prefix}_infos_val.pkl'
+    print(f'SemanticKITTI info val file is saved to {filename}')
+    mmengine.dump(semantickitti_infos_val, filename)
+    semantickitti_infos_test = get_semantickitti_info(split='test')
+    filename = save_path / f'{pkl_prefix}_infos_test.pkl'
+    print(f'SemanticKITTI info test file is saved to {filename}')
+    mmengine.dump(semantickitti_infos_test, filename)
diff --git a/tools/dataset_converters/sunrgbd_data_utils.py b/tools/dataset_converters/sunrgbd_data_utils.py
new file mode 100755
index 0000000..6444c4b
--- /dev/null
+++ b/tools/dataset_converters/sunrgbd_data_utils.py
@@ -0,0 +1,227 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from concurrent import futures as futures
+from os import path as osp
+
+import mmcv
+import mmengine
+import numpy as np
+from scipy import io as sio
+
+
+def random_sampling(points, num_points, replace=None, return_choices=False):
+    """Random sampling.
+
+    Sample the point cloud down to a certain number of points.
+
+    Args:
+        points (ndarray): Point cloud.
+        num_points (int): The number of samples.
+        replace (bool): Whether the sample is with or without replacement.
+        return_choices (bool): Whether to return the sampled indices.
+
+    Returns:
+        points (ndarray): Point cloud after sampling. If return_choices is
+            True, the sampled indices are returned as well.
+    """
+
+    if replace is None:
+        replace = (points.shape[0] < num_points)
+    choices = np.random.choice(points.shape[0], num_points, replace=replace)
+    if return_choices:
+        return points[choices], choices
+    else:
+        return points[choices]
+
+
+class SUNRGBDInstance(object):
+
+    def __init__(self, line):
+        data = line.split(' ')
+        data[1:] = [float(x) for x in data[1:]]
+        self.classname = data[0]
+        self.xmin = data[1]
+        self.ymin = data[2]
+        self.xmax = data[1] + data[3]
+        self.ymax = data[2] + data[4]
+        self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])
+        self.centroid = np.array([data[5], data[6], data[7]])
+        self.width = data[8]
+        self.length = data[9]
+        self.height = data[10]
+        # data[9] is x_size (length), data[8] is y_size (width), data[10] is
+        # z_size (height) in our depth coordinate system,
+        # l corresponds to the size along the x axis
+        self.size = np.array([data[9], data[8], data[10]]) * 2
+        self.orientation = np.zeros((3, ))
+        self.orientation[0] = data[11]
+        self.orientation[1] = data[12]
+        self.heading_angle = np.arctan2(self.orientation[1],
+                                        self.orientation[0])
+        self.box3d = np.concatenate(
+            [self.centroid, self.size, self.heading_angle[None]])
+
+
+class SUNRGBDData(object):
+    """SUNRGBD data.
+
+    Generate sunrgbd infos for sunrgbd_converter.
+
+    Args:
+        root_path (str): Root path of the raw data.
+        split (str, optional): Set split type of the data. Default: 'train'.
+        use_v1 (bool, optional): Whether to use the v1 labels. Default: False.
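+
+    Example:
+        A typical call from the converter looks like
+        SUNRGBDData(root_path, split='train').get_infos(), which returns a
+        list with one info dict per sample id listed in
+        sunrgbd_trainval/train_data_idx.txt.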
+    """
+
+    def __init__(self, root_path, split='train', use_v1=False):
+        self.root_dir = root_path
+        self.split = split
+        self.split_dir = osp.join(root_path, 'sunrgbd_trainval')
+        self.classes = [
+            'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
+            'night_stand', 'bookshelf', 'bathtub'
+        ]
+        self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}
+        self.label2cat = {
+            label: self.classes[label]
+            for label in range(len(self.classes))
+        }
+        assert split in ['train', 'val', 'test']
+        split_file = osp.join(self.split_dir, f'{split}_data_idx.txt')
+        mmengine.check_file_exist(split_file)
+        # materialize the ids as a list so that __len__ works and the ids
+        # can be iterated more than once
+        self.sample_id_list = list(
+            map(int, mmengine.list_from_file(split_file)))
+        self.image_dir = osp.join(self.split_dir, 'image')
+        self.calib_dir = osp.join(self.split_dir, 'calib')
+        self.depth_dir = osp.join(self.split_dir, 'depth')
+        if use_v1:
+            self.label_dir = osp.join(self.split_dir, 'label_v1')
+        else:
+            self.label_dir = osp.join(self.split_dir, 'label')
+
+    def __len__(self):
+        return len(self.sample_id_list)
+
+    def get_image(self, idx):
+        img_filename = osp.join(self.image_dir, f'{idx:06d}.jpg')
+        return mmcv.imread(img_filename)
+
+    def get_image_shape(self, idx):
+        image = self.get_image(idx)
+        return np.array(image.shape[:2], dtype=np.int32)
+
+    def get_depth(self, idx):
+        depth_filename = osp.join(self.depth_dir, f'{idx:06d}.mat')
+        depth = sio.loadmat(depth_filename)['instance']
+        return depth
+
+    def get_calibration(self, idx):
+        calib_filepath = osp.join(self.calib_dir, f'{idx:06d}.txt')
+        lines = [line.rstrip() for line in open(calib_filepath)]
+        Rt = np.array([float(x) for x in lines[0].split(' ')])
+        Rt = np.reshape(Rt, (3, 3), order='F').astype(np.float32)
+        K = np.array([float(x) for x in lines[1].split(' ')])
+        K = np.reshape(K, (3, 3), order='F').astype(np.float32)
+        return K, Rt
+
+    def get_label_objects(self, idx):
+        label_filename = osp.join(self.label_dir, f'{idx:06d}.txt')
+        lines = [line.rstrip() for line in open(label_filename)]
+        objects = [SUNRGBDInstance(line) for line in lines]
+        return objects
+
+    def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
+        """Get data infos.
+
+        This method gets information from the raw data.
+
+        Args:
+            num_workers (int, optional): Number of threads to be used.
+                Default: 4.
+            has_label (bool, optional): Whether the data has label.
+                Default: True.
+            sample_id_list (list[int], optional): Index list of the sample.
+                Default: None.
+
+        Returns:
+            infos (list[dict]): Information of the raw data.
+        """
+
+        def process_single_scene(sample_idx):
+            print(f'{self.split} sample_idx: {sample_idx}')
+            # convert depth to points
+            SAMPLE_NUM = 50000
+            # TODO: Check whether the point sampling
+            # process can be moved into training.
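+            # get_depth() returns the point cloud stored in the depth .mat
+            # file, an (N, 6) array (typically XYZ plus RGB, matching
+            # 'num_features': 6 recorded below); it is randomly subsampled
+            # to SAMPLE_NUM points before being dumped to a .bin file.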
+ pc_upright_depth = self.get_depth(sample_idx) + pc_upright_depth_subsampled = random_sampling( + pc_upright_depth, SAMPLE_NUM) + + info = dict() + pc_info = {'num_features': 6, 'lidar_idx': sample_idx} + info['point_cloud'] = pc_info + + mmengine.mkdir_or_exist(osp.join(self.root_dir, 'points')) + pc_upright_depth_subsampled.tofile( + osp.join(self.root_dir, 'points', f'{sample_idx:06d}.bin')) + + info['pts_path'] = osp.join('points', f'{sample_idx:06d}.bin') + img_path = osp.join('image', f'{sample_idx:06d}.jpg') + image_info = { + 'image_idx': sample_idx, + 'image_shape': self.get_image_shape(sample_idx), + 'image_path': img_path + } + info['image'] = image_info + + K, Rt = self.get_calibration(sample_idx) + calib_info = {'K': K, 'Rt': Rt} + info['calib'] = calib_info + + if has_label: + obj_list = self.get_label_objects(sample_idx) + annotations = {} + annotations['gt_num'] = len([ + obj.classname for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) + if annotations['gt_num'] != 0: + annotations['name'] = np.array([ + obj.classname for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) + annotations['bbox'] = np.concatenate([ + obj.box2d.reshape(1, 4) for obj in obj_list + if obj.classname in self.cat2label.keys() + ], + axis=0) + annotations['location'] = np.concatenate([ + obj.centroid.reshape(1, 3) for obj in obj_list + if obj.classname in self.cat2label.keys() + ], + axis=0) + annotations['dimensions'] = 2 * np.array([ + [obj.length, obj.width, obj.height] for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) # lwh (depth) format + annotations['rotation_y'] = np.array([ + obj.heading_angle for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) + annotations['index'] = np.arange( + len(obj_list), dtype=np.int32) + annotations['class'] = np.array([ + self.cat2label[obj.classname] for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) + annotations['gt_boxes_upright_depth'] = np.stack( + [ + obj.box3d for obj in obj_list + if obj.classname in self.cat2label.keys() + ], + axis=0) # (K,8) + info['annos'] = annotations + return info + + sample_id_list = sample_id_list if \ + sample_id_list is not None else self.sample_id_list + with futures.ThreadPoolExecutor(num_workers) as executor: + infos = executor.map(process_single_scene, sample_id_list) + return list(infos) diff --git a/tools/dataset_converters/update_infos_to_v2.py b/tools/dataset_converters/update_infos_to_v2.py new file mode 100755 index 0000000..200dfff --- /dev/null +++ b/tools/dataset_converters/update_infos_to_v2.py @@ -0,0 +1,1157 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Convert the annotation pkl to the standard format in OpenMMLab V2.0. + +Example: + python tools/dataset_converters/update_infos_to_v2.py + --dataset kitti + --pkl-path ./data/kitti/kitti_infos_train.pkl + --out-dir ./kitti_v2/ +""" + +import argparse +import copy +import time +from os import path as osp +from pathlib import Path + +import mmengine +import numpy as np +from nuscenes.nuscenes import NuScenes + +from mmdet3d.datasets.convert_utils import (convert_annos, + get_kitti_style_2d_boxes, + get_nuscenes_2d_boxes) +from mmdet3d.datasets.utils import convert_quaternion_to_matrix +from mmdet3d.structures import points_cam2img + + +def get_empty_instance(): + """Empty annotation for single instance.""" + instance = dict( + # (list[float], required): list of 4 numbers representing + # the bounding box of the instance, in (x1, y1, x2, y2) order. 
+ bbox=None, + # (int, required): an integer in the range + # [0, num_categories-1] representing the category label. + bbox_label=None, + # (list[float], optional): list of 7 (or 9) numbers representing + # the 3D bounding box of the instance, + # in [x, y, z, w, h, l, yaw] + # (or [x, y, z, w, h, l, yaw, vx, vy]) order. + bbox_3d=None, + # (bool, optional): Whether to use the + # 3D bounding box during training. + bbox_3d_isvalid=None, + # (int, optional): 3D category label + # (typically the same as label). + bbox_label_3d=None, + # (float, optional): Projected center depth of the + # 3D bounding box compared to the image plane. + depth=None, + # (list[float], optional): Projected + # 2D center of the 3D bounding box. + center_2d=None, + # (int, optional): Attribute labels + # (fine-grained labels such as stopping, moving, ignore, crowd). + attr_label=None, + # (int, optional): The number of LiDAR + # points in the 3D bounding box. + num_lidar_pts=None, + # (int, optional): The number of Radar + # points in the 3D bounding box. + num_radar_pts=None, + # (int, optional): Difficulty level of + # detecting the 3D bounding box. + difficulty=None, + unaligned_bbox_3d=None) + return instance + + +def get_empty_multicamera_instances(camera_types): + + cam_instance = dict() + for cam_type in camera_types: + cam_instance[cam_type] = None + return cam_instance + + +def get_empty_lidar_points(): + lidar_points = dict( + # (int, optional) : Number of features for each point. + num_pts_feats=None, + # (str, optional): Path of LiDAR data file. + lidar_path=None, + # (list[list[float]], optional): Transformation matrix + # from lidar to ego-vehicle + # with shape [4, 4]. + # (Referenced camera coordinate system is ego in KITTI.) + lidar2ego=None, + ) + return lidar_points + + +def get_empty_radar_points(): + radar_points = dict( + # (int, optional) : Number of features for each point. + num_pts_feats=None, + # (str, optional): Path of RADAR data file. + radar_path=None, + # Transformation matrix from lidar to + # ego-vehicle with shape [4, 4]. + # (Referenced camera coordinate system is ego in KITTI.) + radar2ego=None, + ) + return radar_points + + +def get_empty_img_info(): + img_info = dict( + # (str, required): the path to the image file. + img_path=None, + # (int) The height of the image. + height=None, + # (int) The width of the image. + width=None, + # (str, optional): Path of the depth map file + depth_map=None, + # (list[list[float]], optional) : Transformation + # matrix from camera to image with + # shape [3, 3], [3, 4] or [4, 4]. + cam2img=None, + # (list[list[float]]): Transformation matrix from lidar + # or depth to image with shape [4, 4]. + lidar2img=None, + # (list[list[float]], optional) : Transformation + # matrix from camera to ego-vehicle + # with shape [4, 4]. + cam2ego=None) + return img_info + + +def get_single_image_sweep(camera_types): + single_image_sweep = dict( + # (float, optional) : Timestamp of the current frame. + timestamp=None, + # (list[list[float]], optional) : Transformation matrix + # from ego-vehicle to the global + ego2global=None) + # (dict): Information of images captured by multiple cameras + images = dict() + for cam_type in camera_types: + images[cam_type] = get_empty_img_info() + single_image_sweep['images'] = images + return single_image_sweep + + +def get_single_lidar_sweep(): + single_lidar_sweep = dict( + # (float, optional) : Timestamp of the current frame. 
+ timestamp=None, + # (list[list[float]], optional) : Transformation matrix + # from ego-vehicle to the global + ego2global=None, + # (dict): Information of images captured by multiple cameras + lidar_points=get_empty_lidar_points()) + return single_lidar_sweep + + +def get_empty_standard_data_info( + camera_types=['CAM0', 'CAM1', 'CAM2', 'CAM3', 'CAM4']): + + data_info = dict( + # (str): Sample id of the frame. + sample_idx=None, + # (str, optional): '000010' + token=None, + **get_single_image_sweep(camera_types), + # (dict, optional): dict contains information + # of LiDAR point cloud frame. + lidar_points=get_empty_lidar_points(), + # (dict, optional) Each dict contains + # information of Radar point cloud frame. + radar_points=get_empty_radar_points(), + # (list[dict], optional): Image sweeps data. + image_sweeps=[], + lidar_sweeps=[], + instances=[], + # (list[dict], optional): Required by object + # detection, instance to be ignored during training. + instances_ignore=[], + # (str, optional): Path of semantic labels for each point. + pts_semantic_mask_path=None, + # (str, optional): Path of instance labels for each point. + pts_instance_mask_path=None) + return data_info + + +def clear_instance_unused_keys(instance): + keys = list(instance.keys()) + for k in keys: + if instance[k] is None: + del instance[k] + return instance + + +def clear_data_info_unused_keys(data_info): + keys = list(data_info.keys()) + empty_flag = True + for key in keys: + # we allow no annotations in datainfo + if key in ['instances', 'cam_sync_instances', 'cam_instances']: + empty_flag = False + continue + if isinstance(data_info[key], list): + if len(data_info[key]) == 0: + del data_info[key] + else: + empty_flag = False + elif data_info[key] is None: + del data_info[key] + elif isinstance(data_info[key], dict): + _, sub_empty_flag = clear_data_info_unused_keys(data_info[key]) + if sub_empty_flag is False: + empty_flag = False + else: + # sub field is empty + del data_info[key] + else: + empty_flag = False + + return data_info, empty_flag + + +def generate_nuscenes_camera_instances(info, nusc): + + # get bbox annotations for camera + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + + empty_multicamera_instance = get_empty_multicamera_instances(camera_types) + + for cam in camera_types: + cam_info = info['cams'][cam] + # list[dict] + ann_infos = get_nuscenes_2d_boxes( + nusc, + cam_info['sample_data_token'], + visibilities=['', '1', '2', '3', '4']) + empty_multicamera_instance[cam] = ann_infos + + return empty_multicamera_instance + + +def update_nuscenes_infos(pkl_path, out_dir): + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + print(f'{pkl_path} will be modified.') + if out_dir in pkl_path: + print(f'Warning, you may overwriting ' + f'the original data {pkl_path}.') + print(f'Reading from input file: {pkl_path}.') + data_list = mmengine.load(pkl_path) + METAINFO = { + 'classes': + ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'), + } + nusc = NuScenes( + version=data_list['metadata']['version'], + dataroot='./data/nuscenes', + verbose=True) + + print('Start updating:') + converted_list = [] + for i, ori_info_dict in enumerate( + mmengine.track_iter_progress(data_list['infos'])): + temp_data_info = get_empty_standard_data_info( + camera_types=camera_types) + 
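+        # Map the legacy nuScenes info dict onto the empty standard info
+        # field by field: top-level metadata first, then the lidar sweeps,
+        # the per-camera image info and finally the box instances.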
temp_data_info['sample_idx'] = i + temp_data_info['token'] = ori_info_dict['token'] + temp_data_info['ego2global'] = convert_quaternion_to_matrix( + ori_info_dict['ego2global_rotation'], + ori_info_dict['ego2global_translation']) + temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict.get( + 'num_features', 5) + temp_data_info['lidar_points']['lidar_path'] = Path( + ori_info_dict['lidar_path']).name + temp_data_info['lidar_points'][ + 'lidar2ego'] = convert_quaternion_to_matrix( + ori_info_dict['lidar2ego_rotation'], + ori_info_dict['lidar2ego_translation']) + # bc-breaking: Timestamp has divided 1e6 in pkl infos. + temp_data_info['timestamp'] = ori_info_dict['timestamp'] / 1e6 + for ori_sweep in ori_info_dict['sweeps']: + temp_lidar_sweep = get_single_lidar_sweep() + temp_lidar_sweep['lidar_points'][ + 'lidar2ego'] = convert_quaternion_to_matrix( + ori_sweep['sensor2ego_rotation'], + ori_sweep['sensor2ego_translation']) + temp_lidar_sweep['ego2global'] = convert_quaternion_to_matrix( + ori_sweep['ego2global_rotation'], + ori_sweep['ego2global_translation']) + lidar2sensor = np.eye(4) + rot = ori_sweep['sensor2lidar_rotation'] + trans = ori_sweep['sensor2lidar_translation'] + lidar2sensor[:3, :3] = rot.T + lidar2sensor[:3, 3:4] = -1 * np.matmul(rot.T, trans.reshape(3, 1)) + temp_lidar_sweep['lidar_points'][ + 'lidar2sensor'] = lidar2sensor.astype(np.float32).tolist() + temp_lidar_sweep['timestamp'] = ori_sweep['timestamp'] / 1e6 + temp_lidar_sweep['lidar_points']['lidar_path'] = ori_sweep[ + 'data_path'] + temp_lidar_sweep['sample_data_token'] = ori_sweep[ + 'sample_data_token'] + temp_data_info['lidar_sweeps'].append(temp_lidar_sweep) + temp_data_info['images'] = {} + for cam in ori_info_dict['cams']: + empty_img_info = get_empty_img_info() + empty_img_info['img_path'] = Path( + ori_info_dict['cams'][cam]['data_path']).name + empty_img_info['cam2img'] = ori_info_dict['cams'][cam][ + 'cam_intrinsic'].tolist() + empty_img_info['sample_data_token'] = ori_info_dict['cams'][cam][ + 'sample_data_token'] + # bc-breaking: Timestamp has divided 1e6 in pkl infos. 
+ empty_img_info[ + 'timestamp'] = ori_info_dict['cams'][cam]['timestamp'] / 1e6 + empty_img_info['cam2ego'] = convert_quaternion_to_matrix( + ori_info_dict['cams'][cam]['sensor2ego_rotation'], + ori_info_dict['cams'][cam]['sensor2ego_translation']) + lidar2sensor = np.eye(4) + rot = ori_info_dict['cams'][cam]['sensor2lidar_rotation'] + trans = ori_info_dict['cams'][cam]['sensor2lidar_translation'] + lidar2sensor[:3, :3] = rot.T + lidar2sensor[:3, 3:4] = -1 * np.matmul(rot.T, trans.reshape(3, 1)) + empty_img_info['lidar2cam'] = lidar2sensor.astype( + np.float32).tolist() + temp_data_info['images'][cam] = empty_img_info + ignore_class_name = set() + if 'gt_boxes' in ori_info_dict: + num_instances = ori_info_dict['gt_boxes'].shape[0] + for i in range(num_instances): + empty_instance = get_empty_instance() + empty_instance['bbox_3d'] = ori_info_dict['gt_boxes'][ + i, :].tolist() + if ori_info_dict['gt_names'][i] in METAINFO['classes']: + empty_instance['bbox_label'] = METAINFO['classes'].index( + ori_info_dict['gt_names'][i]) + else: + ignore_class_name.add(ori_info_dict['gt_names'][i]) + empty_instance['bbox_label'] = -1 + empty_instance['bbox_label_3d'] = copy.deepcopy( + empty_instance['bbox_label']) + empty_instance['velocity'] = ori_info_dict['gt_velocity'][ + i, :].tolist() + empty_instance['num_lidar_pts'] = ori_info_dict[ + 'num_lidar_pts'][i] + empty_instance['num_radar_pts'] = ori_info_dict[ + 'num_radar_pts'][i] + empty_instance['bbox_3d_isvalid'] = ori_info_dict[ + 'valid_flag'][i] + empty_instance = clear_instance_unused_keys(empty_instance) + temp_data_info['instances'].append(empty_instance) + temp_data_info[ + 'cam_instances'] = generate_nuscenes_camera_instances( + ori_info_dict, nusc) + temp_data_info, _ = clear_data_info_unused_keys(temp_data_info) + converted_list.append(temp_data_info) + pkl_name = Path(pkl_path).name + out_path = osp.join(out_dir, pkl_name) + print(f'Writing to output file: {out_path}.') + print(f'ignore classes: {ignore_class_name}') + + metainfo = dict() + metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])} + if ignore_class_name: + for ignore_class in ignore_class_name: + metainfo['categories'][ignore_class] = -1 + metainfo['dataset'] = 'nuscenes' + metainfo['version'] = data_list['metadata']['version'] + metainfo['info_version'] = '1.1' + converted_data_info = dict(metainfo=metainfo, data_list=converted_list) + + mmengine.dump(converted_data_info, out_path, 'pkl') + + +def update_kitti_infos(pkl_path, out_dir): + print(f'{pkl_path} will be modified.') + if out_dir in pkl_path: + print(f'Warning, you may overwriting ' + f'the original data {pkl_path}.') + time.sleep(5) + # TODO update to full label + # TODO discuss how to process 'Van', 'DontCare' + METAINFO = { + 'classes': ('Pedestrian', 'Cyclist', 'Car', 'Van', 'Truck', + 'Person_sitting', 'Tram', 'Misc'), + } + print(f'Reading from input file: {pkl_path}.') + data_list = mmengine.load(pkl_path) + print('Start updating:') + converted_list = [] + for ori_info_dict in mmengine.track_iter_progress(data_list): + temp_data_info = get_empty_standard_data_info() + + if 'plane' in ori_info_dict: + temp_data_info['plane'] = ori_info_dict['plane'] + + temp_data_info['sample_idx'] = ori_info_dict['image']['image_idx'] + + temp_data_info['images']['CAM0']['cam2img'] = ori_info_dict['calib'][ + 'P0'].tolist() + temp_data_info['images']['CAM1']['cam2img'] = ori_info_dict['calib'][ + 'P1'].tolist() + temp_data_info['images']['CAM2']['cam2img'] = ori_info_dict['calib'][ + 'P2'].tolist() + 
temp_data_info['images']['CAM3']['cam2img'] = ori_info_dict['calib'][ + 'P3'].tolist() + + temp_data_info['images']['CAM2']['img_path'] = Path( + ori_info_dict['image']['image_path']).name + h, w = ori_info_dict['image']['image_shape'] + temp_data_info['images']['CAM2']['height'] = h + temp_data_info['images']['CAM2']['width'] = w + temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[ + 'point_cloud']['num_features'] + temp_data_info['lidar_points']['lidar_path'] = Path( + ori_info_dict['point_cloud']['velodyne_path']).name + + rect = ori_info_dict['calib']['R0_rect'].astype(np.float32) + Trv2c = ori_info_dict['calib']['Tr_velo_to_cam'].astype(np.float32) + lidar2cam = rect @ Trv2c + temp_data_info['images']['CAM2']['lidar2cam'] = lidar2cam.tolist() + temp_data_info['images']['CAM0']['lidar2img'] = ( + ori_info_dict['calib']['P0'] @ lidar2cam).tolist() + temp_data_info['images']['CAM1']['lidar2img'] = ( + ori_info_dict['calib']['P1'] @ lidar2cam).tolist() + temp_data_info['images']['CAM2']['lidar2img'] = ( + ori_info_dict['calib']['P2'] @ lidar2cam).tolist() + temp_data_info['images']['CAM3']['lidar2img'] = ( + ori_info_dict['calib']['P3'] @ lidar2cam).tolist() + + temp_data_info['lidar_points']['Tr_velo_to_cam'] = Trv2c.tolist() + + # for potential usage + temp_data_info['images']['R0_rect'] = ori_info_dict['calib'][ + 'R0_rect'].astype(np.float32).tolist() + temp_data_info['lidar_points']['Tr_imu_to_velo'] = ori_info_dict[ + 'calib']['Tr_imu_to_velo'].astype(np.float32).tolist() + + cam2img = ori_info_dict['calib']['P2'] + + anns = ori_info_dict.get('annos', None) + ignore_class_name = set() + if anns is not None: + num_instances = len(anns['name']) + instance_list = [] + for instance_id in range(num_instances): + empty_instance = get_empty_instance() + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + + if anns['name'][instance_id] in METAINFO['classes']: + empty_instance['bbox_label'] = METAINFO['classes'].index( + anns['name'][instance_id]) + else: + ignore_class_name.add(anns['name'][instance_id]) + empty_instance['bbox_label'] = -1 + + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + + loc = anns['location'][instance_id] + dims = anns['dimensions'][instance_id] + rots = anns['rotation_y'][:, None][instance_id] + + dst = np.array([0.5, 0.5, 0.5]) + src = np.array([0.5, 1.0, 0.5]) + + center_3d = loc + dims * (dst - src) + center_2d = points_cam2img( + center_3d.reshape([1, 3]), cam2img, with_depth=True) + center_2d = center_2d.squeeze().tolist() + empty_instance['center_2d'] = center_2d[:2] + empty_instance['depth'] = center_2d[2] + + gt_bboxes_3d = np.concatenate([loc, dims, rots]).tolist() + empty_instance['bbox_3d'] = gt_bboxes_3d + empty_instance['bbox_label_3d'] = copy.deepcopy( + empty_instance['bbox_label']) + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + empty_instance['truncated'] = anns['truncated'][ + instance_id].tolist() + empty_instance['occluded'] = anns['occluded'][ + instance_id].tolist() + empty_instance['alpha'] = anns['alpha'][instance_id].tolist() + empty_instance['score'] = anns['score'][instance_id].tolist() + empty_instance['index'] = anns['index'][instance_id].tolist() + empty_instance['group_id'] = anns['group_ids'][ + instance_id].tolist() + empty_instance['difficulty'] = anns['difficulty'][ + instance_id].tolist() + empty_instance['num_lidar_pts'] = anns['num_points_in_gt'][ + instance_id].tolist() + empty_instance = clear_instance_unused_keys(empty_instance) + instance_list.append(empty_instance) + 
temp_data_info['instances'] = instance_list + cam_instances = generate_kitti_camera_instances(ori_info_dict) + temp_data_info['cam_instances'] = cam_instances + temp_data_info, _ = clear_data_info_unused_keys(temp_data_info) + converted_list.append(temp_data_info) + pkl_name = Path(pkl_path).name + out_path = osp.join(out_dir, pkl_name) + print(f'Writing to output file: {out_path}.') + print(f'ignore classes: {ignore_class_name}') + + # dataset metainfo + metainfo = dict() + metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])} + if ignore_class_name: + for ignore_class in ignore_class_name: + metainfo['categories'][ignore_class] = -1 + metainfo['dataset'] = 'kitti' + metainfo['info_version'] = '1.1' + converted_data_info = dict(metainfo=metainfo, data_list=converted_list) + + mmengine.dump(converted_data_info, out_path, 'pkl') + + +def update_s3dis_infos(pkl_path, out_dir): + print(f'{pkl_path} will be modified.') + if out_dir in pkl_path: + print(f'Warning, you may overwriting ' + f'the original data {pkl_path}.') + time.sleep(5) + METAINFO = {'classes': ('table', 'chair', 'sofa', 'bookcase', 'board')} + print(f'Reading from input file: {pkl_path}.') + data_list = mmengine.load(pkl_path) + print('Start updating:') + converted_list = [] + for i, ori_info_dict in enumerate(mmengine.track_iter_progress(data_list)): + temp_data_info = get_empty_standard_data_info() + temp_data_info['sample_idx'] = i + temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[ + 'point_cloud']['num_features'] + temp_data_info['lidar_points']['lidar_path'] = Path( + ori_info_dict['pts_path']).name + if 'pts_semantic_mask_path' in ori_info_dict: + temp_data_info['pts_semantic_mask_path'] = Path( + ori_info_dict['pts_semantic_mask_path']).name + if 'pts_instance_mask_path' in ori_info_dict: + temp_data_info['pts_instance_mask_path'] = Path( + ori_info_dict['pts_instance_mask_path']).name + + # TODO support camera + # np.linalg.inv(info['axis_align_matrix'] @ extrinsic): depth2cam + anns = ori_info_dict.get('annos', None) + ignore_class_name = set() + if anns is not None: + if anns['gt_num'] == 0: + instance_list = [] + else: + num_instances = len(anns['class']) + instance_list = [] + for instance_id in range(num_instances): + empty_instance = get_empty_instance() + empty_instance['bbox_3d'] = anns['gt_boxes_upright_depth'][ + instance_id].tolist() + + if anns['class'][instance_id] < len(METAINFO['classes']): + empty_instance['bbox_label_3d'] = anns['class'][ + instance_id] + else: + ignore_class_name.add( + METAINFO['classes'][anns['class'][instance_id]]) + empty_instance['bbox_label_3d'] = -1 + + empty_instance = clear_instance_unused_keys(empty_instance) + instance_list.append(empty_instance) + temp_data_info['instances'] = instance_list + temp_data_info, _ = clear_data_info_unused_keys(temp_data_info) + converted_list.append(temp_data_info) + pkl_name = Path(pkl_path).name + out_path = osp.join(out_dir, pkl_name) + print(f'Writing to output file: {out_path}.') + print(f'ignore classes: {ignore_class_name}') + + # dataset metainfo + metainfo = dict() + metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])} + if ignore_class_name: + for ignore_class in ignore_class_name: + metainfo['categories'][ignore_class] = -1 + metainfo['dataset'] = 's3dis' + metainfo['info_version'] = '1.1' + + converted_data_info = dict(metainfo=metainfo, data_list=converted_list) + + mmengine.dump(converted_data_info, out_path, 'pkl') + + +def update_scannet_infos(pkl_path, out_dir): + 
print(f'{pkl_path} will be modified.') + if out_dir in pkl_path: + print(f'Warning, you may overwriting ' + f'the original data {pkl_path}.') + time.sleep(5) + METAINFO = { + 'classes': + ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', + 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') + } + print(f'Reading from input file: {pkl_path}.') + data_list = mmengine.load(pkl_path) + print('Start updating:') + converted_list = [] + for ori_info_dict in mmengine.track_iter_progress(data_list): + temp_data_info = get_empty_standard_data_info() + temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[ + 'point_cloud']['num_features'] + temp_data_info['lidar_points']['lidar_path'] = Path( + ori_info_dict['pts_path']).name + if 'pts_semantic_mask_path' in ori_info_dict: + temp_data_info['pts_semantic_mask_path'] = Path( + ori_info_dict['pts_semantic_mask_path']).name + if 'pts_instance_mask_path' in ori_info_dict: + temp_data_info['pts_instance_mask_path'] = Path( + ori_info_dict['pts_instance_mask_path']).name + + # TODO support camera + # np.linalg.inv(info['axis_align_matrix'] @ extrinsic): depth2cam + anns = ori_info_dict.get('annos', None) + ignore_class_name = set() + if anns is not None: + temp_data_info['axis_align_matrix'] = anns[ + 'axis_align_matrix'].tolist() + if anns['gt_num'] == 0: + instance_list = [] + else: + num_instances = len(anns['name']) + instance_list = [] + for instance_id in range(num_instances): + empty_instance = get_empty_instance() + empty_instance['bbox_3d'] = anns['gt_boxes_upright_depth'][ + instance_id].tolist() + + if anns['name'][instance_id] in METAINFO['classes']: + empty_instance['bbox_label_3d'] = METAINFO[ + 'classes'].index(anns['name'][instance_id]) + else: + ignore_class_name.add(anns['name'][instance_id]) + empty_instance['bbox_label_3d'] = -1 + + empty_instance = clear_instance_unused_keys(empty_instance) + instance_list.append(empty_instance) + temp_data_info['instances'] = instance_list + temp_data_info, _ = clear_data_info_unused_keys(temp_data_info) + converted_list.append(temp_data_info) + pkl_name = Path(pkl_path).name + out_path = osp.join(out_dir, pkl_name) + print(f'Writing to output file: {out_path}.') + print(f'ignore classes: {ignore_class_name}') + + # dataset metainfo + metainfo = dict() + metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])} + if ignore_class_name: + for ignore_class in ignore_class_name: + metainfo['categories'][ignore_class] = -1 + metainfo['dataset'] = 'scannet' + metainfo['info_version'] = '1.1' + + converted_data_info = dict(metainfo=metainfo, data_list=converted_list) + + mmengine.dump(converted_data_info, out_path, 'pkl') + + +def update_sunrgbd_infos(pkl_path, out_dir): + print(f'{pkl_path} will be modified.') + if out_dir in pkl_path: + print(f'Warning, you may overwriting ' + f'the original data {pkl_path}.') + time.sleep(5) + METAINFO = { + 'classes': ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', + 'dresser', 'night_stand', 'bookshelf', 'bathtub') + } + print(f'Reading from input file: {pkl_path}.') + data_list = mmengine.load(pkl_path) + print('Start updating:') + converted_list = [] + for ori_info_dict in mmengine.track_iter_progress(data_list): + temp_data_info = get_empty_standard_data_info() + temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[ + 'point_cloud']['num_features'] + temp_data_info['lidar_points']['lidar_path'] = Path( + 
ori_info_dict['pts_path']).name + calib = ori_info_dict['calib'] + rt_mat = calib['Rt'] + # follow Coord3DMode.convert_point + rt_mat = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0] + ]) @ rt_mat.transpose(1, 0) + depth2img = calib['K'] @ rt_mat + temp_data_info['images']['CAM0']['depth2img'] = depth2img.tolist() + temp_data_info['images']['CAM0']['img_path'] = Path( + ori_info_dict['image']['image_path']).name + h, w = ori_info_dict['image']['image_shape'] + temp_data_info['images']['CAM0']['height'] = h + temp_data_info['images']['CAM0']['width'] = w + + anns = ori_info_dict.get('annos', None) + if anns is not None: + if anns['gt_num'] == 0: + instance_list = [] + else: + num_instances = len(anns['name']) + ignore_class_name = set() + instance_list = [] + for instance_id in range(num_instances): + empty_instance = get_empty_instance() + empty_instance['bbox_3d'] = anns['gt_boxes_upright_depth'][ + instance_id].tolist() + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + if anns['name'][instance_id] in METAINFO['classes']: + empty_instance['bbox_label_3d'] = METAINFO[ + 'classes'].index(anns['name'][instance_id]) + empty_instance['bbox_label'] = empty_instance[ + 'bbox_label_3d'] + else: + ignore_class_name.add(anns['name'][instance_id]) + empty_instance['bbox_label_3d'] = -1 + empty_instance['bbox_label'] = -1 + empty_instance = clear_instance_unused_keys(empty_instance) + instance_list.append(empty_instance) + temp_data_info['instances'] = instance_list + temp_data_info, _ = clear_data_info_unused_keys(temp_data_info) + converted_list.append(temp_data_info) + pkl_name = Path(pkl_path).name + out_path = osp.join(out_dir, pkl_name) + print(f'Writing to output file: {out_path}.') + print(f'ignore classes: {ignore_class_name}') + + # dataset metainfo + metainfo = dict() + metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])} + if ignore_class_name: + for ignore_class in ignore_class_name: + metainfo['categories'][ignore_class] = -1 + metainfo['dataset'] = 'sunrgbd' + metainfo['info_version'] = '1.1' + + converted_data_info = dict(metainfo=metainfo, data_list=converted_list) + + mmengine.dump(converted_data_info, out_path, 'pkl') + + +def update_lyft_infos(pkl_path, out_dir): + print(f'{pkl_path} will be modified.') + if out_dir in pkl_path: + print(f'Warning, you may overwriting ' + f'the original data {pkl_path}.') + print(f'Reading from input file: {pkl_path}.') + data_list = mmengine.load(pkl_path) + METAINFO = { + 'classes': + ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', + 'motorcycle', 'bicycle', 'pedestrian', 'animal'), + } + print('Start updating:') + converted_list = [] + for i, ori_info_dict in enumerate( + mmengine.track_iter_progress(data_list['infos'])): + temp_data_info = get_empty_standard_data_info() + temp_data_info['sample_idx'] = i + temp_data_info['token'] = ori_info_dict['token'] + temp_data_info['ego2global'] = convert_quaternion_to_matrix( + ori_info_dict['ego2global_rotation'], + ori_info_dict['ego2global_translation']) + temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict.get( + 'num_features', 5) + temp_data_info['lidar_points']['lidar_path'] = Path( + ori_info_dict['lidar_path']).name + temp_data_info['lidar_points'][ + 'lidar2ego'] = convert_quaternion_to_matrix( + ori_info_dict['lidar2ego_rotation'], + ori_info_dict['lidar2ego_translation']) + # bc-breaking: Timestamp has divided 1e6 in pkl infos. 
+ temp_data_info['timestamp'] = ori_info_dict['timestamp'] / 1e6 + for ori_sweep in ori_info_dict['sweeps']: + temp_lidar_sweep = get_single_lidar_sweep() + temp_lidar_sweep['lidar_points'][ + 'lidar2ego'] = convert_quaternion_to_matrix( + ori_sweep['sensor2ego_rotation'], + ori_sweep['sensor2ego_translation']) + temp_lidar_sweep['ego2global'] = convert_quaternion_to_matrix( + ori_sweep['ego2global_rotation'], + ori_sweep['ego2global_translation']) + lidar2sensor = np.eye(4) + rot = ori_sweep['sensor2lidar_rotation'] + trans = ori_sweep['sensor2lidar_translation'] + lidar2sensor[:3, :3] = rot.T + lidar2sensor[:3, 3:4] = -1 * np.matmul(rot.T, trans.reshape(3, 1)) + temp_lidar_sweep['lidar_points'][ + 'lidar2sensor'] = lidar2sensor.astype(np.float32).tolist() + # bc-breaking: Timestamp has divided 1e6 in pkl infos. + temp_lidar_sweep['timestamp'] = ori_sweep['timestamp'] / 1e6 + temp_lidar_sweep['lidar_points']['lidar_path'] = ori_sweep[ + 'data_path'] + temp_lidar_sweep['sample_data_token'] = ori_sweep[ + 'sample_data_token'] + temp_data_info['lidar_sweeps'].append(temp_lidar_sweep) + temp_data_info['images'] = {} + for cam in ori_info_dict['cams']: + empty_img_info = get_empty_img_info() + empty_img_info['img_path'] = Path( + ori_info_dict['cams'][cam]['data_path']).name + empty_img_info['cam2img'] = ori_info_dict['cams'][cam][ + 'cam_intrinsic'].tolist() + empty_img_info['sample_data_token'] = ori_info_dict['cams'][cam][ + 'sample_data_token'] + empty_img_info[ + 'timestamp'] = ori_info_dict['cams'][cam]['timestamp'] / 1e6 + empty_img_info['cam2ego'] = convert_quaternion_to_matrix( + ori_info_dict['cams'][cam]['sensor2ego_rotation'], + ori_info_dict['cams'][cam]['sensor2ego_translation']) + lidar2sensor = np.eye(4) + rot = ori_info_dict['cams'][cam]['sensor2lidar_rotation'] + trans = ori_info_dict['cams'][cam]['sensor2lidar_translation'] + lidar2sensor[:3, :3] = rot.T + lidar2sensor[:3, 3:4] = -1 * np.matmul(rot.T, trans.reshape(3, 1)) + empty_img_info['lidar2cam'] = lidar2sensor.astype( + np.float32).tolist() + temp_data_info['images'][cam] = empty_img_info + ignore_class_name = set() + if 'gt_boxes' in ori_info_dict: + num_instances = ori_info_dict['gt_boxes'].shape[0] + for i in range(num_instances): + empty_instance = get_empty_instance() + empty_instance['bbox_3d'] = ori_info_dict['gt_boxes'][ + i, :].tolist() + if ori_info_dict['gt_names'][i] in METAINFO['classes']: + empty_instance['bbox_label'] = METAINFO['classes'].index( + ori_info_dict['gt_names'][i]) + else: + ignore_class_name.add(ori_info_dict['gt_names'][i]) + empty_instance['bbox_label'] = -1 + empty_instance['bbox_label_3d'] = copy.deepcopy( + empty_instance['bbox_label']) + empty_instance = clear_instance_unused_keys(empty_instance) + temp_data_info['instances'].append(empty_instance) + temp_data_info, _ = clear_data_info_unused_keys(temp_data_info) + converted_list.append(temp_data_info) + pkl_name = Path(pkl_path).name + out_path = osp.join(out_dir, pkl_name) + print(f'Writing to output file: {out_path}.') + print(f'ignore classes: {ignore_class_name}') + + metainfo = dict() + metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])} + if ignore_class_name: + for ignore_class in ignore_class_name: + metainfo['categories'][ignore_class] = -1 + metainfo['dataset'] = 'lyft' + metainfo['version'] = data_list['metadata']['version'] + metainfo['info_version'] = '1.1' + converted_data_info = dict(metainfo=metainfo, data_list=converted_list) + + mmengine.dump(converted_data_info, out_path, 'pkl') + + +def 
update_waymo_infos(pkl_path, out_dir): + # the input pkl is based on the + # pkl generated in the waymo cam only challenage. + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_LEFT', + 'CAM_FRONT_RIGHT', + 'CAM_SIDE_LEFT', + 'CAM_SIDE_RIGHT', + ] + print(f'{pkl_path} will be modified.') + if out_dir in pkl_path: + print(f'Warning, you may overwriting ' + f'the original data {pkl_path}.') + time.sleep(5) + # TODO update to full label + # TODO discuss how to process 'Van', 'DontCare' + METAINFO = { + 'classes': ('Car', 'Pedestrian', 'Cyclist', 'Sign'), + } + print(f'Reading from input file: {pkl_path}.') + data_list = mmengine.load(pkl_path) + print('Start updating:') + converted_list = [] + for ori_info_dict in mmengine.track_iter_progress(data_list): + temp_data_info = get_empty_standard_data_info(camera_types) + + if 'plane' in ori_info_dict: + temp_data_info['plane'] = ori_info_dict['plane'] + temp_data_info['sample_idx'] = ori_info_dict['image']['image_idx'] + + # calib matrix + for cam_idx, cam_key in enumerate(camera_types): + temp_data_info['images'][cam_key]['cam2img'] =\ + ori_info_dict['calib'][f'P{cam_idx}'].tolist() + + for cam_idx, cam_key in enumerate(camera_types): + rect = ori_info_dict['calib']['R0_rect'].astype(np.float32) + velo_to_cam = 'Tr_velo_to_cam' + if cam_idx != 0: + velo_to_cam += str(cam_idx) + Trv2c = ori_info_dict['calib'][velo_to_cam].astype(np.float32) + + lidar2cam = rect @ Trv2c + temp_data_info['images'][cam_key]['lidar2cam'] = lidar2cam.tolist() + temp_data_info['images'][cam_key]['lidar2img'] = ( + ori_info_dict['calib'][f'P{cam_idx}'] @ lidar2cam).tolist() + + # image path + base_img_path = Path(ori_info_dict['image']['image_path']).name + + for cam_idx, cam_key in enumerate(camera_types): + temp_data_info['images'][cam_key]['timestamp'] = ori_info_dict[ + 'timestamp'] + temp_data_info['images'][cam_key]['img_path'] = base_img_path + + h, w = ori_info_dict['image']['image_shape'] + + # for potential usage + temp_data_info['images'][camera_types[0]]['height'] = h + temp_data_info['images'][camera_types[0]]['width'] = w + temp_data_info['lidar_points']['num_pts_feats'] = ori_info_dict[ + 'point_cloud']['num_features'] + temp_data_info['lidar_points']['timestamp'] = ori_info_dict[ + 'timestamp'] + velo_path = ori_info_dict['point_cloud'].get('velodyne_path') + if velo_path is not None: + temp_data_info['lidar_points']['lidar_path'] = Path(velo_path).name + + # TODO discuss the usage of Tr_velo_to_cam in lidar + Trv2c = ori_info_dict['calib']['Tr_velo_to_cam'].astype(np.float32) + + temp_data_info['lidar_points']['Tr_velo_to_cam'] = Trv2c.tolist() + + # for potential usage + # temp_data_info['images']['R0_rect'] = ori_info_dict['calib'][ + # 'R0_rect'].astype(np.float32).tolist() + + # for the sweeps part: + temp_data_info['timestamp'] = ori_info_dict['timestamp'] + temp_data_info['ego2global'] = ori_info_dict['pose'] + + for ori_sweep in ori_info_dict['sweeps']: + # lidar sweeps + lidar_sweep = get_single_lidar_sweep() + lidar_sweep['ego2global'] = ori_sweep['pose'] + lidar_sweep['timestamp'] = ori_sweep['timestamp'] + lidar_sweep['lidar_points']['lidar_path'] = Path( + ori_sweep['velodyne_path']).name + # image sweeps + image_sweep = get_single_image_sweep(camera_types) + image_sweep['ego2global'] = ori_sweep['pose'] + image_sweep['timestamp'] = ori_sweep['timestamp'] + img_path = Path(ori_sweep['image_path']).name + for cam_idx, cam_key in enumerate(camera_types): + image_sweep['images'][cam_key]['img_path'] = img_path + + 
temp_data_info['lidar_sweeps'].append(lidar_sweep) + temp_data_info['image_sweeps'].append(image_sweep) + + anns = ori_info_dict.get('annos', None) + ignore_class_name = set() + if anns is not None: + num_instances = len(anns['name']) + + instance_list = [] + for instance_id in range(num_instances): + empty_instance = get_empty_instance() + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + + if anns['name'][instance_id] in METAINFO['classes']: + empty_instance['bbox_label'] = METAINFO['classes'].index( + anns['name'][instance_id]) + else: + ignore_class_name.add(anns['name'][instance_id]) + empty_instance['bbox_label'] = -1 + + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + + loc = anns['location'][instance_id] + dims = anns['dimensions'][instance_id] + rots = anns['rotation_y'][:, None][instance_id] + gt_bboxes_3d = np.concatenate([loc, dims, rots + ]).astype(np.float32).tolist() + empty_instance['bbox_3d'] = gt_bboxes_3d + empty_instance['bbox_label_3d'] = copy.deepcopy( + empty_instance['bbox_label']) + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + empty_instance['truncated'] = int( + anns['truncated'][instance_id].tolist()) + empty_instance['occluded'] = anns['occluded'][ + instance_id].tolist() + empty_instance['alpha'] = anns['alpha'][instance_id].tolist() + empty_instance['index'] = anns['index'][instance_id].tolist() + empty_instance['group_id'] = anns['group_ids'][ + instance_id].tolist() + empty_instance['difficulty'] = anns['difficulty'][ + instance_id].tolist() + empty_instance['num_lidar_pts'] = anns['num_points_in_gt'][ + instance_id].tolist() + empty_instance['camera_id'] = anns['camera_id'][ + instance_id].tolist() + empty_instance = clear_instance_unused_keys(empty_instance) + instance_list.append(empty_instance) + temp_data_info['instances'] = instance_list + + # waymo provide the labels that sync with cam + anns = ori_info_dict.get('cam_sync_annos', None) + ignore_class_name = set() + if anns is not None: + num_instances = len(anns['name']) + instance_list = [] + for instance_id in range(num_instances): + empty_instance = get_empty_instance() + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + + if anns['name'][instance_id] in METAINFO['classes']: + empty_instance['bbox_label'] = METAINFO['classes'].index( + anns['name'][instance_id]) + else: + ignore_class_name.add(anns['name'][instance_id]) + empty_instance['bbox_label'] = -1 + + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + + loc = anns['location'][instance_id] + dims = anns['dimensions'][instance_id] + rots = anns['rotation_y'][:, None][instance_id] + gt_bboxes_3d = np.concatenate([loc, dims, rots + ]).astype(np.float32).tolist() + empty_instance['bbox_3d'] = gt_bboxes_3d + empty_instance['bbox_label_3d'] = copy.deepcopy( + empty_instance['bbox_label']) + empty_instance['bbox'] = anns['bbox'][instance_id].tolist() + empty_instance['truncated'] = int( + anns['truncated'][instance_id].tolist()) + empty_instance['occluded'] = anns['occluded'][ + instance_id].tolist() + empty_instance['alpha'] = anns['alpha'][instance_id].tolist() + empty_instance['index'] = anns['index'][instance_id].tolist() + empty_instance['group_id'] = anns['group_ids'][ + instance_id].tolist() + empty_instance['camera_id'] = anns['camera_id'][ + instance_id].tolist() + empty_instance = clear_instance_unused_keys(empty_instance) + instance_list.append(empty_instance) + temp_data_info['cam_sync_instances'] = instance_list + + cam_instances = generate_waymo_camera_instances( + 
ori_info_dict, camera_types) + temp_data_info['cam_instances'] = cam_instances + + temp_data_info, _ = clear_data_info_unused_keys(temp_data_info) + converted_list.append(temp_data_info) + pkl_name = Path(pkl_path).name + out_path = osp.join(out_dir, pkl_name) + print(f'Writing to output file: {out_path}.') + print(f'ignore classes: {ignore_class_name}') + + # dataset metainfo + metainfo = dict() + metainfo['categories'] = {k: i for i, k in enumerate(METAINFO['classes'])} + if ignore_class_name: + for ignore_class in ignore_class_name: + metainfo['categories'][ignore_class] = -1 + metainfo['dataset'] = 'waymo' + metainfo['version'] = '1.4' + metainfo['info_version'] = '1.1' + + converted_data_info = dict(metainfo=metainfo, data_list=converted_list) + + mmengine.dump(converted_data_info, out_path, 'pkl') + + +def generate_kitti_camera_instances(ori_info_dict): + + cam_key = 'CAM2' + empty_camera_instances = get_empty_multicamera_instances([cam_key]) + annos = copy.deepcopy(ori_info_dict['annos']) + ann_infos = get_kitti_style_2d_boxes( + ori_info_dict, occluded=[0, 1, 2, 3], annos=annos) + empty_camera_instances[cam_key] = ann_infos + + return empty_camera_instances + + +def generate_waymo_camera_instances(ori_info_dict, cam_keys): + + empty_multicamera_instances = get_empty_multicamera_instances(cam_keys) + + for cam_idx, cam_key in enumerate(cam_keys): + annos = copy.deepcopy(ori_info_dict['cam_sync_annos']) + if cam_idx != 0: + annos = convert_annos(ori_info_dict, cam_idx) + + ann_infos = get_kitti_style_2d_boxes( + ori_info_dict, cam_idx, occluded=[0], annos=annos, dataset='waymo') + + empty_multicamera_instances[cam_key] = ann_infos + return empty_multicamera_instances + + +def parse_args(): + parser = argparse.ArgumentParser(description='Arg parser for data coords ' + 'update due to coords sys refactor.') + parser.add_argument( + '--dataset', type=str, default='kitti', help='name of dataset') + parser.add_argument( + '--pkl-path', + type=str, + default='./data/kitti/kitti_infos_train.pkl ', + help='specify the root dir of dataset') + parser.add_argument( + '--out-dir', + type=str, + default='converted_annotations', + required=False, + help='output direction of info pkl') + args = parser.parse_args() + return args + + +def update_pkl_infos(dataset, out_dir, pkl_path): + if dataset.lower() == 'kitti': + update_kitti_infos(pkl_path=pkl_path, out_dir=out_dir) + elif dataset.lower() == 'waymo': + update_waymo_infos(pkl_path=pkl_path, out_dir=out_dir) + elif dataset.lower() == 'scannet': + update_scannet_infos(pkl_path=pkl_path, out_dir=out_dir) + elif dataset.lower() == 'sunrgbd': + update_sunrgbd_infos(pkl_path=pkl_path, out_dir=out_dir) + elif dataset.lower() == 'lyft': + update_lyft_infos(pkl_path=pkl_path, out_dir=out_dir) + elif dataset.lower() == 'nuscenes': + update_nuscenes_infos(pkl_path=pkl_path, out_dir=out_dir) + elif dataset.lower() == 's3dis': + update_s3dis_infos(pkl_path=pkl_path, out_dir=out_dir) + else: + raise NotImplementedError(f'Do not support convert {dataset} to v2.') + + +if __name__ == '__main__': + args = parse_args() + if args.out_dir is None: + args.out_dir = args.root_dir + update_pkl_infos( + dataset=args.dataset, out_dir=args.out_dir, pkl_path=args.pkl_path) diff --git a/tools/dataset_converters/waymo_converter.py b/tools/dataset_converters/waymo_converter.py new file mode 100755 index 0000000..87f9c54 --- /dev/null +++ b/tools/dataset_converters/waymo_converter.py @@ -0,0 +1,632 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+r"""Adapted from `Waymo to KITTI converter + `_. +""" + +try: + from waymo_open_dataset import dataset_pb2 +except ImportError: + raise ImportError('Please run "pip install waymo-open-dataset-tf-2-6-0" ' + '>1.4.5 to install the official devkit first.') + +import os +from glob import glob +from os.path import exists, join + +import mmengine +import numpy as np +import tensorflow as tf +from waymo_open_dataset.utils import range_image_utils, transform_utils +from waymo_open_dataset.utils.frame_utils import \ + parse_range_image_and_camera_projection + + +class Waymo2KITTI(object): + """Waymo to KITTI converter. + + This class serves as the converter to change the waymo raw data to KITTI + format. + + Args: + load_dir (str): Directory to load waymo raw data. + save_dir (str): Directory to save data in KITTI format. + prefix (str): Prefix of filename. In general, 0 for training, 1 for + validation and 2 for testing. + workers (int, optional): Number of workers for the parallel process. + Defaults to 64. + test_mode (bool, optional): Whether in the test_mode. + Defaults to False. + save_cam_sync_labels (bool, optional): Whether to save cam sync labels. + Defaults to True. + """ + + def __init__(self, + load_dir, + save_dir, + prefix, + workers=64, + test_mode=False, + save_cam_sync_labels=True): + self.filter_empty_3dboxes = True + self.filter_no_label_zone_points = True + + self.selected_waymo_classes = ['VEHICLE', 'PEDESTRIAN', 'CYCLIST'] + + # Only data collected in specific locations will be converted + # If set None, this filter is disabled + # Available options: location_sf (main dataset) + self.selected_waymo_locations = None + self.save_track_id = False + + # turn on eager execution for older tensorflow versions + if int(tf.__version__.split('.')[0]) < 2: + tf.enable_eager_execution() + + # keep the order defined by the official protocol + self.cam_list = [ + '_FRONT', + '_FRONT_LEFT', + '_FRONT_RIGHT', + '_SIDE_LEFT', + '_SIDE_RIGHT', + ] + self.lidar_list = ['TOP', 'FRONT', 'SIDE_LEFT', 'SIDE_RIGHT', 'REAR'] + self.type_list = [ + 'UNKNOWN', 'VEHICLE', 'PEDESTRIAN', 'SIGN', 'CYCLIST' + ] + self.waymo_to_kitti_class_map = { + 'UNKNOWN': 'DontCare', + 'PEDESTRIAN': 'Pedestrian', + 'VEHICLE': 'Car', + 'CYCLIST': 'Cyclist', + 'SIGN': 'Sign' # not in kitti + } + + self.load_dir = load_dir + self.save_dir = save_dir + self.prefix = prefix + self.workers = int(workers) + self.test_mode = test_mode + self.save_cam_sync_labels = save_cam_sync_labels + + self.tfrecord_pathnames = sorted( + glob(join(self.load_dir, '*.tfrecord'))) + + self.label_save_dir = f'{self.save_dir}/label_' + self.label_all_save_dir = f'{self.save_dir}/label_all' + self.image_save_dir = f'{self.save_dir}/image_' + self.calib_save_dir = f'{self.save_dir}/calib' + self.point_cloud_save_dir = f'{self.save_dir}/velodyne' + self.pose_save_dir = f'{self.save_dir}/pose' + self.timestamp_save_dir = f'{self.save_dir}/timestamp' + if self.save_cam_sync_labels: + self.cam_sync_label_save_dir = f'{self.save_dir}/cam_sync_label_' + self.cam_sync_label_all_save_dir = \ + f'{self.save_dir}/cam_sync_label_all' + + self.create_folder() + + def convert(self): + """Convert action.""" + print('Start converting ...') + mmengine.track_parallel_progress(self.convert_one, range(len(self)), + self.workers) + print('\nFinished ...') + + def convert_one(self, file_idx): + """Convert action for single file. + + Args: + file_idx (int): Index of the file to be converted. 
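+
+        Every frame in the selected tfrecord is parsed and written out in
+        KITTI style: image, calib, velodyne, pose and timestamp files
+        (plus label files when not in test mode), named as
+        prefix + file index (3 digits) + frame index (3 digits).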
+ """ + pathname = self.tfrecord_pathnames[file_idx] + dataset = tf.data.TFRecordDataset(pathname, compression_type='') + + for frame_idx, data in enumerate(dataset): + + frame = dataset_pb2.Frame() + frame.ParseFromString(bytearray(data.numpy())) + if (self.selected_waymo_locations is not None + and frame.context.stats.location + not in self.selected_waymo_locations): + continue + + self.save_image(frame, file_idx, frame_idx) + self.save_calib(frame, file_idx, frame_idx) + self.save_lidar(frame, file_idx, frame_idx) + self.save_pose(frame, file_idx, frame_idx) + self.save_timestamp(frame, file_idx, frame_idx) + + if not self.test_mode: + # TODO save the depth image for waymo challenge solution. + self.save_label(frame, file_idx, frame_idx) + if self.save_cam_sync_labels: + self.save_label(frame, file_idx, frame_idx, cam_sync=True) + + def __len__(self): + """Length of the filename list.""" + return len(self.tfrecord_pathnames) + + def save_image(self, frame, file_idx, frame_idx): + """Parse and save the images in jpg format. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. + """ + for img in frame.images: + img_path = f'{self.image_save_dir}{str(img.name - 1)}/' + \ + f'{self.prefix}{str(file_idx).zfill(3)}' + \ + f'{str(frame_idx).zfill(3)}.jpg' + with open(img_path, 'wb') as fp: + fp.write(img.image) + + def save_calib(self, frame, file_idx, frame_idx): + """Parse and save the calibration data. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. + """ + # waymo front camera to kitti reference camera + T_front_cam_to_ref = np.array([[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], + [1.0, 0.0, 0.0]]) + camera_calibs = [] + R0_rect = [f'{i:e}' for i in np.eye(3).flatten()] + Tr_velo_to_cams = [] + calib_context = '' + + for camera in frame.context.camera_calibrations: + # extrinsic parameters + T_cam_to_vehicle = np.array(camera.extrinsic.transform).reshape( + 4, 4) + T_vehicle_to_cam = np.linalg.inv(T_cam_to_vehicle) + Tr_velo_to_cam = \ + self.cart_to_homo(T_front_cam_to_ref) @ T_vehicle_to_cam + if camera.name == 1: # FRONT = 1, see dataset.proto for details + self.T_velo_to_front_cam = Tr_velo_to_cam.copy() + Tr_velo_to_cam = Tr_velo_to_cam[:3, :].reshape((12, )) + Tr_velo_to_cams.append([f'{i:e}' for i in Tr_velo_to_cam]) + + # intrinsic parameters + camera_calib = np.zeros((3, 4)) + camera_calib[0, 0] = camera.intrinsic[0] + camera_calib[1, 1] = camera.intrinsic[1] + camera_calib[0, 2] = camera.intrinsic[2] + camera_calib[1, 2] = camera.intrinsic[3] + camera_calib[2, 2] = 1 + camera_calib = list(camera_calib.reshape(12)) + camera_calib = [f'{i:e}' for i in camera_calib] + camera_calibs.append(camera_calib) + + # all camera ids are saved as id-1 in the result because + # camera 0 is unknown in the proto + for i in range(5): + calib_context += 'P' + str(i) + ': ' + \ + ' '.join(camera_calibs[i]) + '\n' + calib_context += 'R0_rect' + ': ' + ' '.join(R0_rect) + '\n' + for i in range(5): + calib_context += 'Tr_velo_to_cam_' + str(i) + ': ' + \ + ' '.join(Tr_velo_to_cams[i]) + '\n' + + with open( + f'{self.calib_save_dir}/{self.prefix}' + + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', + 'w+') as fp_calib: + fp_calib.write(calib_context) + fp_calib.close() + + def save_lidar(self, frame, file_idx, frame_idx): + """Parse and save the lidar data in psd format. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. 
+ file_idx (int): Current file index. + frame_idx (int): Current frame index. + """ + range_images, camera_projections, seg_labels, range_image_top_pose = \ + parse_range_image_and_camera_projection(frame) + + if range_image_top_pose is None: + # the camera only split doesn't contain lidar points. + return + # First return + points_0, cp_points_0, intensity_0, elongation_0, mask_indices_0 = \ + self.convert_range_image_to_point_cloud( + frame, + range_images, + camera_projections, + range_image_top_pose, + ri_index=0 + ) + points_0 = np.concatenate(points_0, axis=0) + intensity_0 = np.concatenate(intensity_0, axis=0) + elongation_0 = np.concatenate(elongation_0, axis=0) + mask_indices_0 = np.concatenate(mask_indices_0, axis=0) + + # Second return + points_1, cp_points_1, intensity_1, elongation_1, mask_indices_1 = \ + self.convert_range_image_to_point_cloud( + frame, + range_images, + camera_projections, + range_image_top_pose, + ri_index=1 + ) + points_1 = np.concatenate(points_1, axis=0) + intensity_1 = np.concatenate(intensity_1, axis=0) + elongation_1 = np.concatenate(elongation_1, axis=0) + mask_indices_1 = np.concatenate(mask_indices_1, axis=0) + + points = np.concatenate([points_0, points_1], axis=0) + intensity = np.concatenate([intensity_0, intensity_1], axis=0) + elongation = np.concatenate([elongation_0, elongation_1], axis=0) + mask_indices = np.concatenate([mask_indices_0, mask_indices_1], axis=0) + + # timestamp = frame.timestamp_micros * np.ones_like(intensity) + + # concatenate x,y,z, intensity, elongation, timestamp (6-dim) + point_cloud = np.column_stack( + (points, intensity, elongation, mask_indices)) + + pc_path = f'{self.point_cloud_save_dir}/{self.prefix}' + \ + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.bin' + point_cloud.astype(np.float32).tofile(pc_path) + + def save_label(self, frame, file_idx, frame_idx, cam_sync=False): + """Parse and save the label data in txt format. + The relation between waymo and kitti coordinates is noteworthy: + 1. x, y, z correspond to l, w, h (waymo) -> l, h, w (kitti) + 2. x-y-z: front-left-up (waymo) -> right-down-front(kitti) + 3. bbox origin at volumetric center (waymo) -> bottom center (kitti) + 4. rotation: +x around y-axis (kitti) -> +x around z-axis (waymo) + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. + cam_sync (bool, optional): Whether to save the cam sync labels. + Defaults to False. 
+ """ + label_all_path = f'{self.label_all_save_dir}/{self.prefix}' + \ + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt' + if cam_sync: + label_all_path = label_all_path.replace('label_', + 'cam_sync_label_') + fp_label_all = open(label_all_path, 'w+') + id_to_bbox = dict() + id_to_name = dict() + for labels in frame.projected_lidar_labels: + name = labels.name + for label in labels.labels: + # TODO: need a workaround as bbox may not belong to front cam + bbox = [ + label.box.center_x - label.box.length / 2, + label.box.center_y - label.box.width / 2, + label.box.center_x + label.box.length / 2, + label.box.center_y + label.box.width / 2 + ] + id_to_bbox[label.id] = bbox + id_to_name[label.id] = name - 1 + + for obj in frame.laser_labels: + bounding_box = None + name = None + id = obj.id + for proj_cam in self.cam_list: + if id + proj_cam in id_to_bbox: + bounding_box = id_to_bbox.get(id + proj_cam) + name = str(id_to_name.get(id + proj_cam)) + break + + # NOTE: the 2D labels do not have strict correspondence with + # the projected 2D lidar labels + # e.g.: the projected 2D labels can be in camera 2 + # while the most_visible_camera can have id 4 + if cam_sync: + if obj.most_visible_camera_name: + name = str( + self.cam_list.index( + f'_{obj.most_visible_camera_name}')) + box3d = obj.camera_synced_box + else: + continue + else: + box3d = obj.box + + if bounding_box is None or name is None: + name = '0' + bounding_box = (0, 0, 0, 0) + + my_type = self.type_list[obj.type] + + if my_type not in self.selected_waymo_classes: + continue + + if self.filter_empty_3dboxes and obj.num_lidar_points_in_box < 1: + continue + + my_type = self.waymo_to_kitti_class_map[my_type] + + height = box3d.height + width = box3d.width + length = box3d.length + + x = box3d.center_x + y = box3d.center_y + z = box3d.center_z - height / 2 + + # project bounding box to the virtual reference frame + pt_ref = self.T_velo_to_front_cam @ \ + np.array([x, y, z, 1]).reshape((4, 1)) + x, y, z, _ = pt_ref.flatten().tolist() + + rotation_y = -box3d.heading - np.pi / 2 + track_id = obj.id + + # not available + truncated = 0 + occluded = 0 + alpha = -10 + + line = my_type + \ + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format( + round(truncated, 2), occluded, round(alpha, 2), + round(bounding_box[0], 2), round(bounding_box[1], 2), + round(bounding_box[2], 2), round(bounding_box[3], 2), + round(height, 2), round(width, 2), round(length, 2), + round(x, 2), round(y, 2), round(z, 2), + round(rotation_y, 2)) + + if self.save_track_id: + line_all = line[:-1] + ' ' + name + ' ' + track_id + '\n' + else: + line_all = line[:-1] + ' ' + name + '\n' + + label_path = f'{self.label_save_dir}{name}/{self.prefix}' + \ + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt' + if cam_sync: + label_path = label_path.replace('label_', 'cam_sync_label_') + fp_label = open(label_path, 'a') + fp_label.write(line) + fp_label.close() + + fp_label_all.write(line_all) + + fp_label_all.close() + + def save_pose(self, frame, file_idx, frame_idx): + """Parse and save the pose data. + + Note that SDC's own pose is not included in the regular training + of KITTI dataset. KITTI raw dataset contains ego motion files + but are not often used. Pose is important for algorithms that + take advantage of the temporal information. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. 
+ """ + pose = np.array(frame.pose.transform).reshape(4, 4) + np.savetxt( + join(f'{self.pose_save_dir}/{self.prefix}' + + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt'), + pose) + + def save_timestamp(self, frame, file_idx, frame_idx): + """Save the timestamp data in a separate file instead of the + pointcloud. + + Note that SDC's own pose is not included in the regular training + of KITTI dataset. KITTI raw dataset contains ego motion files + but are not often used. Pose is important for algorithms that + take advantage of the temporal information. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. + """ + with open( + join(f'{self.timestamp_save_dir}/{self.prefix}' + + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt'), + 'w') as f: + f.write(str(frame.timestamp_micros)) + + def create_folder(self): + """Create folder for data preprocessing.""" + if not self.test_mode: + dir_list1 = [ + self.label_all_save_dir, + self.calib_save_dir, + self.pose_save_dir, + self.timestamp_save_dir, + ] + dir_list2 = [self.label_save_dir, self.image_save_dir] + if self.save_cam_sync_labels: + dir_list1.append(self.cam_sync_label_all_save_dir) + dir_list2.append(self.cam_sync_label_save_dir) + else: + dir_list1 = [ + self.calib_save_dir, self.pose_save_dir, + self.timestamp_save_dir + ] + dir_list2 = [self.image_save_dir] + if 'testing_3d_camera_only_detection' not in self.load_dir: + dir_list1.append(self.point_cloud_save_dir) + for d in dir_list1: + mmengine.mkdir_or_exist(d) + for d in dir_list2: + for i in range(5): + mmengine.mkdir_or_exist(f'{d}{str(i)}') + + def convert_range_image_to_point_cloud(self, + frame, + range_images, + camera_projections, + range_image_top_pose, + ri_index=0): + """Convert range images to point cloud. + + Args: + frame (:obj:`Frame`): Open dataset frame. + range_images (dict): Mapping from laser_name to list of two + range images corresponding with two returns. + camera_projections (dict): Mapping from laser_name to list of two + camera projections corresponding with two returns. + range_image_top_pose (:obj:`Transform`): Range image pixel pose for + top lidar. + ri_index (int, optional): 0 for the first return, + 1 for the second return. Default: 0. + + Returns: + tuple[list[np.ndarray]]: (List of points with shape [N, 3], + camera projections of points with shape [N, 6], intensity + with shape [N, 1], elongation with shape [N, 1], points' + position in the depth map (element offset if points come from + the main lidar otherwise -1) with shape[N, 1]). All the + lists have the length of lidar numbers (5). 
+ """ + calibrations = sorted( + frame.context.laser_calibrations, key=lambda c: c.name) + points = [] + cp_points = [] + intensity = [] + elongation = [] + mask_indices = [] + + frame_pose = tf.convert_to_tensor( + value=np.reshape(np.array(frame.pose.transform), [4, 4])) + # [H, W, 6] + range_image_top_pose_tensor = tf.reshape( + tf.convert_to_tensor(value=range_image_top_pose.data), + range_image_top_pose.shape.dims) + # [H, W, 3, 3] + range_image_top_pose_tensor_rotation = \ + transform_utils.get_rotation_matrix( + range_image_top_pose_tensor[..., 0], + range_image_top_pose_tensor[..., 1], + range_image_top_pose_tensor[..., 2]) + range_image_top_pose_tensor_translation = \ + range_image_top_pose_tensor[..., 3:] + range_image_top_pose_tensor = transform_utils.get_transform( + range_image_top_pose_tensor_rotation, + range_image_top_pose_tensor_translation) + for c in calibrations: + range_image = range_images[c.name][ri_index] + if len(c.beam_inclinations) == 0: + beam_inclinations = range_image_utils.compute_inclination( + tf.constant( + [c.beam_inclination_min, c.beam_inclination_max]), + height=range_image.shape.dims[0]) + else: + beam_inclinations = tf.constant(c.beam_inclinations) + + beam_inclinations = tf.reverse(beam_inclinations, axis=[-1]) + extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4]) + + range_image_tensor = tf.reshape( + tf.convert_to_tensor(value=range_image.data), + range_image.shape.dims) + pixel_pose_local = None + frame_pose_local = None + if c.name == dataset_pb2.LaserName.TOP: + pixel_pose_local = range_image_top_pose_tensor + pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0) + frame_pose_local = tf.expand_dims(frame_pose, axis=0) + range_image_mask = range_image_tensor[..., 0] > 0 + + if self.filter_no_label_zone_points: + nlz_mask = range_image_tensor[..., 3] != 1.0 # 1.0: in NLZ + range_image_mask = range_image_mask & nlz_mask + + range_image_cartesian = \ + range_image_utils.extract_point_cloud_from_range_image( + tf.expand_dims(range_image_tensor[..., 0], axis=0), + tf.expand_dims(extrinsic, axis=0), + tf.expand_dims(tf.convert_to_tensor( + value=beam_inclinations), axis=0), + pixel_pose=pixel_pose_local, + frame_pose=frame_pose_local) + + mask_index = tf.where(range_image_mask) + + range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0) + points_tensor = tf.gather_nd(range_image_cartesian, mask_index) + + cp = camera_projections[c.name][ri_index] + cp_tensor = tf.reshape( + tf.convert_to_tensor(value=cp.data), cp.shape.dims) + cp_points_tensor = tf.gather_nd(cp_tensor, mask_index) + points.append(points_tensor.numpy()) + cp_points.append(cp_points_tensor.numpy()) + + intensity_tensor = tf.gather_nd(range_image_tensor[..., 1], + mask_index) + intensity.append(intensity_tensor.numpy()) + + elongation_tensor = tf.gather_nd(range_image_tensor[..., 2], + mask_index) + elongation.append(elongation_tensor.numpy()) + if c.name == 1: + mask_index = (ri_index * range_image_mask.shape[0] + + mask_index[:, 0] + ) * range_image_mask.shape[1] + mask_index[:, 1] + mask_index = mask_index.numpy().astype(elongation[-1].dtype) + else: + mask_index = np.full_like(elongation[-1], -1) + + mask_indices.append(mask_index) + + return points, cp_points, intensity, elongation, mask_indices + + def cart_to_homo(self, mat): + """Convert transformation matrix in Cartesian coordinates to + homogeneous format. + + Args: + mat (np.ndarray): Transformation matrix in Cartesian. + The input matrix shape is 3x3 or 3x4. 
+ + Returns: + np.ndarray: Transformation matrix in homogeneous format. + The matrix shape is 4x4. + """ + ret = np.eye(4) + if mat.shape == (3, 3): + ret[:3, :3] = mat + elif mat.shape == (3, 4): + ret[:3, :] = mat + else: + raise ValueError(mat.shape) + return ret + + +def create_ImageSets_img_ids(root_dir, splits): + save_dir = join(root_dir, 'ImageSets/') + if not exists(save_dir): + os.mkdir(save_dir) + + idx_all = [[] for i in splits] + for i, split in enumerate(splits): + path = join(root_dir, splits[i], 'calib') + if not exists(path): + RawNames = [] + else: + RawNames = os.listdir(path) + + for name in RawNames: + if name.endswith('.txt'): + idx = name.replace('.txt', '\n') + idx_all[int(idx[0])].append(idx) + idx_all[i].sort() + + open(save_dir + 'train.txt', 'w').writelines(idx_all[0]) + open(save_dir + 'val.txt', 'w').writelines(idx_all[1]) + open(save_dir + 'trainval.txt', 'w').writelines(idx_all[0] + idx_all[1]) + open(save_dir + 'test.txt', 'w').writelines(idx_all[2]) + # open(save_dir+'test_cam_only.txt','w').writelines(idx_all[3]) + print('created txt files indicating what to collect in ', splits) diff --git a/tools/deployment/mmdet3d2torchserve.py b/tools/deployment/mmdet3d2torchserve.py new file mode 100755 index 0000000..0f80b1f --- /dev/null +++ b/tools/deployment/mmdet3d2torchserve.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser, Namespace +from pathlib import Path +from tempfile import TemporaryDirectory + +import mmengine + +try: + from model_archiver.model_packaging import package_model + from model_archiver.model_packaging_utils import ModelExportUtils +except ImportError: + package_model = None + + +def mmdet3d2torchserve( + config_file: str, + checkpoint_file: str, + output_folder: str, + model_name: str, + model_version: str = '1.0', + force: bool = False, +): + """Converts MMDetection3D model (config + checkpoint) to TorchServe `.mar`. + + Args: + config_file (str): + In MMDetection3D config format. + The contents vary for each task repository. + checkpoint_file (str): + In MMDetection3D checkpoint format. + The contents vary for each task repository. + output_folder (str): + Folder where `{model_name}.mar` will be created. + The file created will be in TorchServe archive format. + model_name (str): + If not None, used for naming the `{model_name}.mar` file + that will be created under `output_folder`. + If None, `{Path(checkpoint_file).stem}` will be used. + model_version (str, optional): + Model's version. Default: '1.0'. + force (bool, optional): + If True, if there is an existing `{model_name}.mar` + file under `output_folder` it will be overwritten. + Default: False. 
+ """ + mmengine.mkdir_or_exist(output_folder) + + config = mmengine.Config.fromfile(config_file) + + with TemporaryDirectory() as tmpdir: + config.dump(f'{tmpdir}/config.py') + + args = Namespace( + **{ + 'model_file': f'{tmpdir}/config.py', + 'serialized_file': checkpoint_file, + 'handler': f'{Path(__file__).parent}/mmdet3d_handler.py', + 'model_name': model_name or Path(checkpoint_file).stem, + 'version': model_version, + 'export_path': output_folder, + 'force': force, + 'requirements_file': None, + 'extra_files': None, + 'runtime': 'python', + 'archive_format': 'default' + }) + manifest = ModelExportUtils.generate_manifest_json(args) + package_model(args, manifest) + + +def parse_args(): + parser = ArgumentParser( + description='Convert MMDetection models to TorchServe `.mar` format.') + parser.add_argument('config', type=str, help='config file path') + parser.add_argument('checkpoint', type=str, help='checkpoint file path') + parser.add_argument( + '--output-folder', + type=str, + required=True, + help='Folder where `{model_name}.mar` will be created.') + parser.add_argument( + '--model-name', + type=str, + default=None, + help='If not None, used for naming the `{model_name}.mar`' + 'file that will be created under `output_folder`.' + 'If None, `{Path(checkpoint_file).stem}` will be used.') + parser.add_argument( + '--model-version', + type=str, + default='1.0', + help='Number used for versioning.') + parser.add_argument( + '-f', + '--force', + action='store_true', + help='overwrite the existing `{model_name}.mar`') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if package_model is None: + raise ImportError('`torch-model-archiver` is required.' + 'Try: pip install torch-model-archiver') + + mmdet3d2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/tools/deployment/mmdet3d_handler.py b/tools/deployment/mmdet3d_handler.py new file mode 100755 index 0000000..e5575eb --- /dev/null +++ b/tools/deployment/mmdet3d_handler.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import base64 +import os + +import numpy as np +import torch +from ts.torch_handler.base_handler import BaseHandler + +from mmdet3d.apis import inference_detector, init_model +from mmdet3d.structures.points import get_points_type + + +class MMdet3dHandler(BaseHandler): + """MMDetection3D Handler used in TorchServe. + + Handler to load models in MMDetection3D, and it will process data to get + predicted results. For now, it only supports SECOND. + """ + threshold = 0.5 + load_dim = 4 + use_dim = [0, 1, 2, 3] + coord_type = 'LIDAR' + attribute_dims = None + + def initialize(self, context): + """Initialize function loads the model in MMDetection3D. + + Args: + context (context): It is a JSON Object containing information + pertaining to the model artifacts parameters. + """ + properties = context.system_properties + self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = torch.device(self.map_location + ':' + + str(properties.get('gpu_id')) if torch.cuda. 
+ is_available() else self.map_location) + self.manifest = context.manifest + + model_dir = properties.get('model_dir') + serialized_file = self.manifest['model']['serializedFile'] + checkpoint = os.path.join(model_dir, serialized_file) + self.config_file = os.path.join(model_dir, 'config.py') + self.model = init_model(self.config_file, checkpoint, self.device) + self.initialized = True + + def preprocess(self, data): + """Preprocess function converts data into LiDARPoints class. + + Args: + data (List): Input data from the request. + + Returns: + `LiDARPoints` : The preprocess function returns the input + point cloud data as LiDARPoints class. + """ + for row in data: + # Compat layer: normally the envelope should just return the data + # directly, but older versions of Torchserve didn't have envelope. + pts = row.get('data') or row.get('body') + if isinstance(pts, str): + pts = base64.b64decode(pts) + + points = np.frombuffer(pts, dtype=np.float32) + points = points.reshape(-1, self.load_dim) + points = points[:, self.use_dim] + points_class = get_points_type(self.coord_type) + points = points_class( + points, + points_dim=points.shape[-1], + attribute_dims=self.attribute_dims) + + return points + + def inference(self, data): + """Inference Function. + + This function is used to make a prediction call on the + given input request. + + Args: + data (`LiDARPoints`): LiDARPoints class passed to make + the inference request. + + Returns: + List(dict) : The predicted result is returned in this function. + """ + results, _ = inference_detector(self.model, data) + return results + + def postprocess(self, data): + """Postprocess function. + + This function makes use of the output from the inference and + converts it into a torchserve supported response output. + + Args: + data (List[dict]): The data received from the prediction + output of the model. + + Returns: + List: The post process function returns a list of the predicted + output. 
+ """ + output = [] + for pts_index, result in enumerate(data): + output.append([]) + if 'pts_bbox' in result.keys(): + pred_bboxes = result['pts_bbox']['boxes_3d'].tensor.numpy() + pred_scores = result['pts_bbox']['scores_3d'].numpy() + else: + pred_bboxes = result['boxes_3d'].tensor.numpy() + pred_scores = result['scores_3d'].numpy() + + index = pred_scores > self.threshold + bbox_coords = pred_bboxes[index].tolist() + score = pred_scores[index].tolist() + + output[pts_index].append({'3dbbox': bbox_coords, 'score': score}) + + return output diff --git a/tools/deployment/test_torchserver.py b/tools/deployment/test_torchserver.py new file mode 100755 index 0000000..613f9e4 --- /dev/null +++ b/tools/deployment/test_torchserver.py @@ -0,0 +1,56 @@ +from argparse import ArgumentParser + +import numpy as np +import requests + +from mmdet3d.apis import inference_detector, init_model + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('pcd', help='Point cloud file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('model_name', help='The model name in the server') + parser.add_argument( + '--inference-addr', + default='127.0.0.1:8080', + help='Address and port of the inference server') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--score-thr', type=float, default=0.5, help='3d bbox score threshold') + args = parser.parse_args() + return args + + +def parse_result(input): + bbox = input[0]['3dbbox'] + result = np.array(bbox) + return result + + +def main(args): + # build the model from a config file and a checkpoint file + model = init_model(args.config, args.checkpoint, device=args.device) + # test a single point cloud file + model_result, _ = inference_detector(model, args.pcd) + # filter the 3d bboxes whose scores > 0.5 + if 'pts_bbox' in model_result[0].keys(): + pred_bboxes = model_result[0]['pts_bbox']['boxes_3d'].tensor.numpy() + pred_scores = model_result[0]['pts_bbox']['scores_3d'].numpy() + else: + pred_bboxes = model_result[0]['boxes_3d'].tensor.numpy() + pred_scores = model_result[0]['scores_3d'].numpy() + model_result = pred_bboxes[pred_scores > 0.5] + + url = 'http://' + args.inference_addr + '/predictions/' + args.model_name + with open(args.pcd, 'rb') as points: + response = requests.post(url, points) + server_result = parse_result(response.json()) + assert np.allclose(model_result, server_result) + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/tools/dist_test.sh b/tools/dist_test.sh new file mode 100755 index 0000000..dea131b --- /dev/null +++ b/tools/dist_test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh new file mode 100755 index 0000000..3fca764 --- /dev/null +++ b/tools/dist_train.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname 
$0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/tools/misc/browse_dataset.py b/tools/misc/browse_dataset.py new file mode 100755 index 0000000..3381b51 --- /dev/null +++ b/tools/misc/browse_dataset.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from os import path as osp + +from mmengine.config import Config, DictAction +from mmengine.registry import init_default_scope +from mmengine.utils import ProgressBar, mkdir_or_exist + +from mmdet3d.registry import DATASETS, VISUALIZERS +from mmdet3d.utils import replace_ceph_backend + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--output-dir', + default=None, + type=str, + help='If there is no display interface, you can save it') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--show-interval', + type=float, + default=2, + help='the interval of show (s)') + parser.add_argument( + '--task', + type=str, + choices=[ + 'mono_det', 'multi-view_det', 'lidar_det', 'lidar_seg', + 'multi-modality_det' + ], + help='Determine the visualization method depending on the task.') + parser.add_argument( + '--aug', + action='store_true', + help='Whether to visualize augmented datasets or original dataset.') + parser.add_argument( + '--ceph', action='store_true', help='Use ceph as data storage backend') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def build_data_cfg(config_path, aug, cfg_options): + """Build data config for loading visualization data.""" + + cfg = Config.fromfile(config_path) + if cfg_options is not None: + cfg.merge_from_dict(cfg_options) + + # extract inner dataset of `RepeatDataset` as + # `cfg.train_dataloader.dataset` so we don't + # need to worry about it later + if cfg.train_dataloader.dataset['type'] == 'RepeatDataset': + cfg.train_dataloader.dataset = cfg.train_dataloader.dataset.dataset + # use only first dataset for `ConcatDataset` + if cfg.train_dataloader.dataset['type'] == 'ConcatDataset': + cfg.train_dataloader.dataset = cfg.train_dataloader.dataset.datasets[0] + train_data_cfg = cfg.train_dataloader.dataset + + if aug: + show_pipeline = cfg.train_pipeline + else: + show_pipeline = cfg.test_pipeline + for i in range(len(cfg.train_pipeline)): + if cfg.train_pipeline[i]['type'] == 'LoadAnnotations3D': + show_pipeline.insert(i, cfg.train_pipeline[i]) + # Collect data as well as labels + if cfg.train_pipeline[i]['type'] == 'Pack3DDetInputs': + if show_pipeline[-1]['type'] == 'Pack3DDetInputs': + show_pipeline[-1] = cfg.train_pipeline[i] + else: + show_pipeline.append(cfg.train_pipeline[i]) + + train_data_cfg['pipeline'] = show_pipeline + + return cfg + + +def main(): + args = parse_args() + + if args.output_dir is not None: + mkdir_or_exist(args.output_dir) + + cfg = build_data_cfg(args.config, args.aug, args.cfg_options) + + # TODO: We will unify the ceph support approach with other OpenMMLab repos + if args.ceph: + cfg = replace_ceph_backend(cfg) + + init_default_scope(cfg.get('default_scope', 'mmdet3d')) + + try: + dataset = DATASETS.build( + cfg.train_dataloader.dataset, + default_args=dict(filter_empty_gt=False)) + except TypeError: # seg dataset doesn't have `filter_empty_gt` key + dataset = DATASETS.build(cfg.train_dataloader.dataset) + + # configure visualization mode + vis_task = args.task + + visualizer = VISUALIZERS.build(cfg.visualizer) + visualizer.dataset_meta = dataset.metainfo + + progress_bar = ProgressBar(len(dataset)) + + for i, item in enumerate(dataset): + # the 3D Boxes in input could be in any of three coordinates + data_input = item['inputs'] + data_sample = item['data_samples'].numpy() + + out_file = osp.join( + args.output_dir, + f'{i}.jpg') if args.output_dir is not None else None + + # o3d_save_path is valid when args.not_show is False + o3d_save_path = osp.join(args.output_dir, f'pc_{i}.png') if ( + args.output_dir is not None + and vis_task in ['lidar_det', 'lidar_seg', 'multi-modality_det'] + and not args.not_show) else None + + visualizer.add_datasample( + '3d visualzier', + data_input, + data_sample=data_sample, + show=not args.not_show, + wait_time=args.show_interval, + out_file=out_file, + o3d_save_path=o3d_save_path, + vis_task=vis_task) + + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/tools/misc/fuse_conv_bn.py b/tools/misc/fuse_conv_bn.py new file mode 100755 index 0000000..90d30ce --- /dev/null +++ b/tools/misc/fuse_conv_bn.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
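# --- Editor's sketch (illustrative, not part of the patch): a worked example
# of the Waymo -> KITTI box conversion documented in Waymo2KITTI.save_label in
# tools/dataset_converters/waymo_converter.py earlier in this patch. The
# vehicle-to-camera extrinsic is assumed to be the identity so that only the
# axis permutation (front-left-up -> right-down-front) and the bottom-centre
# shift are visible; real frames use the per-camera extrinsic from save_calib.
import numpy as np

T_front_cam_to_ref = np.array([[0.0, -1.0, 0.0],
                               [0.0, 0.0, -1.0],
                               [1.0, 0.0, 0.0]])
T_velo_to_cam = np.eye(4)
T_velo_to_cam[:3, :3] = T_front_cam_to_ref  # identity extrinsic assumed

# Waymo box centred at (10, 2, 1) with height 2; KITTI uses the bottom centre.
x, y, z, height = 10.0, 2.0, 1.0, 2.0
bottom_centre = np.array([x, y, z - height / 2, 1.0])
x_cam, y_cam, z_cam, _ = T_velo_to_cam @ bottom_centre
# front-left-up (10, 2, 0) becomes right-down-front (-2, 0, 10) in camera axes
assert np.allclose([x_cam, y_cam, z_cam], [-2.0, 0.0, 10.0])

heading = 0.3                          # Waymo yaw about the up (+z) axis
rotation_y = -heading - np.pi / 2      # KITTI yaw about the camera +y axis
# --- end of editor's sketch ---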
+import argparse + +import torch +from mmengine.runner import save_checkpoint +from torch import nn as nn + +from mmdet3d.apis import init_model + + +def fuse_conv_bn(conv, bn): + """During inference, the functionary of batch norm layers is turned off but + only the mean and var alone channels are used, which exposes the chance to + fuse it with the preceding conv layers to save computations and simplify + network bboxes_3d.""" + conv_w = conv.weight + conv_b = conv.bias if conv.bias is not None else torch.zeros_like( + bn.running_mean) + + factor = bn.weight / torch.sqrt(bn.running_var + bn.eps) + conv.weight = nn.Parameter(conv_w * + factor.reshape([conv.out_channels, 1, 1, 1])) + conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias) + return conv + + +def fuse_module(m): + last_conv = None + last_conv_name = None + + for name, child in m.named_children(): + if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)): + if last_conv is None: # only fuse BN that is after Conv + continue + fused_conv = fuse_conv_bn(last_conv, child) + m._modules[last_conv_name] = fused_conv + # To reduce changes, set BN as Identity instead of deleting it. + m._modules[name] = nn.Identity() + last_conv = None + elif isinstance(child, nn.Conv2d): + last_conv = child + last_conv_name = name + else: + fuse_module(child) + return m + + +def parse_args(): + parser = argparse.ArgumentParser( + description='fuse Conv and BN layers in a model') + parser.add_argument('config', help='config file path') + parser.add_argument('checkpoint', help='checkpoint file path') + parser.add_argument('out', help='output path of the converted model') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + # build the model from a config file and a checkpoint file + model = init_model(args.config, args.checkpoint) + # fuse conv and bn layers of the model + fused_model = fuse_module(model) + save_checkpoint(fused_model, args.out) + + +if __name__ == '__main__': + main() diff --git a/tools/misc/print_config.py b/tools/misc/print_config.py new file mode 100755 index 0000000..a5e6e64 --- /dev/null +++ b/tools/misc/print_config.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +from mmengine import Config, DictAction + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--options', nargs='+', action=DictAction, help='arguments in dict') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.options is not None: + cfg.merge_from_dict(args.options) + print(f'Config:\n{cfg.pretty_text}') + + +if __name__ == '__main__': + main() diff --git a/tools/misc/visualize_results.py b/tools/misc/visualize_results.py new file mode 100755 index 0000000..b9d3452 --- /dev/null +++ b/tools/misc/visualize_results.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
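# --- Editor's sketch (illustrative, not part of the patch): a quick numerical
# check of the Conv+BN folding formula used by fuse_conv_bn() in
# tools/misc/fuse_conv_bn.py above. Layer sizes and statistics are arbitrary.
import torch
from torch import nn

torch.manual_seed(0)
conv = nn.Conv2d(3, 8, 3, padding=1, bias=False).eval()
bn = nn.BatchNorm2d(8).eval()
bn.running_mean.uniform_(-1, 1)    # pretend the BN has accumulated statistics
bn.running_var.uniform_(0.5, 2.0)

with torch.no_grad():
    # Fold the BN statistics into a fresh conv, mirroring fuse_conv_bn().
    factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused = nn.Conv2d(3, 8, 3, padding=1, bias=True).eval()
    fused.weight.copy_(conv.weight * factor.reshape(8, 1, 1, 1))
    fused.bias.copy_((torch.zeros(8) - bn.running_mean) * factor + bn.bias)

    x = torch.randn(2, 3, 16, 16)
    assert torch.allclose(fused(x), bn(conv(x)), atol=1e-5)
# --- end of editor's sketch ---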
+import argparse + +import mmengine +from mmengine import Config + +from mmdet3d.registry import DATASETS + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet3D visualize the results') + parser.add_argument('config', help='test config file path') + parser.add_argument('--result', help='results file in pickle format') + parser.add_argument( + '--show-dir', help='directory where visualize results will be saved') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + if args.result is not None and \ + not args.result.endswith(('.pkl', '.pickle')): + raise ValueError('The results file must be a pkl file.') + + cfg = Config.fromfile(args.config) + cfg.data.test.test_mode = True + + # build the dataset + dataset = DATASETS.build(cfg.data.test) + results = mmengine.load(args.result) + + if getattr(dataset, 'show', None) is not None: + # data loading pipeline for showing + eval_pipeline = cfg.get('eval_pipeline', {}) + if eval_pipeline: + dataset.show(results, args.show_dir, pipeline=eval_pipeline) + else: + dataset.show(results, args.show_dir) # use default pipeline + else: + raise NotImplementedError( + 'Show is not implemented for dataset {}!'.format( + type(dataset).__name__)) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/convert_h3dnet_checkpoints.py b/tools/model_converters/convert_h3dnet_checkpoints.py new file mode 100755 index 0000000..08c27da --- /dev/null +++ b/tools/model_converters/convert_h3dnet_checkpoints.py @@ -0,0 +1,177 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import tempfile + +import torch +from mmcv import Config +from mmengine.runner import load_state_dict + +from mmdet3d.registry import MODELS + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet3D upgrade model version(before v0.6.0) of H3DNet') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='path of the output checkpoint file') + args = parser.parse_args() + return args + + +def parse_config(config_strings): + """Parse config from strings. + + Args: + config_strings (string): strings of model config. 
+ + Returns: + Config: model config + """ + temp_file = tempfile.NamedTemporaryFile() + config_path = f'{temp_file.name}.py' + with open(config_path, 'w') as f: + f.write(config_strings) + + config = Config.fromfile(config_path) + + # Update backbone config + if 'pool_mod' in config.model.backbone.backbones: + config.model.backbone.backbones.pop('pool_mod') + + if 'sa_cfg' not in config.model.backbone: + config.model.backbone['sa_cfg'] = dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True) + + if 'type' not in config.model.rpn_head.vote_aggregation_cfg: + config.model.rpn_head.vote_aggregation_cfg['type'] = 'PointSAModule' + + # Update rpn_head config + if 'pred_layer_cfg' not in config.model.rpn_head: + config.model.rpn_head['pred_layer_cfg'] = dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True) + + if 'feat_channels' in config.model.rpn_head: + config.model.rpn_head.pop('feat_channels') + + if 'vote_moudule_cfg' in config.model.rpn_head: + config.model.rpn_head['vote_module_cfg'] = config.model.rpn_head.pop( + 'vote_moudule_cfg') + + if config.model.rpn_head.vote_aggregation_cfg.use_xyz: + config.model.rpn_head.vote_aggregation_cfg.mlp_channels[0] -= 3 + + for cfg in config.model.roi_head.primitive_list: + cfg['vote_module_cfg'] = cfg.pop('vote_moudule_cfg') + cfg.vote_aggregation_cfg.mlp_channels[0] -= 3 + if 'type' not in cfg.vote_aggregation_cfg: + cfg.vote_aggregation_cfg['type'] = 'PointSAModule' + + if 'type' not in config.model.roi_head.bbox_head.suface_matching_cfg: + config.model.roi_head.bbox_head.suface_matching_cfg[ + 'type'] = 'PointSAModule' + + if config.model.roi_head.bbox_head.suface_matching_cfg.use_xyz: + config.model.roi_head.bbox_head.suface_matching_cfg.mlp_channels[ + 0] -= 3 + + if 'type' not in config.model.roi_head.bbox_head.line_matching_cfg: + config.model.roi_head.bbox_head.line_matching_cfg[ + 'type'] = 'PointSAModule' + + if config.model.roi_head.bbox_head.line_matching_cfg.use_xyz: + config.model.roi_head.bbox_head.line_matching_cfg.mlp_channels[0] -= 3 + + if 'proposal_module_cfg' in config.model.roi_head.bbox_head: + config.model.roi_head.bbox_head.pop('proposal_module_cfg') + + temp_file.close() + + return config + + +def main(): + """Convert keys in checkpoints for VoteNet. + + There can be some breaking changes during the development of mmdetection3d, + and this tool is used for upgrading checkpoints trained with old versions + (before v0.6.0) to the latest one. 
+ """ + args = parse_args() + checkpoint = torch.load(args.checkpoint) + cfg = parse_config(checkpoint['meta']['config']) + # Build the model and load checkpoint + model = MODELS.build( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + orig_ckpt = checkpoint['state_dict'] + converted_ckpt = orig_ckpt.copy() + + if cfg['dataset_type'] == 'ScanNetDataset': + NUM_CLASSES = 18 + elif cfg['dataset_type'] == 'SUNRGBDDataset': + NUM_CLASSES = 10 + else: + raise NotImplementedError + + RENAME_PREFIX = { + 'rpn_head.conv_pred.0': 'rpn_head.conv_pred.shared_convs.layer0', + 'rpn_head.conv_pred.1': 'rpn_head.conv_pred.shared_convs.layer1' + } + + DEL_KEYS = [ + 'rpn_head.conv_pred.0.bn.num_batches_tracked', + 'rpn_head.conv_pred.1.bn.num_batches_tracked' + ] + + EXTRACT_KEYS = { + 'rpn_head.conv_pred.conv_cls.weight': + ('rpn_head.conv_pred.conv_out.weight', [(0, 2), (-NUM_CLASSES, -1)]), + 'rpn_head.conv_pred.conv_cls.bias': + ('rpn_head.conv_pred.conv_out.bias', [(0, 2), (-NUM_CLASSES, -1)]), + 'rpn_head.conv_pred.conv_reg.weight': + ('rpn_head.conv_pred.conv_out.weight', [(2, -NUM_CLASSES)]), + 'rpn_head.conv_pred.conv_reg.bias': + ('rpn_head.conv_pred.conv_out.bias', [(2, -NUM_CLASSES)]) + } + + # Delete some useless keys + for key in DEL_KEYS: + converted_ckpt.pop(key) + + # Rename keys with specific prefix + RENAME_KEYS = dict() + for old_key in converted_ckpt.keys(): + for rename_prefix in RENAME_PREFIX.keys(): + if rename_prefix in old_key: + new_key = old_key.replace(rename_prefix, + RENAME_PREFIX[rename_prefix]) + RENAME_KEYS[new_key] = old_key + for new_key, old_key in RENAME_KEYS.items(): + converted_ckpt[new_key] = converted_ckpt.pop(old_key) + + # Extract weights and rename the keys + for new_key, (old_key, indices) in EXTRACT_KEYS.items(): + cur_layers = orig_ckpt[old_key] + converted_layers = [] + for (start, end) in indices: + if end != -1: + converted_layers.append(cur_layers[start:end]) + else: + converted_layers.append(cur_layers[start:]) + converted_layers = torch.cat(converted_layers, 0) + converted_ckpt[new_key] = converted_layers + if old_key in converted_ckpt.keys(): + converted_ckpt.pop(old_key) + + # Check the converted checkpoint by loading to the model + load_state_dict(model, converted_ckpt, strict=True) + checkpoint['state_dict'] = converted_ckpt + torch.save(checkpoint, args.out) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/convert_votenet_checkpoints.py b/tools/model_converters/convert_votenet_checkpoints.py new file mode 100755 index 0000000..9a9a32f --- /dev/null +++ b/tools/model_converters/convert_votenet_checkpoints.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import tempfile + +import torch +from mmengine import Config +from mmengine.runner import load_state_dict + +from mmdet3d.registry import MODELS + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet3D upgrade model version(before v0.6.0) of VoteNet') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='path of the output checkpoint file') + args = parser.parse_args() + return args + + +def parse_config(config_strings): + """Parse config from strings. + + Args: + config_strings (string): strings of model config. 
+ + Returns: + Config: model config + """ + temp_file = tempfile.NamedTemporaryFile() + config_path = f'{temp_file.name}.py' + with open(config_path, 'w') as f: + f.write(config_strings) + + config = Config.fromfile(config_path) + + # Update backbone config + if 'pool_mod' in config.model.backbone: + config.model.backbone.pop('pool_mod') + + if 'sa_cfg' not in config.model.backbone: + config.model.backbone['sa_cfg'] = dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True) + + if 'type' not in config.model.bbox_head.vote_aggregation_cfg: + config.model.bbox_head.vote_aggregation_cfg['type'] = 'PointSAModule' + + # Update bbox_head config + if 'pred_layer_cfg' not in config.model.bbox_head: + config.model.bbox_head['pred_layer_cfg'] = dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True) + + if 'feat_channels' in config.model.bbox_head: + config.model.bbox_head.pop('feat_channels') + + if 'vote_moudule_cfg' in config.model.bbox_head: + config.model.bbox_head['vote_module_cfg'] = config.model.bbox_head.pop( + 'vote_moudule_cfg') + + if config.model.bbox_head.vote_aggregation_cfg.use_xyz: + config.model.bbox_head.vote_aggregation_cfg.mlp_channels[0] -= 3 + + temp_file.close() + + return config + + +def main(): + """Convert keys in checkpoints for VoteNet. + + There can be some breaking changes during the development of mmdetection3d, + and this tool is used for upgrading checkpoints trained with old versions + (before v0.6.0) to the latest one. + """ + args = parse_args() + checkpoint = torch.load(args.checkpoint) + cfg = parse_config(checkpoint['meta']['config']) + # Build the model and load checkpoint + model = MODELS.build( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + orig_ckpt = checkpoint['state_dict'] + converted_ckpt = orig_ckpt.copy() + + if cfg['dataset_type'] == 'ScanNetDataset': + NUM_CLASSES = 18 + elif cfg['dataset_type'] == 'SUNRGBDDataset': + NUM_CLASSES = 10 + else: + raise NotImplementedError + + RENAME_PREFIX = { + 'bbox_head.conv_pred.0': 'bbox_head.conv_pred.shared_convs.layer0', + 'bbox_head.conv_pred.1': 'bbox_head.conv_pred.shared_convs.layer1' + } + + DEL_KEYS = [ + 'bbox_head.conv_pred.0.bn.num_batches_tracked', + 'bbox_head.conv_pred.1.bn.num_batches_tracked' + ] + + EXTRACT_KEYS = { + 'bbox_head.conv_pred.conv_cls.weight': + ('bbox_head.conv_pred.conv_out.weight', [(0, 2), (-NUM_CLASSES, -1)]), + 'bbox_head.conv_pred.conv_cls.bias': + ('bbox_head.conv_pred.conv_out.bias', [(0, 2), (-NUM_CLASSES, -1)]), + 'bbox_head.conv_pred.conv_reg.weight': + ('bbox_head.conv_pred.conv_out.weight', [(2, -NUM_CLASSES)]), + 'bbox_head.conv_pred.conv_reg.bias': + ('bbox_head.conv_pred.conv_out.bias', [(2, -NUM_CLASSES)]) + } + + # Delete some useless keys + for key in DEL_KEYS: + converted_ckpt.pop(key) + + # Rename keys with specific prefix + RENAME_KEYS = dict() + for old_key in converted_ckpt.keys(): + for rename_prefix in RENAME_PREFIX.keys(): + if rename_prefix in old_key: + new_key = old_key.replace(rename_prefix, + RENAME_PREFIX[rename_prefix]) + RENAME_KEYS[new_key] = old_key + for new_key, old_key in RENAME_KEYS.items(): + converted_ckpt[new_key] = converted_ckpt.pop(old_key) + + # Extract weights and rename the keys + for new_key, (old_key, indices) in EXTRACT_KEYS.items(): + cur_layers = orig_ckpt[old_key] + converted_layers = [] + for (start, end) in indices: + if end != -1: + converted_layers.append(cur_layers[start:end]) + else: + converted_layers.append(cur_layers[start:]) + 
converted_layers = torch.cat(converted_layers, 0) + converted_ckpt[new_key] = converted_layers + if old_key in converted_ckpt.keys(): + converted_ckpt.pop(old_key) + + # Check the converted checkpoint by loading to the model + load_state_dict(model, converted_ckpt, strict=True) + checkpoint['state_dict'] = converted_ckpt + torch.save(checkpoint, args.out) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/publish_model.py b/tools/model_converters/publish_model.py new file mode 100755 index 0000000..e266057 --- /dev/null +++ b/tools/model_converters/publish_model.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import subprocess + +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) + subprocess.Popen(['mv', out_file, final_file]) + + +def main(): + args = parse_args() + process_checkpoint(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/regnet2mmdet.py b/tools/model_converters/regnet2mmdet.py new file mode 100755 index 0000000..fbf8c8f --- /dev/null +++ b/tools/model_converters/regnet2mmdet.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
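# --- Editor's sketch (illustrative, not part of the patch): how the
# (start, end) index pairs in the EXTRACT_KEYS tables of the VoteNet/H3DNet
# checkpoint converters above slice the legacy `conv_out` tensor into the new
# `conv_cls` and `conv_reg` tensors. The 59 regression channels are an
# arbitrary stand-in; only the first-2 / last-NUM_CLASSES split matters.
import torch

NUM_CLASSES = 18  # e.g. ScanNetDataset
legacy_out = torch.arange(2 + 59 + NUM_CLASSES, dtype=torch.float32)

def extract(tensor, indices):
    # Same slicing rule as the converters: an end of -1 means "to the end".
    chunks = [tensor[s:e] if e != -1 else tensor[s:] for s, e in indices]
    return torch.cat(chunks, 0)

conv_cls = extract(legacy_out, [(0, 2), (-NUM_CLASSES, -1)])  # objectness + class scores
conv_reg = extract(legacy_out, [(2, -NUM_CLASSES)])           # everything in between
assert conv_cls.numel() == 2 + NUM_CLASSES and conv_reg.numel() == 59
# --- end of editor's sketch ---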
+import argparse +from collections import OrderedDict + +import torch + + +def convert_stem(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('stem.conv', 'conv1') + new_key = new_key.replace('stem.bn', 'bn1') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('head.fc', 'fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_reslayer(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer, block, module = split_keys[:3] + block_id = int(block[1:]) + layer_name = f'layer{int(layer[1:])}' + block_name = f'{block_id - 1}' + + if block_id == 1 and module == 'bn': + new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}' + elif block_id == 1 and module == 'proj': + new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}' + elif module == 'f': + if split_keys[3] == 'a_bn': + module_name = 'bn1' + elif split_keys[3] == 'b_bn': + module_name = 'bn2' + elif split_keys[3] == 'c_bn': + module_name = 'bn3' + elif split_keys[3] == 'a': + module_name = 'conv1' + elif split_keys[3] == 'b': + module_name = 'conv2' + elif split_keys[3] == 'c': + module_name = 'conv3' + new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}' + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in pycls pretrained RegNet models to mmdet style.""" + # load caffe model + regnet_model = torch.load(src) + blobs = regnet_model['model_state'] + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + for key, weight in blobs.items(): + if 'stem' in key: + convert_stem(key, weight, state_dict, converted_names) + elif 'head' in key: + convert_head(key, weight, state_dict, converted_names) + elif key.startswith('s'): + convert_reslayer(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/slurm_test.sh b/tools/slurm_test.sh new file mode 100755 index 0000000..6dd67e5 --- /dev/null +++ b/tools/slurm_test.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/tools/slurm_train.sh b/tools/slurm_train.sh 
new file mode 100755 index 0000000..b3feb3d --- /dev/null +++ b/tools/slurm_train.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/tools/test.py b/tools/test.py new file mode 100755 index 0000000..fd52bf7 --- /dev/null +++ b/tools/test.py @@ -0,0 +1,127 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +from mmengine.config import Config, DictAction +from mmengine.registry import RUNNERS +from mmengine.runner import Runner + +from mmdet3d.utils import replace_ceph_backend + + +# TODO: support fuse_conv_bn and format_only +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet3D test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--work-dir', + default='./output', + help='the directory to save the file containing evaluation metrics') + parser.add_argument( + '--ceph', action='store_true', help='Use ceph as data storage backend') + parser.add_argument( + '--show', action='store_true', help='show prediction results') + parser.add_argument( + '--show-dir', + help='directory where painted images will be saved. ' + 'If specified, it will be automatically saved ' + 'to the work_dir/timestamp/show_dir') + parser.add_argument( + '--task', + type=str, + choices=[ + 'mono_det', 'multi-view_det', 'lidar_det', 'lidar_seg', + 'multi-modality_det' + ], + help='Determine the visualization method depending on the task.') + parser.add_argument( + '--wait-time', type=float, default=2, help='the interval of show (s)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def trigger_visualization_hook(cfg, args): + default_hooks = cfg.default_hooks + if 'visualization' in default_hooks: + visualization_hook = default_hooks['visualization'] + # Turn on visualization + visualization_hook['draw'] = True + if args.show: + visualization_hook['show'] = True + visualization_hook['wait_time'] = args.wait_time + if args.show_dir: + visualization_hook['test_out_dir'] = args.show_dir + visualization_hook['vis_task'] = args.task + else: + raise RuntimeError( + 'VisualizationHook must be included in default_hooks.' 
+ 'refer to usage ' + '"visualization=dict(type=\'VisualizationHook\')"') + + return cfg + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + + # TODO: We will unify the ceph support approach with other OpenMMLab repos + if args.ceph: + cfg = replace_ceph_backend(cfg) + + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.load_from = args.checkpoint + + if args.show or args.show_dir: + cfg = trigger_visualization_hook(cfg, args) + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + # start testing + runner.test() + + +if __name__ == '__main__': + main() diff --git a/tools/train.py b/tools/train.py new file mode 100755 index 0000000..b2ced54 --- /dev/null +++ b/tools/train.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import logging +import os +import os.path as osp + +from mmengine.config import Config, DictAction +from mmengine.logging import print_log +from mmengine.registry import RUNNERS +from mmengine.runner import Runner + +from mmdet3d.utils import replace_ceph_backend + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a 3D detector') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--amp', + action='store_true', + default=False, + help='enable automatic-mixed-precision training') + parser.add_argument( + '--auto-scale-lr', + action='store_true', + help='enable automatically scaling LR.') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpoint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--ceph', action='store_true', help='Use ceph as data storage backend') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` + # will pass the `--local-rank` parameter to `tools/train.py` instead + # of `--local_rank`. 
+ parser.add_argument('--local_rank', '--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + + # TODO: We will unify the ceph support approach with other OpenMMLab repos + if args.ceph: + cfg = replace_ceph_backend(cfg) + + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + optim_wrapper = cfg.optim_wrapper.type + if optim_wrapper == 'AmpOptimWrapper': + print_log( + 'AMP training is already enabled in your config.', + logger='current', + level=logging.WARNING) + else: + assert optim_wrapper == 'OptimWrapper', ( + '`--amp` is only supported when the optimizer wrapper type is ' + f'`OptimWrapper` but got {optim_wrapper}.') + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.loss_scale = 'dynamic' + + # enable automatically scaling LR + if args.auto_scale_lr: + if 'auto_scale_lr' in cfg and \ + 'enable' in cfg.auto_scale_lr and \ + 'base_batch_size' in cfg.auto_scale_lr: + cfg.auto_scale_lr.enable = True + else: + raise RuntimeError('Can not find "auto_scale_lr" or ' + '"auto_scale_lr.enable" or ' + '"auto_scale_lr.base_batch_size" in your' + ' configuration file.') + + # resume is determined in this priority: resume from > auto_resume + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main() diff --git a/tools/update_data_coords.py b/tools/update_data_coords.py new file mode 100755 index 0000000..280e78d --- /dev/null +++ b/tools/update_data_coords.py @@ -0,0 +1,168 @@ +import argparse +import time +from os import path as osp + +import mmengine +import numpy as np + +from mmdet3d.structures import limit_period + + +def update_sunrgbd_infos(root_dir, out_dir, pkl_files): + print(f'{pkl_files} will be modified because ' + f'of the refactor of the Depth coordinate system.') + if root_dir == out_dir: + print(f'Warning, you are overwriting ' + f'the original data under {root_dir}.') + time.sleep(3) + for pkl_file in pkl_files: + in_path = osp.join(root_dir, pkl_file) + print(f'Reading from input file: {in_path}.') + a = mmengine.load(in_path) + print('Start updating:') + for item in mmengine.track_iter_progress(a): + if 'rotation_y' in item['annos']: + item['annos']['rotation_y'] = -item['annos']['rotation_y'] + item['annos']['gt_boxes_upright_depth'][:, -1:] = \ + -item['annos']['gt_boxes_upright_depth'][:, -1:] + + out_path = osp.join(out_dir, pkl_file) + print(f'Writing to output file: {out_path}.') + mmengine.dump(a, out_path, 'pkl') 
+ + +def update_outdoor_dbinfos(root_dir, out_dir, pkl_files): + print(f'{pkl_files} will be modified because ' + f'of the refactor of the LIDAR coordinate system.') + if root_dir == out_dir: + print(f'Warning, you are overwriting ' + f'the original data under {root_dir}.') + time.sleep(3) + for pkl_file in pkl_files: + in_path = osp.join(root_dir, pkl_file) + print(f'Reading from input file: {in_path}.') + a = mmengine.load(in_path) + print('Start updating:') + for k in a.keys(): + print(f'Updating samples of class {k}:') + for item in mmengine.track_iter_progress(a[k]): + boxes = item['box3d_lidar'].copy() + # swap l, w (or dx, dy) + item['box3d_lidar'][3] = boxes[4] + item['box3d_lidar'][4] = boxes[3] + # change yaw + item['box3d_lidar'][6] = -boxes[6] - np.pi / 2 + item['box3d_lidar'][6] = limit_period( + item['box3d_lidar'][6], period=np.pi * 2) + + out_path = osp.join(out_dir, pkl_file) + print(f'Writing to output file: {out_path}.') + mmengine.dump(a, out_path, 'pkl') + + +def update_nuscenes_or_lyft_infos(root_dir, out_dir, pkl_files): + + print(f'{pkl_files} will be modified because ' + f'of the refactor of the LIDAR coordinate system.') + if root_dir == out_dir: + print(f'Warning, you are overwriting ' + f'the original data under {root_dir}.') + time.sleep(3) + for pkl_file in pkl_files: + in_path = osp.join(root_dir, pkl_file) + print(f'Reading from input file: {in_path}.') + a = mmengine.load(in_path) + print('Start updating:') + for item in mmengine.track_iter_progress(a['infos']): + boxes = item['gt_boxes'].copy() + # swap l, w (or dx, dy) + item['gt_boxes'][:, 3] = boxes[:, 4] + item['gt_boxes'][:, 4] = boxes[:, 3] + # change yaw + item['gt_boxes'][:, 6] = -boxes[:, 6] - np.pi / 2 + item['gt_boxes'][:, 6] = limit_period( + item['gt_boxes'][:, 6], period=np.pi * 2) + + out_path = osp.join(out_dir, pkl_file) + print(f'Writing to output file: {out_path}.') + mmengine.dump(a, out_path, 'pkl') + + +parser = argparse.ArgumentParser(description='Arg parser for data coords ' + 'update due to coords sys refactor.') +parser.add_argument('dataset', metavar='kitti', help='name of the dataset') +parser.add_argument( + '--root-dir', + type=str, + default='./data/kitti', + help='specify the root dir of dataset') +parser.add_argument( + '--version', + type=str, + default='v1.0', + required=False, + help='specify the dataset version, no need for kitti') +parser.add_argument( + '--out-dir', + type=str, + default=None, + required=False, + help='name of info pkl') +args = parser.parse_args() + +if __name__ == '__main__': + if args.out_dir is None: + args.out_dir = args.root_dir + if args.dataset == 'kitti': + # KITTI infos is in CAM coord sys (unchanged) + # KITTI dbinfos is in LIDAR coord sys (changed) + # so we only update dbinfos + pkl_files = ['kitti_dbinfos_train.pkl'] + update_outdoor_dbinfos( + root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files) + elif args.dataset == 'nuscenes': + # nuScenes infos is in LIDAR coord sys (changed) + # nuScenes dbinfos is in LIDAR coord sys (changed) + # so we update both infos and dbinfos + pkl_files = ['nuscenes_infos_val.pkl'] + if args.version != 'v1.0-mini': + pkl_files.append('nuscenes_infos_train.pkl') + else: + pkl_files.append('nuscenes_infos_train_tiny.pkl') + update_nuscenes_or_lyft_infos( + root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files) + if args.version != 'v1.0-mini': + pkl_files = ['nuscenes_dbinfos_train.pkl'] + update_outdoor_dbinfos( + root_dir=args.root_dir, + out_dir=args.out_dir, + 
pkl_files=pkl_files) + elif args.dataset == 'lyft': + # Lyft infos is in LIDAR coord sys (changed) + # Lyft has no dbinfos + # so we update infos + pkl_files = ['lyft_infos_train.pkl', 'lyft_infos_val.pkl'] + update_nuscenes_or_lyft_infos( + root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files) + elif args.dataset == 'waymo': + # Waymo infos is in CAM coord sys (unchanged) + # Waymo dbinfos is in LIDAR coord sys (changed) + # so we only update dbinfos + pkl_files = ['waymo_dbinfos_train.pkl'] + update_outdoor_dbinfos( + root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files) + elif args.dataset == 'scannet': + # ScanNet infos is in DEPTH coord sys (changed) + # but bbox is without yaw + # so ScanNet is unaffected + pass + elif args.dataset == 's3dis': + # Segmentation datasets are not affected + pass + elif args.dataset == 'sunrgbd': + # SUNRGBD infos is in DEPTH coord sys (changed) + # and bbox is with yaw + # so we update infos + pkl_files = ['sunrgbd_infos_train.pkl', 'sunrgbd_infos_val.pkl'] + update_sunrgbd_infos( + root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files) diff --git a/tools/update_data_coords.sh b/tools/update_data_coords.sh new file mode 100755 index 0000000..bd8db62 --- /dev/null +++ b/tools/update_data_coords.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -x +export PYTHONPATH=`pwd`:$PYTHONPATH + +PARTITION=$1 +DATASET=$2 +GPUS=${GPUS:-1} +GPUS_PER_NODE=${GPUS_PER_NODE:-1} +SRUN_ARGS=${SRUN_ARGS:-""} +JOB_NAME=update_data_coords + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/update_data_coords.py ${DATASET} \ + --root-dir ./data/${DATASET} \ + --out-dir ./data/${DATASET} diff --git a/visual.py b/visual.py new file mode 100644 index 0000000..12a48b1 --- /dev/null +++ b/visual.py @@ -0,0 +1,704 @@ + +from nuscenes.nuscenes import NuScenesExplorer, NuScenes + +import json +import math +import os +import os.path as osp +import sys +import time +from datetime import datetime +from typing import Tuple, List, Iterable + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import sklearn.metrics +from PIL import Image +from matplotlib import rcParams +from matplotlib.axes import Axes +from pyquaternion import Quaternion +from tqdm import tqdm + +from nuscenes.lidarseg.lidarseg_utils import colormap_to_colors, \ + get_labels_in_coloring, create_lidarseg_legend, paint_points_label +from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box +from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix +from nuscenes.utils.map_mask import MapMask +from nuscenes.utils.color_map import get_colormap + +color_mapping = [ + np.array([140, 140, 136]) / 255.0, + np.array([4, 157, 217]) / 255.0, + np.array([191, 4, 54]) / 255.0, + np.array([0, 0, 0]) / 255.0, + np.array([224, 133, 250]) / 255.0, + np.array([32, 64, 40]) / 255.0, + np.array([77, 115, 67]) / 255.0 +] + +print('Using color mapping', color_mapping) + + +class NuScenesMars(NuScenes): + def __init__(self, + version: str = 'v1.0-mini', + dataroot: str = '/data/sets/nuscenes', + verbose: bool = True, + map_resolution: float = 0.1): + super().__init__(version, dataroot, verbose, map_resolution) + + + def get_box(self, sample_annotation_token: str) -> Box: + """ + Instantiates a Box class from a sample annotation record. 
+ :param sample_annotation_token: Unique sample_annotation identifier. + """ + record = self.get('sample_annotation', sample_annotation_token) + velocity = self.box_velocity(sample_annotation_token) + return Box(record['translation'], record['size'], Quaternion(record['rotation']), + name=record['category_name'], token=record['token'], velocity=velocity) + + def get_sample_data_given_boxes(self, sample_data_token: str, + boxes: List[Box], + box_vis_level: BoxVisibility = BoxVisibility.ANY, + selected_anntokens: List[str] = None, + use_flat_vehicle_coordinates: bool = False) -> \ + Tuple[str, List[Box], np.array]: + """ + Returns the data path as well as all annotations related to that sample_data. + Note that the boxes are transformed into the current sensor's coordinate frame. + :param sample_data_token: Sample_data token. + :param boxes: List of Boxes in global frame. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param selected_anntokens: If provided only return the selected annotation. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. + :return: (data_path, boxes, camera_intrinsic ) + """ + + # Retrieve sensor & pose records + sd_record = self.get('sample_data', sample_data_token) + cs_record = self.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + sensor_record = self.get('sensor', cs_record['sensor_token']) + pose_record = self.get('ego_pose', sd_record['ego_pose_token']) + + data_path = self.get_sample_data_path(sample_data_token) + + if sensor_record['modality'] == 'camera': + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + imsize = (sd_record['width'], sd_record['height']) + else: + cam_intrinsic = None + imsize = None + + # map boxes from global frame to sensor coordinate system. + + # Make list of Box objects including coord system transforms. + box_list = [] + for box in boxes: + if use_flat_vehicle_coordinates: + # Move box to ego vehicle coord system parallel to world z plane. + yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse) + else: + # Move box to ego vehicle coord system. + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(pose_record['rotation']).inverse) + + # Move box to sensor coord system. 
+ box.translate(-np.array(cs_record['translation'])) + box.rotate(Quaternion(cs_record['rotation']).inverse) + + if sensor_record['modality'] == 'camera' and not \ + box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level): + continue + + box_list.append(box) + + return data_path, box_list, cam_intrinsic + + +class NuScenesExplorerMars(NuScenesExplorer): + + def __init__(self, nusc: NuScenesMars): + super().__init__(nusc) + self.nusc = nusc + + def render_sample_data(self, + sample_data_token: str, + with_anns: bool = True, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + axes_limit: float = 40, + ax: Axes = None, + nsweeps: int = 1, + out_path: str = 'output1', + underlay_map: bool = True, + use_flat_vehicle_coordinates: bool = True, + show_lidarseg: bool = False, + show_lidarseg_legend: bool = False, + filter_lidarseg_labels: List = None, + lidarseg_preds_bin_path: str = None, + verbose: bool = True, + show_panoptic: bool = False, + show_radar_raw_velo: bool = False, + i:int=0, + ) -> None: + """ + Render sample data onto axis. + :param sample_data_token: Sample_data token. + :param with_anns: Whether to draw box annotations. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param axes_limit: Axes limit for lidar and radar (measured in meters). + :param ax: Axes onto which to render. + :param nsweeps: Number of sweeps for lidar and radar. + :param out_path: Optional path to save the rendered figure to disk. + :param underlay_map: When set to true, lidar data is plotted onto the map. This can be slow. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. Note: Previously this method did not use flat vehicle coordinates, which + can lead to small errors when the vertical axis of the global frame and lidar are not aligned. The new + setting is more correct and rotates the plot by ~90 degrees. + :param show_lidarseg: When set to True, the lidar data is colored with the segmentation labels. When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + :param show_lidarseg_legend: Whether to display the legend for the lidarseg labels in the frame. + :param filter_lidarseg_labels: Only show lidar points which belong to the given list of classes. If None + or the list is empty, all classes will be displayed. + :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation + predictions for the sample. + :param verbose: Whether to display the image after it is rendered. + :param show_panoptic: When set to True, the lidar data is colored with the panoptic labels. When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + If show_lidarseg is True, show_panoptic will be set to False. + """ + # Get sensor modality. + sd_record = self.nusc.get('sample_data', sample_data_token) + sensor_modality = sd_record['sensor_modality'] + + if sensor_modality in ['lidar', 'radar']: + sample_rec = self.nusc.get('sample', sd_record['sample_token']) + chan = sd_record['channel'] + ref_chan = 'LIDAR_TOP' + ref_sd_token = sample_rec['data'][ref_chan] + ref_sd_record = self.nusc.get('sample_data', ref_sd_token) + + if sensor_modality == 'lidar': + # Get aggregated lidar point cloud in lidar frame. 
+ pc, times = LidarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, + nsweeps=nsweeps) + velocities = None + else: + # Get aggregated radar point cloud in reference frame. + # The point cloud is transformed to the reference frame for visualization purposes. + pc, times = RadarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, nsweeps=nsweeps) + + # Transform radar velocities (x is front, y is left), as these are not transformed when loading the + # point cloud. + radar_cs_record = self.nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + ref_cs_record = self.nusc.get('calibrated_sensor', ref_sd_record['calibrated_sensor_token']) + velocities = pc.points[8:10, :] # Compensated velocity + velocities = np.vstack((velocities, np.zeros(pc.points.shape[1]))) + velocities = np.dot(Quaternion(radar_cs_record['rotation']).rotation_matrix, velocities) + velocities = np.dot(Quaternion(ref_cs_record['rotation']).rotation_matrix.T, velocities) + velocities[2, :] = np.zeros(pc.points.shape[1]) + + if show_radar_raw_velo: + # code for radar velocity without compensated starts below + velocities_ = pc.points[6:8, :] # Not Compensated velocity + velocities_ = np.vstack((velocities_, np.zeros(pc.points.shape[1]))) + velocities_ = np.dot(Quaternion(radar_cs_record['rotation']).rotation_matrix, velocities_) + velocities_ = np.dot(Quaternion(ref_cs_record['rotation']).rotation_matrix.T, velocities_) + velocities_[2, :] = np.zeros(pc.points.shape[1]) + # code for radar velocity without compensated end here + + # By default we render the sample_data top down in the sensor frame. + # This is slightly inaccurate when rendering the map as the sensor frame may not be perfectly upright. + # Using use_flat_vehicle_coordinates we can render the map in the ego frame instead. + if use_flat_vehicle_coordinates: + # Retrieve transformation matrices for reference point cloud. + cs_record = self.nusc.get('calibrated_sensor', ref_sd_record['calibrated_sensor_token']) + pose_record = self.nusc.get('ego_pose', ref_sd_record['ego_pose_token']) + ref_to_ego = transform_matrix(translation=cs_record['translation'], + rotation=Quaternion(cs_record["rotation"])) + + # Compute rotation between 3D vehicle pose and "flat" vehicle pose (parallel to global z plane). + ego_yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] + rotation_vehicle_flat_from_vehicle = np.dot( + Quaternion(scalar=np.cos(ego_yaw / 2), vector=[0, 0, np.sin(ego_yaw / 2)]).rotation_matrix, + Quaternion(pose_record['rotation']).inverse.rotation_matrix) + vehicle_flat_from_vehicle = np.eye(4) + vehicle_flat_from_vehicle[:3, :3] = rotation_vehicle_flat_from_vehicle + viewpoint = np.dot(vehicle_flat_from_vehicle, ref_to_ego) + else: + viewpoint = np.eye(4) + + # Init axes. + if ax is None: + _, ax = plt.subplots(1, 1, figsize=(9, 9)) + + # Render map if requested. + if underlay_map: + assert use_flat_vehicle_coordinates, 'Error: underlay_map requires use_flat_vehicle_coordinates, as ' \ + 'otherwise the location does not correspond to the map!' + self.render_ego_centric_map(sample_data_token=sample_data_token, axes_limit=axes_limit, ax=ax) + + # Show point cloud. + points = view_points(pc.points[:3, :], viewpoint, normalize=False) + dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0)) + colors = np.minimum(1, dists / axes_limit / np.sqrt(2)) + + point_scale = 0.2 if sensor_modality == 'lidar' else 3.0 + scatter = ax.scatter(points[0, :], points[1, :], c=colors, s=point_scale) + + # Show velocities. 
+ if sensor_modality == 'radar': + points_vel = view_points(pc.points[:3, :] + velocities, viewpoint, normalize=False) + deltas_vel = points_vel - points + deltas_vel = 6 * deltas_vel # Arbitrary scaling + max_delta = 20 + deltas_vel = np.clip(deltas_vel, -max_delta, max_delta) # Arbitrary clipping + + if show_radar_raw_velo: + # code for radar velocity without compensated starts below + points_vel_ = view_points(pc.points[:3, :] + velocities_, viewpoint, normalize=False) + deltas_vel_ = points_vel_ - points + deltas_vel_ = 6 * deltas_vel_ # Arbitrary scaling + deltas_vel_ = np.clip(deltas_vel_, -max_delta, max_delta) # Arbitrary clipping + # code for radar velocity without compensated end here + + colors_rgba = scatter.to_rgba(colors) + for i in range(points.shape[1]): + ax.arrow(points[0, i], points[1, i], deltas_vel[0, i], deltas_vel[1, i], color=colors_rgba[i]) + + if show_radar_raw_velo: + # code for radar velocity without compensated starts below + ax.arrow(points[0, i], points[1, i], deltas_vel_[0, i], deltas_vel_[1, i], color='pink') + # code for radar velocity without compensated end here + + # Show ego vehicle. + ax.plot(0, 0, 'x', color='red') + + # Get boxes in lidar frame. + _, boxes, _ = self.nusc.get_sample_data(ref_sd_token, box_vis_level=box_vis_level, + use_flat_vehicle_coordinates=use_flat_vehicle_coordinates) + + # Show boxes. + if with_anns: + for box in boxes: + + c = np.array(self.get_color(box.name)) / 255.0 + box.render(ax, view=np.eye(4), colors=(c, c, c)) + ax.arrow( + box.center[0], box.center[1], box.velocity[0], box.velocity[1], + color='cyan', width=0.25, ) + + + # Limit visible range. + ax.set_xlim(-axes_limit, axes_limit) + ax.set_ylim(-axes_limit, axes_limit) + elif sensor_modality == 'camera': + # Load boxes and image. + data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(sample_data_token, + box_vis_level=box_vis_level) + data = Image.open(data_path) + + # Init axes. + if ax is None: + _, ax = plt.subplots(1, 1, figsize=(9, 16)) + + # Show image. + ax.imshow(data) + + # Show boxes. + if with_anns: + for box in boxes: + c = np.array(self.get_color(box.name)) / 255.0 + box.render(ax, view=camera_intrinsic, normalize=True, colors=(c, c, c)) + center = box.center[:, np.newaxis] + velo = box.velocity[:, np.newaxis] + center_cam = view_points(center, camera_intrinsic, normalize=True)[:, 0] + center_add_velo_cam = view_points(center + velo, camera_intrinsic, normalize=True)[:, 0] + + delta = center_add_velo_cam - center_cam + ax.arrow( + center_cam[0], center_cam[1], delta[0], delta[1], + color='cyan', width=3.0, ) + + + # Limit visible range. 
+ ax.set_xlim(0, data.size[0]) + ax.set_ylim(data.size[1], 0) + + else: + raise ValueError("Error: Unknown sensor modality!") + + ax.axis('off') + ax.set_title('{} {labels_type}'.format( + sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else '')) + ax.set_aspect('equal') + + if out_path is not None: + root_dir = './output1' + file_name = sd_record['filename'].split('/')[-1].split('.')[0] + output1_path = os.path.join(root_dir, file_name) + # out_path = "./output1/output1{i+1}" + + plt.savefig(output1_path, bbox_inches='tight', pad_inches=0, dpi=200) + + if verbose: + plt.show() + plt.close() + def render_sample_pred(self, + sample_data_token: str, + boxes: List[Box], + with_anns: bool = True, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + axes_limit: float = 40, + ax: Axes = None, + nsweeps: int = 1, + out_path: str = 'output1', + underlay_map: bool = True, + use_flat_vehicle_coordinates: bool = True, + show_lidarseg: bool = False, + show_lidarseg_legend: bool = False, + filter_lidarseg_labels: List = None, + lidarseg_preds_bin_path: str = None, + verbose: bool = True, + show_panoptic: bool = False, + show_radar_raw_velo: bool = False, + i:int=0 + ) -> None: + """ + Render sample data onto axis. + :param sample_data_token: Sample_data token. + :param with_anns: Whether to draw box annotations. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param axes_limit: Axes limit for lidar and radar (measured in meters). + :param ax: Axes onto which to render. + :param nsweeps: Number of sweeps for lidar and radar. + :param out_path: Optional path to save the rendered figure to disk. + :param underlay_map: When set to true, lidar data is plotted onto the map. This can be slow. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. Note: Previously this method did not use flat vehicle coordinates, which + can lead to small errors when the vertical axis of the global frame and lidar are not aligned. The new + setting is more correct and rotates the plot by ~90 degrees. + :param show_lidarseg: When set to True, the lidar data is colored with the segmentation labels. When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + :param show_lidarseg_legend: Whether to display the legend for the lidarseg labels in the frame. + :param filter_lidarseg_labels: Only show lidar points which belong to the given list of classes. If None + or the list is empty, all classes will be displayed. + :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation + predictions for the sample. + :param verbose: Whether to display the image after it is rendered. + :param show_panoptic: When set to True, the lidar data is colored with the panoptic labels. When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + If show_lidarseg is True, show_panoptic will be set to False. + """ + # Get sensor modality. 
+ sd_record = self.nusc.get('sample_data', sample_data_token) + sensor_modality = sd_record['sensor_modality'] + + if sensor_modality in ['lidar', 'radar']: + sample_rec = self.nusc.get('sample', sd_record['sample_token']) + chan = sd_record['channel'] + ref_chan = 'LIDAR_TOP' + ref_sd_token = sample_rec['data'][ref_chan] + ref_sd_record = self.nusc.get('sample_data', ref_sd_token) + + if sensor_modality == 'lidar': + # Get aggregated lidar point cloud in lidar frame. + pc, times = LidarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, + nsweeps=nsweeps) + velocities = None + else: + # Get aggregated radar point cloud in reference frame. + # The point cloud is transformed to the reference frame for visualization purposes. + pc, times = RadarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, nsweeps=nsweeps) + + # Transform radar velocities (x is front, y is left), as these are not transformed when loading the + # point cloud. + radar_cs_record = self.nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + ref_cs_record = self.nusc.get('calibrated_sensor', ref_sd_record['calibrated_sensor_token']) + velocities = pc.points[8:10, :] # Compensated velocity + velocities = np.vstack((velocities, np.zeros(pc.points.shape[1]))) + velocities = np.dot(Quaternion(radar_cs_record['rotation']).rotation_matrix, velocities) + velocities = np.dot(Quaternion(ref_cs_record['rotation']).rotation_matrix.T, velocities) + velocities[2, :] = np.zeros(pc.points.shape[1]) + + if show_radar_raw_velo: + # code for radar velocity without compensated starts below + velocities_ = pc.points[6:8, :] # Not Compensated velocity + velocities_ = np.vstack((velocities_, np.zeros(pc.points.shape[1]))) + velocities_ = np.dot(Quaternion(radar_cs_record['rotation']).rotation_matrix, velocities_) + velocities_ = np.dot(Quaternion(ref_cs_record['rotation']).rotation_matrix.T, velocities_) + velocities_[2, :] = np.zeros(pc.points.shape[1]) + # code for radar velocity without compensated end here + + # By default we render the sample_data top down in the sensor frame. + # This is slightly inaccurate when rendering the map as the sensor frame may not be perfectly upright. + # Using use_flat_vehicle_coordinates we can render the map in the ego frame instead. + if use_flat_vehicle_coordinates: + # Retrieve transformation matrices for reference point cloud. + cs_record = self.nusc.get('calibrated_sensor', ref_sd_record['calibrated_sensor_token']) + pose_record = self.nusc.get('ego_pose', ref_sd_record['ego_pose_token']) + ref_to_ego = transform_matrix(translation=cs_record['translation'], + rotation=Quaternion(cs_record["rotation"])) + + # Compute rotation between 3D vehicle pose and "flat" vehicle pose (parallel to global z plane). + ego_yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] + rotation_vehicle_flat_from_vehicle = np.dot( + Quaternion(scalar=np.cos(ego_yaw / 2), vector=[0, 0, np.sin(ego_yaw / 2)]).rotation_matrix, + Quaternion(pose_record['rotation']).inverse.rotation_matrix) + vehicle_flat_from_vehicle = np.eye(4) + vehicle_flat_from_vehicle[:3, :3] = rotation_vehicle_flat_from_vehicle + viewpoint = np.dot(vehicle_flat_from_vehicle, ref_to_ego) + else: + viewpoint = np.eye(4) + + # Init axes. + if ax is None: + _, ax = plt.subplots(1, 1, figsize=(9, 9)) + + # Render map if requested. 
+ if underlay_map: + assert use_flat_vehicle_coordinates, 'Error: underlay_map requires use_flat_vehicle_coordinates, as ' \ + 'otherwise the location does not correspond to the map!' + self.render_ego_centric_map(sample_data_token=sample_data_token, axes_limit=axes_limit, ax=ax) + + # Show point cloud. + points = view_points(pc.points[:3, :], viewpoint, normalize=False) + dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0)) + colors = np.minimum(1, dists / axes_limit / np.sqrt(2)) + + point_scale = 0.2 if sensor_modality == 'lidar' else 3.0 + scatter = ax.scatter(points[0, :], points[1, :], c=colors, s=point_scale) + + # Show velocities. + if sensor_modality == 'radar': + points_vel = view_points(pc.points[:3, :] + velocities, viewpoint, normalize=False) + deltas_vel = points_vel - points + deltas_vel = 6 * deltas_vel # Arbitrary scaling + max_delta = 20 + deltas_vel = np.clip(deltas_vel, -max_delta, max_delta) # Arbitrary clipping + + if show_radar_raw_velo: + # code for radar velocity without compensated starts below + points_vel_ = view_points(pc.points[:3, :] + velocities_, viewpoint, normalize=False) + deltas_vel_ = points_vel_ - points + deltas_vel_ = 6 * deltas_vel_ # Arbitrary scaling + deltas_vel_ = np.clip(deltas_vel_, -max_delta, max_delta) # Arbitrary clipping + # code for radar velocity without compensated end here + + colors_rgba = scatter.to_rgba(colors) + for i in range(points.shape[1]): + ax.arrow(points[0, i], points[1, i], deltas_vel[0, i], deltas_vel[1, i], color=colors_rgba[i]) + + if show_radar_raw_velo: + # code for radar velocity without compensated starts below + ax.arrow(points[0, i], points[1, i], deltas_vel_[0, i], deltas_vel_[1, i], color='pink') + # code for radar velocity without compensated end here + + # Show ego vehicle. + ax.plot(0, 0, 'x', color='red') + + # Get boxes in lidar frame. + _, boxes, _ = self.nusc.get_sample_data_given_boxes( + ref_sd_token, boxes, box_vis_level=box_vis_level, + use_flat_vehicle_coordinates=use_flat_vehicle_coordinates) + + # Show boxes. + if with_anns: + for i, box in enumerate(boxes): + + + c = np.array(self.get_color(box.name)) / 255.0 + if hasattr(box, 'track_ind'): # this is true + tr_id = box.track_ind + c = color_mapping[tr_id % len(color_mapping)] + # print(c_box) + # print("original color", np.array(self.get_color(box.name)) / 255.0) + box.render(ax, view=np.eye(4), colors=(c,c,c)) + # print(c_box, np.array(self.get_color(box.name)) / 255.0) + ax.arrow( + box.center[0], box.center[1], box.velocity[0], box.velocity[1], + color=c, width=0.25, ) + # color='cyan', width=0.25, ) + + + # Limit visible range. + ax.set_xlim(-axes_limit, axes_limit) + ax.set_ylim(-axes_limit, axes_limit) + elif sensor_modality == 'camera': + # Load boxes and image. + data_path, boxes, camera_intrinsic = self.nusc.get_sample_data_given_boxes( + sample_data_token, boxes, box_vis_level=box_vis_level) + data = Image.open(data_path) + + # Init axes. + if ax is None: + _, ax = plt.subplots(1, 1, figsize=(9, 16)) + + # Show image. + ax.imshow(data) + + # Show boxes. 
+ if with_anns: + for box in boxes: + c = np.array(self.get_color(box.name)) / 255.0 + if hasattr(box, 'track_ind'): # this is true + tr_id = box.track_ind + c = color_mapping[tr_id % len(color_mapping)] + # if hasattr(box, 'track_ind'): # this is true + box.render(ax, view=camera_intrinsic, normalize=True, colors=(c, c, c)) + center = box.center[:, np.newaxis] + velo = box.velocity[:, np.newaxis] + center_cam = view_points(center, camera_intrinsic, normalize=True)[:, 0] + center_add_velo_cam = view_points(center + velo, camera_intrinsic, normalize=True)[:, 0] + + delta = center_add_velo_cam - center_cam + ax.arrow( + center_cam[0], center_cam[1], delta[0], delta[1], + color=c, width=3.0, ) + # color='cyan', width=3.0, ) + + + # Limit visible range. + ax.set_xlim(0, data.size[0]) + ax.set_ylim(data.size[1], 0) + + else: + raise ValueError("Error: Unknown sensor modality!") + + ax.axis('off') + ax.set_title('{} {labels_type} - Pred'.format( + sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else '')) + ax.set_aspect('equal') + + if out_path is not None: + root_dir = './output1' + file_name = sd_record['filename'].split('/')[-1].split('.')[0] + output1_path = os.path.join(root_dir, file_name) + # out_path = "./output1/output1{i+1}" + + plt.savefig(output1_path, bbox_inches='tight', pad_inches=0, dpi=200) + + if verbose: + plt.show() + plt.close() + +def load_results_json(results_path: str = None): + NameMapping = { + 'movable_object.barrier': 'barrier', + 'vehicle.bicycle': 'bicycle', + 'vehicle.bus.bendy': 'bus', + 'vehicle.bus.rigid': 'bus', + 'vehicle.car': 'car', + 'vehicle.construction': 'construction_vehicle', + 'vehicle.motorcycle': 'motorcycle', + 'human.pedestrian.adult': 'pedestrian', + 'human.pedestrian.child': 'pedestrian', + 'human.pedestrian.construction_worker': 'pedestrian', + 'human.pedestrian.police_officer': 'pedestrian', + 'movable_object.trafficcone': 'traffic_cone', + 'vehicle.trailer': 'trailer', + 'vehicle.truck': 'truck'} + + inverse_mapping = {} + for key, value in NameMapping.items(): + inverse_mapping[value] = key + with open(results_path) as f: + data = json.load(f) + results_dict = data['results'] + + new_results_dict = {} + + for key, item in results_dict.items(): + new_item = [] + for _box_dict in item: + if 'detection_name' in _box_dict: + # load detection box + score=_box_dict['detection_score'] + if score < 0.20: + continue + new_box = Box( + center=_box_dict['translation'], + size=_box_dict['size'], + orientation=Quaternion(_box_dict['rotation']), + score=_box_dict['detection_score'], + velocity=_box_dict['velocity'] + [0], + name=inverse_mapping[_box_dict['detection_name']], + token=_box_dict['sample_token']) + else: + # then it is track box + center_ = _box_dict['translation'] + new_box = Box( + center=center_, + size=_box_dict['size'], + orientation=Quaternion(_box_dict['rotation']), + label=int(_box_dict['tracking_id']), + score=_box_dict['tracking_score'], + velocity=_box_dict['velocity'] + [0], + name=inverse_mapping[_box_dict['tracking_name']], + token=_box_dict['sample_token']) + new_box.track_ind = int(_box_dict['tracking_id']) + new_item.append(new_box) + + new_results_dict[key] = new_item + + print('loading total of {} boxes'.format(len(new_results_dict))) + + + return new_results_dict + + +def _test(): + nusc = NuScenesMars(version='v1.0-trainval', dataroot='./data/nuscenes') + nusc_exp = NuScenesExplorerMars(nusc) + nusc_exp_raw = NuScenesExplorer(nusc) + samples = nusc.sample + samples[0] + lidar_names = 
['LIDAR_TOP'] + cam_names = ['CAM_FRONT',] + sample_data_token = samples[0]['data'][lidar_names[0]] + sample_data_token_cam = samples[0]['data'][cam_names[0]] + anns = nusc.sample_annotation + anns[0] + nusc_exp.render_sample_data(sample_data_token) + print('----------------------') + nusc_exp.render_sample_data(sample_data_token_cam) + print('----------------------') + nusc_exp.render_sample(samples[0]['token']) + + +def _test_pred(results_path): + results_dict = load_results_json(results_path) + + nusc = NuScenesMars(version='v1.0-trainval', dataroot='./data/nuscenes') + nusc_exp = NuScenesExplorerMars(nusc) + samples = nusc.sample + samples[0] + lidar_names = ['LIDAR_TOP'] + cam_names = ['CAM_FRONT_LEFT',] + sample_data_token = samples[0]['data'][lidar_names[0]] + sample_data_token_cam = samples[0]['data'][cam_names[0]] + anns = nusc.sample_annotation + anns[0] + nusc_exp.render_sample_data(sample_data_token) + print('----------------------') + nusc_exp.render_sample_data(sample_data_token_cam) + + selected_keys = list(results_dict.keys()) + i=0 + for sample_token in selected_keys: + + selected_sample = nusc.get('sample', sample_token) + + # selected_lidar_token = selected_sample['data']['LIDAR_BACK'] + selected_cam_token = selected_sample['data']['CAM_BACK'] + + # nusc_exp.render_sample_pred(selected_lidar_token, results_dict[sample_token],i) + # i+=1 + nusc_exp.render_sample_pred(selected_cam_token, results_dict[sample_token],i) + i+=1 +path='results_nusc.json' +_test_pred(path) diff --git a/visual2.py b/visual2.py new file mode 100644 index 0000000..8c55624 --- /dev/null +++ b/visual2.py @@ -0,0 +1,477 @@ +# Based on https://github.com/nutonomy/nuscenes-devkit +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import mmcv +from nuscenes.nuscenes import NuScenes +from PIL import Image +from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix +from typing import Tuple, List, Iterable +import matplotlib.pyplot as plt +import numpy as np +from PIL import Image +from matplotlib import rcParams +from matplotlib.axes import Axes +from pyquaternion import Quaternion +from PIL import Image +from matplotlib import rcParams +from matplotlib.axes import Axes +from pyquaternion import Quaternion +from tqdm import tqdm +from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box +from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix +from nuscenes.eval.common.data_classes import EvalBoxes, EvalBox +from nuscenes.eval.detection.data_classes import DetectionBox +from nuscenes.eval.detection.utils import category_to_detection_name +from nuscenes.eval.detection.render import visualize_sample + + + + +cams = ['CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_BACK_RIGHT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_FRONT_LEFT'] + +import numpy as np +import matplotlib.pyplot as plt +from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box +from PIL import Image +from matplotlib import rcParams + + +def render_annotation( + anntoken: str, + margin: float = 10, + view: np.ndarray = np.eye(4), + box_vis_level: BoxVisibility = BoxVisibility.ANY, + out_path: str = 'render.png', + extra_info: bool = False) -> None: + """ + Render selected annotation. + :param anntoken: Sample_annotation token. + :param margin: How many meters in each direction to include in LIDAR view. + :param view: LIDAR view point. 
+ :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param out_path: Optional path to save the rendered figure to disk. + :param extra_info: Whether to render extra information below camera view. + """ + ann_record = nusc.get('sample_annotation', anntoken) + sample_record = nusc.get('sample', ann_record['sample_token']) + assert 'LIDAR_TOP' in sample_record['data'].keys(), 'Error: No LIDAR_TOP in data, unable to render.' + + # Figure out which camera the object is fully visible in (this may return nothing). + boxes, cam = [], [] + cams = [key for key in sample_record['data'].keys() if 'CAM' in key] + all_bboxes = [] + select_cams = [] + for cam in cams: + _, boxes, _ = nusc.get_sample_data(sample_record['data'][cam], box_vis_level=box_vis_level, + selected_anntokens=[anntoken]) + if len(boxes) > 0: + all_bboxes.append(boxes) + select_cams.append(cam) + # We found an image that matches. Let's abort. + # assert len(boxes) > 0, 'Error: Could not find image where annotation is visible. ' \ + # 'Try using e.g. BoxVisibility.ANY.' + # assert len(boxes) < 2, 'Error: Found multiple annotations. Something is wrong!' + + num_cam = len(all_bboxes) + + fig, axes = plt.subplots(1, num_cam + 1, figsize=(18, 9)) + select_cams = [sample_record['data'][cam] for cam in select_cams] + print('bbox in cams:', select_cams) + # Plot LIDAR view. + lidar = sample_record['data']['LIDAR_TOP'] + data_path, boxes, camera_intrinsic = nusc.get_sample_data(lidar, selected_anntokens=[anntoken]) + LidarPointCloud.from_file(data_path).render_height(axes[0], view=view) + for box in boxes: + c = np.array(get_color(box.name)) / 255.0 + box.render(axes[0], view=view, colors=(c, c, c)) + corners = view_points(boxes[0].corners(), view, False)[:2, :] + axes[0].set_xlim([np.min(corners[0, :]) - margin, np.max(corners[0, :]) + margin]) + axes[0].set_ylim([np.min(corners[1, :]) - margin, np.max(corners[1, :]) + margin]) + axes[0].axis('off') + axes[0].set_aspect('equal') + + # Plot CAMERA view. + for i in range(1, num_cam + 1): + cam = select_cams[i - 1] + data_path, boxes, camera_intrinsic = nusc.get_sample_data(cam, selected_anntokens=[anntoken]) + im = Image.open(data_path) + axes[i].imshow(im) + axes[i].set_title(nusc.get('sample_data', cam)['channel']) + axes[i].axis('off') + axes[i].set_aspect('equal') + for box in boxes: + c = np.array(get_color(box.name)) / 255.0 + box.render(axes[i], view=camera_intrinsic, normalize=True, colors=(c, c, c)) + + # Print extra information about the annotation below the camera view. 
+ axes[i].set_xlim(0, im.size[0]) + axes[i].set_ylim(im.size[1], 0) + + if extra_info: + rcParams['font.family'] = 'monospace' + + w, l, h = ann_record['size'] + category = ann_record['category_name'] + lidar_points = ann_record['num_lidar_pts'] + radar_points = ann_record['num_radar_pts'] + + sample_data_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP']) + pose_record = nusc.get('ego_pose', sample_data_record['ego_pose_token']) + dist = np.linalg.norm(np.array(pose_record['translation']) - np.array(ann_record['translation'])) + + information = ' \n'.join(['category: {}'.format(category), + '', + '# lidar points: {0:>4}'.format(lidar_points), + '# radar points: {0:>4}'.format(radar_points), + '', + 'distance: {:>7.3f}m'.format(dist), + '', + 'width: {:>7.3f}m'.format(w), + 'length: {:>7.3f}m'.format(l), + 'height: {:>7.3f}m'.format(h)]) + + plt.annotate(information, (0, 0), (0, -20), xycoords='axes fraction', textcoords='offset points', va='top') + + if out_path is not None: + plt.savefig(out_path) + + + +def get_sample_data(sample_data_token: str, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + selected_anntokens=None, + use_flat_vehicle_coordinates: bool = False): + """ + Returns the data path as well as all annotations related to that sample_data. + Note that the boxes are transformed into the current sensor's coordinate frame. + :param sample_data_token: Sample_data token. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param selected_anntokens: If provided only return the selected annotation. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. + :return: (data_path, boxes, camera_intrinsic ) + """ + + # Retrieve sensor & pose records + sd_record = nusc.get('sample_data', sample_data_token) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + sensor_record = nusc.get('sensor', cs_record['sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + + data_path = nusc.get_sample_data_path(sample_data_token) + + if sensor_record['modality'] == 'camera': + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + imsize = (sd_record['width'], sd_record['height']) + else: + cam_intrinsic = None + imsize = None + + # Retrieve all sample annotations and map to sensor coordinate system. + if selected_anntokens is not None: + boxes = list(map(nusc.get_box, selected_anntokens)) + else: + boxes = nusc.get_boxes(sample_data_token) + + # Make list of Box objects including coord system transforms. + box_list = [] + for box in boxes: + if use_flat_vehicle_coordinates: + # Move box to ego vehicle coord system parallel to world z plane. + yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse) + else: + # Move box to ego vehicle coord system. + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(pose_record['rotation']).inverse) + + # Move box to sensor coord system. 
+ box.translate(-np.array(cs_record['translation'])) + box.rotate(Quaternion(cs_record['rotation']).inverse) + + if sensor_record['modality'] == 'camera' and not \ + box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level): + continue + + box_list.append(box) + + return data_path, box_list, cam_intrinsic + + + +def get_predicted_data(sample_data_token: str, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + selected_anntokens=None, + use_flat_vehicle_coordinates: bool = False, + pred_anns=None + ): + """ + Returns the data path as well as all annotations related to that sample_data. + Note that the boxes are transformed into the current sensor's coordinate frame. + :param sample_data_token: Sample_data token. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param selected_anntokens: If provided only return the selected annotation. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. + :return: (data_path, boxes, camera_intrinsic ) + """ + + # Retrieve sensor & pose records + sd_record = nusc.get('sample_data', sample_data_token) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + sensor_record = nusc.get('sensor', cs_record['sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + + data_path = nusc.get_sample_data_path(sample_data_token) + + if sensor_record['modality'] == 'camera': + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + imsize = (sd_record['width'], sd_record['height']) + else: + cam_intrinsic = None + imsize = None + + # Retrieve all sample annotations and map to sensor coordinate system. + # if selected_anntokens is not None: + # boxes = list(map(nusc.get_box, selected_anntokens)) + # else: + # boxes = nusc.get_boxes(sample_data_token) + boxes = pred_anns + # Make list of Box objects including coord system transforms. + box_list = [] + for box in boxes: + if use_flat_vehicle_coordinates: + # Move box to ego vehicle coord system parallel to world z plane. + yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse) + else: + # Move box to ego vehicle coord system. + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(pose_record['rotation']).inverse) + + # Move box to sensor coord system. 
+ box.translate(-np.array(cs_record['translation'])) + box.rotate(Quaternion(cs_record['rotation']).inverse) + + if sensor_record['modality'] == 'camera' and not \ + box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level): + continue + box_list.append(box) + + return data_path, box_list, cam_intrinsic + + + + +def lidiar_render(sample_token, data,out_path=None): + bbox_gt_list = [] + bbox_pred_list = [] + anns = nusc.get('sample', sample_token)['anns'] + for ann in anns: + content = nusc.get('sample_annotation', ann) + try: + bbox_gt_list.append(DetectionBox( + sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=nusc.box_velocity(content['token'])[:2], + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=category_to_detection_name(content['category_name']), + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name='')) + except: + pass + + bbox_anns = data['results'][sample_token] + for content in bbox_anns: + bbox_pred_list.append(DetectionBox( + sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=tuple(content['velocity']), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=content['detection_name'], + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name=content['attribute_name'])) + gt_annotations = EvalBoxes() + pred_annotations = EvalBoxes() + gt_annotations.add_boxes(sample_token, bbox_gt_list) + pred_annotations.add_boxes(sample_token, bbox_pred_list) + print('green is ground truth') + print('blue is the predited result') + visualize_sample(nusc, sample_token, gt_annotations, pred_annotations, savepath=out_path+'_bev') + + +def get_color(category_name: str): + """ + Provides the default colors based on the category names. + This method works for the general nuScenes categories, as well as the nuScenes detection categories. 
+ """ + a = ['noise', 'animal', 'human.pedestrian.adult', 'human.pedestrian.child', 'human.pedestrian.construction_worker', + 'human.pedestrian.personal_mobility', 'human.pedestrian.police_officer', 'human.pedestrian.stroller', + 'human.pedestrian.wheelchair', 'movable_object.barrier', 'movable_object.debris', + 'movable_object.pushable_pullable', 'movable_object.trafficcone', 'static_object.bicycle_rack', 'vehicle.bicycle', + 'vehicle.bus.bendy', 'vehicle.bus.rigid', 'vehicle.car', 'vehicle.construction', 'vehicle.emergency.ambulance', + 'vehicle.emergency.police', 'vehicle.motorcycle', 'vehicle.trailer', 'vehicle.truck', 'flat.driveable_surface', + 'flat.other', 'flat.sidewalk', 'flat.terrain', 'static.manmade', 'static.other', 'static.vegetation', + 'vehicle.ego'] + class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + ] + #print(category_name) + if category_name == 'bicycle': + return nusc.colormap['vehicle.bicycle'] + elif category_name == 'construction_vehicle': + return nusc.colormap['vehicle.construction'] + elif category_name == 'traffic_cone': + return nusc.colormap['movable_object.trafficcone'] + + for key in nusc.colormap.keys(): + if category_name in key: + return nusc.colormap[key] + return [0, 0, 0] + + +def render_sample_data( + sample_toekn: str, + with_anns: bool = True, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + axes_limit: float = 40, + ax=None, + nsweeps: int = 1, + out_path: str = None, + underlay_map: bool = True, + use_flat_vehicle_coordinates: bool = True, + show_lidarseg: bool = False, + show_lidarseg_legend: bool = False, + filter_lidarseg_labels=None, + lidarseg_preds_bin_path: str = None, + verbose: bool = True, + show_panoptic: bool = False, + pred_data=None, + ) -> None: + """ + Render sample data onto axis. + :param sample_data_token: Sample_data token. + :param with_anns: Whether to draw box annotations. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param axes_limit: Axes limit for lidar and radar (measured in meters). + :param ax: Axes onto which to render. + :param nsweeps: Number of sweeps for lidar and radar. + :param out_path: Optional path to save the rendered figure to disk. + :param underlay_map: When set to true, lidar data is plotted onto the map. This can be slow. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. Note: Previously this method did not use flat vehicle coordinates, which + can lead to small errors when the vertical axis of the global frame and lidar are not aligned. The new + setting is more correct and rotates the plot by ~90 degrees. + :param show_lidarseg: When set to True, the lidar data is colored with the segmentation labels. When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + :param show_lidarseg_legend: Whether to display the legend for the lidarseg labels in the frame. + :param filter_lidarseg_labels: Only show lidar points which belong to the given list of classes. If None + or the list is empty, all classes will be displayed. + :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation + predictions for the sample. + :param verbose: Whether to display the image after it is rendered. + :param show_panoptic: When set to True, the lidar data is colored with the panoptic labels. 
When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + If show_lidarseg is True, show_panoptic will be set to False. + """ + lidiar_render(sample_toekn, pred_data, out_path=out_path) + sample = nusc.get('sample', sample_toekn) + # sample = data['results'][sample_token_list[0]][0] + cams = [ + 'CAM_FRONT_LEFT', + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_BACK_LEFT', + 'CAM_BACK', + 'CAM_BACK_RIGHT', + ] + if ax is None: + _, ax = plt.subplots(4, 3, figsize=(24, 18)) + j = 0 + for ind, cam in enumerate(cams): + sample_data_token = sample['data'][cam] + + sd_record = nusc.get('sample_data', sample_data_token) + sensor_modality = sd_record['sensor_modality'] + + if sensor_modality in ['lidar', 'radar']: + assert False + elif sensor_modality == 'camera': + # Load boxes and image. + boxes = [Box(record['translation'], record['size'], Quaternion(record['rotation']), + name=record['detection_name'], token='predicted') for record in + pred_data['results'][sample_toekn] if record['detection_score'] > 0.2] + + data_path, boxes_pred, camera_intrinsic = get_predicted_data(sample_data_token, + box_vis_level=box_vis_level, pred_anns=boxes) + _, boxes_gt, _ = nusc.get_sample_data(sample_data_token, box_vis_level=box_vis_level) + if ind == 3: + j += 1 + ind = ind % 3 + data = Image.open(data_path) + # mmcv.imwrite(np.array(data)[:,:,::-1], f'{cam}.png') + # Init axes. + + # Show image. + ax[j, ind].imshow(data) + ax[j + 2, ind].imshow(data) + + # Show boxes. + if with_anns: + for box in boxes_pred: + c = np.array(get_color(box.name)) / 255.0 + box.render(ax[j, ind], view=camera_intrinsic, normalize=True, colors=(c, c, c)) + for box in boxes_gt: + c = np.array(get_color(box.name)) / 255.0 + box.render(ax[j + 2, ind], view=camera_intrinsic, normalize=True, colors=(c, c, c)) + + # Limit visible range. + ax[j, ind].set_xlim(0, data.size[0]) + ax[j, ind].set_ylim(data.size[1], 0) + ax[j + 2, ind].set_xlim(0, data.size[0]) + ax[j + 2, ind].set_ylim(data.size[1], 0) + + else: + raise ValueError("Error: Unknown sensor modality!") + + ax[j, ind].axis('off') + ax[j, ind].set_title('PRED: {} {labels_type}'.format( + sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else '')) + ax[j, ind].set_aspect('equal') + + ax[j + 2, ind].axis('off') + ax[j + 2, ind].set_title('GT:{} {labels_type}'.format( + sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else '')) + ax[j + 2, ind].set_aspect('equal') + + if out_path is not None: + plt.savefig(out_path+'_camera', bbox_inches='tight', pad_inches=0, dpi=200) + if verbose: + plt.show() + plt.close() + +if __name__ == '__main__': + nusc = NuScenes(version='v1.0-trainval', dataroot='./data/nuscenes', verbose=True) + # render_annotation('7603b030b42a4b1caa8c443ccc1a7d52') + bevformer_results = mmcv.load('results_nusc.json') + sample_token_list = list(bevformer_results['results'].keys()) + for id in range(0, 10): + render_sample_data(sample_token_list[id], pred_data=bevformer_results, out_path=sample_token_list[id]) \ No newline at end of file
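A minimal usage sketch for the visual2.py script added above (it is not part of the patched files themselves). It assumes the patch is applied, the nuScenes v1.0-trainval data sits under ./data/nuscenes, and a detection results file results_nusc.json exists in the working directory -- both paths are the ones hard-coded in the script. It drives render_sample_data() programmatically instead of through the script's __main__ block; the loop bound of 3 is illustrative only.

    import mmcv
    from nuscenes.nuscenes import NuScenes

    import visual2  # module added by this patch

    # render_sample_data() reads the module-level name `nusc`, which visual2.py
    # only creates under `if __name__ == '__main__':`, so attach one before calling.
    visual2.nusc = NuScenes(
        version='v1.0-trainval', dataroot='./data/nuscenes', verbose=True)

    results = mmcv.load('results_nusc.json')
    sample_tokens = list(results['results'].keys())

    # Render predictions (top rows) and ground truth (bottom rows) for a few
    # samples; each call saves <token>_bev and <token>_camera figures.
    for token in sample_tokens[:3]:
        visual2.render_sample_data(token, pred_data=results, out_path=token)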

zN(5mun4(>Rn%Wt{;j*q|^RH%XM`Oz6XRjLoqofUp&CKb~1!BVygGRdmA)PGH**~mY z&1%gW>&BVrLQWEi*;P*VA~KcVJ!79M+9j#q5JaQmz;s8xEUiP_)PiXu_Q`nIyU4v%Npkn*(er^k0|Bn2@q~DoT10&Y;4)u`$A?eH`#J>Q+j))$URK zc`@G*7ACnCytw=@hgv0aCs9~B%cWUoFbkG?EZ=pILP2~>0K>-I2onSBlNg}5~B~H0HYi5!^v+m`T<^(h(j%MwZdl3z&aUO) z_rDU!x2u%kH;d$9bwFnV^L-cz(TuuBf|pdl4)Q>}TI{)Z)iKmB?&ObB>uiJvW6V<9 zk@$ewYU0G-PFOy8@n;-Yv=z{WOP{bD;9+|#km&OY8s7$H4fJ>ZJ^O_jaT1(O;guYn z)*4+Rqkyp=n@3aZ4Lcc0+rP-W4`eq5m&;-`Wf-;fr^hsTPE!X}Z*bqIPlE`AE@RKf zFY7`WTU|o#yQ-wd`S%e}z3?r+q~fdsdC3%NynqPSd}*f@VydKP|1p2q@VSUcm@i^Ag8`G0^{ z2HCb0BO;UJCNWeSH=lSbx8fND7*xMsH$jDPiQQ9FrH|IHLo;mN5e{8`_| zf7#K(ymm-RX+JWcOh>G)WkK|c4^H%qKoFRyJblVyn~A?N3m4XodWon)bIDNU3^eCQ zHt$vj4KS0vz2`bZM$IS{y?X(6zjjIEie80}UfIcf{0@JhkehzO)QppPyOEiN?Tp)H z-o_aS+7(_G<<;p)eQm_%SM3?RA4X%7LFkdPAe3$)$c}uG83z^xZ|u1R8k>BR*%L2C zcSeHa$q%7}W%!yp&f4}>O9eP%^aURAc&QCv=KPJ@az8#elbZNlgmDl0@g=7;-kaJr zF7xn>?A!riiBpVtQZrk-BxQOHhr*tn2mJqd!bQ6RTyjMeGsKXAYGp{h_wuiNe zRClv5->Q#mzagu+bs2>97mNZo>}gi$1Uo&i9lJlXp?QebZU!MB1vMKoSekXTI7RNXh_*T`EO7}I@xx>$r}WP~uI zFEF<>aBPzngUA*QT;&M*Dd)B^*#C%ok1Q%W6>ZSfqB2n)c2Z=gmmhwlqNJb7S{@KL zB@qJ&wl*j(AQ92=$L8XwWMM6d+|{LgVT6;@V6(Ocbk}8n0<(=TBsJR+5hvnx`@q#;2lj4~i?I*BANYAu!| z7=iXS?o7p$sx)&pL+)T!qEk&$xwSwV;4LCaO_%AruOH6XZdB{XVOxc`QOz5Z=xUL2bIKu1=R67`iN z&!mvhtu1X@ly0qdyRVvk$X&nCmEPQ!{-^GoO_$?87uT+~v|%fIQS{>ZU#P*Zdw4d? zOpG7NFR!7*RJM3eiTIN&4Qul)Lexv*zCx*SLs5v-C`4IwD0$|{xae7BN9W=g5n}Vk z@&FNWJ#eK_j`CIX!(t|cO=5aw{XMBvpc&4pB~RO>ANG$B2<6^J+bFk;HxTd79cPj-IDYpO5)N&tn#yTS3TZNHj++q6N_ zNIqMqP+CW`K%X+H%NeuIzmQrQNBo@DqLjgd0aeQt=w@o;%A({7Uv8Izl|e$Fb=REFcy{=4Qvq zdWvPX?v5|G67~`5lnt8{B2w0nmRV)5*XUm<&8$n|mk4|TL?fY9`!#=kZV12FmlK{l zf*;AZ-gh;+Yeryiaf0eR#A5sz91(?eti`Piyx|-E{C}X-&rYEnK-r{hU zFcF5g;3i|b}_|`i^Okc=(rJE^^M-JOXw6{$4F(;eX>kDKCVjVNUQ^RGDKS07B?r9U_s)4`qw=UWRe8(om}XCK~fqOP)U*##(36@HOkW-@0NrvzIq4poQd$LKM=;WRw|1l7pTlMzj>SVf;Vw{ z#hk00Hdju-NG>{4q{srW^HEjGWW!sJLLmcs=(j z*q6J*yjH8I{MTUXEX_JZ+D%+sX-~^G#W#h9$E4I`qD`UJ@9Iqcm|W%0EP z{pKEqq+iudHtWZbWNoAxPM8(OKTKza|yy zMplYXH0Pw^k&jpI&*cbbBe@Z+Nx^Dcs~{kYeES_cLx?t61=$>^4Td=`ulOb=OO8dG zJ#QJw^R?)wy9)KMai+cv`@Za*Tr?BpL+QoESbSGh+WJVsjK6nP&zO!3DjqB??Rtv4 zE4BqdVHk?*6PtEp#+65b`s|`$eO9CFdO z?2wN5z`^8RK$qb#i;}%vt(+mH**V*_t2zDXC z6373D0;ou|k7+i~sYFR^!I6)mEnEbD+#>XUB>Laqqn5iQ(;Uw(Jb_8iHjYJ=oLrz=-y*7oz~>kxC|Gqr{AxC_#N~K(T3l8P5&X3wkWxv^fydVe$_9BLr*66&mr&#xTnv} z$n*VTCt~wk?cbIHgMJk$4QksYF^)eUEixvLqU%>tDn_U^ zM)nPqB_{90=Ux~tf}=!wqp=)6{k5yQ4thE2-AV@xz;C>8{H-Os$NS@oz-%oP-Zxj; z{ECN0^~Y!~!IZwTteOu4D1opB&(W#C3Jj<82l#4m>*cpdtFh6ZEdICt7|SRV!^dqY z%eAxoo}{t+KV1~-xL29zh{7yEq%>w_Pk6G6HY zIj2hjCKGnt?@h-_iA7pGaz{uM08>FuHp8p2&L&_X%)L8;s5oN{aec zm0E_5ao{7_cX-6Uy{Un{sgnKQsAB8&Y^g`XA#iB8nz8pLQTpNb2%3Cs<)w(o>^xkt zt`(#DxJ%iGHg^zM8jZl1Tt;aDrD1u zTKcJ{>cs@y+P=71beav&hu<3Ro3ZkmEeGwn^Iv{+w3x1V&+2nkXp~s3i2R*lvz4ec z5=1b%&i<{QNDmGU%;hj2{~p2$>~!b}SBOKL>-yw>vSLmrR}nzwUzX^b&ifmUE1d6X zw^pN8)4Z$sA~PAMciJiN_hsD2bjknr8La52+`csbQ@O#rMQywZPDBAIlf^xJc7xPN zk=skJspm<+DY^Y!9x0Mya~1UW{w*ZOs{Awe;FoC(%~nLCZkb2a^sBVM*7uT-5HJ|h zlA(JOpRyucd0HX0ySS7SsR@oEQFRnfur8JNd$PkV&;FTtbN43k%`)Fp;q{e33mdVj z*Z}2Uc&;onJ>9XKK&E+itT|t5o6j*k-?c1~a3$r$!yJjwKWIEBRicz@xno z-GABN@$;1>fz-Cb34pKW=`eE2IIt`&MUv*K96gS`(n_;@BOm0C-BjdeSE5>_Odc)f z*{{a2Rdt-0elFUJY3kX2`3=t(zF0&Q&yF~3&m?N9CZXpVT%2$^+*mdPIoY|p@x)D= zvLx+Yhy`lqwY~aNj;4cQs8r>RUUxQav}40A!$KZnAG-6i!ff&k-KI8jL$ZyuA=!#T zrwS>oC(w}D!*~mI5R_Y-+ACmTUfPGidHXm<)|30cyGr6|e`vXpPtfhJgM#RwrW5x; zKXo_#X{Gh8jupzpsB-Ny%R+0bKY>%C?0+F?7gbpg9-B4O5HR}H!LFB$KyjGWyz&on z=iSSivgifPCoc0I%bHdIH+TSgKohUar9pDep`?DJ`K$%#4Bwj|YoT+hg>$~BI18`f zKLQ|0xLqdp!4w!2R6#`BiPuMLryQzKHhh>UnD?Bemq6?W)3AMVnx7u@OE1F 
zZI?NvN20kAHlxAyd5B|6XgzjEMqWBniuTqsoTWsQs`~G<&vD#;roBl(7PHL&Ny4gL zd5*xTESYC|mfYLXsm#HD708#LhLFl_$e$YB`%H{oRqdZSa8LjX8b;|#aL10sPMfW1 zc*?Y!Cx3q*isSb$6)M##M13=vtF$)Eq0Xf!O zt@7IN>Ts39>OUtZb6OG)KV>yKG(&{03U7(}iL)Y=)bF5)aNfXjjEvf@OOgpIWld7MQC=p3 zws>PYjC?5(P)_%!q@Ix@;^#1*^v4@W?;eASkb6f)EqOIML{SZrIrCeUYg7shJqfWA z0U_KmMnL^KX(0e%LYE|+i=qH2c@vwIfG6E1&2esVUpB`C#*t2tD1C_zi)!xZDxtFT zCwH4(^>$MVIEPMh#rTMW-^sg!wS%})`8if#u<$7#jvt}WIy+s)Kb0q$unZ4Q3tN_; zo-@J@+u9O}OVK)`6h@1fKf}E+#88%dg#JXVKWj|)F)CIql3b!FU13L2Rum&ko=hPk zO_k7IFT<%c|-8z3TDV)L45l+eOl%4eps-XghO+{RyqlbM|{RWVnit659^ zvT!Zt>;$KT1%}UE|Ad)#w~l17t8N||&lpI7>b76*(M_Fa_T6S3=HM)-G>qcp;x)gC z!_sCLtoifbX#Gr3wpU1&_-XStq5g`@Y*US9X)WQ0Se+18UG(joT7`#Ne3?vZ&-RED zf*6FzMK&4|y^gVmIVDaOVxaf@h4~K<39>mx`v;@7u%tQ`Yw`o#VkcU_>+UN!`?!e6-fwZqKt1?nl^ z9%ygmD=$agi?YM4YIiAOMc-CZCVi_p=L3Jr77G%MtT*#h0@hV|8uBu3I7K*a^P;{N znBxe-MK(v3UJ9Jiyx~?!G)uAcg$~TNjC*ryQYr;~g@(xrLBfmC%|Nr6ro#zE%YfYW zz)GW5<<4G9{G|^uB5x}ld1AVwfRPiA4OzVpKW>V-Op0OBr_>B4#etWVk|k|o8~%`{ z8O$OO$v`BqFaGDCRT)k4FF{cdp3V|CxqmeOslg0IL8NNI6TJvMMUcDsNu=ru_yV^{c+j=|OXK4TDD|)G#dgBEocT2nLc8Or93c zHW%qI`yjK_ChHkFrhC*`>uRj{9aTC0zS~a1ceUY;vpFVprGM(dO_svNJ&^f6rVjG zrm0a$+_X!VaD#9bktBxl@KJ)eF5eu3tA2hLTCIw5$7o~u?@9H{qX`q_oPQ|(BWG=2 z$hFlM#=u1pFJbEt{CfG_qj?waPd!x_OYF+r3fkWt6|yEVK60*a{omsXG={43Pu)hJ z;)r4$d`RL0)(uPOF+YW$kF667H0_&%ZLY-pE(7%Wq{ZnaFp5?@%U^Z=f(Ow`estXa z84FlUCDEp%;I2`P@RE>fRmjqb)sa6UC|t&U5{{4w6FNsQ_KP$)!kH3zv4MCn({R?J z&$^pA-#zmxe_o51Y=<8qV?3I1e%teWU{fEr&&OV*wugilpcdh(qmFffd|;70Q2NUS z{EL3_o7{5JH+*-r?xPWn{*#I2K;D~c`QgoU1~&QVX6g|_90)Z+BoM*9%qj|OGI@;u zPz|45mS!xCU^S;42j28MFP%!j>m6J+G%{ukIzFKSLDsgtDV zb7p5`n)ctoUBD7(iMbMi!_^Y~9EOTm`t|~9Y&}VpRI*g+gaeRT;{5Bi{2o(h{;j?l zF$qeb`i8Q5G;~%$(-ZpdYijB)0FJ)$@EN-MEM++y#gWGqMM3rP?HC819A=A&nzXcM zT@rnK1T}8)DR2=vW31*M8%rrGjruQHRb*dsx%3Nj^pPgGq?6Y{NlMJ0ugi$lY!5_Q zidejA$O+{?^Fi;Z(=5cZ*w8BlboUzflqhN|(4uhge=P&dmw}t9k0Q56mC9yNkR;}1 zIL2mc%d+UsW;2Kmer)p3vuR0_q8|5mQCK>-GICX^IOmyFJ+$>)e$B@pc8S zImPGm$Ts{UdD9gxrn0lUx~20d1-(2I;+PlPU2$a^Rbr(GO9Ey3iK>{gEYB(E)!L44 ziF(Jq>7EU3!J2uujozCH$gE21N=5pW98)NHVBALgl=|?TQYfQ}KCD9aQAX!r9tAZ5 z_(p*{dQ6nxepA|RoSM=+=*q>B%xPT7CQ;6>EC1>k8GN_+YHKgd0d40Pjp;za7+l3{ z#z}m74w*!UBb;(gI!=5k&(aD_g*qR`X9={pbB1LC(d7BTvzP0bhiAC;kwzIQz5)s}ztC-Os@)~X!}WAefJ)*>myP(b61TcT zfJS}iY%L^N9mt{VI|L0YEzddm)Sns68Y$%ifF#kqXE!{dJ{Xyv&k(WGZ0c91siS3l zlyG-e`pZELEW2R3YuCn&jIUfh1}cds-LjKlOTdhh>7u*oSK=927(-CZSYunAp*;`7 zi;}L`GC_|et`p(4{Zs9v_I_ZJY{!&qm-*od?zFUFKw|Xlw=VN$k_eUL`^`@A=&9J7 zx*964;^(vpuZVjXi}p#5o88I&a!dq&gu5?Ewywlhn~BPNBp9Q54NEWH6!J$_836%ALdDYRXL&rD29usf4R3Y>fztC zD%{zTy8{{<1oY<-&d@Qjvp60c6rTAAnBK`8$VNU~KaEB@yvyQ2Pq#?#Zb_*Dk3n)` zoX91mfHl6~L9^;?XKF5a{$ednubl9Zt&U-G3u0EpU{!3<^?4N=m-}#KrmJj0i-=X- zTGCxO!==Vzhc3evy6yiArS!L<<=eFQg0b9KGL9*?l%$d%vWTBu54szpM66+V^rn); zY_iPG2Xz^^y*k@m$TXaFeJ09ISw8bsLS>*m6uQuzF>|8f0-|t~IRH%P9*F?;b4yBn zqK-LFN=olAfsqQfooUV;3ycvg&8aC0ZCd)Htd7;h;D1qY58YlK+6|vho0u)U`%5gI zTr8f+v?1!+fLG?lQHfMZo0df|-)%VZpqQ@^GM;N$lsyH0i3S-L4{f=!M@Sk(q?_ot zHwPky6RTXt{*1`5kMYf`JBG}_Rn?u;yw`?L=0Usht(WAwx!6B5r}gL&conAT4O##f zujiJh?&W(pFK&kNN8^gMn*}Xei@Ep2k{%Z zfv6O^f1A?#C_@^RYvr8BV7vA6Om<`>Y&t|K3y*|8W9fVe3y^LB-!{VEedbXvaIYn#~aJf(>UcQ_TV@A z;S({icp@O&zY8U&i!af^5?{ zEE`vWrt$;taNhFAYVHx_L@A}Ail4^5!&ydWvNVC&gKw3DSX31fMlNYt%@7yFgd zFmiw!o^t4Km;(hK4ZivekvWUM&BE)q_iE3kjz(X=o0RPkw-C>D0qtEu7-{lfA;0Ht zmSLTj@80i%CB4N#C+Y~Mc$<~Dty!aZ1a4@XK`!Mzm!~~CShNRu=Z%P< zTrI(8cT!Ucp9^aOJ-?(F{zWi72Pk_K%qZ*ui-l%G=2X`6V)Y88m`mdzfzyk~X%iHq zKjs_+h*;dpm52(^T_mVW@-Dk1};R#ducE@2gTMXX3g@1;6HR^QLfiqS$H|9_`kWgcnmj<&`n`?U_>igDUEE zSGNV2>bU@I1j|m*!&*=}^?{U@K@_CsC>rJlPBM!Qt~+sEJ6&)CAwLmfd?~q<+u>pw 
zQ@$O1(e4D?EfzVN-r`Hv`*z1(6k=zmr|a(-7@_wIzmI-FD%u)o4NM*@tiKD=?LLFM zAE;Dg|Bln`9q@~EJQ(rxHZQLHke$#*-lHtMPx1xV5gQYo+M7-MTBoSygdz+sX2zHg zA)I~w+QBiu!fw4`G0K3kS`Zw%K%33SlCO9_^?UxYYolD)g?9e%0CBsZlXOKmq&=2bmUCww7fRN9* zzkiwpe~FJzn)KNvJ-@L&#m+XCm=6?v15=^%%}y1CH5^t!mc2lfhYyDVkO;Bg{u4EQ z;D}H^|16b_$6+(#Vr$NoiW=h1YSq$AhnFIrGa)xxjY!BOFd6g*B5+pE=W#jg4#siT zEEWj`e0(BcnM;+jg6SD*=qEY#}3$jJQdXET|M z21AMb9cOj<2txsI+MGvu2Fxl^wpohWVJ21X)!X^k>uvb&h0L+)=tkYpe=o29^qVpF zW8AeD=lihE4;AGP8P2+T{z{=jdbLjzaZb}k zi=CPE?AoDn2F?unZwzwRo_7XiQ&0A;ULAqfZURkw>s zdm)#;=l_$gg_Z78DMs$6nH+O&NU)kk>pWwXV;qcNoy+5|L*WSK)bG{|EGm4@3Sv6` zqbdb9)l4T7J;~mNwkhIY2@4k5q>LanvQ3j{nCUc*RJ88aF$Wmc7Urfa%iC3D8_R2y z=JCG+OR}osvqgnY2^5ymujy5cCr^z$}Cr_@qny60+|$? zO7-Sk(ger$Sr(%5y6mdS%)HiM1~YA^=%0>t#l^1}pzh=IdOl}$YQ`qGORb_dxNp5{ z0fW}s>hh1XMC$4D2MI1iA9o2EwJgR~7g9UQ+4>%Cqx`MXi^^HqJv~k~Bhdf80YFAS z2msR{J0v*L#$l4rf&cG>Co4!59kcnc1;_iKYX5wXRP#J)_;5I|M!GZF zcg@8;Sv+cGH!>rpR4v2bDE=4aDZ!LGq8s>Wf%*aaeN*9ZxMowv6p6kyYwq18r!w)_ zBWEld;UmnhXqkR8X`@s9KE@e~flrG$`@E{;EsA=93Y;(gnr$I0P1QR@!A_1E`sjhB z!vFR$?+2Fstp4i!vkUaH1GaoVy&k5B9#%hfhFF4^mQBJc)|EMy{qf!gd53qRTp0c9 zq~&Ju2qEMYff&AG0G8MhWMg!6bwar*UQQ9sAcvtoR(PW zh36I{=XQCk#vHQfP*rOVKq-`$^;iK)m2}wZu`}vPY3H|b@diSWarEQx9`zYQd}!h? zN8kkgKlX%3rb-H0n0Dr$<2ogY4cd(eVIG-Tu|d9ppNS__mx>fEyANIyJLo z+_qD9O>;>jeAHLN355y5oNhA6P2)N4We9vC&m{*+x2d#bCaRkx7O65z9#a5$n_*-| zKHA9I@9V8r5=K{YMABteaks=NJy1X92#n-c|C1%K%}nhA|_-woj;#Im#vc-r~Qc+JR zM3l4jhlG+Lfzm7qFg&fUGy+ebqDj+9>7Zrs z&Mn>GloYXvFoo1C#VjB^1HgDhmNo@Y6EnDcn~yL&uJ`#iRqo)ng`X3t`tptY0~vqB zd!@FU+FLhoe)QyVD}Mx;@^peFg|akVz)MH|kOZWE{H8TDy5*2)*%HOd1U$j@aZC=Z z*MQyLSs%BF=A$SK?(-dRr~+qaTVuk@cv7#VkZjg-2WRzcdS@%HqZi7 zj+t~_X^q)HNYI@2c=}q>MdqiOMb-I1KI)9=T}R39s@)mu4VncUH#@H?ASGN!We^RH zjb{T6ua8$oued6R(h!ok1hD3#-H`@HzEVBKDq+Ksk8-Lx7S;bqgsEKHnnWuC31 zouzI@RhsKpbp{#S3*#aN)PN2-gZzry(%Fbg$3e65g+9+Ol}5R>rPZlKY=YP{q_mC@ z>zH@YnvGpjRCYYi5CP}TTf#w_g&EybNRwmahnDlw+-N7q(6BdVrdL`N&=7G3W_N!F zli>WZ_7>OW@7%UrHLStaq+B-iZPfbY>+6;&8|{3l7gL|wdW8J(4M+qsHH2&y@nk0f zB*;A3=yM0ZXI(#-a|nqTwS43n~O5YI-mU% zdwl;-CHSqu>>Ot5?{=|SIM5UJC}iJ+6nt+I-#nZ#NbN+Vzhbd?wPV9vv(ew1J1H{* zy}aNsu^-FAqy|pq6A4K$iZy-?72_>(TJRig2!8p4wYOsi*IJ$Xz8ABoH!h3lRxkW6 zhQ^5?$!xoFi@MjP46#CcTD2v)F*He^%B{wJ*8B_tgN z1-#@j(E&Ks`4oq}j2)lmK@!I-!qN;Ad&E)rBFd!KEQ}XOa)FhYscyKg-Nx>@Z4==A zOpbAk_F0N{WU5QF%Humqu9fX?n}vCc=Vb~X8@5fe_=lB=O3(uaM=U2cjhD*NRuIYz z4;428=ZNT#oY%V&a|{yo`-48RxlMpCIs$B1`bA8*ZG4Sw*s_;5d9BXhnh?biU)(nj zu|FY5=HVD=TvStp?zVA>7(UK;NQK)B1y~m7*6yh|VTfEL#xd~=@P@Bbw%c$Rhp<6Z z**J%j@!QshiJ$};K94kQh6`&ID784bPCN7wPK;0yL=NJKX)3hvhyqwyoN0L`9Sq~M z#CI^f&T=k&ye&4Cgm;rK9k_#Ib2}wrItfdgbA2L~*^C8S7xJGees*uvdYB-U8dB>K z+RS!T;W)kt3@-^i(JUxjU?lM)HeE2+smV7MjW+zunygbJ9diVZp-c`#Ee>FsQJ{fp zs)lrYMD)RCx4wdTqnqX%>(4Y{K|hnJv=cq2>rp$Y%~t1_w4I7I0vA0I$v1;`CR1MI%uAXrjAMqk+^>+t87OH6hdZ(H}QF(7b4RrH%K~+zaIj8D>(1 z+c;RCNiy`Z>|z5J^b&e<$UvHDYO83`b@{L6_@Aw5dD=(=5MdT>NMob_nL>(xUgnwg z7U)2FhfqYmZxt9>L|Xf}KRx77yim zg{FOtj%C%#1%ag)PViIlcdh&s+A6Fbv9t^yLJs4&2j;OEOjLK&Lv&PGYq|)EnnLfI zoZSdUf^gF|Y#%KGEdKyL>?DRwpl5oV^DpmSbR+_CYA!V*j8N3OP&H(B{JOZh9)?)I zeyn-58g(3=_UtO~uM|amOpH|aQ3Ci0i3s*EjqZD*MRr`Tk?dairiQHs6Mf!4{HlI^ zRT#8I9C)_V7?}PS1r+SMoMw=r_b3TFk|+0BYqL5V3@yKX^lj5~LbGSh-VvKuxfKU1 zGzPj&!_6QMmf^ghs$Yzf>FX5>)lQq zytJ;%ZWqQ@R3YCE$8TNW12E0so-*gdvjK(4+_$$eePslmE+L-&ufK0k*N02rITxPU zlQgl&Y<`qy8@{;3-1X(-r#tW!GTNpxWI47zkvPZ3DO8iA#Lbf&7k)*L zg(2A57yR8>d!B9ReR-aPep`zXQCn15WFP2QxtrAT4z$!UhxIFnK$qdOOujnbA(WIO z+%QhpBE-Q6gwoqlwSZlS*%w?Il!PTPm0`H7%+#*mH}bOX{F%2`=Uo})@-G`J#bcKhMV z*KS$?ET0%%r?dJH2;Azzc&o_oXzHgJ*|3Jy2iQD07=tWi!&bw4XBx& zFl^hqQq9qv50Em_G)T}8jWb&7uHL+CnZ^VWRQE~i8s(GT8m@Ws3EcAN@?mv5yK>XV 
zaXHBg&OaOkwtx}!Ht_umxh3ueV1317Cj)w=mwK6APg7-C_{X7;occORgDlmRc)Eq} z_DuAY@B5>yHP=OzRw{3QDOK$Dq$r>?rw&9>Wj;1B;we#XGU`PB9@6jpa{m`af68=OhZr2*AhDD&3orym>gGK=yoUgc% z9^=S=NPKTAQ;fDGOx&Ihg&jTW4u$VgqdB(~Nj!S-AE)9EYV_(lPMH1#Fpg6&xJ4Vl zm&aG*hXuNkKg&;V_QFbX=|86Do++9X>-L3pJ|rz5m0TB2i`5@vga!~JS<3L%405X( z_y%k>?0PhVD3j%52g~sYIsGyOX54PlyGIwi%=pnAL71br+ZcLlCYxMI4`WMYlbubFa}6KPu*J0N7C z99URA{7N4e61#7AEmwmdq{HflSx`GQe%I(m1xm!ZDz>cMh&>g+Cl>>-k)!UE{gxai z#^?{M>vCUxs-#=6to3|efPEk55+cfnA={S99z+xYg{j;WZxedo!m}e8QHoW}$Q3lY z1njqmO0&;OBAbSy1*5}}dp|V8-Xi!)x*6B|950O7V3b_9C7b33_Bp5o=F4U<=EY-pqoRzA#~h~G_NUU|RvZwp&=-@Pwu zr@c!22r80F?7*ivcJ66*k~c!o>Ec`Au~R_OAbrTkcq_c4vgF?t%eOT2K!|C^1!c&} zR%^8YEBi(&6RiI{4crL4jpV;KynqN;_6!@8(JT-I{%aN6KW=vYe4d+4%{hp1Rv#jU zCJzMms}$4EsQgsF5F?b-5f}6B+nhu2JdbFG%0TYWMra5oL_Ve@;*VwQVibv!r_IZL zrI6KdDLEUXrgLWP3MD9u?f2?&%j+uPfkul>w9pOrzR&qfL+0*~wm)tD0M`5;09-($ zzaJfu;`n6JBis4TDgEpyzuQl^w3J{iiCtrK#YhmpQS*RVMQ$Yn00Sj<6C%FWvTs;j^_Y4>}s@7#m8-n0gd;8nZ zd9`=+yw?Zqd5yJ|bV;LlG`gymggZ(dV$fMO{ce{mU6pV|ww3HQ6%>gaWlHYcL#SFj zK-26S^;g~O#Z%`h+79X33&BnCieu~8^dYw&ABN0Wx^g&lEl$YOOr@9{hv54EeGRWf zD{LQO+*CBhQ9*AJfeKi4ztJAEQ*8XM!~jCuJkHg^O?Kc^4;>-DQ=!F|&h)5=+dmp_ zC0a?GR)?t@l7Ay-;*_))D0UHPg-SV2q+~g=An`{P2{X<%kVHRwagF z011uM3!`f}0xoCAWQdCY&CcW*?mZ@&I#K`lH3L|-iMX5Lkf)lZ zvq>~WrnW~wois<}cl*)O6ehk#jr#zhLk`n&7l5!a2K*3#?5xjFI2QWv02e~E)1VBZ| zO#&bm z<~e{CSnDTE+!aBwV{p_5YSh<+KXx?EBnZIOdXnWtv4>!BhCN;X_=u3mWX@57LNs+Z z2xz%A5mW*niU0)V;13&83%KA9>l1h6Fkn=QQ2sNIiyY9KuV%7>URzczI|Mg;ykpH-=QVRzI|Yivb_cP%iCuNF!oz zr+8>RK`sI(L@^P2@?|q|p)Ps#b0R}}g;GsRaukAMBabCGy%J|V_j)Mw7-qI4kpXeZ z_>aoij1v)FPN#11XNfrCWSUrw=i*=0REs!QSIDDja°QdDt4j?DNHboe0x5`}y~ zNc{LgqF0C&$Y><>f$cbcO=vXpMsHIHkTMyNqQW3mL5Udu){0~ZhV8;`?DKo`v2`xl zjXE}BItE2$2T4G-k!4|R7?6E6MgU3TizIV2ySP!u!5+O*XNV|C^5Z_?bdMo4lVllq z%g7Mn^@@FEkgy~=F!yrq@{>`vbIFHw#?fVC(N55pa+RlV$HAig#-y} zo|zJWrxJG}jtR+mgK|FlwTX$*JF97LH_=>q)kPowCNY2c6g0LehS)i**K>k!mFuHp zNEH{a*=P1>Us^;cQD#buk(|rfpV_6BiI-#mieis~mfnMru~`xB;$ImOAO6LiJ9tWn z;}o=Eniyt^Sur1mcrZo5Pn~8EqNkWP*N;;PV=!qVDrqZ-qFx;n2sAit{u!f2)`Jpp zp$58{2FgMPDr*yIqZAP(it<_(X^~YDp|FL6fi!R92^(T}6|@;68=8n%CkVWWmFqHZ z$8l!zawM18dhh~Zfe;J&fDQz=l=xFgGMc8u^NQ=YlT;yYYA239IuWzdq*$7RS<`6n zxum4Cq$;7L4P%9Gbw{Mfi|ScEbajs38RNHGL7Lb{D2F%pbz}e53yh?_V^U~ zk}qodsl$VDd5DQx@nrmlHv_sWqPHuy)1);4X&}};V(Di5^ z85oe7!SR&YM4>6MscFVuj@pK}&<~_YM5TBYFEmCi1gh5hIBGedZhDiVw1?Cg73MWw zxFT$aMo}g;gm^SBw6qnAWq}`pn+XP@$E6tSVpZtqA?G-M-07f^;SWMdG3Mtx`EoDU zTCinxb=pd*HQ6_TR24)D5v<8Q=qe&R#)rn|K18P#8d|6uMJta)SG#~V7y$?iIG(f> zlZ??z5Q%fnm8E8C5z%R|G7B@->5K#a8FLHUO*D!XGIpI~C!vl?k-KsnAt#w66tqrC zK2OCI7(i8l5L-w~ti1BP8VO2;GK}xBKCljd@d${@twu);^04lb5 zf}{3B2E?L7Ej1ZlI~)>(gM2%fmMoV#ai`%=H)TW=wECTru3ucECNf~O}E~n%azv`g1n59V3MO3sh zv^y1x#g2mT0P5&mV<`w?+7Cqk!%;SJ5P~ra%A$w+g=?OjbB6h7~rv3!!vBlB!mbyl9DK^db~kre5#(7^pM7P3pc+k*jd` zA*j1;x^!HpF&a`?P#qJns_IDXXcXwshA$`_`y0YgTs&;5u+2$nCae$>2a8vXtL6C` z$H#*!aeSzEG^d$s1k52g{1l48j=Z?NAh@En850~8xU%@c5KFY3t2=bLo{~XiQT)gK zF~!~rQ1TZR_3N;9{C?#BWh?FlV0U9hjbUjdlEyMyvk|BmORy0Wt%K4>}S7rmkk79Fp-s!TwebL7Lf6T@=;ETQ8mmK z#8_8Iq+4T5LL!@N#H{?xGV^j(yrYtcEVG=#=_QeQB}*dn61$9bkeQ+9!p65bwDMx8 zQ{-&4SQAolxx4~?&>Cd-3(fSLBGNp4F@k;St)x>%dY69!oB27Pom!8?b?3^#9J zVaa+6H6&v~1Y7~xmn8o{eX^JpciE7XOc5wt&mJAfYjs=toH2)tbe@_OF9wsSWsiHI zig|@pvS%`a^37EL78Z(OBh;j-=X4ZSB^o7xI{thZf5H{~{Lw~TGa-Fj_Z-VD(KRuf z!|<3C4&BZGU7^KUHJs)RT|{F|xY1Bu0frfT!E|O-6+L938Y&4DWX%<4u^NgM)JGlH z>_>RjoMKx%U6Px$O!6dlM-_F8VZ6(-@1hUe$<@Lq%2ojxm#kQb?QEU=54+$8J$oXB z^#G|+Sa2=ZnhhP)LAG`6chx!-ywoeZg~&lY*;0ZSFAB-4HyKS)5-mF=6WB3y6#++k zBDH`4w-6Z*Kmh(IWSbq_(b3s8S};`c)D=9AXR-No&=bw@#Y;`Do#K^%tJY)Eo? 
z!GT;4;8Yp^70&r#aFA6fy$xN(E8OVa9K>DROf3=TZBzW%ld@^rWk@gYgichX8Ax%- zejq=}HA~!mSlW9oNioDKVgZG<3n%e}BY{51o!$^W*CYM7cP$XWT@mq(xx#oA*KEMx zhu=#vLKOTJMFHCbAr7|f2iezbHJDh!l_c&e4j91P&%ukP@l-s-L(1jM5#Hli@mcGA zcnWJ3I{FYaj&~YK*Oo_F)kuu~<|TNw~5?3G-ptSFmy26eeoN}R!U+nZ717~o0G zhSInmCXwDn~mK}7Z+i!vBkbdjSBC7bDcy3PQ!Xgo?<$Gubm|qF)O;@r5t?Zu6-#?6@lTG9eh)KJF9ip0I1ZchnvnI$vX z%+a9-!v|u`5V`$x296@|As~IbPe{z|Ut{T=wbWK;s+|-SFH-^Je(py;V^7=dax>{_ zo9A9_85Xb$GoCOi%_kdW@e2>@{60`_QR`mr?~(H64%WEeKBu{L$SS1DOm6N&`$e1o zj&|SZI`gSZNzNHfu_i?r)PR5ihh^*U-hFT3f?~QkLQnFO;<(@jfJ#2iu*UQdL9hey z;0B!5qYXGTF1CnQB`G&%JgylX`g4LFQvTqqGDwQ=;#6IZ?_pj6u@I*EARy1K^h+=9 zC(q_8Pk<&56;#q(yaa+>Ny}*2O=5iW_+f}mTlNHz_5;BW`rr?cul6=a0IrTAg&Fd9 za0em(p?1$NeSc?DEcMbu`dVQt@6vsSUU2VQ52OeKa)QY{m>5;;16oQ z8W}}5DAL=0a9EGEPa^;M%3}IUm#qa~j*}i0R4?D{sQOvdPWkqk;&qoN;%pfIfZYVa z`zL-;*O7!!#o{PZ4#Iy&iAndbj{IJT_eF;CL#}=kL=|xkC6d{`jU6k@*M0y|LEu1v z|Nap~m{8$Dh7BD)geXuDAOaRF2o#s`79TzY2ync0YuBz?f&^JKSu3JKgDYLWR9Nl- z0fDtt5_nLq!bO-pef|U*ROnEmMU5UsnpCO5gA1D~g&I}r)SfMUGOb{BB~z|ik3M_m zvYeo?76g@iiI$++f?gv&MTnK^+=gQ-V(7?`&#jE*%9T86QLtIEK;6=PxNGk%K!C&% z@N-rrV#<{*U&fqS^H8}?;cf;UTJ*!NpB}bFui!!f=~YckFey9+HbV6Y#L~+`S@e7K?fg1tUHzpgs{TU zDBKOT3D0ZED*S$W=bdLddFLeyW}B|Mh~BfUx~4v~Pr?Z;T*)lrY#b0O3`0w)EaYee zGDsnZ#8Ac|kECoz?_@-c!>sOu(Wg$xC{ds(t7M2R;Z78brv$xx>cj3}j50{TYC-X| z?~r^AwltB1Gfp`#EKtcg?+nV$4bkiBN!~hyvdou4BfjMD%fHp?Ug7!qouZ5YeP+l)N5&-);vGYgJ{>(PW4i@ zmsBOotv=P1^w(`o-8No%=ZzIucj<*Q*q21zcg$kxVo@MvA5!+UX1m?-URw|D7GH-S z#`MooA@+6Cf9+kjU%3=U>NOMtUU9O&QnVP#t=i?-KoytLmSUG*<~B@;Va~MSJ{8V5 zWr#$?GRp-8W;Iz0h*RkzCNCb>I|vmti#sQ2hB|6Xh0RU>XQ}Vp73a>ZcIX*0T5||d zMe7PoCyPcdkJm_%Hqa!$&iYweaIdDjZXyFk+3sio_0y${lQt?uo*`0J)tIOo%j~fZ zdNan8^=Dc&u#3e0q&$Vl~)RxVg+wr5wAmdyV2Zqn>1n0ZR zrpAGNTvqTV7sM)?3_>vUr$;Bgc<|t*)OZL>$L(*_XU6@gPTCVI?&;q2@lCjOBFlQz zbFcS01SoDWxY_bU`vUHAddbttdFK37F#6Au+LkUE zq04fcyI>4u*e~UnY=%Pe*XgqL85k}EEQ^ZUNU34clfTRp9!p8b zKF-B{e9T%=Mnp)0bq_&?%Th^HQWlFe(vhu2sv#6&n7`@$TwRF(7LfMod+EyyM}qiV#>0L zoFb({F{YU|Z7D^B`;V4Pf#pWLT4&8a>s z=FEydA=ILHQj%~U&LXkUM?bEYD$2>UqmK-qjzT(_k+v~WCw*nLR+G|~Ruw5_+S)?h zH?dlx6N@%|Rt_uZ5Q9d9Rg&=@HHTIXfAAw5%Su%~^?9-cQstXtbE-FS=}oA9>Wfwd zEX!uuysC0lV@64xRyHWsg-(w)3-Vb1ZPe<*q0VzbMM5He#_Apvv1NP-0a+{i>ZzOt z*0fYi;{gjRI<1n@tNBZ!YdQK8U#^H%JT*yRhKP_<@}w}0nk!Ocv{{(A@UsgEZ7%mp zuMjQsw9kzmOlkSjQl`(E!r~`-zD1PU0`X^X3k;g-a*_AchDEVtDPdcy5IA!4P|Lk2 z%vg6y=t9?iH?41UIjKWYN~oQpqUGRVVv~TOq?;z(N#b@nTMM>SvC}mxLfV+zM8cQC z)+())NW{$i2KK?kRVnk%lZt?N8|I65E|9KnH%JG^@iF^s=jqQ+bUyn_YcXVR~okQ#YicR$<*0q}w$~XyxxY^w94*N`%3+uR>BmL5@Wu5K7VTsM`iO8B+b7^!F+uf^G;>N;k<2*-{)TL(Q zD)k-TD)%VI6~1GY4j)1_hJ*Bh*v{*Y1IwdwOoB>bo_rNnJ8-2KhuRInR3_fBrFY7R2he$qwNO>FJ1-Oqp&YIN_tU_QK=B;GV2zdKs_47cGtu zo>6(@NB^a$X)V#rJ^5xP8?~QXS0lf;SYSVEIW+klRYbd!t|li~u3v7_n=c%q4PVI* zM?~?32=3QNhr7n_rPYw%dtFCcdLenI*X%hY(mejSeGW^Rze^atiGuIJ-KlI-Lj)|W z`=Zg~K6%Ph+G&Dv5`eQ3@Q8v2LDgK=ZpofV6lr$wgcm#De?0o+-Mc5O1B-wwKl`I= zt?_q6Sq`7fY>RFG7sPGna6`z#J2%0x)sl|2;Li#6l}v8+$0s3t8IL#l37huUf49v7 z6%~S)yu{$@K3HKQ`GNekONr_%&UxFoEORu}c^B>Tg>T{IVprZ#nm8Dnj(z@9x7XSR zIU8CZJFGD>0YgneGC<4`MaN)4)L+8h~n+6ub@xL6U?svcw07^xfdbpny>K*%n6v00W^dlFY7TZ@52r+OuYpguK?nz`-2fwL&HRb zH=Tn&nChyL8=isf}%4jj?q#ig77Uq^s4{k5rZ>6T;ruk^AJBoJ1kPf zR*WF+bGFOqzOOUH()l4X45UGl!k!44w>cY~P&nkWL4n|j3zReU;g1X)yMV&E93#1> zVZ&H-#c8w~XW75en8kN`Iu;x!TyzLI1d7DzJGD?H;bSCXOccTZKsCgUFU&dC1E&+Y zJIyn)Mx@4j#2O_$r&W9(>Z3(|frzrA#D*Bi4+;#*Ixf;H$DJrXLS#bba7QcDB~{x- zXf(n9AjHRv#Fr#|$RsHlP~x*rDM$qzq>lNIx51thNgSy9ElrFK_qxFciO9nfs4*j- zbfGD{3B0euE{*ibcgaRs`Mm&)L`W)1qX5UA@WoX6l3ePZ$^tDhI+0>58#!@4=)=l3 zY6+vsNiu9Ly!*+p>=hCmEPF(z`*Sdi1d5R)zt}m+_yetts*kb|qnDgY1<5UOYY@eA 
zKpvYUgRp?<>pUD3%ff7*y6eczu*L(NrI8~FM1+W>Ockx;vV_<%xN%9u8wd*s1kTin zgp3ou?6F~F3YAbcKQv3s$Oyw^%_Wq^!D)g%-MXlRg$7qy_6RVSf%L=gg2i|;$WqLv1XutWh%%)ML63Q(!Q6wJ&%{kdm zw|pIWL`Sg#!bc>m^r8yuc}mf&eueWTS!oXz|jgAQyxWA3RI{6@2ny6 zbi&yrI`+8GXDAa)O3$INC-s9XqACtF*+eU26)#aF$+#3{G$B`Gueb59iuBQ=1VE}F zfQ=vsn!pG?NYhJoQ>5v`<@pXrO^>3J)7O$cAq+kyBebG{vAa5(05Oi=m<-86iRGLV zckED>ND(($yk+wn=?t#9z)>B&(OU=}V8v7(Wk=Wq63r6D&N!v?Sp@O}%af7OmVudz(9d;k~4i3LSln?DT^OP)9S(&SKrsMrBjEF%3T3 zqfA@O3K&V<)IU%`OPE_Z@~MmX1V#Wcj-j!gK$EPDI=my{!Q?CmM#?Jx?{mXuBg36k z3QE0EK4_!bnGXe(*Va7N9K@j@g))%5SF&^n+#~}b?MeDrkdinIy!@^Cm{0mtSQ34z zRFa*P@RX4%MO_isiakb3vpdizQ{ljX2N2eooeyFiS+OM0WMw?*+Q%-kP$-ns5UIN= z+f0I`L8c5LS1rynk_=T7kS}>yB+;dzeYL(hF^yF+k^_wz1)5z*(7O_Z$DoQkEzEkI zAi)KV4m8ThfgU2=8P=F8>jDZd;i>Goom={ilkf(GRajIJui0r&t4u4X`pbYbxmwIv z%;4CPh=7mq66?eO4B*(Pcvh+<+-jNKIdR;N0@G_UrS`B;Ny$I|d?kuvY!iu1@KVddafkUHsj|Y5zao@vEQ;J3BUEyxEHnI*OKc_x;Rq=A%I#) z-2+X>@SWQ0`^s%Q5=V7Y-ea5n`G?X<4-LJ<`yvV|?c6xBvj0fOr!3z5#FK!zjdw5x zAOHeN(7(C;QjM|5&G6gG!BGpKRJ?rM2(G-?TBAorl4p#i3<^?#@L)O(SV95EKOzcb znbs}2)>v&)+!DQLVG%ERVIVLD4%3&a%}JwGQ=?egiwKGTT6loSD50fQ3cv;8DH38L zmfP~hJs?$9+?hVUxf1(Z4uc}UKY}ns8DWM@*m>H-ww)Cp+bIiB1fUz^tKvWkM%JPz zi3cE*S^xw-Xa#-<1OhmRKnR3^_~E2D(m1A{h}qQH6k|GRnGcIe9NeOB(a@ebj|GIx z6r|p1<2s1I%;F#*!EhMLT}TIG8Gua9vT#?{;e!Zp41UmuJ0Jjluz=(2*s0i1RE|w` zQDtE|-+rnoSdm5~Zi{8QP&#xvul=I;vtZ8h#8d@TkGZFlc$nc8w^p7go4SkwvH(B8 z059-@bFct^r~rS!g$HPbSf~Yp4pzDtRc$^VZnoP0Bl_m;*kz-FAdxc&4c_2)05A>q z+I>u>p^##@p+Dk-jTftD6^>9R49dBX%jl?4lZXYO+6hp`h3Y6}r0_Y`LFb2t=&b@_ zd}a{!O}^YkVj}J6;aOr9T+|*SWGfDvHn}qDc*hV6&>GfiAgUaZwSZV4Xf@^tFLC2H z_GuTi5-JI5Ikw}&0ef>esc-jJ9s+g?Xq%Q`U24VqFDfkCxw-I@+OVNg=wr z<`fW3?4Ck4Y->d9m1RMZ+OZtXUjHC~eh`HJ2guG^*lEnJ88EA8wjK{S-BLgvBu1Un z-K9sYQo*7)5N$0NL|qGC3>p(&DcRnvyk3h}WfgI7SJ=3PR=8YAE$g1xh)5M~2NBQW zmLeEpS*8eS#kIS2cD-H;P^cbkg5oKGb(@}|HkOVeT}39C@RBTin+JFRp5TL5z=adp zi{Oq31J#Q&ZEx`)>SEiR`{@cyn&kVqlDRHGs2j1{?TNj6B3!EuyqpcTKwfyeXms6? z;6aOfvVaGe44Rlqy|vVypwW1x2}#Xx&CqO5>hMS53dh<|QL)fI2EY1&!zFPXu5r#H zDn;UerrY|b!e*!$$EF%*s>$#z;0RX#wAh8#rEHk^2ye^LHSd}?D{{nw@6#Ge!K?uH z4Nvk=YP*%U>-y~J4T>xME<^JXWmJpLgWIGd&>2r~_q2e?-rJer13*}GhFEWLz;T_x z@P>%pIsZC(yBnBkbky*m40}2a?%mFd@n28ER0&)k3OK%% zdNuG?8uQAj96|61KCl3BILDr#34ay@KhSZs$l)pUbx`*xIu4Q}k7!!z6SYHUZCh%N zc4_|-ibKhWu_Y5dE@dt|XW8H71WVMzY#w?k%6S48zHVWr5 zzww>Ie0=LRMOVdMFdP3F!SHf~;DZKNiJGqtK~RhzSHMe5_)SXR-YZ-RS$S|~EeX-k zl5APlxKI*N!M_Pf#WG6Y6P?1F=dBa1m60)`%1@wH2mxD9^TVJHIp6|r!ZH!5-}sAR zzdCvE-ik;tF&&7FQ1hSvoH}1ZykA4w&fWK^VdQC6W&JC=V{XG$J}QqNZx-- zI5e}7FVWLN@d9dV+dIwXBHlJegLM?Q4dG13M?Z>N(D5ya06*Y}0|jn4w|kG&dr8(u z#iSa}+wfpIakzkATTbHtoFe?*7tg6r{IGkSx14U3=*!jCF%_}uX~U6PotK7!KMfI- zD`QVxM~aGw4NEVFF)a%WX#IYCed%S6iiMYm-|Q7E5U~e$Z9!(_G5oFze*6ljzWe+G zaeSR3bN~7H@p=L<0ti7C%2g<6@F2p33KueL=5qDPY^TlzF=!%9;h z9=kNG*b1u+cOo;@G}odOpOJaym2B77u4M;b+R!Z6;Uo95ZR?gL+m<3V$`$B5^4{wU zB_CuPoi%ugMz78;Tt1@XlWH&byv4_YKYjep#d?JSm%oBD!}IU&AK`ib^%S6h8WD)v zZ^9V|U_l28^-L$XwWb(j6iHP9Z_f~?oNyFU*kD)Vag^ML2dQO2Nem69R(GC7RE}C} z>E#$hF*?^ATm=fKRahJ%RU&n82_~0zJE>GleBXSd&npk$Vh{oBJ@gJ4fa!Q8mRW-5 zpq5>NI9yi$VvQvtfA2i=PJ&TMcoi9B07n&xO_d1Sh7FE5UYaL*2cuvL(fD0tbOCyh zIO6bei=G6fXpoI(c^4Q#(|MU1k56$46p%qyfMSl6SzssudwA0d0T%G1pC1}gY2~D? 
zy80?aX~O!Yma>*LW@2()T3Cj}G4zav2GxdUMP^Rb39A4S*ind;%KGK9%`&={b0@{+ zVnH;<)#;Gf8p;O&7(`H+c5}Ii)?_t8XUsbx5v1*`=ILswk6YF&7q&sE^Z*~?#K#XG zPWJQJF7X=d>aGVr3+uvL)f-fr39fc4t>768qC%@;1R=2n4IAru(uR2F!_vl>)=2E( zb(viM3P~DLwh@5K$1Z@(RSp4sxD|oTYk4PSLTa7J>1F92Oc=90K1?ZrFd`LjUF8JA zPb{U1qu&EZYn^4oTYK#**ajx7n`?6_M`oSjC2dfz#)f$iu;P`MTeC~2?XX#q4r*ej z*IMT%x0i87(p-CO)~`X|Vo;e}cWqkaw1NpjNxPHv`Sno98Kongqp8%WN`UBG!G4&V zZm->^&pNiUsgvEhk8{fz+h`Xmi!p}1LF@WM&mN_;-CfgqVq{=e#NtROLjL%)k#Qyv zWNg(EfqS5eYgx|MiUeG%cKmAxCh8V;D z=OZ}rfgyXyyG}HA7};=acbN+ic{&FtkSQuU5j$Smh}1a1F^W?JC{;Fy2?+-hBfdNU0|mQQS|VTp zekCMeqr=x+fQAsuT_!r=>){F$vmiWfL=b?0-;pW+2tQz8c?*$WLLk&8J~9$>QWWC; zAUVV)g6uf_15dC1Ar(m`r8pC*BHsQ%txJljB9r;Y7fYBg$$W<`77$%zhUN$Varl7% zAv6dd0AWUIRi`0+f*E!SlEOyf%~wz)=0Pmr2c20BTxJX)Z`xF)WFfPf)+Eg_T_`)b zg|cj%XS}}D;BoQU|7dClK(P12Y zQ(z>e(BHI0psZ}mpHL*Tg6v|M;d;OYL#U@eio;qanV>@K2!!64(2y5Z=tfH@vj{|V zm?%M{u&9|-wPo~G*_>)8Ia;!GQn9OhJk8pW_R)GF(0^lj%U3LEOu-EQWHpIGV?iV( zm#gJ0Tzoi31O&kc3}Ck7R64GPRSE5|JtkG%epGMY5`ZW{WuJG4-B) zn8{uC>jxM4H?BsDtM`Nnz8fp{es(&rB&o%k1xvR|5iZSDFFP+hCRsHN3+vsWRk4JA zbtA|MZIjRYI@(S7wB5U-Nw2smfuLl9A)!f4*MbgkoWsTYkqZW{HWR+YMM`XaPF#6I ziSKQclsnS!cH)>ys3?(%3W?j1C#kyMA-F(z`twK!$W;ZER?E0U$z-=&p)8L#Y@7}; zs7E9l7O|xwKt&H-Lb5^S?WG_3=!bg_bwk?v(+?}z9((%4<4p-u5xp_4MkNO50deVk zMB1KeHG7i=8^lb~M3z(&E$v`88e;fEt*|c)r(8*7veWJVB!}bjEcT*=(OpIfutm+6 za*&uVsS648*fJAw1i~A80LLrbgMreO;{kUF!~KNhvQUTGL{~l&|EFetlZY&Z8ARET~S0b$p*<@@+G#}eL7op zG%Bgt-KF>-fzOF)3!z=voaIC z{M0Aac^Z%@!?03(w)o1?pryBV`HAk|ulpSSIN*Zkyu-%n^WODTySK@)nwc=-v_lJe= zaxf2QXM;l#p1)Rva27{D>nuVGaAx26B%Q3aC!^*mMC#c(Xd@Sn(c8yWnApOXh*QIC z*GIAKm$09;xL0g3M`)Ny*r|#|(ch*C z-e{B_4hrAxklyYUk!kpf`~;tqbx3*X#k<`93<)xz4?+Y47S8g}O`N0{6E1{6jLR-y zfE3b{2Y%lS-O$iT37GK>j&*_;&=DU!5)w`d{;5a|21=jQ7-2kH4~ho+)!*IO-yIHN zDScIzJrIaw2p?)yf*4=?)kQzx0&;~0vg}_UzMc-^A)w_ULO@k6vJe|q zncNspgSbs0PFxA4(BGt(oxzkOZW`|h*|v03S6~sj!Ihts!yj#83 zB;Yj}Ql4Y7?9wjW5#WQ=*hm-d_$CCEG-ZXdohSL5fwTMc+83HQq$w>Dabmqs{@PJEf3& zsF9?^+jR(LLLg=NWl@gk9Xqc74tCw;6Y_-Oji5#H$H(9$Ztx(M-JemaCNXxEOGbp- zAd+7?QD}%Hb^w?ut_P1HChUM#RGMPTjoD*@R6``6&55A&U<+o>$dssJ_z+Hwpv-bD z%*ioa>}g5~k)-E^2crF?K6Xixu_kQBX2s0rLwt--h6O&N&)z5?*Cpr6on|-fgcCC6 z;uR0#J)A%pPPMp*W~Pio0LwFY!J|Be=b?+=S!b&}nR#N_c2b9wK*n>PCu?j|ggWAD zVrW*G)JO?W@P#KEvfyGIV76SPeq!Ov4X85e1dW6b7V#(Ir4IE`qg(i0a{k@gMb|U9 zWlBXRqnN}9l4rxk-41sD6v*wwV3@>(dZ^3u3sKes`BY&+o9N^D%vEr;}$iV5#DO2c^C`*V(ozvz5FSYwrEMF z6oGvaU09@xK4eb}%C4o8vp~^XDQD1_V#_2FV5*3GwiohUOUO+qm5NlO4h3GsMV|Vq zwN>ke3g5f3D~OK&9IpK04Xze!$ZBHAV5Y)l1*uH4s@8LoYfaoy3YApDVPo=eD$H3W zQ5YfdMDk=BPupo&~7nDs0|M3$!(4vI)& zk=h=mf~;7H8cukI(g{fLayUVqt@Ofzh>%yiI750N6K#N7foL@5(VKAClyvAsMbXC-O=R! z?1`za(l+PDCMj#`?Hk=uQ2Z z%>L(p0ZpAQqgV=p^wxr@1ujwiQb{&T&XK0frAx{Rg{#&XWWW*A_GyQR*S zM&dF5uuuTgcV6SV1qIpOWCdHWxs*gdJpganXmS;L@em&QILsWGo0OK;@x*W7On7NatDgd1C<$n6BexoC63$O(0=@g+xkum^CWPhC{( z6&;@a0aXT%FHpprxo{sL?bYI3kT~L=7SpSCozMS1glE_;k*+Uz@-Kg=u&>5z9`i~? 
zoe3VvF{64IGWM$S`CoT2q#VIpOL0p4^+|HzZYmuzPUOF-uT zL{p-v(FiJfl$vW)E{#EQPpp^n@@K5lZqP;r3hxB;Sb#SaPBiC*i%>H!#w)&lGV#Xp zK}?pZq25W1ahVh}WkK{2u8l-D1S})P;P%ie-=D+C59`>n7fZyAnVIB(Pj={LqbjHK zfoT*Ag^s3{0~xPPq^n3Tks0HJ%f-?A#1X^|#QSWVM`trKcUC23j73w#`;FqHNQLh9 zSyS(AScP=lb}J@22Hadhjo|^ik&jC|o>e zL#~D&ce6<3bT&sscI52i=CvzJv3H=zJ<&xtyxR7SOW8U0EA9ku1YbvdEh&?*RMaum ziI1!vnueq^WtVm1Lg;6Wm{&Kf0Xq&tBC!Fho@h6nVr%I`=<-!75o05ARqNX%U)!Yl!h70NEd7zDbF7{6eeE~UzvBi4)H)kCP3P+eB>uOO*#jdKTURE?7VygoB^kbIESC*3e*;47Da|R zt;Ru@h)e2kcen8eKe%13mfyzZ_s7aJQvOGEm^(E<{QFmdU_pZi5hhevP+)~}7HSa` zI8fria)JaUtd((L#|nZX5+s1(Rs}xd3W@_{plSeb+vUuqzkR+HaHs6wr_)TW5P5U38_Q8mytL)&%QwT;f_U;NXo9zH ziQ{9z0^{+sS(^zbh05wgHt-612oM4+5TreV1lcVsx)7}JCgo}}j;Ec*N@%&CeuC~p z3rQ@ox9TWNOD_|zg6*uJ0%K9e6F2M-q4J>r6HmU$Hauu4-UI>(zQpA7gO7)50gaE6 z%87tI@~R_@L&_E-=pEsRGcm&bED*=biz1BRL8)w<(M+JcqY+1ixcVzg)kZW^PB~K) zaL(*ttKdpK-@Mb$;>1gF%^XjA$fec{`mr@3Z;bLKKty{8gU}q}ufdS0i*hjJDAK8* zzX%FyPZ{NVfWOc%AV5_D#H?!2KP${rO$`?^>_bp%GR!SqVYNxqUW=meS72x3mDpqb zJ1tPussvEImNHp|eH9+~S!S8du1uLWDl2ZIRsD+j-eOboCg!~Q{YR6jA|&q0Tr;%&%b8y+mqdZ(lkMVhVztSbZc zyp9Z_vw{(6gSI-*Pk3`q7SMvDPV3gLuA{f=J2kX1;qW*nNWS!Zdq~lX+xt4Fe@>DQ zmpeAr@1b9DhRf@Lf@*o`Kh383(lImSbZg)WbUJW|p`NO`D_?|>jF$m6ydX4-GB!H3 z2S2WEt&SeNu%q*o)$>2Q?v&~r|J$)TBsV7LA+k?jn=}Xm>BA3#Y^#8!hy}#lZVFkB z-OQyLEC=wLeIMN|pRmKq;;fkeA8z^UdeXY5&Xt!@bK!?#9Q&MCtU%!GiM^Zq-)KDz zq=M!9D5`&6$N43>pL%5v3^*d-ql_Q6)7h3ogs5X3FIZ%;9{JiLoB$%waWR740ulHf zx}DEC4$PUXb{4^2buDFJsZs1U6E1(Ii*E*ln>`alA^?+Sr!5Eo(5M8{85tm&ZAo z@p56I%8sm<5k!hDGe9K&-Y)c4kdyd<07fztixM|FdnG1^f-H*C*vPXvnvjrKabib0 z$+Yi5E-uM?8s-d%M5&Y|l<_*HCvo|^d%3Q4MhS_v#;6c)u|z*5xlc)M@c=(u(v<*` z(JigA$z~ZZm$MWaE6pQIwFJ^5sbpG~psAK-LXnhVQ&u(IXc*#P6P@kkS)-!(9*!|; zjPgW?Q)YG_0+d8Q1Q}Ts$EiU-hLb306ejT~;X`+NNN9F^j0b;1OSnkXAWK~6ImH6c zf(6u**Kz1YLF%6>sSQ1;p{HrAbuq~_(jWd{cD~1#Hhr|9n>=Xo zbmxV~g|r|6Q)o2*HKn#em69mlY|&6v!&FQ@Y%x)ECqb>GPNHsgE~OkH7VjC7MOq6? 
z7nw{!{Gkti2!u4CF=pQ$vylcR4J|L?sa0Z=(+Opgk@<9pP!+*d7(wt|nN;fYq}sh& zYUNGrbW378cti6E)`CUVY6YCiRn6XPCP-Qgl3d3qA??Vl0_g`oTFQ@pG=wz9{0MIP zVpiQ%(K^kW=x`PbrB^9rt_CSbP+;p$qdE3)f|FWTwRy+fR&R2BqFi4f8`%=_b+ZM5 z5pgnGUF{yRbOYg$Z*g-OvpLAK1mTB1{2{>5`Zcy2>kn<^6uwy*kv~Q;#yf)9PJi8X zYKlp0Qj^>Nk>{E+Fyq5xs46?8%q`1w8C~0U3&TwaPgu9EF;2zo^15~zPm!=~X-5`0 z82HW%zB7`qtEh_*FomS0?_n=G{fJ^|BDGG@O~`?Kq+oRsxt_DcLb5$aql9%>~j1;=TLu_&2(kxQNz7a>8}LWzsK>3$7c zH3qVDvMa~KZSaHR3Djl{j8!Eo8Dal$vSk|St&I3iKEdV7mYedR601nBk(1+YZE~5- zoHh_GDeFSN+|&Mv3tt_p8gbJcIE~sEeX$hsge}UaJ+B&wO#a89pWMjV-lWPe7R;rd zZ04E&a(TJB$X)KtG?+_hPQ_{P#Nrc*PXN>FYjKx%uY_o(vMDk+IpdLff9)-u#yqdOBz+`7E}6`Kw#RP1||qjsR{zk-sQ* zVyqm;9kqbm#i+Hb{p*f4FV5Yju=yHOVtKZ&BVg+7Z)V0QFmZ(42-=yOCilG}SH-WF z%FLPDd6s^!dD#{2Z?EEHxluLnE&uU%RClRxLE_lm4qvs;s1#!N@sfaIgGfqelTS$p zA{ObT8H?mADg?i5{)Y*#}D@kP90yzwgScLSk zxl;r^1vz@4F;b32XFe|PFh*wlA&5Yae6K*+lK- zfaN)^6nh{HW)&a3R7)G6pcch)s};O7uo;@~962#qLeih_IH8DxgVaP(uTS>2g8xUAgyE45kXRo zLPQX`XeA@X>CS?WffmwM3 z*wG`Z%x|EP`wHZ`^yCK9;%*d!#fe ztVTLbA&3Ve!DGwrjS?s!D_o@az>y&EqAGuqHZTT(xI<~yNJ`c&#gwm{|A2*+Eb>YU zW@q}0Dp#(mNbMcxL@U3kXw<1AzmgKggULJs0QX82BVuOo(b2L>Yamn23{xWlGG`29 z;PRr>vX3v3%k0kRgE)pFrO)dm#v*&h+-z}1c1(>7Vz?r+H^I&X^->$j5fNeo+#aE3##p5%^_ zQ%IAL)f@*aNn|-?Z2U-Mg-!xYoWugSW!f}lCre~H8|)6>b57JK`Yx|4ku*)SXH6w2 zHTf-59|Kf6$>Lzrn%L+fldMf{vr?l}HB@C6oWc)Us7XYlHM|dl?5I&|E*9@pE%Z!3 zP~uJ=wWx%qD-M)W5okW|6Z?p@BcKUM#OzZA(i+E-+kys0|3K;jzXLxYsU=nji)w)+ zD)d5pj8Q*xZPwD~9#JW8xT_K*dCOeV;n5s%e#aMbglb~b^tIY{F@C~19U zLJ$h|j%w6vEfF~IgHj9vKm0^I;iy!Z*5UXx-o}-b|F(7^E-p7JKp*rWH_Xx_8=^cx zig_Z|*GRT12^Ljvq1bi-19qVSs^7L+bG@x?P z)aVF7rllYRw*vTqA3lN~HH{KEb~ft*R@`hb@Z&v5BLYOjA5^Q#EZ`)%wI7c4-CxRrbfguvewyYc+LGfmFFX z!eBFl005yD_~0D8ArJz=K_=IF)vhqFO&iP9E7A;5UqQ9}!8TIVe@jF!e$`pGH-b0N zdmC3RNQ@)M%e|O#@DMkbuw;GNw|%APeXVI}|8tgFlF0)40UQWGe|Oq#AqNmg6|fMoL3jWgPoKPBJ{xjcRt0#7D#-%;fMonm zaAUy&ULikLuNLH1hA(SKXZUv{Z{OY`+H69B3BX~cS4PyRbFfp81^H<=s*;0La@gx3 z!r>qqc?mldTo3acBXnTF)OT43sDMy#|1ID%j4okq0#QNud%x70-yt*@_@!QzMexE! 
zae164cDe#_M&#)s0>K*^X_z^8NFkYsu9#*ixpd}BmWWk9Z1)`CW{n}Dlmo9^4`LvH zR5ouCarrxhrrXMOz1vB%bi^64QR=D2SlG8DLHfP4zlrY9t5YBc%GB{x!R(YZAfb13{8E|)1 zWQ}Zjv0h4&@`H!V8a!x8S0vh~|MeQ1F|}`p!mOCWsnH>{%1_34@2b(PUFo@BCr^Ho z8L3p`Bz%xGFaRP*nU85Yo>voy9=mc5!(SxzCT7|;=-Nc^!jC^wuR*qXR~h2$n&8al zLCWmO+S8F&t!oR~q%rc;JhDwygMM;jcK#`bljwut89G_Bv!&u(u(zs7H#`vXu;w5nC>Aw)wad3-$3$uYx`Tu$dtxVKI9sq;V(1 zvcc>`rE`TX(t3w?b%CcF&UWe~og2F+Obgv?&E!t0F;r_1ZQ^QsiYd%-`aANNSF);4 zzY7Zqqn5dL*jxO-4=hu3|91#U1-i3&k(J}Zm`6i@&egPsLy`>~3_GuNlUs>9oJBN_ z<04$b-AJ?Jg`gStNtG>xVUfDNw^n+{AjC+TaR+YGqEVqXLY4c+1p`n6JUVD;Mf`Zi zfpL#qb9>m%swy}wHiM~$k{sQryt8L{LsGXD?BL)W*<49f5Ah&kc}c1lc^|jIZ%Mui z)YX!BhvJaM?S9H= ztT}K&`=xnqwL23L5@`uk4Bf=+$^$K3(S4I*QCdW)%})yLe8pR{nnvLAQqwn`-|URR zGn_9q{66^nxbCRY{{a0?Q#{%IqU>H>P6p1EX&rBc$E&IhO69dzsHs+K1@qh-eDKmc zDjd}PI*vR2hgbu^=6o=qO{cpvI;*^L{w3WPGupaiakG7)9Q(rOvA92tUiPGu5?w;u z707K^ch`hEwHn7S`cKsN-DJbwsfr!vd3200dKmuS1!=4OlopW(VCWq@2VzBB1QE*} zS#)Jj^c^&f{hjqzsb>s7s}9Sax@kYCeAA$Hh%)>cTO;v*(@W`uHytP%x2tb#IPwuHNaH<1Ett=&Osb*1gT& zVTBT55+qv4{|m8)xVUKqdPF-Y^B}(KA+0{)2=AL?d|Jl842oQ(HG;gy=-nP3haIF6 z0Z>D&<>xl)?X#q(YDxotf-{h5cB<`FL@M#7qC@{J)Jg379$7pdE^9jSBXjZ#5oZZt z67)nr_<*1i9_nU&L}wnwe005iP4U-6B7Z{0Z*1r3IikhT$Zs;ItQl5#NL2QP^kpCX z;(lPZ4^0477%=M}bd?J{)-yp4e10D~w?EE{{YeY-ZXQ@jd!KQ!!LpxbUAuh!iVYyogcYvuE!*J!|;x85u5o#Ql3G@Siw7 z6>jnA|3n6*L6ILb+Pr!29hpuzAxfOsGw8vIKY3!jLkNR%H5< zCf1Zdg_0p-HLT5$J5h?{DK@RzwQSqEeJl1S+_`VRf_$g56MqJ!R*2cy=46Q-pJB!OPpn;mcr&Nf`PCy#GKEdMehoXe?2Af6 z&%W(YF~hSS2@pt$S0rG(s9WQX~Be-A%CMWyoVFYM^C0)a_f#M7*>5=_rmAQo&`4n|I~mQ;ivd3KarogKK4fE9?88j3Ey2%~Gz#du(O1ZZ{Q zO6Up0$8R)Nq+3|axkw*mGe-B^Pkw!cnL$S`RE`CLbZ9{>*%5SPiRi7?NmUCv4+2;cA0_z+Cqm^}HROx6i3#C!mz9N-c7jw; z3wISvIgo{so`fEgJ4rR^brC)()_-oQhaYWiMrUbM zslNp;%y7fCDp??a<0>JEKK{sxuC{;P#O{xrwiVt#%!*YD5S?geL5Ic``>$MNhG=1+ zViN4|UH`FIbIv;NTxL<7aN*Kkd?d0{KLGkjYpu6t)G^PF{X{_G*lGwkpqP^%ehD=O1X_sOeTYNmCy zKvT*UB_#ua#kP|&&kQ0=qaqaK%P>lg8d?dJUDo7ke-66e(T09#iSO_z{}DfXh~p1G zMhmnjw~}M@H>re*6>nSm3KZpaB~uuRQ&d_Ln8EBE^aKY4NaR;SF{iUQ| zkG)T~n;JGm!I3!WGfJ4$kE~s-l(f69y=8Y5h?2@ALO+MN3xganOXM^+6)pv3Y~(={^gcu!7Fo(850s!0uNOSEL#yGC)fW@rZoH-WQRC!n7EWAQ@DT!t!F9aAE8#u)^ZD0N9))X6uh2sZ@1( z^2MzALmzlc8CPQHwKe|nF#ie3`Ug0xFGt2ERxU}8t103v-&r$fO4FO)WaC9hNI7>tb0hMy%l#zMBXJzlBo6DF4CUD- zaC)y_yL{RgvnjA)sf>3l^IC%xIz6zsF)Fr-=0J?eq=veP|3X64V=Uo`(v)hiW&gzJ zRNi=>vpG?X!b6rgd?U5dJ<_8Q(dnrIhLbSrbAJ#?8BplA9iRr%Feo|9?4(AJWvTE@ zsgdfL%&F3>YLy|+A|65o0>ypGB&?2`X-YCB4sl?~VZ98|wxrXO0gBY27WFAXAeyg< z3dE>{aw*w(V#=|CMRFw#DV)N%$E_-Mv5~yfuCizos5WFHl672YKr|GVu2pPw>|aY! 
z1+67MWPnJb=Xdf-5Nbv$oAjAsMeOn)P^F1AQpN0H3p-WD>Q=V^`zK`KLRQPR6*4SM z7F&5r&!xf$uAWscY1)~VBt6MOWHci;C9*r$Hm|8t|AB3naNAiF8PJlJMDA|StJUE_ z_Nx}j;Atb0sfZHSVU?{bd*8E}W%|`t&{d*6n~T~C@IxQyu)tD|rQLhV%b>Wuq(NOI zM9nR$r0A{iV!bLUyW%dqw6U*I#D<#YYIsdR-7unhHC^Z&s~`L@qacdnLgzZ9JeVts zRaK+FI_mIDubiuee{96oLg&5@!DoCAI;bp;_*7@H4l~(Q;!Z(tsF-Btk+QZj457@x zPtFK&n!1w)-tm%k6e4Bk$SfZNdCl1qa&N~pDzonQqMv1$ibM-1JkMvz{G||F_M6&@ zO5|SJiIV-&Lf9J$m7QunPCKm%%SN+#(xI!T|9Xjxkl=pji@2oe!oH$sH-&o7yFur> zoSC(s+^Zbev>%@@g1P)u@0H5*(@A?`)>3y#s+prQrGH&%-$uD7cB${CUp*1jZ z>4c1ll;EpqScsvN3_|(ZsB$R67+g-wM@)GZc4GA)7aYsvSk&k(aip;Alr*s4jkY3P zG%T8i8M2d2=eJ6TG;(>csGsd+*aCci_x=}aql6O0`T8cs4zo!i;$DoD`%tQkX^R?M zYj|tC%`tOq!G#29&B_;B@B(H*Cf>7X!*7lQC%8lLmz`RO#0wk_ivVkDTf{KuDWI>GIzu%-Rc9eQfA^McG%eE>4YphEFI|X zv@4!lUu$`l+1qyW1S~ZVtT&2Ojw^}ikm{HhWyo;aS)m%_gF#j8G^%MnxL z+4pwFFOfG1sf8E3Kw`V^(A0V#c5(L}`ZnoCstbSau18ay#IJt&pv7>$lMNx#&G1YQ z%utoM3H8t0M2O1IywW#?!4q@uY<&xUN6}JlV>5sKwA@u+W53d7XWU^WUQg{))(PBW z8QQwXb!xi$diEA-?$vq{}l)LOqb#m-N$|p*f4tsa<<|Z4!CqpLld48b6^D) z62*Vo7jW`&OYrAK`Sp94_aEwaSPEzmqcKPj2!pI=SH$BufYKE&D0}S#K-n}=+eT4( z6(5dgFe=n~zZG2(a)SN0EjXwUlm|&M2!%PwRWs6d3o=|Fvmw~m7!s%u_#zPg5D0gr zP4Xdx3dU29WrQGygdSE)=C>wK=qDf*g>$GgY;!wRNPY;*kh=~!%L4iYL6TyOQ(I_kv3ts4kFNcVX zVkMsB4R~WL8Ui1uSP}6f|6)hih@sVTSI8WZ7*+Gv$FSYQArr36n5+(j1ECNAjW5^tTXcc)eDoPcANeF&37DUB28@l*WYDQiwQdP$o zjP2+?Bi49%XdYQpEStoN_ri{}p^X&S5EX?Bfe;H;Q)tzgjYh2-jumMt!9#$SCXeP(jrHg{*62w=!G$wHk*PI?{SXUb*o=nLhq7Ui-BA_( zrff$xlSgQac4aL?1Qn_QSWRVtYf*lk^ON!Oa29ElnrL*e2phgrQN=P~{;*ded3PgO z5bCxN1lbSz5D0}f{}2^*8%kMZG;wB=^L#aFlhBeAT_;;l1Qroy9>|jyo?>KE5mI>} zen*Ly`u2NJ#uzI(ECOaRsF+42_>vk_lH0^PxDXDwfQ_K^8eo|lQ?`~hXi@)yVkU@6 zwKa+7*k_0N8u-yvJNaz~Q%PrGM|lZ*sA)UJcpL<%9oeKEEC!Vj<(1O5kIyt$EVGiU z)^{yw8j$G_{jwtNw2-W+UyjI2YFUc%F*{^Yi4Hkgv;|A5iDcB7P+yaMMh7W`mX${1 zNwv9W#(5zr!~m??jA^pozfJU=uLm-tcbR9@tmm*Q!i zGNO?kCn#M7{~7v1HMl^Hjsl+X37`w~5clGR0%if!s7(#Zc?tPU%OXE$IdW-eOA9$$ zWp_{Y=@w$SnF|qN>y)Aa`k(>&m;>5rmp5d#p)c741VA7qMuMT1b(?gVk9{YV6@Uw| z0F~R}m`JCdiO74NXP}kIh-w%xfajl8>6vt=b5~k=z_Ow+3Nv2nF&_7#Tlk*X#cB`* z6N03CVhz!9(1o@gvxug)sPfj?CNhF^d3Re>rRH>&y*FqO$QKf|^kzo2> ziprN537@^fQitP7=%J(blN}LPS84H#KiHLbHKb%ZroH$|U*~%O=6VUa8507Lg2YaU zv8nkf|4x=9sEm4n)Y&wlDy7b6UpLa`^>0kCI9vYex)Y?4eXluO2t?`+AmZqBE%CFKSqZ6?q(ISlv>aDxzfZNe1 zyeXi`I{AKh5TBA7OH1f^rvvo^t`5UFT~;YgwlCp*v))rRpL68mO#MvbZT%P#bqT6|wKu zF;y#)m!>3&FroPa1j0%NP{ST_o2`nAtk62IExMOg)=g#^SB4czA#0yr7iM9LK>~}n z$9Z?XH-nh_tqA*y0rL}S%7??Ue-!Aqh4-|l`=B}LXUB=F&ngs^(nSO~7ry!yTf!{# zId=5=xhYFjW($u7TakU6upGm?T+uhpNUrfk9II**uPS+@OQoL##Y={b^Iw_hTCe+f#8sAz(xwoar*nxDn93m9%TV0w|F`+e zUa8cf`Fj(ylOd~Sh*CyFd$RERj;U$CKc;8R>%5QEdF3%!u^Wqw3#uF0pM-gk zOT?P#*jQ|f6f22kWm~ytQXJBWT!lr#77V&J8Gj%eg&T~sfMj2~cp`4Yk4M}sNoyJ- z>!bfWVUnh{wwYvY>pgW#Q^86s1ev5WOcS#!h>@&VaJ9EKs9fd78#Uxv_jGHbH>)d45#@!)d92I$MwO_9c>3tF+}UCe z+(P>+bzZl$UB}HLtix(Nm83SzrObTD+01{Nv&>U&(}Fyp2F@-CaH#~j!qdxUJiiaI zdD0B6*4(=Fvli(a!D_k8wH&oQJkIBgQgDla)+@4U5_XL9&KLW6>r~A2Ov!}#%=<}Z z@+p8qF{CaOXz(f&W~aG0H_rq;fhqmSElkdfhn#)#g#Jct=;p$!DPtcp&{BNP9-7RS z%ZB#6&ttfZ)@;pIamZJtlPE2^;)=^Di_m?vh6O`2DY(og9_0O3*rTHO*Du(6}XrSH4gYZKXDT51|g zlP~wz6v$5F)T;y058?2sY#nd{O}xr|gwau*#~aoW#LM%R|DkTx)|k=7&>3#5@uvUX z5WCRY*2vv0aW2|;#~i`Xw(W;9_BW~9Y$=w9?zf?3qk)U^4(`B7_fimB>}8PVaLKLT z>%F#3ZPCu_*JEd4p#|f}Yj1JIrL3VYs9k4Itq`yR2z^H@e6R~mapC0^MDT&)(mp-1~# zlR}OVz7YYA;@$nw53uq8tOgYR`&>pl#?3p{;+vcu*vnI@lRjnvGm8@Q?aCq@R3Jq_ zt3l>U&1oF#s3GdH!1mHzgBwDosU28Ssa@yNT@eJ}{|6Y5GUcGv{_qWZzz2R%CH})< z2rRA%d+4$aFU<0$HNM7-A>t5mHG^o)Dn1rp4A(E-!6^)YDt#njEKA@g;9t4mWITLd zT^?Q~8@KQPEn(}pz)7#L7qt)pU~XhA?0u@<>(Qzth0)i?X0=8#ddjCs%g7IbpoL&A 
zIGda4i>$_!QMK7^xMiXtzB%e1JjvM3L7&d!)!QN+gcdXy8+s!c4-g2mz6Z7-2wIJy zmh;$*d(ph@%c%>p!S39c&_&Q-j?KqgW2$Ufh|f|K18K-R}<6?S{;WA#gU4P0LtK)EEdK zX^mCCa@B#KIKON>%*@sX@i1a&SXo_#ESVT4hv{rD>iezF9K7@zMBa@t_lD!E zXrJ?J92uhQ$lv>Vu?DikiH7aH7T{h1SqToe5cLnBRx5G7hFQ@%eU|)M6zFjoBqF8> zYi50|p48+IET#|MU;*e5XkI^>?92AAi%@v)%alv)nPIWKvnb^c&mIcs;0FXyMg+jhfDkUX01$pG|5PAY&|tw%WS$*l*wCTDe-R@-q*#%lM2r?S zHpD3LUBP!9H;&Z!j?BqU88ebRSW6H;eg0$$64bILOPudMJ|mN`6HcH(g$^ZJ)aX&9 zNjd%_I#Ht1hfga^Eck38RICJDmSZ@uLM;nivo>Y=)G1YwV}+i**s&~ArO!I)OsMeU zyH3y0?p0gy9{~(}B8nrxZ{RIH1OV02nDArLv{2nNZ zuwNjt1qG#6i?UPRzOiM`rd=B=)uVi6OAT#%s7r^d36`_K*f+(>$txeXxOe$ftUMQ< zB=h*6$&}A=*ZxNs(8OD}fLBN?|9qoPq>+`XPoKO(-JhSI%kMqZBz1_&g_gTXu%9=t z3gS;Tlnmmjr^lF6a6txr+YYDth5~Ii-f*)jDCuOQ>7fCKE3U!MLJUzkwm@VExB1|L z>b3VyN)S8xR(h~6jQDuKI6lN<3^70g)F{KE*vm~wvgW(*8SC_$kEr`R%MY{}M-qgw zeoFHvmO%LNr=VJl>+7VdI!n+*GRriRqzFe!bE)P^yiLUlnSv20Ak~_(BJ(QX&r1d4 zByqwsN7N5Gs9?PBAQn}El0u9Ki-0i#DzHGD9EoGiBD}7WaHaJ)9db$dG;?oBp~Q^J zy`U^WM<0F&(kBpa^cn8X|Jcy0;6RFFT0)NoSl0OT4lnpywdv*9>i0y^z*Hf)^C|t-acCJ_Aplt|YXlbR* z%|#U**bQcazw%gIm1BH*$hEc; zDQOH9N=Q?kr2Dfi|6?OejDV@F9;)YSe~uULjpRk>qZ9L|nh^wFN0(O_K$;pdemb> z5&V*_$BA}|V&~HNulR2-w8_bhe!jxp*Xq^~yikW}nHt;Q7#BUojm0qr0n3y`M!k1E z&@UJS$QU?l$jI1s4cKCrb`|wM+Ac+Jz=2|jU~jP%0iex z;0eufuTfypxC8_j70QhYq{sv>pp};-{~Q zJrYoXlx~*1Ts(7FC!rM4ArsXIRjHbv4N^{;k~|?($>i09LXse8gr)!o**lFMr%U08 zMNPW&lycVdlTHQSPUrGRn8p)z?t#xwA#@hJRDh@FnINY8+Paa5QIje3t5F>bv1eLJ ze*H35Md!xA-!yWnA7aT>qZT3F;ZAOsT<2W-IHRChj+>ZEYZ)!G*alHeANqLflvD&! z|22BGwTfk0D_c9L$yhIN^GYL5d;%MXD6l6zWJr^ErpW{r_p#F@pv64bOk#qSo6qy1 zT3X7;v{jX}&yprh4JlHT0nn7w?agwURGl`Vcb%8p3}TsMsVem6`?t|QdiU8gg1c^^+imt5r*D5lcIiG%-1T?{iQG)QF^46BvUoOYEq*A1d(aniQ9 z+ElcZL#?MuYE&0@b-@fNZ1J$wCM!NOHn#moKf)0f{_4rZCN%A6DFVy9j0v8p%-+5l z8t1BIF;RhG_^Pc7_s1BJsHNmzKn+}+dB6ns$qbgA)VP|!g;)%=s{LOV+5V3&8; zs;i71@$>0DZ}o5b{vxvibZuEmWIX=RM=YMrCxOE{xA?MafsI$0x+JN{|7=`vl8h{B z1m%j;J@hnjFI<#lIXK=a+c{3=y%FLlY?IksIac{~WUXy$Bm+X5GL=HaA*ZsHZz}qN zq)4A#MvmV`pCZLO`yu>c+>3Cz6UX^+r$$EI>wgVnXCjx^k^c$7@FvQF-PziQ_ypb5 zaW|7Z*z@=TzN+gwaq_ZSBz%WC@CK4~Qmhtqr4prxsy;j?qbc~(y2adD-=pL*Z}3a^ z*KLc;xV1#)4Z>sCrw=vZBMzvCy0GLe?*Vsy(|3*|JcCx=gE&5P9Fe5 zGzHs?y|_Qb;kpt(UMD`bpD5)1Kxuh8!+k4Ss-=(Evo^Xp<>9^iP&eXRKrzd)7gD=U z*&(T0zUpzeIiZN+0J4yiJXY(N2Z@u*V?Z`Rxd?l(vGO;IIwII(39#U-SkQ;kIEW3j zKeCZFe&7cz=^yuFy^-rBzB4(tgFDUOwj~>hRa!L4dI%LVH@(w41&OZ5NH-CjLgpbn zS1U2&P&4`>I{2Z$rntZoON|-a598ypiUL9^EU*9*lT!0Ois~k&>XMGB1zeb|7}N^t zgCL5bosO%CsKKrO10ne%p+^fnI7FcYdXL?yKaiNQ++sQ9|3Z>n3BaG&n}(4K12hUt zq&>(}~6S6!sFD~4|hG4Gi(<+Vl!^k5F&#HxEVYmkJL>i2Uy;&|&J2o&p zJTp2mv6z}bn5|Y}GojPN94aT62!xL-CqxQF?sAB}dZ#Z!BEo_kKzcDW>N`P9p#bE) z=2D>OVmIIGi|7Iq2jfOt94N{=J}Yb{2;7uuLLq!XoK-x*Oym+i5j&;ev582rNfMES z!@iEdGMQ?fi1WoFBnhvmpkZVcsp*pUqd}G^JDCUsTxh`$Sv^Cfk6EjWw=+Z`#1s<3 zppmQ!l$fa=%rpCmGKX6T_~=A)#3Scg$DJER7A!@!|1rFrGB1De7xWlNo*X44~NKE@d zy&^JbybHmyG$cC7TLDMJb0d|6#LKV>*yzMAGo}YiKy|c98p26;%#-xf$+ChU(3+cI zvy~i6w4)4?x1tK*h%q^F|lyo6CH}DkYh#E&&di@P|N%1#h60W8^`3vzl-S zgrr)RhFmF;RGb6JC*3QsB2%ywGNWId#-n(zxp=ZDt3c*}oB@OJwYb*9fHIxQW@~xIm~k zA;Y%c@Iyhk5)1H$jI2yE48oZSm9<>Y-Sh~-(u*4d9Q_b3oKqhJv8Gy@%VV1t6wHd~ zI>8EEPNPE1h_bhan3~A^zHAH69I6jmBMB_BB>gEkqUZ-0+@D}U8?L%XQ^SaFk&Z|d zPuuwiFwvlL@B-(%L4v@ASSX*-T+gC#iKz*PS9w8ZHl_Ui3@Pz!eR}FfzkJpG%lm1haswJbju_w~h$hjR`x6P?3QUL_BSvJ1?Mpd3{k<7#kcLPJ^Sn`1(J0Inm~O z5fsGJT2(bZMT#PFC?;CYe<;u4JXF4;Q9lt?F3~L1E5_!$sH12>3ld8U2nT*ZjmKNf zuQZTyEG_y1*2&=||HBq~@zVZNxS%jM$WazIv{0e~Nn3ra==&I56%C%OB2(nK>Rh=z zwYh>-LZg|eH9Z^9`G>Wk(J#x0#c~YcK~7aoL1Lq#$*YAe+19fxF;LXXakT(LvmovC z)aFx8Zbd@I44@-CI3<~mTBVcD|6Dl486}Z5rK8Osw_2&Nz(GOiha#mF zuY|u&tJwg(-6xxBR8rFEusG`p^ 
zJUT(b7pII3WqMeo@W9)8(*=Z9^Z*m=P_R2{JR>qI<=ZUd7>Kf66OiH6{*prrSVkvx zwutnRH6?M4Vz=f$EsY3s|Gj!pV&@ z0Cifm01`8E#lG-BgRt7i|B4V>K^%;|GQBu1^hj7EqNcB_1*D2Rv$a(Ch28#%h$-2M zKsZgqc%4p_+s8o=DW%!oMWA_#qMEW6aI+?t{Gc5LiR0zkN%UKgh_Kt-Uc1PopoLyu zaW<;4->);wrva;MEYV=Siaq1rRrKDzK(P=VPpGWm_}H?pE!2J*p?djRPYN37@f@j@ z9>SYVl5mNLFrQigS8wo~oA_V#NL-QQU6>sfJwmPo{*wi^BD?&l2ae$Dam0R8E~VSv z#!I&i=3wSNeg&&?>fK+N0Ep zK(fA5)Lr5}yhM;$?VBWHwa73LI3S*k~Lg&{4gRlL*P$inVPkx3M7O;}ix3;9HF1 zSQFj-gJMNg*WIJbRKkmWK~9obFn(<=VGXA-v1gaH<%n4m*|oPx^PE!pKy98!@k~L= zP10g^Dem0CjuBsAT2zskAV5YEjovhFaSS^CAK24oQ*zUl-AT#yEj_sjXB(f4WShd_ zlgQxYPWG)&{~jin5#^EPl-fhxPrYUc2`q#_lDX1n0Y*s*M%+T>7WEWp_DN&@p;$KV zleU~dIVI*j&bJ}LRcFaCsEUZu#YN}i#D5mBbYtZNYswYNIOp8d(L*W=*oA~yXB}i4 zXDf$Safu8xj?@b1mt{?Oeu%{)>R_{w1Z~-yO)R?uQ)}9#d$n0{j1A80+FEvM6B!T@ zPB`m5<{Uoc6td>e6q*5D#q}B`{?MFI+64?C z0DOIcvN|CcG6C&d{WQLSIT= z%Ql_;|MHQMRhwZnKR((z-w^F()?T9xi`Ows)cz+$F&n7bmckNTN%T^`a*o?^B8v!+ zKwFJlAOHeLj0k8XA}Lq2(aL^Ua9P|8SV}fOTQA+a4!%*MnmQDnb|ol+vUwd~5prz3 ziJZP1i(jH)zy5AivkmvAO?tHnsU6Z-z=f`QFp{-xsc-d>!43C?s?p zAzgu-#LfxrzC0#+sid)OHJciKcm)j5Z3_Sd#kdLSk+zmb+0?vpzkU z!J^7h#>L)@jO^3IoCWdbY9kSsDPgs7W6Gr1R`KWwF%I#=QYuRKj&Ydw7|rt-VF4W;Y80DwB9c8>s9;wmo6cvE8l}w08xYT13n<)+Ggph zIZc6yh#&*Zsl#1Qo*B(dbNL?S+||GAX*iIWD?QgkfU5Jk$Rs8Vo`;JKhf8rjHF84VcJxSOD9(x!DYr>UKi@L$X$P%Yv7!DxCMb&ojf59bP(5mAiG-FXMH<3;2G!%1%>rOf0i`20Nxq^Low>KV(`o7G&vI|7e9)z=a2R z01RN#m;j3hAb?xAA1(g}mvB*jK$vl=BQihNwWGLnPNL#1$oLp#fapYqOs9Va|5Zq+ zaN)vt0ta?_$8aJ;hYBISLr8I>!e;{qKBMT7;5&&NPohNmtkW}-DPO{jDRU;xnl^9Z zd|A#y1wpka?))r*P5&@v&`yB_B8UZK zJQH1Ms9hFdh$4#S-A5*!HX>^QuH;*ZtA#e-XdI38(Re61^w2I zY%*lfKp7eIT}d^z>1JjcnN}BRCEm0XU*OadAwS;`5WyV;|J+gwqcvIhmqyt2gUc|S zNOV+Ex~cf5pIv?!6=)Lm80u;q^_J3Tp`lgkLOMni6Ogg?=W0bp;(C^EHO|_JlE=aM zm7c>ATWmsD_LNk5()na%vPw;6>`WvoCv0s`C}N&vXF97CR#V9*?M4FKC?|&p$tR|l zH;T&{pL5{_>Zdtb(8nIPSRjrd-ssZ@0T`U~&n>Ymg> z9M^rHlDHy{He&1fJuAvE=D~HFa>k+%{;+(Tg5hvu6 zbI+zU9hO3A8ka^pb*r<-N{>v*!wDT_-9%_kv@BTa|1!N&x&PGH+PUzlOcTY*#S8Dd ze~!J|6Fd;`fQCQ)V*vqoR2LznQI%%_WEMOY&O%Fp*r~@9k5=Q%eVUpw$_vT3qFt>G zK4WSe!(=!~k;lfH%k0`Gb4v`}w^p7_)hOSbOrxH9oIEG=7g1KLw=L8U7mXKDPpdAj zwHwtJwZl^XeiH2AeZFMc#GMS4ujmpaBTmO7=bzDv2R>7aJ3LUCb%nmNNA`+|9pF;f zVQE1)-r%CpPJ$d?{fmMdPG`!tR!rZRj-O_n{Hxnfc}go^ZesRxS@TdrK;XEfI9?iE zQ*8DS2Iek;dO9AmlBOQqIjj~2D+*qgB0C8V|I23N!JY4dcctN7t$6N%oUV)_tfNy6c zTA%tX5D0$hqluOIpu2cRz0SmMeqO9!;qC)Lj=ZK-UNneHT7@H6jge~vjL0n~qNA3W zPK>-c(Q<5*qe((DXHiqrLVlJwRgMoS|9B%`G*u_1pir}vhXhNwKnONpb(4Sm0TBy% zSCZQ(ARi+ewi*k%b}hl$!-GK(HdRPE!uUP&jZQ6cj41Vy)cIY&bQ` zQJ>mI474}a${no2&}kco4N`$ugNJ-vXC`3Dh&#JqU)GAvMj%*>5j=+|E%iXnpn9QR%A8#DMf?`hzBHwi2CfPhNPw;elX~7 zSjpil@j^vQdiARZF$qh^x~hca27jUX5^D~6ObBW;x9^MCoqT27;{pexlk?wkv}8G) zUN*WkIgfK-7Zk-wwJb+LiiQlk-Ja&`j7%!$QD2jiWl6S5;7yKrnMT>WsZKW)m5M=x zNB{!l6RI8UnPuW8h*-oT91S}bOIb^;*t~W#yOgbWEXq0`VdSC4Y25TQ!q)!*sU(8^A1NG120*RDx@Ng#!H)o+z> zNpMWDQHcfHxZWf!w-D`U|6#JeCXVkh{HSIf4SZnC@M?dHtE7Tw1kZVGb+!-T@~U>E zRsf>dE5PhiRK)CIf~KJ=-g8W-Ssi2 zP*sv7^85s{P^+m*()WsWiDRR!Tnv#9wV;=%6SdlgL(w&5ZAN=qKRoIud>gmSJ z0jlDHO>-Z$UQ;8lAx-{37OPWP4!A;coSNOTBZ!#smKrtE$e4*74}Xcz(vICq5u!mt ztB=y<(G*c!n%-%P#L`JBCUtruwQz2%u{de3vEKW<2geu56u}1=?17IFC0X93;pg@Q zfm(E!M}w}sGG11u|K<8wk<7vbNpHi0BoUhVX|yw;|GDs znKQ1)4Q`=^(_png^h;34SEPCHA22_8O57dp=~id3l*W+?oucVtwb9Zr;dG}x%@KP5 zzzXunhZOS-Z~j@W7P#n#KBn9sY{MGM0^w1YXDwgEiIu`eHJmOH2f49w*x1oN$wfKZ zX0EM~jc(tsNIsmdAYFVo;z?4SX)f)fbgDX)iHQXd7E|o-q%z!mI_iFYlX5niae41^ zchUWn5dUw-06{>$zwOR(?ikM{eJ=#*D#*gpL*;b3o62gA(T6w6aS&@RH1@cCjq|=J zxgI?n;$>^!g%$2K-2bGAmlRdWuvg>OFOh1NGWmEJjg&6r+zIdM_WNantW^RNCDFB# z5?8Pk2*35Db?ighf3_6)nVjDyB22%Uh0fy2Xx}M`WP8tv)#KC={ciI;IsmpYdU`Nj 
zt(+cI5g73N1#0<2T)^HJeUV3?ok1a-Le&L+2p6tRg#OT4TDXSTVFrH8heu`HX}Fbe z(UO0Z#E|%k%&$VoYH{a=S3mX?L#$~02u&a6gFLx5g3*6OE|FFq(Pv>%pUMPPHfyB z8h#AO_#W%nlK;q@U2f&ijx>%~08dY|)kr`r+h3+noUmbIG7h)mRSYO*vTJ z`*jx`p#!VgUF-A!wAel7NVYtaAB)yi$7GIwp7v< zddazT-5Ks4UGxuG5F1Cl;SH75oM2Hx>>5MenQ7#qr;UkuorSNo#GP1F8>OHi1|6<^ z6nmW*CK{roq>a!Ng-T_`Dhb7N#2EJ3#r!D^xlm&{7K;{cn$a-?(%ploF@p#c0zH)C zR-IzN)dj+!meuu)z!98Xl#UH~-7HpERQN_wrHwNMp9cD(!nwrUan)Q2Bdq8lh%D2W zu>`xx75^dHnHn|WTY!c+LK8w**1&M;}1I--j6VczUH8*;QoktAVFZX(qT#RoYDU!lZbAdMngKrC3D z*Mvm3mC}0Q66ZA#in$O*h6Q4RkS5Na$%VxeZsI&n1?V9iJ={Zoz=8;1f<1^@J*rAv zS)(3J-73OHjhW(dL8a}{q8g^f#oUD9bdfG<#+Vso0vVq#(u&rlRiBLD$ndV_x06J71t3}FbmJXc|lvZV+#Z+Z&Fqh-Rh-?bq zLBYuIL5!M-B`nPfcxc868kZj$7s;XJMt&L11j(Esr_RL2az>V5{@Djk8~Uk(!~8q(2yx+lVDmL?W9wfWkMj`J6Ps^bixR9LOTu}ryLh;hL-6n$e8pv=Un$^z; zf{EtjigBezo5UG!#GS6ZjF?Jea?O>TZpkdwX<)8cXqIH832F{-K6ctE*cfC&&o0B%t!${SroT`f@EfbHXgZX|z#8NC|a zG$P?~V$RudY2ldHnEsv|YT&TeB1VYC#}SjC)I>%4s@=ihnwpii=10RFYySbN%3(>` zvpA7|WFJoq4U1}1P0%F8=7)Hitat7xTpSP5al<=Q10>{wA+&%YV1g#F#AU84e7d6Q zjix>XCYCYWlu{`J-iqeY#B6-#$ME86M3h%XX_sE2S|rzY*lN{UWzr@W3VPU;+Jup; zOcADRqkdgd=G4w+V^UPbPKiiqb*x~y9-~T|p^8gKj_uMKOI#t{Har6hh=4rUg9$*w zI?w~8s_WqJEE!^KY5oOagdu&#Z3~4Asru(?l7{H)D`8HIRwgLg+1fP;2gN}}+f_tx zwaLS24sxj=OQ?`WjmH6b?To_8h}|vOx{n^kAAKH@LR@s ziP(BvhF+S3!AR78?gMh=sPai~Sgpy3*VUq_nnL5UjmOolDfB_a6JUoEC;|I2Z%im( zoaE7dfvDoRPP150@IH)nT$d4YFD`cHo+YoIfae0|$BphRMTi~|zyc_kK;eo&D%e9x zxP|B*DZGu1pIVzBP~EDPkJQ}N`AVhblEwpNuJx2yFPh=}>Qa9ghg;Cab5+EHJ~06? zPBO+>L-1;^rqQn~*VY1y7XX2V5CCE%f?$g9y0#NvJs9j59sjb>R4ZX$wpB-5*vtp( zmE9sQ9hWiNEw6rYhCN(B6ZnEW&_fWMK|a_37y#g1XjtO*XVnPYq)}Zsyws~qu4t6% zRt7P@YON+`-q1R&tBwW*&d8R2AoC!sML}^xxJ_URH4MTX=b>8j%tqzOzSJy3TkG`L#Oe;fZ1>CyJNb}AfSBn;77EYAI z6JU#Jxf-8E^BpI%n{1dtfLdo(z&_jqJ;*~(BQ80YGwY;}*rsj9|^L$V<)v5MQd#xEiS>H5}%qH-YOhW}d}S2L<$xCCMWWf8L>_EOx!eGz~y z1cJO`2_O)_E>yH$^9L$kcWgYc-1?62rpZsVXk8z64dJn13Bvp>Ad$iJWm&hl?A`!+ zHs1Plxe+tqyyA{x09*gNI+@7SQY&eofap%0=%vndWHU{TUs{LAX1U zi+>9!%3feSFT`!2?^H**+Zmf2_A@U5YpTDXZ+TdO`{DC3xGa0ct>Q$X{7a%#g&%CV z+h~X)aKR1`K%J**wTGz40On0#8~-?Ij{uzwRQwUOKs%3-#oIW1wOg+Gp5@li?Y2Kr z*71y4HG8)PPE*r!2NPeXOWxGF7FB!hq}Q*E1Y1X>vH&9+6_=n!T(RdYvFBKhAkKt1 zBnBw;&`aUEgwz5cL_ph6;!}UT!5kLR+J&D=y!QEdL5=r%7i;j+6dCi*=aApW4|_sF zPmYTG(grzb6AljDwyaTZYJ(0Zb2@-8G)g@6+3^;Auwmm^#1UV)t6IdSTh6Z>pH`2@ zj9g^sVzF5tpZ~ZnP1siiU_dKGKm>SWEv$L>BuEczNExa9r&yDGiJ?df2%du%U6-HO zbE}2?Zd0(_>EF4r6W#6;7ynqwy{81u!=-i8f|>4bBsG(J%p3Mq%6uFi_{|@F<7}O9 zITyXtiqBuaTaCynx6v|EIJCt`SI5M1V?YEvKr86OV0^ew_=7Fn*B&HFmi({7kH#Us z9UeW+2VZP@>-F>iLL!; zpbDSKoO zUE98c-3pc}Q5+uv5C1Ggz|cw`KYft{RhZm|00ux2_YUq9IO|B!qe(A3DzKczn?-3> zun>{v%8RvXF-#5HC~KA`b(i+-(1M_Hy*~nfop5hx-^-adcm5pu!J{fC-mFestJ2uA z4Gz`GeR~C-UBw4|*|KKKsLC^@WiMTJ+N%iVBcJJ2Wzd-_oqpB56fJ=c`v)$c-m*)n z&=Bz%;wF ze2_Q`YQe6z|M(REyv(N?i;{gWBSW&Sy2Iu{V_jSrNyQV%=6qgN&8vV_rG> zve#d;RsRSy)C36*SYnSYbK{(K-r3djbVW$zX?aWv=P^S*S|~-8VvSASiduD}s4@+* zsBahQY1y{m4G_PVK10bVv3By7jI;cd$!xn0m8ijG`;3$?zvfakuuA*#$}fTx_LKmJ zON_+=0`|ZFgHvH1ukQPJX4X1gKQ5Z6Scw{wp_AoIxwN3GEcWJ?7jhY)C%;>q^wLc? 
zTDPCIE!p#dFoxVr_(o8YLey z*Z+J*&>7;t97hV^LWConyiBfr2~=RKwlp1WW#mh%3L4AEG?bJ?*D32`-^37NVa+iZ?2zyG=p4~d32n%?MApIbpKENRe z$LI)VkOIvI7dRtnQAAb>iC=4QqL5A1jB5HDjq0rAKaqHGRv)_+qnfC!GM-U-M0z5& zTtcMiO;C+u@=i@KIH=sErzmyY2niWtHHG-GgfQAsp-$+nIdbi5L^B@CMpC=v5$1*i zE132!qy+I5M2IB<$1WTb0VMLSY-p5Gm+04}l*y)kO!{Py1Y)^HGO~ZhGN5RFMgOgX z*l3KD6J>U$wo6}1L}gFppX^HL6j<`Af*s4Fc0$=S-3c-yZo-K06hh5F4$B~4tI15( zLY6$SWsqds+8RZpr-MoAU@n@Zr7i}^gJjYhf*@Q3X33HD{L(pH+Ei_LbxKr%he*j{ zrKWPZD_Hh#X7&8yLuvt=VGg8#Sj6XndO1;xjt+$Hvn4TM@;Y#3bah7ij!CNWuC+L= zC}UwFC^?!c5xNwb1Zk0j@T8|djcq>!Vi7pCLNzw-4{T(U%1~Vd6m{+}aXJK;L2hx2 za)6I95`9Wmq;%2cbm>TBn&JU>WDv=5(R&458Cank!z0~{1PC4N?4e_X{LoV3Yxy6oUa^jC8s%PmR15Vr7{R% z>y%52E;OuL@If5&jAF2~6QQ>5&x+~Ol&qM>BOc|7Wt##CS&PKERi5=B3jK(aPTN~& zy)}*Ek}K*AA}rWx@PQ&lE?AawmT<)@lpHZE7c=%M!ya#?j~q}?yQZJT-n2gil2_B1 za}@o^%|(~Jl-(i($?XZ`76gbdJ*A2fVO7+(W3}InR!g}Ca>t(uxnf6XTiclhCpaT9 z>ux)jU$)`b!EFR?aLM^UA{qCibHyD-%LGS)cr>ulH4|~9$uW|?RR5@VBAz3=2F{!^ zmTOya)CMQpGjv_HCppydg>o#O!3ebz;xLtJJyM{#xGlmisT|F`H`=)7r6ib})op=U zU>O+)Ns{x(f+w;rCKWKr62WkoZ9LqoiVK4LJ7yeRtIU5!RzFiJ@kX*6K`(ERY}j0E z2U|pvtE>ga(EZv~bUIQ8mY0TpxeKN)rrDrK3U6dk0uSgbnxuXP$z+z(kAjm+E~XA8 zTSbVLC2VHr*p|rzNp(gzr&~HJ?jXF?bV6b_YXVPLBz~>1Y2<3xyUy;-k$s=H)&|p# z@G--Pis@;h7dsK&lSMm4OHP+($atYJyuz(wFE!*Mn zIz91Dh^U3eI)qwa)uR@pM7Ye65cPI;?c?)mO6*2jx!6k7AaL37GO1U zUHfbo<6K+IN0BYg5stScGZ9-_*8V3#vkA|L_UjI@mqWXqw_ooqP1_KpbT1+N9||is zD{CUwSP$NJt+K6fO#3+~>D*Tk>9^5iY7i$)KEFrzt^dF!%Y&E>TjG=ynX6dD67i@;Jua>6`Z)F}4yp<(J8k^DUAR7NPHTm_!;~ApU~`Fzo=IaD z2&?;O)@iq)X~$27@I3K~=eO%}AM1$lyy)i6^uN)ld3$!>s!ZRasSO%egKv}h8%{29 zsh;``6D`sm;W|U;B;2Kpb0v_CkLrVK$RH;k3vK@4UPz8(pvA@Lj>jlzK_X=Nnq#jP z#qY?-&=5lK$jVqY3i3wG=dNt~MyS|!E+PiZW`b_4Mo=O|2aT$#BO1@Q)K9rgj{R;; zSE8<~Dg=biMd}Xe&5$ci;^z5a?+71hDunCesQ+W)SdZhZMz5;IECxk|X`V z+%V+ZkPr8;Ceq?BB!1#8UT@a0j(=o}zc|nWG$#ai?dOC8^X#Y70*ImN5a$dIAtDNm zXfOlYZzFiHoKmmX?4|T<>Gb9=!!(Mopb!H!0>s43Dj3k(PD22D2iolD#ea_fvrsupL<1%)O?b};H5%C3H}{%CJIy280|FA2{M{Sx9^v~3B+Eu(Zp@U|m0B%&7y@i~r!1vSED zQUfCHC)XH`BXB7hD{mQ}t{Gp+5Ph!3OpnbN(f(q?_0S@2$na_;^6Sha_;?SGBJEB1 z2#~Nb#_VM)Iu6bH${2NUjS5C><`HeqEht)K8>i~rx-rSz@4tAZ1B*i6^yXFw^3;rl zX7o_#QmsjD#no(b#1OLGs&XRPiWK4F8MQ?87_oJ3u*1r50p*wirLN@7!> zWZv8`EH~%gwk)>>$RPqD5F#QVZU5$#5QqCV}5j7{`Na&DQWT_k`1EN6lJT=0vs7S8T6E_s*jK*v= zS+fvx&?dXH&6@Gus8RkJ5$ihYGw#DJgU1GkN8=vEyAG}rx#J1%GE6QGP51?znp3i# zbNHl1v#67YBF!hVqU=Nh-DEHjXHs^EBQ?TPC_u9!Ppt%}kJao`W>SqG_yM9;L@VXf zLmL7ZRV^t=2P%GYKl^h=DgVpOnzZT0vS@_xDnANPRLC9|bT<_f;w18da1%nQEj|`h zkMfKr!f`K^lNL?#FZodpm1R3k?8A8I_`=OFu2eTaLKbCFB+0D6tZbjOr83KENQnjL z^bik^CH#`^M~e|Gk5N6{Fg}xXKbf-q0CWhSlt9Z;N;?eeB!oN^l;R$AA?Q+DLgP(P z;v8?nF71R)Fw|SlLS7h)CfT%2WorLM&pLv#QgqcbmZ~k@RLm%_?`Ez~4Q?{85=Ntg z%Wk7cSFoN0NTT|IAN*k;?kDLei}1LE7%74w7f2Z;mEdafKMU?s_mDp|)gcXZ^`MJ- zh)v?Olps)yYYr(bF#mB?VeNyWf`jm*Dcl65Ol&m>(I*G-iFox3jcSJww#WLcIoJ;R zd?x!a0yu_)HLwplwpBP(F!EL{{3L@~kwqZ7l^NA=Jsa$Enw4k16pATWs}~hAS#=&RTUs(d0Ox%ekCMTKEww)CO4NZY||h zDGal;#tmNg=qC_|79&h`AhQMk6p9S&AotCCkjQ_wwF3O%V`T5T`UuD=Gxtc<$F3q>V60xzl!d*Q_OzJZ4sDXY5alYRC(Sr|KSEd*q=ygbcDqQH z6xi`{E0+|8ky-7D3%DWJGbCD#h;0pvSCdl9tc4NI>H79>F?d~*vwEdMeLWbC5yFJ{ zgN26nL9aJ^XSpZ4;@bXIjAt_V>H<1n#W0P~_)OF<%q52vKKvl%&WnZ1UFiArJFD-6}bgWKaXFvbk|^q+d}O7M`I z(G!dmijkLCA>4C-U1KBU*6}LZqP+Q=VOV~FR0OTectJ=-NBM#+_n&8&bi3|OKiEvd zkqh5A676J`?}IGz=%fzxB1v}7gx2RmvWMod+S-9Zx>{k1zrdtdZ}9L zXice^4vqoqTi4kAk{ffu`>(W#k?=%9=H@-t;Sb8~!Z3vGKDTOB&Il>QieW}k&M*yK zyC5y17x!(okFl&7LZhoWG7fc;kMYHABem_jzF!!e$rWk5ct;ZFxYL#9l>c{cPbjHW zd1nm*#pqHu-gy#v8b}8Bmf_z+rPl0PD8wGI2diF)AizU zPeJ0n^pj6{#(-K}NT0GJNLwP(d%X_=9Rh*Ld}OOI`I0S}#}8eiw~#3D%JZgVEY#Pf 
[GIT binary patch payload (base85-encoded) omitted]
z;gAB~RMrSjXsx=HvwlF?L_oiIFL#O+o|Ple@n(0t4A#?#a-CeKyjq1S!YiO^9A4B6 zItPSWw5yA2tt4LSP%=VD8IG-yM7L6w%b9VOvjeG-T-!r$5shL9tfNRnx-k|7)s#{C zqd6j?rIw;>WiV|nQ7W>?d(_lvnB3_|v)bDKY5oS}d5I-kRrRH(S|?#!jDPwP!zBoWyZ~)-wg&+9O=2P_v#knX)@= zjv>|1*m5*RzpGwub-UFzO7OR$Gcb{kLJQ&|H+oYhDRcQ)CFep{kkZwNJPg?~k%Vs> zXiC$}w98!x=I%}&EhGG%3W%k&x0P=A*#U8R<&%QNi~M_Ue4$zlsn(ZySlcSnpt?Z+ zT5y#uevevjtm6YC7_p`KoTVS^*9*`8ux}66@C!M;w@`1GnI4wUVxugR5%;*SLi8*X z9qIkx06Ak4=$1cQd({YdVn3;KIAW2lg)(lZ5#}CiI0CX_&P_*;v z!L3T|N++SkTt6F?-%N2We(wt@{t!>Uscm4^6kQ<94fr}v9_Nh^n^{RmoYH}_+ohwJ zE?sGw!Wj;=r%5jAW2qdRr5KiWRRf#PK ztQPIv!;O53J10&?M;wb|b-BeOH)$LYMzVCnbTdOr*ajmPgq0A2u4Q zJ5(OXa+Bt#p{&mt^vlpM5wt~gm5hxYV4~Mc>I0>?uytQ_AZuO7ziacs?g&AQmMH7h z4`W=iuG{`%Ov=hr(sHpZrJ$}fx^3^sJi1X`F%@#AFr&w}i&Ah!QJG`j1?AxDT?urS zZ)jI|sRVV3XF%lDDCFh;QB=iR=!87*qD!muaMA)Ds^vo)mwBw#XfW1lDIhtfhG8j0 zf}m!Cucvw`c!DY@II`DrrnUsOmufNxL^?NDW%X*X_En@uV>w8CPDDhwmTNa+S`eaq z&39QzmnpQgT4;cE$fkEr*mV#SFdAZB@zfAY0|M>ar1^M8PcOG1@r1?V$uuz>K@X9%|1*-HgqOvT|Bz$l4fgxCepXf{|xPqhTdZoyMdoyYs7A^=PgSDV) zk7HIJ7=)LnVmW93gN8GFKloyzqFcn8Y1AA9$E zzPNqkw+oDxk`VJYWK(rPdZ>Ry zHGsC%Rv`w60YqGccz=diRP?2Yz{p#D^rd$hP>x2R1%_=9eS6Sl@rVuyJQV}z-*O+?6J zSyXJ)!gPUwJIH8|*LRHx*Nj-Wh}Cy_TuU=zZ%Bxn&aOww$egnou35pbw7SE-fn zcZTr?j)VmNf7wG0^!POKHjq>$k7f}kYKDlnw-*RQ_fJN~JjeDnDhF3_<>6~h4GFf6i>3~yY;S1zIT`)BgTTu^E(MapKevR~PX*rZ1 z6>WL2mWOwbQKp5ESPS_$m;6|WVId5N=T^D79+kIikJp+RxR5=ElZW$piTRL|CXtS5 zk&*fTm)>U5D zo`VLJb5uJkf=0f@J_T8MG0K;>X_Qk4qh|?$#kqAo_mg*cY|AM>X;-8rv6bOqQ;vi^ z-td2q6f|x~4Z&a~Ska`|DW2oGL&V^c+vjvZVmwzM6O-_NaqX^`olg_@Dm;ex|7SJ;+q&ew1x27;V< ze4n|eoyn3y7@|FRsa)59rgM}K*d5}>qKf#U)W$=~DMtjUUvE@sIVyP?#eEDksV>U@ zZ5dT$;^kaeHkAmGq~3TA-|3`2MGH_0W??pF-MOUX_PlkDR&zRSpc;>R0UB_SV20_gc-p5CDX)t4r}QeQ4T`V% zYN!_$p|m$d5u_AWmwS)8V+NX`zgLR}#)BS8n(kALPz0d5sG?ODh7~KSq}q!ZYpQ(r zr87E@rvQ|69z2zHV)s7ioJB@=L`5a0M*SW2DWsSM>xtlSt= z;$siViapEfvrw}-LE5D~gpKS~SC7I7+^RKEm8}8yUiCAT;wrAY5QF8)mmT~6qu;lm zdC{50G@@nuu9b?m^Ln73$cYTaQPWrxi-DyT^O5Vjs6AvFV|(F-D?Z zm8ridgg)qSoYOkHG<`DZxW03$#`QCD8lq;Yh$G9YUfQD|r9>}@vWZZBCQEh$pyp>Hu;wP^sZ zSet;Ac%RI31#0P=N+`B{TN`?6z1RC+X3M7rDuTnpac;9|@*%h5yRUKEf`%obH;5^j zcyM~VrMQVeGwsd+9`*Lz1o|^N_<0(2EPr~y$_%S zv}8uSgNp=21|$Ti&6%uffC@*2{>ze8z07Ag@`V z+B`+S1(PiMw+pPXhYYzvjLv(Ee0QwC{3(b<61oeboP!KTBATM+oXE=mYCwJaQk>P&sVt#|P(qtTk2GV&&(PaYncwq1cuU7`u{$LxKVP(Y>z-Hpns!uKZJ zxarlzJ-W;DSxmCKI?80;2eEjNJc1lsaB7Eo#6v}@6xSf0f8d=3eGb)HQx32IQ%>bl zUgcMAEDmr2SB?Z&?&V)T<>T-U@xTsbZsun`=4n0-+u{g`Xpe_&XB{0U3wO1$+{1wg z%>DV=DS5WOyxC;j=Mi4v6F!-LYT>B8(-~f=5+c-%e%UpLut^N*ItYBO){=2~3#J1n zVW|$Co({kegLyrjqt1@gX$=q@$EDiN$lK6$a=|<<|D*(5<92Mtq57q646UXrwU+=g zxv-_(W}YFFe@YG%Yj@rOtqpW}B~va6VE*MuzztOn?Nbiz+|cD#9_HN8?AgBUYF_5w z4({Rp4x$rfLP@!J;osbKZ+Iut1UKLzndb@)U`XA>SKQzZzL+&_=q~ixJ1yad-prL5 z)JaU+0qW-%3df4u+6WetKledsdWYrQ$tUdM*8qU2?zm40pg~c) zTO~eIK~z#Lm4nRHoXa7%)QAIhOI>Hy07G>w>gx-oyRP0N!mgyHewFQzJqs}H*lzSk zPxR4lB_?T$$o|o`9$6*JL z+YTSN5DzDAS;HY3A~c-Jrkt(iJbfZxi*OmsCa>hzpf9M#>Fae-?2O#O1M`c^&Pr?5 zk-O?WW)?EM-uq~~HtR^8UQ-Mq_3R)DC%_KS@cU7Z1aTnc+Wz~~E((jF<_F|&{tM8+{oT*~+YkQWPv$m%I$-Cp-Rgfg9K2TG-)*^HNEeu43z!|Q z%Nm}i_pZJAjzXo!_W*GKfdPRGARJ7f|3F~_h6Wc7EEqB2M2Zy)5WtvGqeP1wJ!<3^ zGG7Fe6egTRSu&+b8aZrizz|c!Ocgb4!oWEr=S>hAJp63&qsdS;UyK&5(+6pnAxWSB zf!eeOj~Y8ipb&a=(N?Z^s_5)s;%U#IM9gaKsz{HaL4OXh9W%$(Tv(eN-Noz0N0dEq z{n7#4S8!m#Oa&eGn)nVSOQupa&J~$edS~mV?U^)e#zX@Pwg3sY{{;)!A!$FZJrYsw9kf@^PE`H;3-;d^1-<|VTu&7C z+^f$(2b_Th8)&#`5Sm{m#HGRsxryf;#~2}_E+wYYY!p!J`p&dTd~qhlXjEh|ncl+i zP6`q|^v`Awr9Kq1-2_G%fQLijMl&Z+R_`)m(o_q=mI2@{a5=tqfd_k-fsw4}h z2OQ$^p(eAmAWW0Igs35c%IxS&G}9!LO*b!sb0axFqBG6`FakiNkyIM9&n;I{vL!(M zB(%_muFMLpIFe#?DIc&{O7_TeO>u5Ln1NXB%Xc81Y3`zj%ze 
zJlr58+hm(lF1teJ=#Z|u+!8gpa=)cflNM7v4ZZ&C8&Ez6=!2KQcjCcsJ^%O%u)TQO z`_F;>6hyB~c+@fm*4}H|G@k=sJ^DdTFQ8%yXZR z_@uh(3p_P+(5s8?a#KYcO$t#-Ls$~1vpDTEQlE&_6zoK{GQrOnKovC#tVy_AP@iq4 zvI<*Yjn%navi)0C|1r9S8PacW=8HRCVPlN6)UE+T7i-Yl<+m;8GbYL+kIo9{cRI=U#h}OZITbmRT0Rd|iLE@@4cPW&dZL`R@BH_j~4A=$*x^ z-)J?DmY{$Cn|@jV1^Be6P0eZtl!@B5w!rsg4Ix*bndkATb3MZyfyjUG?<1M?QAXkJt;D>!@S6@!d)fggl?xXsEs$Ch{d@ ztKi-wIX3$J$&zRqn+OTDKmEPwfrkQRCkF_?QA(tMd%9%)R00*9A<&fxDN6+1#wZVF zk|nZ48!c0EDNLO$mrB8k2%YklyIdh|kfdbZ%GS0PF3OoloYie)n3KaHv1U8TA^fTZ zE6yP@3*6D+=AdJ(x5-M0O03ba(&?Qkt}|U)oE`1lm9P8w4nNGZXFl;G34QjnJ^nmn zeG*_H|AOM;K@Nl4ZQ!s_b4mhZA)5^~zIU%cg6|a5Qd!D!R0Ljmlb21ISvWh`$YpX+ zN0Y2%N>hnRWLA)rOyeX>gZ5IImQo|9WMEEpdcU3aw3YsgB~gCqC|l<2mcAUSSAtTK zkK)o&8~{|Wwqpe_6_qAnQ6^S-@V93wrdrWt;>`Ni&685ohC5tLanfo{BMBmAljvY< zY~vi`Z0oCby&`jtW1=cf4jkgy#y6Zn#ar}77}f}^ct%zm)A(yZ1HxxNk=M^eOxCiO zm8|j(R8UlOc0vek+-qI}2+`OGi?7q-il{+Rb`@hS7iFCJhET#HX_S$WOx*fNYE}4Y z|JAoC-QY^g6iNKmRJb$!Unr3)Q{&on066ujmZY1&k`i~imh|9Kl8VylI#q*PQXw{H zSrkp`im*d!DQ|z9)g)GmiKL66A2`G}vW~^9hAC?eDWzW^zLiRo5!@^e1K)GPmv*n= z11Zc?#lNaU9^q-nYSwWcUTiV3#ae6^9ShmW{u8qv4r7N!JmP%HQ9%WGg=bZ4P|!vp zMx%YA9Ns_+V_lR1S8n5av+Axlg-t&0Las99 zpPFfue)7mCmAl;Ln$o##P>NEwIj_8QLhvH|EO4Pi`XX zp?JlfJ?K*AQY1hxj>a~29eJd^+IM_UzCH$4ka2~rY!msmM`k2ify89>#!r#JRcVx= z%4RLwRJzPVw3V_4}*L(k+VzmI^RS8rdp{CNSG{|0LViwH3G zZg=(RU3_-^JM|dvo*dpa^NM|J6-zJJJX*F(^blh?lt>v0o;F}rT~WEXB8Vw_oOv%3 zorUiO$=|kksnYvCb2t3A>IP+%5n`u#t1{m7CJ2F8K6j_l{oecTDJ$`AU9|^QyK*+W z*dCrXgy(chZjP#u$V}!Nty#Rn2`gj#>_pkPP`CO;gMFn0n{Z?dgTYj?e2j z)|2R-HM*;k-VzbbY+11bg)dZ75AM@L9#e0vb%Zrv%}A9Gj+~pX|H@}>uw%5`?R)as z(avtGvEBYii|O0-5BIj;{{VO2JH{J3zRMb+m=rZb!1#kRgZnPSi!-4*IHI|lZbPA} zDmZ-$63C0Z$$JsDx;)UyJh&Pw&>Jlc6dlA#Kex&WLbxEEm^jk8w(JnNmOX*~3MKqKp!-7>S@3cTQ&KMB+%{G&TC zyQD1JGI?{i{NqCb0ziI)37kR101Tl-5jdkzz&ZT8Mk%~x|AIge5+*f#KknKbpE1FR zgQjNN7O^tD9(u#AV#CZEv~4;;6qK3GixSd1y=y5U+#tEtD-a+2H8n5=6u7mm_#Ks# zmoS4D;S&&DvmOWeg(lcAmt(mO<1n7fr)Z?cYP>>R$U+L45GuNh((1+?gE|+{M(H7j z@j#J1D7Dy9GE`ZdMDnW7V+xakzX5c-ddx#Ttg=F!yOglMK-|A8Qzk5fH##}QMN~wC z95Y-}6nlI`d|be;S|YQ^L>qw?P7E&~ngvJXA$SBulCwOvS*x=utBORwjeAG=A`-;F ziCQT|!J$Q4yv1S!I`+`PaX^CqjduUqb|=tj3`v zF$5?mZ9FKbluB)U$`rFot0YJ6!v=bAgmeTl_Zzg%dl&+PptedRdbGnkw2FIlNGsFF zE6aem^hdc|8oIR0j`)fG%YXpTlZ3>(rb5eOBEW`B%lSbxHM796!63x@k|J}8O;k%% zJV}m|jQF}HnNh1%!J&0DNfUfS7A(QIlEKk5m(pY>TN|E+nU`LhHR9U{;JFC-fK8sm zkcF`jcBsk;DK!d-gx>Vc-?R|loDkq7&f?6?3dzQ+RL|KpK;sPCIgmS91OeemdbyL<%*lE) zF=Zwy+WGCzdOQq8U$K(oWQlcpvx0N}v zz%08+#KU}CyVh&HeyPDVWxe(Iqc`22IF-{mh0{9K9wZP@zhuM$(E?NiY!kSSUD|HTrmmeldz4oV_;+Hk`{4sALD*gwkIHR?Z66CN0uo4OS_w zhi%9Q)cHzO$**F{AjvCHFa0r63Dd+ZOna;vHTek;BeBWij}w5s%UXniVIKGtI{sAG zb?r}@qmNz?oQJZFilSF~JqA_7Q<3;bt2xM5dZj;I%!;cx1BJ{Cs*DPNo^Qpcp2OAI z#3yv+wJ|!7acBl19fxw=&^U?*M+GU4 z9&{K(d;mxCxz1DUww2vdCHl9NR6l}EuWZ#;x4T)|;}2e=m$c=zdO{v)oY-@1!sI&7 zy0r+qH9%50f->L&zO{o=09;Q9T)`dO!ZloGc!zv7Ab({bspz{s?Ki;#)PcR$O*B}W z=)I&=qk78Oq+DB{ds{SGzMB&OTd)oApoYo7AP z*}X^(%WzpkLP>3*4G}Rcl3|fnYex3mQT4PtXAFQ<$Pi+nklI|*>b2eqiCU<2Ql)*q zsZ|FuvSXVp~oBI7ayBUx2~6`M?)1ID}{O)QiLrj-9j(G)c?!Cgc!NZ!=lk?I9y- zyx#pLW`&|!nOS*aEW@hVZ`d%R#0W{ah~!n?$9i7t0ZQvpm@f9>F9zc<7UMAn~&gq;Jz^2v8w}&&R9PjNyC+eR~Q7g$@jp|LGmy$-%hI9nCFdw+$l} zj!*v;;JMsaE2A^F|A>XaMc_)dX+W37d2Sn$-1LkEmt=cfLA2nym4Jh)nUj1**Gp)AI3NlL8~F|76g+qS`;V50)|#= zHO8{GChm>7Fb!hRm`Rr4P|)UP359g9hX;#>j=?e7Ks9N;S4QolbXMneW@qW~Rj5T{ z=X?h>RtGlbI(3qU2ZM{25tl>@j)v-rfZaM@VNkM_KTr+)nIIu1$YcoiXpg>RVn|$0wqQ{jF1?#SlkPWz4L4`4Ks^46k`NdcPM!d1 zWzN;TT?^zy|K2%=wq4DPid~+}+-(jR1k+Vq4%_uYu=SQ>)+;&>wG@A;nMt)09w z-FKLV!qw)k_Uf)S2JmC%QKMOjvg$6<7>nIiGG^*gR5Hw~BV`8el>e|Wfzc!a$5K^wr<^~1^ 
zv7VfD|1synI%^^2-lUc5_I7W14w+=2T5r%k@9RGM9z#zx17U#$PKz|J73gXO&py^; zGEHoNu|4>hJ^!5ss~wnunswm2_CLv2)EU6cS2xZyR7bwX3X=^pRJ0qF(iv6KNx ziilyJTEGTM_C|FyZqh&&k+br4QerKw8v!Q_Dbf~mIbLx9XY8KWX@*J4!*Xt%mGAcO z4(A2%t1)wo@AK}WI|kr z|1GH%FYv%aAce&&L;kQ#f7=VT>1tQ2z5&y^^ym55kGa4Utm=i31Xu0Ru`0vcJG(eXa%mkWM z_-^1LkGXd`m(g^uOeS^RS8+Xvdr(h#m1khk z7X6a1*a{97mk(ZAj{sG9d1NNLL0Y&3%5i)%1ib(G@f(7d{^wLY1ESY>P3J?oi>amG zW@i_}W*05kNH{l|h07R;OYOI`k>mpG_x#?J71z6h|`bKq{j)6x+6L z-?j}T*Uk|pa^d3jq}NlkcS!c}3;gI1Ny2jcm_*6d2%VEW(H7Q!aHVS*~Yh)TTl$|R5LBz@CCslq^|M|_K!U=BU;Cx-$ zWce30Z0_JWgJ)Bz>efpQ?nKHu-jlTMvTc`F@3?nyBIZFXIU}R?>j}YEBtJeN^?cCu z#y{uq{$1M6k;9Q}s+^+BE(W@iU@izMxS)f)90=hkpU{U1g#@<3U@)@H2B9&sP4iAX zB9d6*h$o^rVm8}=Ll8sGz=%*X9l>Lej5GFi)GMg$_)CvJ0vV)`L-JT8kw+ToBQ8oZ z*`$+8{&Lhb8+r%~iwD67pg}EyWR92EcnM%O!W2l%h6JJ)Q86H~sZ1?~BzD>rnz>Y` zO*!3}r%o@Z;8g@$ZDmwEO!3)52e@gKRaA8fyyU|3Pama<$Y;$5j_uL-W?M2vx+`i=)7iyukX^7{gm#(_@v8!#n>*}|j zFjq!1=dlW{Q{ z+iNUq3{uNk0S}?5y5J>>eQlM(*$~S(p0_B6Nmt__MNsl_RIN`uL@+L~1 zaCq!%hXJvywa-E;-B;6Motw4MMF%c+p?z2PUS){uHY9k7SIW8by<6dX?YalB?~`n@ z$7UJ6=l^Pe!G15Ro+>G_r-~2H44s#zg#>5w23v1&OtP98@r<}g} z=%@U0lafdYON3?0?{CK0d()7Z`P2LT^c&xTMn4k?vIYT~{4C)mP6Jldpfj*|{!;uEQIt_Ljg? z`7LmR`&-})CpxVe(07yD1WZC>1W2W7Pa&w>VCc{q%@wdVAe7bTK*uNFgoSh#z>N~S zfG%yVFc8^+h3fQzh1i7=F0r^>Cfdk6-rem2K zq#%cPFdQ8+k%hG5AInFu^RaJzl8odefz+e~iRc)P++_4@#=jTwZ;|)wA4ht&Gg2ml zSaU*P4jE|021>1hVap&`1ce+VS}=lOJE9ki#-d`tqJ%^%N$^nkKLFP6LmjzP8d>-j zxxGz>(xjo@ZfMOO{w;$z^dS}#cPq>^|IRQK%%%uhnLrKJr#*z++|qVJMbjzpE8Q&4 zpeEJDYhf!2VZ@^=$|#pVFve|}QlrAIN4=R5fCzWu*AR$4N+1vXUxYX~~%Mp<`rnqX_C~DZv4z7YVbIq5MycdYV6w zf}#yzpc7G9$<25!aGWZc8VziDzz=>cgin2h(89DoU>XaRBlX@(nb}Nd#>Iuvl%@=+ zS*mR!P@C9X<(zW)O>ow#6>PX7TCI7nCqgkc$&#Yy=qk9T%u|cAtVuo#dnp*s4l<+R zPTjbmgDB`gplf;N8Yi|QQreGz|3!lruI^(|1)V967yZ{}d$+sOik6O?>?HBFnpfAt zmZUP>BqW=Z(%ZIlNw`f}uVfQbjIK6(uEmmo@JcYArNgIU>Z%8AGt|BrwX@eWs%1{0 zm=MOGsVW)KEMAjSe!?bz0dj~{u>>0uengSp>!)?b1y)Uw3$gFD-KeT*PE;{Xtvo!c zTO%jL%!qSxMF8lpI777Yflq1e?5m!lCfGyCC}?O|5%fDQ*-CjuIo2 z4I3oQthYH<71p!%F&=P%mpk2Y_Qj)BZE#sT(%0fRwz8dV`O4D^SUjmn?sK2Yh8$#; zCMl#D6Vdgcm&_Z_uS)uo|6Y_mRMq5iX=hmWFfg54RJP`ae!~RhY!n-rDswDh!NA|p z$|q9lky5Kvenxl$i4Cs~biQE~D_P-YRvYejtu)x>TJt+A#=VMj{w1ulK*!5g)p&fI z+kidc+2Ec$I8V}%uo5iQHVYS~q~!W=mr7mS9RC)f8p_-$gvdNt$tz!|O|%efslT0EiFdYHwYylF_XeXKTfQkm%$(7u=HYw~e;vU=V# zpZg4|KO;5Jw}FaO|L@xyS;qCh03K$efg{w5w`A7@OR(AotLazn2Gj9C1hY!V9CYE+ ziN$nPf{+@JAPaX zV>esFx^AfWraf(BYrNP$g8It#>I$fr%#a?5*oX}(c7Lndq-zfMMm}jVbnNe-bUdxk zYa?u(B15icen^b$-LI_M7~agykRT5%JQaR*;IoUb!3~b^U?r7O4KFm{yoTt`QXI^U z$qo;bUDCYjz4Gsbngac{MHEa+6sli6>szn-MZn%TvS(QLwnz4eNi6rq8PPJ(?E5Nf z#6f&OQ;K!||KgwvJ=YY~Xy66!{Gg%ZY1mx$)^c!UnUk z-k@rq%=3B`@te=iRVR0d%+hO-i?YmiGaJU|5ADzLy{mRsBgWbxQ_Vd{L@A%)eH(~9 zoB9n5;^mp*LDN{x*W)Q%QJo&G$jSHd4G>9O7*v;rZ3f|tMuOni&213Gksfa#6U-Lm<^eNraE#LwkMMcnBvk_QS zWmork94&P~$mXeFX-70a&}#1Xlo^;UJn<_~FDM zTmE^TV;~{C1r-#QQwrV*>LdjsxLzHoo^dqH#4OD0jhODSVk@#@0p_5-)X`lr6ng++ z&wbx#O`Y_~*7GIZ5oThqAz?1~SRP?qih)^yHO&;dlgF(A7aB?S5tA3T0yGxM8fwSN zi3k(U(UcWoPTkvOT@1QqT{gZIyu_g>u@;T-Q@|J?cLfoOJ))`1<5Ydw<@i_d999G> z9>LK?ASxchEuu8a$*K_5L}dmCc3>qW;U;0CnXS`aHO@VyAnvH37om$R+*R4(+(lr7 z|A8>WnRMh#jifEC8Z1T`J64pdl~!rpA};1)Evg~l5h2nUAut-(5h@HfN)05@k^uC2}C8ZYd=qAi)Wo0300UC@Li`@MKl)o<*!&R#GF?{oYMFr1Hh%Jj$Q* z0O0=R-z1X6Hr}H$MxGQx9<6QOD3lZ+c8Ubfm)jJgA@&&s77k}nnl2G!K2C;0{v19w zWcfTKtO#S5xlvuMi!QojH)^3`{a)~?B$-{NO1@eU!dUUaN~TQSXC3+`Y3>UjWyEI&U339xXg(K=RS18U=3@GqCZ;A%GT<<# zVr{~vgi@%4&fbM$XohO&hH~iaRb_9s;{m3m9+GHg?&e|FO(&meXLYiTWOZi*T4!DMsCUi|U$%-OnqFm?XZAsg*EA<~G2~$?jo6Umd2yLV z#ae%=lYQobC1iw&A}B+hWGs4VVZJ1R4yc)499+U!e%_se{$hi!rgwp(|GS~2N!lqa 
zYNSW%X>L~Ehfc(w0xD3VT*~27P!{T-3aas)=%QMpa871$&e~%_twE-; zVLc*()g)0D&FJ>IL}cw~U6RYGj*E_(D%-3oce2GlqN-Z_=#M5IVu8n8$OVxa>2)z_ zu4q@NHYubb;A(lGD@^Gj#h;>rDQUtVX~EG-KI*d4qLvnCXRceBqG`7FoN?|U(RHPp zq9*yoDSINTf(|3K&g7*+E2Bbcq|zf=s_8jOPIj34e!$K^s!m7ifYKVa+|DcWMpm`2dQDm_G zs+2)c5zweq2Yj3%2}>bmmWwc_lV=IqYKq!3anP1fY|o$Jtw>9ZZ3 z2|A=kQL4H!ExVGaaJuWfDr?j}ZKLX8iG5ei;wM6;Q<~bVh1jFJ3hcW&T-a&>cDca9 zD(rUZXvV5-slx5VrYba1Y^PML#X@YW;;oLFPsf&Qu#zW)^`*msDg~*;Bb+QdQDz$r zYkkII@2u;7!eL8N?w9JOwYF&H>a6DCY`@NIx{j;SB`(ow?a!_zFQTi`MsCwOZM|A8 z)KYEizV7QjS-Ik%-)lQ{Bow# znr`TV9-NXc%!+H#W@gfQ>Hp#`>$a}v4)E;CuGCK1w#96#ij1@@xwGu>5*$5dRzj3vmCAE^r-jr5dq=-mdCmBj_&Q6Pxbp3NZN=@X&p&0Uxlu zmdPmXTiP`-mU^qK!K1!L=F%4@o+MxF47|8mM(q3aHN{Cm8Of$&Mdb6Fc5F9SQ0Ua(&re5ZddxR6i+e#Cg=h~ zaTEh^5m#{*`|KifYt@D&53}+#{;qrYtG9h*?r!lSsHFo>l;P>X9Luq+@^S~u?Jt9{ z^A2?JN^NNiof9#PqSJXZBa3pi?B+suE zGb$&C^0)?a>ZbE3cd|RTvnO}*ZgMgBU9mK4@hTsm1|Dakb#H=!uOh3mU~VHl!)pll zGV`|W8w0a27c)W|GYRLh#bIP!C0r3{o_bU&|06RnE&Hb&HL~z|H065pM{j003usEC zv=CD=D?jsAzVi^WCp*V86>o9?J25(w@+mK>PGf2-$16UE=so8%P~%!Z_w2^KAM@BERO4~J3>#R!)^-kCHI`_3tFKuAQbWZOgUkmnOH#NPQGEon-J{vVS#Yto% z^*?jO`+>4*-62i8MtePPLRU2go2rjubysWkR+DxH6Qe|H??pqGwmb_rv#g38^0rb) z43{)pKX6Br^c?9?4rl5(-*s@aG`2qW|6V8dU^6vx|FtLsc40fWVJr7?Kle~CwsBW? zEkm_rlP%^Y6IanNnG(*}hO=2LbjE&m!v14vH|#@Ob$grkX@mAxtM+O;+EZ~a4rlEU zJMusukA816ZUguYhjgu#t$in>aIZ8u^Xqk=^I%i=aZ7h}lns zLpgT)Hm%nsllX-`m zx|-iOnxFY(a}RnWwgR$@kI3n{JBt&6j`uEThX%eb$HSul+omjI1{+-!R2_71}~qVuMrPwfN0 z@_d4Aj7d6w)%V07`LwsY|D~HdxhpoQb9=V)^gAD>Jcqlek9(At`?-&~%7eF=r@6XQ z@`i|r_VD;0Y$tZYjojcn<6*3w+xo7vxAgXTcs}HnnbLT{!a{nyTR%GvH@!J$b6ltO z!__!&?`p-1HC;b>#&5jJbNte0y0>%sO_RJ|f4a$oImoAc*Ry<7TRXeM{m$}Uz0iyN ztk7NFdj|7p&+~o0w|IQN!_Wg7;+7iy)t^EP`;eD8kyq`6Xgn%AeX`@Yv>UjRU%kaI zIi>sf+q?YYhds7~e#l?Bm5;LYfqR6b{pf!@*tdP_cRtLUt-6nGYjDJ3;e3$lyx!M4 zA?|(O`#or@sw@P4{|$9<;UE5Zr4mmwd*mPbNqc6)(>CTt`}9-4jbr`gXTIihzL~Q< z>wkW?fBWeZcG-V9_^*DZcYn)oKe}gp24RvuDo&K)iFX zk6%G$&k$|{w~!ndm-Ar~JwOzaS4`08a0Bh_tSW{ucn*t|prC9J{ z$D|+=W*oV3|7FUTAH#B$c@$>JlR=06JQ{E5xOxAg)oWI@*t2_H#|{gYwpOpTx#rfr zo6H#BS%G5>Gn}{@cI*}vCxayrBu~!mL~jnA`t<78tvBZ~g?Sn8+rdl6KAyaG@#oQh z4}XNcEP(_KCOmjBV#JBG1heeedVd?6|6BShq^bZE5UZtnNKh%MqC&8#1$lDH!KWyc z!a|{@qDr_8<>0MD+zOP9wbV{C&BPN)T#YUj4-?Hr#7OJxMbBKsPqQ00<7~zlTjUW% z7=J90#S}#>(n#1qOj60mFTcd{JS=i~2MPG{ z0rIeG|Fn?^8*7sB#m2jaoC&1c0@Sahn?(8q&jASp?8B5?+7qgsa8OXfMVEL`(Fh-f zkWmbOl2St`2hDWJ5Jx0(MIq_zF-X)%^;At!WkmJVPkn6FvpI3xQC3w~ot4#6am}^P zBm3gjwb_8()V3x)*=4`46k-R#y^Gm!ZG)+~ zAOqvZ`Oe1!ZLKDpx{C0O2{Bh8(aam|eCen4^+EK{m5y3!yRA;Wakw{L+iTInPFrKp zV$YGr*SkjhVvJq?+Htp6e;Y|ngIAsE;_&{dS-$t~``YIL-!nTnOq&^B zm+kIX>f-XDOR}7*&1b0BbM7P4fkG1Yy|J`vfdu*|Fce~v2|BiqC z8z2F{2Rz{wZ-H_nR^6zUyyaj4WlXSO^PHDK=xwlN*t=f!n4!3Qw2OEoG}lQ$0y#si zFla3_)ccM`FV4XahNBCe4CR-@`c3VD3{)W7)E313$*zC_OxynictqO~F;q=NU=a=Y zL?QmLV-2K(1L4*~EdoV?7ren2gTOp7Vvvkwd=>_yLp=|!k&Pe}Vd~^3w~tf?P*o5g z`5I)pmY|Q1G;rSxV@SF`4pN6vn&0v4cSRrmZiS6`KI7TzBQk5!0W%Jq?%PqNady8}7Ey<^+j^Htn|1%_6FWVPP z8rE=_#1v#94~e=`vMzs^i=KJ9Rm%}36PHr)84FEk%%=G<2f{pNKW#|JVh$9UhTP$AO6War0@0D$ zbm*)s3BcM-G@>@D1*~pyQHp5Pq8ydzM|TlYk&bkuya>igQJPXRcx}k!nsEEOmMs?4UcHH8}Rr@>}pSVLiKd z#|eSJ1Sx<)4S@O8^a+%#WhH1H3yM~HD)O87+om%&Sy7Y92^n=A|5r8e8jZZRNg?%V z$X@~5jlTLd9EBC^Va1Wyz@CG#i+xBsAYu;5HrBC~wX9+>o7r+qHnN=c>}Mex+Rk29 zv!JEyX$hNJH}UmNt!0y5oiQKTiYuoi9oq>3mAs?wmbbn2ZBl>hR5pT#oLGwIw>pE> zT;8*2@&oI0x$4!hPB);{H7gL#n%#Vs?x3SwC_@kUFuLyauF`;$YX@7}b-07Q5P^q0 z+8f__z?Z)A6|HI2YuNnGmmdA)uYc!z-_hc?w5!d=fnSSYY?wB_K-q?5c+F&+Y7Irn#_9hGY}&z1~_)c3u8c84}Lif38zf=#Dg zZ)l?t=vIAFl#U~`mP!03ol2O}5N0tmVnJg~$9Tq_W+ewf{Wl)l(jxf@5tOx>xe=^d zx?@f9b)gIE_2JM6wVw5mdAQbuwyjoj9WQNVYhrH>B 
zls}TJOngVGp7!^@0j@z!8~2SUq?4wMmTEgXS*#6z^^%YLa3ym$3KZ94-BgpAEB`v! ztfe=xKko4dFFVD@9yfiv?d@|f2F`PycA78wR&>+C8z~xMtZ&_@M@7%5=c(KqIw-mhH~ZO} z>`z&b-Qobl%bPjw;Yxp)?&wwRd~WLO8ZFr8C{Lm_PXz9B`1j?7?{7D53*PW5O!VmZYfr+bB)t@oLpUccCY`nB=G z^Emt58`nQ~Luu}E!^^|_?q;;&wPuaJ!#wgiSKDLA{P_ke+WnJ%beItx`lL%8V7s6X z_1lCZ%3|s$h4r{Y_hK&rX>S3=DEDyB0h?+&Xhxop>Gz1q)rxQUj_FBWgYxVQrJAqq zM()96DrZ(i%1Ezxz%3y_&OYR%{$vgoh)(|A%le9_bplV%9MAEv;qP$JnsiY7&P*K4 z&3CBmFbGY+POked!;#zyBa#sR8Y}fq{}A8W>Q7cs?Bk^Fz)OM*qCkkdWxcwf#gmsq{hOa2(jV}QP;F6 zE#wVOT5t<*!9=)8MNCf%`7Rxt!IHj>=Y((wVMX$&X!Alv6F1S!UXXW^&YCKb&&Vwq z=t$;TNBa!zzNm2jQczPS5dde=l*R!J)$j~I%@=`D0ej&+xCcRkuhqH=12v%R;80Np zg@5{R((-T`VX70W@K`L4>9Xv^axtyiuDi_Q(I5@=Gz0x!aT~Vr#8?Zcj`01mjqrAm z&djeJzvdlBkseZR9$m3cP(}SZ|7!^|@B5zcEy7I-A;K6aV1w#z5 z7lVkOeqg9b~1(@PsRTR zY~0M@8Uzp&zl$y!a?9{f7);Fy`7g>e;U&{C!31zP5Md%I5+k{8E}JSYGqRjEav+M4 z>_RdbKeD>I&~ylL!{$Q}4-uf$4l&ItEBtRQ?d^105)reowW<#t%MS&uQWEFQF%wJw zyt2wjLH+Jg9k?*4%&}o4|Euw~4Iavm9e;4bmhm}{aXPuvLM%d~ z@-8nh!~%a$4;B#+#YHoILj!NfNj#!Gwa`InarE{g+$^*1wo(uUavw!eDwlFj<}*PP z11VK=2hG9S^zkco{|?S*P`>W-ag@~loK6a9^BT`mnE<3FeF82s)Gmt?_Oz}{)pR*U zD3GF4IyI*+NnlQ?^C=p^qEhrKs54H1FAm*j6q*i35mH5IRDE=CXt;5mn`H3U?A@ zB$Sjy#-GTv>&nzPE%FR;bxqB%Lv7~lI)G0_fLPyjIHP336=;M7=)^LuUWV6*Fg^k$s?_1&R-3G(8LU6&P0ZCyH6sfdH zc~Ik0bmL4?7v2>bl|i(4*1wq06hVpCrc~VwG$8TyUnemu|B5Ri17;0&R!3H08x}*m z)>qXKSPKFJD)v}0)>xZjWTi1X4JiiP_HE&IL_ZW+Um{#YkPF*ZQs(wg3GpIskrBCd zF=7^Lg=Z1jt!9%pXU(xl>osUOwRUz&q^PmZAlGO&u^meDUh~IDnl`kKcD@2MYH@IC z4YEt&|BV2*h=*8#YkgH$9ri-OmLlClFUNLR$<}tw_H5yHD?%!#@=!)SmQLfAL_^j^ zSL*p{~K zlt+X?+?o{S)YCR!^ZrT~&#9C=?CdCVaB zkPECbF8Cl^tC@old3w3WB_>HFz(&G4n33sA(1v5^PTQS!Rr>D5nRA2WS; zl!YUycJSGGldDtE0gW;xn~*+4#oe>xBsID!IM1$`kP$93?&@Cs{?7CyJp`T20yW=ReBQWKQdR5${H{EW`)^a z+ti^vOn0ooV3gOX^QGkBZ|s?brbPVJyY%KL?`m)ZR6H1${pX%PPv5?u*qv zFxXvaLWTkuk2Vdsx&idkL@8$ALsR&7B=Y7HXP@FHb(epf_yeJ;E1$c zp~Ktz4{cMQ@xw`a!BSJCAs2?v2{8Ma=aI!i@nLYb9WFlYK03|cIL-gC$ys2WRA8ts zowW};ig?u5!8Z#q)arTl`HTBqT$ZSNHX0Kso;D35yjG@b)uOx@AM*Ep1> z!~(g%jP4~;aQs)v^mkRdk`cXnU(D7IPeiC#tr;$PuNW(BFgn@HLOAD>8Iu29XgM@P z`vEqiB@CJ9)7j(Ide$6?mDmTD9nE?C5HMT--*?XHA?`SDRfv8?=%vYd9J0VF3$zZ| zfMVK2 zFD>&c5JL1sqqdCk_g!KpMK7V)Y}v;`Rwgq^IiXR|$HF1X&)emCp0L1J->`iM?@Qn@ z`&jq9A~!Kr-yvYK;0KFpn$+(kT15?1Dxd7?LzO}e)e)tnM=`}3t{JHzQ5 zHRWc`R@5(il-+->b|f)}*7kbLM zc7MEOUd1qpdNk~l8`Rp*eQ+TJ5RJ7l>xgtE$;avSC#yqw+K}?>hO@nbIbmcM9CMun zMM&3Z*DUUNLLp-isoJuHxjdH*=+F0kuLyA5U>#B7qqfLYk)#ba33$o)V?q6s5bD+A zJJqI(Z@&~vEAt13uT6q|`TZsH$GPKECWGITB6&GUmir?^u&A(H>W!uztlXJfni&Nr>x4YVD?!DVAak`zpLGtn&J1+y1et@Wsh_g1LYT1_ceZ!4Nq33!oyz98czD$3-z@IpF`MRwy?;_#TghJ17xVg)CsB=#xysJi ziq#H(wSM5E)5q`iMXM*xUCFzrg2S+tV|KpjHZx9v`e)Oh9c^5vyjYG*W309iXA2hV zHg1z)EU!bC965L0mS6RWIEih0@!qkgl4(3hPu&Yo@47>ZzCdLj(LV1k0{1D%eg;eX z+4rrD;)v*ZkyOAAZ>!1CuHf=^gF?&E_R-)=8gN~A8Kyls_TPLh{k#3@tkS5(kcPBypL6rD$ zcwr=in2QF5>z#fkA};Wod_}uJzwJ!I3YlM&=vP7E?32W_`*;&2EH3P6hEI)i3ubiR zy3&OMS!AMLUp%0IgbL1NQ7q?!GxDy5_1{`LeD@(@{l*(=1%n?nMQ*Zq<6k2T`Sp9KK3DfjtAwwinLz|LYIdSjL_eWE z*I7Dv1mypC--!uXG1#Q)5s}Lt3jUh*o*5FZaR2c2x3ur3EV_q1Z_w&2c+@GB{^yMv zWU+qN`LmB1_#}+0%d;rrm-yT*`^}_}v;9eF&ccF}B1OJs(;L3y@m@?Bn#s3ftjgsp zs2l$Dl`8f3$TQJeHa79h7ik-J2k85HMpD;~W0*8nL>Y+(<$5dfj~myY9o4C-eq~S1 z!mua=loYuoNvQLbZ3~UVdykd zb-`)Xe8u=enQ8XNKYMSIoV3LwzR7a_@)#d~U0L)g*hcY)M0bDj_({MYkqb>s8T^4H5@zNBYl-PeLn#axwl+=3Ld?r*j!AVqB+ap*% zHLn1qaW1!8?VQ-W;xFg&zO>gtBTGYmC_QR_TT{=_$?ft;sco;ByYbm;`wv~IMkn=2 zO|TLkuVj{aOy^lVLche1d`AEJ+5NUV{8UiN{_$%pZb^~?4m^Y}N$XQ?+@~0)i@6bL zK-;I|rX0_vLYa+6CS~{AnBs+5*T!N;h)(P30x2fd^<^SK7; zyZ=!)u|Z*T+IbQ$|FyGV4#OI3_xN|&=Yb@)+_fE-GeoVCy*s#hA&uF^fZM=PP(+*g 
zC~#_8SyouiP9B9~djURQ3^~mXoiUSDZI~Q&Ii5SLQjU^7uI=7(^4xj28PAxBhkEQ+ zPh7T(S9>|T6J0~pJ+x`}@fPA^#tF&z1@g|k$b?f9!03d6CjfRC%7mNv4dxVk zDbtO!l=o$tL=%O*V9C&mHP`UTenUS(g9QSX+F?VVeN|EF#Wq}V2pVIToh03Sbn-%` zW>?diCS{41Qh2ug^P5=LnXM^`hdt!#PXR6WRTAHNX4A)x$z7Mbf^hc%*8xXVf;uoS zZG7}3%Ezhq8FpW3uj*rJ#(u3_dYeT|`)HJRXbvwVK5o#(_ZQ+K!c%~wfhYx21N?`N9RdB|45wu!|X?u|8$KzBWpkr zVL-}F4}-j57B?4QHGsp}XOO`FbIpe{F+@e)`vuAn-Q|gK4HQCse!}A=f#gMZK4N;| zbRih@DlBMd!cXzOznZpxS|{-v3-^4@;MK%nsuTZr_rp!heKyVC$H<4jF%2o**Mp#f zJ`tH^3aAgt1@c=2IwF0AO+sC}c|$Z=Jz3rU%J=gn3Ja7EQr)?x3I3GVB(i$F$b z<~bw^McxlpX=e>rNboU5h4mvfKXkjykA%Z4!dhHBbL77i+zWLO3T>0~0tk?<4^Lmh z^SyjnBPzQ;Rary?>v+TDjT^g(DOhgqvhqI+i_{Kt2{wZw+#Y)*2dO{I3@jvuBnZ#N8a$ER?|sT z+`q5aVqw;n55*&PWeW?&=-BTaL8Aq}X>}{rO4H@FzU>K)840r^BUYf${gUhUMZgtb z%q><5DJP8#RqKhB6^ck)irujA)M4{~q7W6I2(J15wH4{l#g_1#)^z*K1(IwLRSlIZ ze~)AJG!P0Xm5LE_=OUzyBk2i}p{19#jJq8kcT^oWk9_G~Ydb|94}`JBYq$pmocrB! zi)Tzu+?F?saI-$N_&U`U_Fgw3Z8Eu$FM+!!S>t^|5;BY}^xG;SsYU=@36DxlXrjKQ zr)RjY_eG4IQ0SeWsDMewi8o0hJ+I62lfa!;{E3qFXp?zfC6~S@akfhy2oK|6BN}9j zI_gT`ard_jPcb?VwzEtpTzjwki*LL={%c))g+g4Fd#YS`oM26AV|eV6TT(b%+V9CE zt&JoG-Hh=>qCFk=UT5b&wC>kPQpPsYErSgv3(}#rnG^R?7zJOwD<|KVOJCXWyGJ6% z6Y5F+GU&1AI)!h4_Yqn{SsoYM~W>4*>($Q;?o6|stZsrdc6UP=!tfm|^!gI2f^FKcnq zpIR}>)XLYD&ph(t``f|{&6HGJmsP2j-%VDSp;flJF3dzEpGG8CkxoUr1;wu93Uwp;K)t!Fz@Xtv;C5rq@66@P(t}2apuX-qLf1F>B4|-+-UXi zzVi~(&{A!C^8RaTTVG_=Wy`p$Ke04q{hG`=^?(c2#x-B0VzuBew&1;++5KNCz!!d^ zqQ;|}X{R?Tr&3~M`pPLtt3(yUZ=Fh^D8jq*LpO!V5i9ODL{!y;3TN64kD@ z(W!-4zUF99xQsj}AQ`O`tfsyT9Mlbkno>4^-y6OmL&PdPPwl> zXm~sW9eGE-gGZzD$0UCuN50_|{Ux&w<5P15c7Uqfh+@&9Gqu1clr~2OJ+fJToaSJFI6? z?rEEbM1F8N#{bK!izKZ}eX3EDJ@mo;?+3Yc0R|N|Tdf70zw|`w#q{f|*-Ms`nhPU( z64UziFu!t-`34iXykcoh{mE-MTGl?*imjvoQdym*!)>cHfkT8}SZ~#-ZoZCtP(47J z&9fEf*I2ijpGEG~-uUoWv%ycqWI@8ki=ps(TCWa|kjU8-r-gp z5R3$u#s9sr{U`r7GJ0pAuCbaKI}!JYJ%y{rEIpT9{Hq05PgcozX-R*W0GE+mZ$5(d zy_17>#CuP@<}yTEP3+yY+1jksg4#zf@QSA&G~PM+?${oPO?EvR?$^O>Lf3GA|$cZbu(X9>9*>?+!Xu?y2p zlYA*e4#9Cj$}Nif{L(8A4Hj*`r!Yj^@5F16a-XZKFG`|Y zeH%T{h{$M8H)If>d5TzynOo(atL`CRz%Eu`wr7TBqeU~8(~V|lcl2tcs3nayiJ}&L zjVAcTN01vrJ~_;s@=|4ND6HqGf7rW+Q*#>JV_FgqiPKi2n^(V5)T?lIWUw!zx4YFR zNW)E*Q*bG@?6Wt(1rUtDSEN;duP4cr)Vk9g&Bo@FLZis>Irl|GTP4F*Mf4oCcSUUJ zlD0}y7>WOdKY|dm% zIeDiPdd-g`#*tL4selHug+^voZlvenrQ zo8~o=LgGd2zK}7EXw>>0<0+fE&X3QhWsXXJ*6nU{50)4VRA21Y8?7GfY#pf_Dzq%2 zn@7)^*o=4E%_$f0f&cZ%4y&5d->3MPh> zQ!mf5OMaWbs64o~)r+%o1=vXs)gBRSS?|5w+f`Sck>6Y8(%Me0>&A!d+wV86c-j%e z_g|>;r9!6gM@gm!PVFcUKEtM_@LFW2Ct$Bhq+oRGIJc)tpI_t7;#Uuzcg+P+j^Ue} z^F*A7x17JipjZYETwaZL-CDaoee#R!)O7yB407a(9h~?)_5LY3@+Ep~klCJo6ZU9! z#=!F3HYt-_nJ4@#YTFCu!C8GarKA7iELCkft8R_VV8UMp`Y7?9lQZ>cYH z7{4RQ{WS4Ncpl+aPMr@I$lSrcUG(s_Cyn`y@#EUi+zSnC+1el9+68cJknP`)@xEv? 
zSjyow${sWT3=cX3Ly|c|E&Gmhr@~O|Bn(?9&aIz+vG9z{X>2`DRU)(f%#VDv6*jFr zf6VlGbr-nYlf-C4#^Wi*f8}5dP6{xCKtou_JNQM>V1k##gfQ%iWC}_nx+l3d1r$0g zmNAm63CIQbjQZG~XxW~$w#3%LE?v!ARrhc$0IvR?qBiU#H7X)28^c)#tjq^385NC| zt}Q=4dP2J15k$^8b2<)k!m*A`M_HWME&YS)2(r9+;=z`)?bj$Z3L`k#ULDl2dFOHu zf>{@8WG25bqTEj`#dKf}G4w~tkW&kI`dJ`W2sq6#Mk9SG%*fW=gU4624@!7RWT19{ z;?CJKa!jl3Wm(##>-x0s3`(zb^#B4Wf=eUqh{nth&s+k$sypwXr$mpXXuuNABYqg8 z)%!?4S(Xk;;g@Ex5&?OwP1i1*ac|!g6Ux7P4tv8I!6-i7GlN zg}bD0Ly3E}f@N%hueKSVBwAqr6P2<*U+U$s00w?B8oi%F zuUE$W?IofU46HF>)6s0 zj(O3qegFZyY6Up7%D4ax?@{{dsoXP=htmL5ZUSkGRxDk#K)7?r-_MskPCH>!P{C-z z8>@i>m6p#UU8bOO=|$t(Y%44__lyD21n{{J9KfjJcwrYK)Nqg{Z=MZ3<1!$0sul87ZBARkAbU7K*C3aB_}}rQW&S@ z>Ez5P=2(Vl6uBEAim2bvtxf3VgQmdY)yqBA6HU0&u`%$M)Qg^U?-)$qQa=EJU4XDe z-t96|0s|`V^%IZ>WCL|uY*jV{H?A3+=(eMC7RK3bpH=Ccw5lR&TX1_eeN4b*D`nV$ zs-3;mnu*GFKAD{BC*5Bm`2nwSEdp{NCl+==Tg5~U#ac(ws4tR9!LXqB0i}WyjtE+J z$cXsSl#e2vFNGSF!IqqAH#n7g3mygJnMTZ_S|FG!Omy2dPF#wEUHXRd#2LU(YH(AGh0{XsRNW;af^|$S54HjUKqTU1CD9pZRm1Gdx1C zs48?DlL{Pyp?6DEupx$KaZTwE56|s9fU5fbYKvbY7`qG%10f-N(Ty z#hQ;!uH1@SAwg!X8zRF6SNjSx1h66@X30g0eFG5 z%D7|zSZRUjtrE52Z2^1WW{@8RJ>H;y!0yw~Yd^o1!C8Eiw?{N8AqHI_EA2`_8AbW* zEwvh-1R;*MZ1M1GuXy(rz@!$z2b%o&jkg@HGH_;bqk@rd&$9+H33k%z8v4P66sxXM0&g8uk5{pt@Oiu-$kVuLR5hgz+RS`Ggy zjlp+L!>5h;PbmL8$F$FLpZxJv<4K7Dz4wgxBGz_I(ogqxqPH=;3bgB2;bYLv{0D$h z#CyJXv*=nbFxZ6XDQNco+OA)henRcfJCO$vB|~mR?{Dg66HC8|iX$K&!uxL^8;=~s z)g=rhR|1&+yYSE9ej4*9^S7 zdBHIRSYU2cT@r%8MyaZ`?W!GxV&~_^anSUtm@u?Z` zsnqbP@b*BIKkTc6v#y+90vg5|_EI?}bMs2(n1T;$lwQLDM!A<@$iXfERXHIF*9ZCn zPsG4sPlHSHt5NQ|qKBJhd3S8}Ny`>J)a}vePg}nS{dAkBx8^^C8t+pAU`zH~o$x{J zWH{l45e%TTp7AoyT~WnBpvwS#j-JBKUBPv*(an|QmeAe#sKWJ~CEsf9J@cxEzx_e} zutFaRu&;{TlsrGUd1V4U3ZPZ>7U?r9S1