From 8536ade25237f756b8cb9c5f2c4afde2b38e8f12 Mon Sep 17 00:00:00 2001 From: Licsber Date: Wed, 2 Mar 2022 19:32:08 +0800 Subject: [PATCH] =?UTF-8?q?add=20=E5=8E=9F=E7=89=88=E4=BB=A3=E7=A0=81.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- sh/activate.sh | 6 + sh/deploy.sh | 11 + sh/run.sh | 0 sh/ser.sh | 17 + sh/train.sh | 3 + src/0_extract_video.py | 15 + src/1_rename_img.py | 8 + src/2_make_voc.py | 48 ++ src/3_train_ssd.py | 322 ++++++++++++++ src/4_eval_ssd.py | 219 ++++++++++ src/5_video_test.py | 42 ++ src/6_onnx_export.py | 107 +++++ src/7_onnx_test.py | 9 + src/8_merge_voc.py | 12 + src/config.py | 27 ++ src/main.py | 25 ++ src/vision/__init__.py | 0 src/vision/datasets/__init__.py | 0 src/vision/datasets/collation.py | 31 ++ src/vision/datasets/generate_vocdata.py | 128 ++++++ src/vision/datasets/open_images.py | 130 ++++++ src/vision/datasets/voc_dataset.py | 187 ++++++++ src/vision/nn/__init__.py | 0 src/vision/nn/alexnet.py | 60 +++ src/vision/nn/mobilenet.py | 52 +++ src/vision/nn/mobilenet_v2.py | 175 ++++++++ src/vision/nn/multibox_loss.py | 46 ++ src/vision/nn/scaled_l2_norm.py | 19 + src/vision/nn/squeezenet.py | 127 ++++++ src/vision/nn/vgg.py | 25 ++ src/vision/prunning/__init__.py | 0 src/vision/prunning/prunner.py | 235 ++++++++++ src/vision/ssd/__init__.py | 0 src/vision/ssd/config/__init__.py | 0 .../ssd/config/mobilenetv1_ssd_config.py | 32 ++ .../ssd/config/squeezenet_ssd_config.py | 21 + src/vision/ssd/config/vgg_ssd_config.py | 22 + src/vision/ssd/data_preprocessing.py | 62 +++ src/vision/ssd/fpn_mobilenetv1_ssd.py | 77 ++++ src/vision/ssd/fpn_ssd.py | 143 ++++++ src/vision/ssd/mobilenet_v2_ssd_lite.py | 71 +++ src/vision/ssd/mobilenetv1_ssd.py | 75 ++++ src/vision/ssd/mobilenetv1_ssd_lite.py | 80 ++++ src/vision/ssd/predictor.py | 73 ++++ src/vision/ssd/squeezenet_ssd_lite.py | 86 ++++ src/vision/ssd/ssd.py | 167 +++++++ src/vision/ssd/vgg_ssd.py | 76 ++++ src/vision/test/__init__.py | 0 src/vision/test/assets/000138.jpg | Bin 0 -> 87499 bytes src/vision/test/test_vgg_ssd.py | 49 +++ src/vision/transforms/__init__.py | 0 src/vision/transforms/transforms.py | 410 ++++++++++++++++++ src/vision/utils/__init__.py | 1 + src/vision/utils/box_utils.py | 293 +++++++++++++ src/vision/utils/box_utils_numpy.py | 238 ++++++++++ src/vision/utils/measurements.py | 32 ++ src/vision/utils/misc.py | 46 ++ src/vision/utils/model_book.py | 82 ++++ 58 files changed, 4222 insertions(+) create mode 100644 sh/activate.sh create mode 100644 sh/deploy.sh create mode 100644 sh/run.sh create mode 100644 sh/ser.sh create mode 100644 sh/train.sh create mode 100644 src/0_extract_video.py create mode 100644 src/1_rename_img.py create mode 100644 src/2_make_voc.py create mode 100644 src/3_train_ssd.py create mode 100644 src/4_eval_ssd.py create mode 100644 src/5_video_test.py create mode 100644 src/6_onnx_export.py create mode 100644 src/7_onnx_test.py create mode 100644 src/8_merge_voc.py create mode 100644 src/config.py create mode 100644 src/main.py create mode 100644 src/vision/__init__.py create mode 100644 src/vision/datasets/__init__.py create mode 100644 src/vision/datasets/collation.py create mode 100644 src/vision/datasets/generate_vocdata.py create mode 100644 src/vision/datasets/open_images.py create mode 100644 src/vision/datasets/voc_dataset.py create mode 100644 src/vision/nn/__init__.py create mode 100644 src/vision/nn/alexnet.py create mode 100644 src/vision/nn/mobilenet.py create mode 100644 src/vision/nn/mobilenet_v2.py 
create mode 100644 src/vision/nn/multibox_loss.py create mode 100644 src/vision/nn/scaled_l2_norm.py create mode 100644 src/vision/nn/squeezenet.py create mode 100644 src/vision/nn/vgg.py create mode 100644 src/vision/prunning/__init__.py create mode 100644 src/vision/prunning/prunner.py create mode 100644 src/vision/ssd/__init__.py create mode 100644 src/vision/ssd/config/__init__.py create mode 100644 src/vision/ssd/config/mobilenetv1_ssd_config.py create mode 100644 src/vision/ssd/config/squeezenet_ssd_config.py create mode 100644 src/vision/ssd/config/vgg_ssd_config.py create mode 100644 src/vision/ssd/data_preprocessing.py create mode 100644 src/vision/ssd/fpn_mobilenetv1_ssd.py create mode 100644 src/vision/ssd/fpn_ssd.py create mode 100644 src/vision/ssd/mobilenet_v2_ssd_lite.py create mode 100644 src/vision/ssd/mobilenetv1_ssd.py create mode 100644 src/vision/ssd/mobilenetv1_ssd_lite.py create mode 100644 src/vision/ssd/predictor.py create mode 100644 src/vision/ssd/squeezenet_ssd_lite.py create mode 100644 src/vision/ssd/ssd.py create mode 100644 src/vision/ssd/vgg_ssd.py create mode 100644 src/vision/test/__init__.py create mode 100644 src/vision/test/assets/000138.jpg create mode 100644 src/vision/test/test_vgg_ssd.py create mode 100644 src/vision/transforms/__init__.py create mode 100644 src/vision/transforms/transforms.py create mode 100644 src/vision/utils/__init__.py create mode 100644 src/vision/utils/box_utils.py create mode 100644 src/vision/utils/box_utils_numpy.py create mode 100644 src/vision/utils/measurements.py create mode 100644 src/vision/utils/misc.py create mode 100644 src/vision/utils/model_book.py diff --git a/sh/activate.sh b/sh/activate.sh new file mode 100644 index 0000000..184775f --- /dev/null +++ b/sh/activate.sh @@ -0,0 +1,6 @@ +export PYTHONPATH=/home/licsber/services/gxs/src +PY=/home/licsber/anaconda3/envs/gxs-36/bin/python + +hostname +echo $PYTHONPATH +echo $PY diff --git a/sh/deploy.sh b/sh/deploy.sh new file mode 100644 index 0000000..53f18ab --- /dev/null +++ b/sh/deploy.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env zsh + +SRC=/Users/licsber/Coding/Python/2021工训赛/ +DST=192.168.1.102:/home/licsber/gx/ +rsync -rtvzhP $SRC $DST --delete-after --exclude "venv/" --exclude "__pycache__/" --exclude "*.onnx" --exclude "*.engine" --exclude ".git/" + +SRC=/Users/licsber/datasets/工训赛/models/ + +cd "$SRC" || exit +rsync -rtvzhP ssd-mobilenet.onnx $DST +rsync -rtvzhP labels.txt $DST diff --git a/sh/run.sh b/sh/run.sh new file mode 100644 index 0000000..e69de29 diff --git a/sh/ser.sh b/sh/ser.sh new file mode 100644 index 0000000..655e238 --- /dev/null +++ b/sh/ser.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env zsh + +SRC=/Users/licsber/Coding/Python/2021工训赛/ +DST=ser:/home/licsber/services/gxs/ + +rsync -rtvzhP $SRC $DST --delete-after --exclude "venv/" --exclude "__pycache__/" + +SRC=/Users/licsber/datasets/工训赛/models/ +DST=ser:/datasets/工训赛/models +cd "$SRC" || exit + +rsync -tvzhP labels.txt $DST +rsync -rtvzhP $SRC/../voc/ $DST/../voc --delete-after +rsync -tvzhP mobilenet-v1-ssd-mp-0_675.pth $DST +rsync -tvzhP ser:/datasets/工训赛/models/mb1-ssd-Epoch-60-Loss-1.0784624990294962.pth /Users/licsber/datasets/工训赛/models/ +#rsync -tvzhP ssd-mobilenet.onnx $DST +#rsync -tvzhP mb1-ssd-Epoch-28-Loss-1.1538286421980177.pth $DST diff --git a/sh/train.sh b/sh/train.sh new file mode 100644 index 0000000..56acb95 --- /dev/null +++ b/sh/train.sh @@ -0,0 +1,3 @@ +source activate.sh + +$PY "$PYTHONPATH/train.py" "$@" diff --git a/src/0_extract_video.py b/src/0_extract_video.py new file mode 
100644 index 0000000..1e9f11f --- /dev/null +++ b/src/0_extract_video.py @@ -0,0 +1,15 @@ +import cv2 + +from config import VIDEO_PATH, IMG_PATH + +count = 0 +for avi in VIDEO_PATH.glob('*.avi'): + cap = cv2.VideoCapture(str(avi)) + while True: + suc, bgr = cap.read() + if not suc: + break + + save_name = IMG_PATH / f"{count}.jpg" + cv2.imwrite(str(save_name), bgr) + count += 1 diff --git a/src/1_rename_img.py b/src/1_rename_img.py new file mode 100644 index 0000000..9090973 --- /dev/null +++ b/src/1_rename_img.py @@ -0,0 +1,8 @@ +from config import IMG_PATH + +count = 0 +all_files = list(IMG_PATH.glob('*.jpg')) +all_files.sort() +for img in all_files: + img.rename(img.parent / f"{count:06d}.jpg") + count += 1 diff --git a/src/2_make_voc.py b/src/2_make_voc.py new file mode 100644 index 0000000..1d81b09 --- /dev/null +++ b/src/2_make_voc.py @@ -0,0 +1,48 @@ +import random + +from config import IMG_PATH, VOC_PATH, CLASSES + +random.seed(233) +annos = VOC_PATH / 'Annotations' +datasets = VOC_PATH / 'ImageSets' / 'Main' +images = VOC_PATH / 'JPEGImages' +annos.mkdir(exist_ok=True) +datasets.mkdir(parents=True, exist_ok=True) +images.mkdir(exist_ok=True) + +for img in IMG_PATH.glob('*.jpg'): + img.rename(images / img.name) + +for anno in IMG_PATH.glob('*.xml'): + anno.rename(annos / anno.name) + +labels = VOC_PATH / 'labels.txt' +labels.write_text('\n'.join(CLASSES)) + +train_file = datasets / 'train.txt' +val_file = datasets / 'val.txt' +train_val_file = datasets / 'trainval.txt' +test_file = datasets / 'test.txt' + +train_ratio = 0.7 +val_ratio = 0.1 + +total = list(annos.glob('*.xml')) +random.shuffle(total) +total_nums = len(total) + +train_num = int(total_nums * train_ratio) +val_num = int(total_nums * val_ratio) + +train = total[:train_num] +val = total[train_num:train_num + val_num] +test = total[train_num + val_num:] + +train = '\n'.join([i.name.rstrip('.xml') for i in train]) +val = '\n'.join([i.name.rstrip('.xml') for i in val]) +test = '\n'.join([i.name.rstrip('.xml') for i in test]) + +train_file.write_text(train) +val_file.write_text(val) +test_file.write_text(test) +train_val_file.write_text(train + '\n' + val) diff --git a/src/3_train_ssd.py b/src/3_train_ssd.py new file mode 100644 index 0000000..abb22aa --- /dev/null +++ b/src/3_train_ssd.py @@ -0,0 +1,322 @@ +import argparse +import itertools +import logging +import os +import sys + +import torch +from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR +from torch.utils.data import DataLoader, ConcatDataset + +from config import VOC_PATH, MODEL_PATH +from vision.datasets.voc_dataset import VOCDataset +from vision.nn.multibox_loss import MultiboxLoss +from vision.ssd.config import mobilenetv1_ssd_config +from vision.ssd.config import squeezenet_ssd_config +from vision.ssd.config import vgg_ssd_config +from vision.ssd.data_preprocessing import TrainAugmentation, TestTransform +from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite +from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd +from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite +from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite +from vision.ssd.ssd import MatchPrior +from vision.ssd.vgg_ssd import create_vgg_ssd +from vision.utils.misc import str2bool, Timer, freeze_net_layers, store_labels + +parser = argparse.ArgumentParser( + description='Single Shot MultiBox Detector Training With PyTorch') + +parser.add_argument("--dataset-type", default="voc", type=str, + help='Specify dataset type. 
Currently supports voc and open_images.') +parser.add_argument('--datasets', '--data', nargs='+', default=[str(VOC_PATH)], help='Dataset directory path') +parser.add_argument('--balance-data', action='store_true', + help="Balance training data by down-sampling more frequent labels.") + +parser.add_argument('--net', default="mb1-ssd", + help="The network architecture, it can be mb1-ssd, mb1-lite-ssd, mb2-ssd-lite or vgg16-ssd.") +parser.add_argument('--freeze-base-net', action='store_true', + help="Freeze base net layers.") +parser.add_argument('--freeze-net', action='store_true', + help="Freeze all the layers except the prediction head.") +parser.add_argument('--mb2-width-mult', default=1.0, type=float, + help='Width Multiplifier for MobilenetV2') + +# Params for loading pretrained basenet or checkpoints. +parser.add_argument('--base-net', help='Pretrained base model') +parser.add_argument('--pretrained-ssd', default=str(MODEL_PATH) + '/mobilenet-v1-ssd-mp-0_675.pth', type=str, + help='Pre-trained base model') +parser.add_argument('--resume', default=None, type=str, + help='Checkpoint state_dict file to resume training from') + +# Params for SGD +parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, + help='initial learning rate') +parser.add_argument('--momentum', default=0.9, type=float, + help='Momentum value for optim') +parser.add_argument('--weight-decay', default=5e-4, type=float, + help='Weight decay for SGD') +parser.add_argument('--gamma', default=0.1, type=float, + help='Gamma update for SGD') +parser.add_argument('--base-net-lr', default=0.001, type=float, + help='initial learning rate for base net, or None to use --lr') +parser.add_argument('--extra-layers-lr', default=None, type=float, + help='initial learning rate for the layers not in base net and prediction heads.') + +# Scheduler +parser.add_argument('--scheduler', default="cosine", type=str, + help="Scheduler for SGD. 
It can one of multi-step and cosine") + +# Params for Multi-step Scheduler +parser.add_argument('--milestones', default="80,100", type=str, + help="milestones for MultiStepLR") + +# Params for Cosine Annealing +parser.add_argument('--t-max', default=100, type=float, + help='T_max value for Cosine Annealing Scheduler.') + +# Train params +parser.add_argument('--batch-size', default=16, type=int, + help='Batch size for training') +parser.add_argument('--num-epochs', '--epochs', default=100, type=int, + help='the number epochs') +parser.add_argument('--num-workers', '--workers', default=0, type=int, + help='Number of workers used in dataloading') +parser.add_argument('--validation-epochs', default=1, type=int, + help='the number epochs between running validation') +parser.add_argument('--debug-steps', default=10, type=int, + help='Set the debug log output frequency.') +parser.add_argument('--use-cuda', default=True, type=str2bool, + help='Use CUDA to train model') +parser.add_argument('--checkpoint-folder', '--model-dir', default=str(MODEL_PATH), + help='Directory for saving checkpoint models') + +logging.basicConfig(stream=sys.stdout, level=logging.INFO, + format='%(asctime)s - %(message)s', datefmt="%Y-%m-%d %H:%M:%S") + +args = parser.parse_args() +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu") + +if args.use_cuda and torch.cuda.is_available(): + torch.backends.cudnn.benchmark = True + logging.info("Using CUDA...") + + +def train(loader, net, criterion, optimizer, device, debug_steps=100, epoch=-1): + net.train(True) + running_loss = 0.0 + running_regression_loss = 0.0 + running_classification_loss = 0.0 + for i, data in enumerate(loader): + images, boxes, labels = data + images = images.to(device) + boxes = boxes.to(device) + labels = labels.to(device) + + optimizer.zero_grad() + confidence, locations = net(images) + regression_loss, classification_loss = criterion(confidence, locations, labels, boxes) + loss = regression_loss + classification_loss + loss.backward() + optimizer.step() + + running_loss += loss.item() + running_regression_loss += regression_loss.item() + running_classification_loss += classification_loss.item() + if i and i % debug_steps == 0: + avg_loss = running_loss / debug_steps + avg_reg_loss = running_regression_loss / debug_steps + avg_clf_loss = running_classification_loss / debug_steps + logging.info( + f"Epoch: {epoch}, Step: {i}/{len(loader)}, " + + f"Avg Loss: {avg_loss:.4f}, " + + f"Avg Regression Loss {avg_reg_loss:.4f}, " + + f"Avg Classification Loss: {avg_clf_loss:.4f}" + ) + running_loss = 0.0 + running_regression_loss = 0.0 + running_classification_loss = 0.0 + + +def test(loader, net, criterion, device): + net.eval() + running_loss = 0.0 + running_regression_loss = 0.0 + running_classification_loss = 0.0 + num = 0 + for _, data in enumerate(loader): + images, boxes, labels = data + images = images.to(device) + boxes = boxes.to(device) + labels = labels.to(device) + num += 1 + + with torch.no_grad(): + confidence, locations = net(images) + regression_loss, classification_loss = criterion(confidence, locations, labels, boxes) + loss = regression_loss + classification_loss + + running_loss += loss.item() + running_regression_loss += regression_loss.item() + running_classification_loss += classification_loss.item() + return running_loss / num, running_regression_loss / num, running_classification_loss / num + + +if __name__ == '__main__': + timer = Timer() + + logging.info(args) + + if args.checkpoint_folder: + 
args.checkpoint_folder = os.path.expanduser(args.checkpoint_folder) + + if not os.path.exists(args.checkpoint_folder): + os.mkdir(args.checkpoint_folder) + + if args.net == 'vgg16-ssd': + create_net = create_vgg_ssd + config = vgg_ssd_config + elif args.net == 'mb1-ssd': + create_net = create_mobilenetv1_ssd + config = mobilenetv1_ssd_config + elif args.net == 'mb1-ssd-lite': + create_net = create_mobilenetv1_ssd_lite + config = mobilenetv1_ssd_config + elif args.net == 'sq-ssd-lite': + create_net = create_squeezenet_ssd_lite + config = squeezenet_ssd_config + elif args.net == 'mb2-ssd-lite': + create_net = lambda num: create_mobilenetv2_ssd_lite(num, width_mult=args.mb2_width_mult) + config = mobilenetv1_ssd_config + else: + logging.fatal("The net type is wrong.") + parser.print_help(sys.stderr) + sys.exit(1) + + train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std) + target_transform = MatchPrior(config.priors, config.center_variance, + config.size_variance, 0.5) + + test_transform = TestTransform(config.image_size, config.image_mean, config.image_std) + + logging.info("Prepare training datasets.") + datasets = [] + for dataset_path in args.datasets: + dataset = VOCDataset(dataset_path, transform=train_transform, + target_transform=target_transform) + label_file = os.path.join(args.checkpoint_folder, "labels.txt") + store_labels(label_file, dataset.class_names) + num_classes = len(dataset.class_names) + datasets.append(dataset) + + logging.info(f"Stored labels into file {label_file}.") + train_dataset = ConcatDataset(datasets) + logging.info("Train dataset size: {}".format(len(train_dataset))) + train_loader = DataLoader(train_dataset, args.batch_size, + num_workers=args.num_workers, + shuffle=True) + + logging.info("Prepare Validation datasets.") + val_dataset = VOCDataset(dataset_path, transform=test_transform, + target_transform=target_transform, is_test=True) + logging.info("Validation dataset size: {}".format(len(val_dataset))) + val_loader = DataLoader(val_dataset, args.batch_size, + num_workers=args.num_workers, + shuffle=False) + + logging.info("Build network.") + net = create_net(num_classes) + min_loss = -10000.0 + last_epoch = -1 + + base_net_lr = args.base_net_lr if args.base_net_lr is not None else args.lr + extra_layers_lr = args.extra_layers_lr if args.extra_layers_lr is not None else args.lr + + if args.freeze_base_net: + logging.info("Freeze base net.") + freeze_net_layers(net.base_net) + params = itertools.chain(net.source_layer_add_ons.parameters(), net.extras.parameters(), + net.regression_headers.parameters(), net.classification_headers.parameters()) + params = [ + {'params': itertools.chain( + net.source_layer_add_ons.parameters(), + net.extras.parameters() + ), 'lr': extra_layers_lr}, + {'params': itertools.chain( + net.regression_headers.parameters(), + net.classification_headers.parameters() + )} + ] + elif args.freeze_net: + freeze_net_layers(net.base_net) + freeze_net_layers(net.source_layer_add_ons) + freeze_net_layers(net.extras) + params = itertools.chain(net.regression_headers.parameters(), net.classification_headers.parameters()) + logging.info("Freeze all the layers except prediction heads.") + else: + params = [ + {'params': net.base_net.parameters(), 'lr': base_net_lr}, + {'params': itertools.chain( + net.source_layer_add_ons.parameters(), + net.extras.parameters() + ), 'lr': extra_layers_lr}, + {'params': itertools.chain( + net.regression_headers.parameters(), + net.classification_headers.parameters() + )} + ] + + 
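The parameter groups built above are what let the base network, the extra layers, and the prediction heads train at different learning rates. A minimal sketch of the underlying PyTorch behaviour, using throwaway nn.Linear modules as stand-ins that are not part of this project: a group that carries its own 'lr' keeps it, while a group without one inherits the optimizer's default lr, which is how base_net_lr and extra_layers_lr take effect below.

import torch

base, heads = torch.nn.Linear(4, 4), torch.nn.Linear(4, 2)  # stand-ins for base_net / prediction heads
demo = torch.optim.SGD(
    [{'params': base.parameters(), 'lr': 0.001},   # analogous to --base-net-lr
     {'params': heads.parameters()}],              # no 'lr' here -> falls back to the default below
    lr=0.01, momentum=0.9, weight_decay=5e-4)
print([g['lr'] for g in demo.param_groups])        # [0.001, 0.01]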
# load a previous model checkpoint (if requested) + timer.start("Load Model") + if args.resume: + logging.info(f"Resume from the model {args.resume}") + net.load(args.resume) + elif args.base_net: + logging.info(f"Init from base net {args.base_net}") + net.init_from_base_net(args.base_net) + elif args.pretrained_ssd: + logging.info(f"Init from pretrained ssd {args.pretrained_ssd}") + net.init_from_pretrained_ssd(args.pretrained_ssd) + logging.info(f'Took {timer.end("Load Model"):.2f} seconds to load the model.') + + net.to(DEVICE) + + criterion = MultiboxLoss(config.priors, iou_threshold=0.5, neg_pos_ratio=3, + center_variance=0.1, size_variance=0.2, device=DEVICE) + optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum, + weight_decay=args.weight_decay) + logging.info(f"Learning rate: {args.lr}, Base net learning rate: {base_net_lr}, " + + f"Extra Layers learning rate: {extra_layers_lr}.") + + if args.scheduler == 'multi-step': + logging.info("Uses MultiStepLR scheduler.") + milestones = [int(v.strip()) for v in args.milestones.split(",")] + scheduler = MultiStepLR(optimizer, milestones=milestones, + gamma=0.1, last_epoch=last_epoch) + elif args.scheduler == 'cosine': + logging.info("Uses CosineAnnealingLR scheduler.") + scheduler = CosineAnnealingLR(optimizer, args.t_max, last_epoch=last_epoch) + else: + logging.fatal(f"Unsupported Scheduler: {args.scheduler}.") + parser.print_help(sys.stderr) + sys.exit(1) + + logging.info(f"Start training from epoch {last_epoch + 1}.") + + for epoch in range(last_epoch + 1, args.num_epochs): + train(train_loader, net, criterion, optimizer, + device=DEVICE, debug_steps=args.debug_steps, epoch=epoch) + scheduler.step() + + if epoch % args.validation_epochs == 0 or epoch == args.num_epochs - 1: + val_loss, val_regression_loss, val_classification_loss = test(val_loader, net, criterion, DEVICE) + logging.info( + f"Epoch: {epoch}, " + + f"Validation Loss: {val_loss:.4f}, " + + f"Validation Regression Loss {val_regression_loss:.4f}, " + + f"Validation Classification Loss: {val_classification_loss:.4f}" + ) + model_path = os.path.join(args.checkpoint_folder, f"{args.net}-Epoch-{epoch}-Loss-{val_loss}.pth") + net.save(model_path) + logging.info(f"Saved model {model_path}") + + logging.info("Task done, exiting program.") diff --git a/src/4_eval_ssd.py b/src/4_eval_ssd.py new file mode 100644 index 0000000..fd03c0e --- /dev/null +++ b/src/4_eval_ssd.py @@ -0,0 +1,219 @@ +import argparse +import logging +import pathlib +import sys + +import numpy as np +import torch + +from config import MODEL_PATH, VOC_PATH, MODEL_NAME +from vision.datasets.open_images import OpenImagesDataset +from vision.datasets.voc_dataset import VOCDataset +from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor +from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor +from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite, create_mobilenetv1_ssd_lite_predictor +from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite, create_squeezenet_ssd_lite_predictor +from vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor +from vision.utils import box_utils, measurements +from vision.utils import str2bool, Timer + +parser = argparse.ArgumentParser(description="SSD Evaluation on VOC Dataset.") +parser.add_argument('--net', default='mb1-ssd', + help="The network architecture, it should be of mb1-ssd, mb1-ssd-lite, mb2-ssd-lite or vgg16-ssd.") 
+parser.add_argument("--trained_model", type=str, + default='/Users/licsber/datasets/工训赛/models/' + MODEL_NAME) + +parser.add_argument("--dataset_type", default="voc", type=str, + help='Specify dataset type. Currently support voc and open_images.') +parser.add_argument("--dataset", type=str, help="The root directory of the VOC dataset or Open Images dataset.", + default=str(VOC_PATH)) +parser.add_argument("--label_file", type=str, help="The label file path.", default=str(MODEL_PATH) + '/labels.txt') +parser.add_argument("--use_cuda", type=str2bool, default=True) +parser.add_argument("--use_2007_metric", type=str2bool, default=True) +parser.add_argument("--nms_method", type=str, default="hard") +parser.add_argument("--iou_threshold", type=float, default=0.5, help="The threshold of Intersection over Union.") +parser.add_argument("--eval_dir", default="eval_results", type=str, help="The directory to store evaluation results.") +parser.add_argument('--mb2_width_mult', default=1.0, type=float, + help='Width Multiplifier for MobilenetV2') +args = parser.parse_args() +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu") + + +def group_annotation_by_class(dataset): + true_case_stat = {} + all_gt_boxes = {} + all_difficult_cases = {} + for i in range(len(dataset)): + image_id, annotation = dataset.get_annotation(i) + gt_boxes, classes, is_difficult = annotation + gt_boxes = torch.from_numpy(gt_boxes) + for i, difficult in enumerate(is_difficult): + class_index = int(classes[i]) + gt_box = gt_boxes[i] + if not difficult: + true_case_stat[class_index] = true_case_stat.get(class_index, 0) + 1 + + if class_index not in all_gt_boxes: + all_gt_boxes[class_index] = {} + if image_id not in all_gt_boxes[class_index]: + all_gt_boxes[class_index][image_id] = [] + all_gt_boxes[class_index][image_id].append(gt_box) + if class_index not in all_difficult_cases: + all_difficult_cases[class_index] = {} + if image_id not in all_difficult_cases[class_index]: + all_difficult_cases[class_index][image_id] = [] + all_difficult_cases[class_index][image_id].append(difficult) + + for class_index in all_gt_boxes: + for image_id in all_gt_boxes[class_index]: + all_gt_boxes[class_index][image_id] = torch.stack(all_gt_boxes[class_index][image_id]) + for class_index in all_difficult_cases: + for image_id in all_difficult_cases[class_index]: + all_gt_boxes[class_index][image_id] = torch.tensor(all_gt_boxes[class_index][image_id]) + return true_case_stat, all_gt_boxes, all_difficult_cases + + +def compute_average_precision_per_class(num_true_cases, gt_boxes, difficult_cases, + prediction_file, iou_threshold, use_2007_metric): + with open(prediction_file) as f: + image_ids = [] + boxes = [] + scores = [] + for line in f: + t = line.rstrip().split("\t") + image_ids.append(t[0]) + scores.append(float(t[1])) + box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0) + box -= 1.0 # convert to python format where indexes start from 0 + boxes.append(box) + + scores = np.array(scores) + sorted_indexes = np.argsort(-scores) + boxes = [boxes[i] for i in sorted_indexes] + image_ids = [image_ids[i] for i in sorted_indexes] + true_positive = np.zeros(len(image_ids)) + false_positive = np.zeros(len(image_ids)) + matched = set() + for i, image_id in enumerate(image_ids): + box = boxes[i] + if image_id not in gt_boxes: + false_positive[i] = 1 + continue + + gt_box = gt_boxes[image_id] + ious = box_utils.iou_of(box, gt_box) + max_iou = torch.max(ious).item() + max_arg = torch.argmax(ious).item() + if max_iou 
> iou_threshold: + if difficult_cases[image_id][max_arg] == 0: + if (image_id, max_arg) not in matched: + true_positive[i] = 1 + matched.add((image_id, max_arg)) + else: + false_positive[i] = 1 + else: + false_positive[i] = 1 + + true_positive = true_positive.cumsum() + false_positive = false_positive.cumsum() + precision = true_positive / (true_positive + false_positive) + recall = true_positive / num_true_cases + if use_2007_metric: + return measurements.compute_voc2007_average_precision(precision, recall) + else: + return measurements.compute_average_precision(precision, recall) + + +if __name__ == '__main__': + eval_path = pathlib.Path(args.eval_dir) + eval_path.mkdir(exist_ok=True) + timer = Timer() + class_names = [name.strip() for name in open(args.label_file).readlines()] + + if args.dataset_type == "voc": + dataset = VOCDataset(args.dataset, is_test=True) + elif args.dataset_type == 'open_images': + dataset = OpenImagesDataset(args.dataset, dataset_type="test") + + true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(dataset) + if args.net == 'vgg16-ssd': + net = create_vgg_ssd(len(class_names), is_test=True) + elif args.net == 'mb1-ssd': + net = create_mobilenetv1_ssd(len(class_names), is_test=True) + elif args.net == 'mb1-ssd-lite': + net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True) + elif args.net == 'sq-ssd-lite': + net = create_squeezenet_ssd_lite(len(class_names), is_test=True) + elif args.net == 'mb2-ssd-lite': + net = create_mobilenetv2_ssd_lite(len(class_names), width_mult=args.mb2_width_mult, is_test=True) + else: + logging.fatal("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.") + parser.print_help(sys.stderr) + sys.exit(1) + + timer.start("Load Model") + net.load(args.trained_model) + net = net.to(DEVICE) + print(f'It took {timer.end("Load Model")} seconds to load the model.') + if args.net == 'vgg16-ssd': + predictor = create_vgg_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE) + elif args.net == 'mb1-ssd': + predictor = create_mobilenetv1_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE) + elif args.net == 'mb1-ssd-lite': + predictor = create_mobilenetv1_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE) + elif args.net == 'sq-ssd-lite': + predictor = create_squeezenet_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE) + elif args.net == 'mb2-ssd-lite': + predictor = create_mobilenetv2_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE) + else: + logging.fatal("The net type is wrong. 
It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.") + parser.print_help(sys.stderr) + sys.exit(1) + + results = [] + for i in range(len(dataset)): + print("process image", i) + timer.start("Load Image") + image = dataset.get_image(i) + print("Load Image: {:4f} seconds.".format(timer.end("Load Image"))) + timer.start("Predict") + boxes, labels, probs = predictor.predict(image) + print("Prediction: {:4f} seconds.".format(timer.end("Predict"))) + indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i + results.append(torch.cat([ + indexes.reshape(-1, 1), + labels.reshape(-1, 1).float(), + probs.reshape(-1, 1), + boxes + 1.0 # matlab's indexes start from 1 + ], dim=1)) + results = torch.cat(results) + for class_index, class_name in enumerate(class_names): + if class_index == 0: continue # ignore background + prediction_path = eval_path / f"det_test_{class_name}.txt" + with open(prediction_path, "w") as f: + sub = results[results[:, 1] == class_index, :] + for i in range(sub.size(0)): + prob_box = sub[i, 2:].numpy() + image_id = dataset.ids[int(sub[i, 0])] + print( + image_id + "\t" + " ".join([str(v) for v in prob_box]).replace(" ", "\t"), + file=f + ) + aps = [] + print("\n\nAverage Precision Per-class:") + for class_index, class_name in enumerate(class_names): + if class_index == 0: + continue + prediction_path = eval_path / f"det_test_{class_name}.txt" + ap = compute_average_precision_per_class( + true_case_stat[class_index], + all_gb_boxes[class_index], + all_difficult_cases[class_index], + prediction_path, + args.iou_threshold, + args.use_2007_metric + ) + aps.append(ap) + print(f"{class_name}: {ap}") + + print(f"\nAverage Precision Across All Classes: {sum(aps) / len(aps)}") diff --git a/src/5_video_test.py b/src/5_video_test.py new file mode 100644 index 0000000..da554c8 --- /dev/null +++ b/src/5_video_test.py @@ -0,0 +1,42 @@ +import cv2 + +from config import MODEL_PATH, VIDEO_PATH, LABEL_PATH, MODEL_NAME +from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor + +train_model = MODEL_PATH / MODEL_NAME +test_videos = VIDEO_PATH.glob('*.avi') + +class_names = [name.strip() for name in LABEL_PATH.read_text().split()] +net = create_mobilenetv1_ssd(len(class_names), is_test=True) +net.load(train_model) +predictor = create_mobilenetv1_ssd_predictor(net, nms_method='hard') + +count = 0 +for video in test_videos: + count += 1 + cap = cv2.VideoCapture(str(video)) + if count != 1: + continue + + while True: + suc, bgr = cap.read() + if not suc: + break + + rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) + boxes, labels, probs = predictor.predict(rgb, 5, 0.4) + for i in range(boxes.size(0)): + box = boxes[i, :] + if box[0] <= 0 or box[1] <= 0 or box[3] >= 240: + continue + + label = f"{class_names[labels[i]]}: {probs[i]:.2f}" + print(label) + + cv2.rectangle(bgr, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 4) + cv2.putText(bgr, label, (int(box[0]) + 20, int(box[1]) + 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2) + + cv2.imshow('bgr', bgr) + cv2.waitKey(1) + + break diff --git a/src/6_onnx_export.py b/src/6_onnx_export.py new file mode 100644 index 0000000..a502ebf --- /dev/null +++ b/src/6_onnx_export.py @@ -0,0 +1,107 @@ +import argparse +import os +import sys + +import torch.onnx + +from config import MODEL_PATH, MAC, MODEL_NAME +from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite +from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd +from vision.ssd.mobilenetv1_ssd_lite import 
create_mobilenetv1_ssd_lite +from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite +from vision.ssd.vgg_ssd import create_vgg_ssd + +parser = argparse.ArgumentParser() +parser.add_argument('--net', default='ssd-mobilenet', + help="The network architecture, it can be mb1-ssd (aka ssd-mobilenet), mb1-lite-ssd, mb2-ssd-lite or vgg16-ssd.") +parser.add_argument('--input', type=str, default=str(MODEL_PATH / MODEL_NAME), + help="path to input PyTorch model (.pth checkpoint)") +parser.add_argument('--output', type=str, default='', help="desired path of converted ONNX model (default: .onnx)") +parser.add_argument('--labels', type=str, default=str(MODEL_PATH) + '/labels.txt', help="name of the class labels file") +parser.add_argument('--width', type=int, default=300, help="input width of the model to be exported (in pixels)") +parser.add_argument('--height', type=int, default=300, help="input height of the model to be exported (in pixels)") +parser.add_argument('--batch-size', type=int, default=1, help="batch size of the model to be exported (default=1)") +parser.add_argument('--model-dir', type=str, default=str(MODEL_PATH), + help="directory to look for the input PyTorch model in, and export the converted ONNX model to (if --output doesn't specify a directory)") + +args = parser.parse_args() +print(args) + +# set the device +device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') +print('running on device ' + str(device)) + +# format input model paths +if args.model_dir: + args.model_dir = os.path.expanduser(args.model_dir) + + # find the checkpoint with the lowest loss + if not args.input: + best_loss = 10000 + for file in os.listdir(args.model_dir): + if not file.endswith(".pth"): + continue + try: + loss = float(file[file.rfind("-") + 1:len(file) - 4]) + if loss < best_loss: + best_loss = loss + args.input = os.path.join(args.model_dir, file) + except ValueError: + continue + print('found best checkpoint with loss {:f} ({:s})'.format(best_loss, args.input)) + + # append the model dir (if needed) + if not os.path.isfile(args.input): + args.input = os.path.join(args.model_dir, args.input) + + if not os.path.isfile(args.labels): + args.labels = os.path.join(args.model_dir, args.labels) + +# determine the number of classes +class_names = [name.strip() for name in open(args.labels).readlines()] +num_classes = len(class_names) + +# construct the network architecture +print('creating network: ' + args.net) +print('num classes: ' + str(num_classes)) + +if args.net == 'vgg16-ssd': + net = create_vgg_ssd(len(class_names), is_test=True) +elif args.net == 'mb1-ssd' or args.net == 'ssd-mobilenet': + net = create_mobilenetv1_ssd(len(class_names), is_test=True) +elif args.net == 'mb1-ssd-lite': + net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True) +elif args.net == 'mb2-ssd-lite': + net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True) +elif args.net == 'sq-ssd-lite': + net = create_squeezenet_ssd_lite(len(class_names), is_test=True) +else: + print("The net type is wrong. 
It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.") + sys.exit(1) + +# load the model checkpoint +print('loading checkpoint: ' + args.input) + +net.load(args.input) +net.to(device) +net.eval() + +if MAC: + dummy_input = torch.randn(args.batch_size, 3, args.height, args.width) +else: + dummy_input = torch.randn(args.batch_size, 3, args.height, args.width).cuda() + +# format output model path +if not args.output: + args.output = args.net + '.onnx' + +if args.model_dir and args.output.find('/') == -1 and args.output.find('\\') == -1: + args.output = os.path.join(args.model_dir, args.output) + +input_names = ['input_0'] +output_names = ['scores', 'boxes'] + +print('exporting model to ONNX...') +torch.onnx.export(net, dummy_input, args.output, verbose=True, input_names=input_names, output_names=output_names) +print('model exported to: {:s}'.format(args.output)) +print('task done, exiting program') diff --git a/src/7_onnx_test.py b/src/7_onnx_test.py new file mode 100644 index 0000000..82ad54f --- /dev/null +++ b/src/7_onnx_test.py @@ -0,0 +1,9 @@ +import onnx + +from config import MODEL_PATH + +model_path = MODEL_PATH / 'ssd-mobilenet.onnx' +model = onnx.load(str(model_path)) + +print(onnx.checker.check_model(model)) +print(onnx.helper.printable_graph(model.graph)) diff --git a/src/8_merge_voc.py b/src/8_merge_voc.py new file mode 100644 index 0000000..d84d5fe --- /dev/null +++ b/src/8_merge_voc.py @@ -0,0 +1,12 @@ +from config import IMG_PATH + +all_img = list(IMG_PATH.glob('*.jpg')) +all_img.sort() + +count = 0 +for img in all_img: + xml = IMG_PATH / img.name.replace('jpg', 'xml') + new_file_basename = f"{count:05d}" + xml.rename(xml.parent / (new_file_basename + '.xml')) + img.rename(img.parent / (new_file_basename + '.jpg')) + count += 1 diff --git a/src/config.py b/src/config.py new file mode 100644 index 0000000..1a4e15b --- /dev/null +++ b/src/config.py @@ -0,0 +1,27 @@ +import sys +from pathlib import Path + +from licsber.dl import DATASETS_ROOT + +CLASSES = ( + 'circle', + 'square', + 'huan', +) + +MODEL_NAME = 'mb1-ssd-Epoch-29-Loss-1.1743878581944633.pth' +MAC = sys.platform == 'darwin' + +VIDEO_PATH = DATASETS_ROOT / '工训赛/video' +VIDEO_PATH = Path(VIDEO_PATH) + +IMG_PATH = VIDEO_PATH.parent / 'labeled' +IMG_PATH.mkdir(exist_ok=True) + +MODEL_PATH = VIDEO_PATH.parent / 'models' +MODEL_PATH.mkdir(exist_ok=True) + +VOC_PATH = VIDEO_PATH.parent / 'voc' +VOC_PATH.mkdir(exist_ok=True) + +LABEL_PATH = MODEL_PATH / 'labels.txt' diff --git a/src/main.py b/src/main.py new file mode 100644 index 0000000..aed327d --- /dev/null +++ b/src/main.py @@ -0,0 +1,25 @@ +# noinspection PyUnresolvedReferences +import jetson.inference +import jetson.utils + +net = jetson.inference.detectNet(argv=[ + '--model=../ssd-mobilenet.onnx', + '--labels=../labels.txt', + '--input-blob=input_0', + '--output-cvg=scores', + '--output-bbox=boxes', +], + threshold=0.5) + +input = jetson.utils.videoSource('/dev/video0') + +count = 0 +while True: + count += 1 + img = input.Capture() + detections = net.Detect(img, overlay='box,labels,conf') + print("detected {:d} objects in image".format(len(detections))) + for detection in detections: + print(detection) + if count >= 100: + break diff --git a/src/vision/__init__.py b/src/vision/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/vision/datasets/__init__.py b/src/vision/datasets/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/vision/datasets/collation.py b/src/vision/datasets/collation.py new file mode 100644 index 
0000000..da8ae35 --- /dev/null +++ b/src/vision/datasets/collation.py @@ -0,0 +1,31 @@ +import numpy as np +import torch + + +def object_detection_collate(batch): + images = [] + gt_boxes = [] + gt_labels = [] + image_type = type(batch[0][0]) + box_type = type(batch[0][1]) + label_type = type(batch[0][2]) + for image, boxes, labels in batch: + if image_type is np.ndarray: + images.append(torch.from_numpy(image)) + elif image_type is torch.Tensor: + images.append(image) + else: + raise TypeError(f"Image should be tensor or np.ndarray, but got {image_type}.") + if box_type is np.ndarray: + gt_boxes.append(torch.from_numpy(boxes)) + elif box_type is torch.Tensor: + gt_boxes.append(boxes) + else: + raise TypeError(f"Boxes should be tensor or np.ndarray, but got {box_type}.") + if label_type is np.ndarray: + gt_labels.append(torch.from_numpy(labels)) + elif label_type is torch.Tensor: + gt_labels.append(labels) + else: + raise TypeError(f"Labels should be tensor or np.ndarray, but got {label_type}.") + return torch.stack(images), gt_boxes, gt_labels diff --git a/src/vision/datasets/generate_vocdata.py b/src/vision/datasets/generate_vocdata.py new file mode 100644 index 0000000..8e33909 --- /dev/null +++ b/src/vision/datasets/generate_vocdata.py @@ -0,0 +1,128 @@ +import os +import sys +import xml.etree.ElementTree as ET +from random import random + + +def main(filename): + # ratio to divide up the images + train = 0.7 + val = 0.2 + test = 0.1 + if (train + test + val) != 1.0: + print("probabilities must equal 1") + exit() + + # get the labels + labels = [] + imgnames = [] + annotations = {} + + with open(filename, 'r') as labelfile: + label_string = "" + for line in labelfile: + label_string += line.rstrip() + + labels = label_string.split(',') + labels = [elem.replace(" ", "") for elem in labels] + + # get image names + for filename in os.listdir("./JPEGImages"): + if filename.endswith(".jpg"): + img = filename.rstrip('.jpg') + imgnames.append(img) + + print("Labels:", labels, "imgcnt:", len(imgnames)) + + # initialise annotation list + for label in labels: + annotations[label] = [] + + # Scan the annotations for the labels + for img in imgnames: + annote = "Annotations/" + img + '.xml' + if os.path.isfile(annote): + tree = ET.parse(annote) + root = tree.getroot() + annote_labels = [] + for labelname in root.findall('*/name'): + labelname = labelname.text + annote_labels.append(labelname) + if labelname in labels: + annotations[labelname].append(img) + annotations[img] = annote_labels + else: + print("Missing annotation for ", annote) + exit() + + # divvy up the images to the different sets + sampler = imgnames.copy() + train_list = [] + val_list = [] + test_list = [] + + while len(sampler) > 0: + dice = random() + elem = sampler.pop() + + if dice <= test: + test_list.append(elem) + elif dice <= (test + val): + val_list.append(elem) + else: + train_list.append(elem) + + print("Training set:", len(train_list), "validation set:", len(val_list), "test set:", len(test_list)) + + # create the dataset files + create_folder("./ImageSets/Main/") + with open("./ImageSets/Main/train.txt", 'w') as outfile: + for name in train_list: + outfile.write(name + "\n") + with open("./ImageSets/Main/val.txt", 'w') as outfile: + for name in val_list: + outfile.write(name + "\n") + with open("./ImageSets/Main/trainval.txt", 'w') as outfile: + for name in train_list: + outfile.write(name + "\n") + for name in val_list: + outfile.write(name + "\n") + + with open("./ImageSets/Main/test.txt", 'w') as outfile: + for name 
in test_list: + outfile.write(name + "\n") + + # create the individiual files for each label + for label in labels: + with open("./ImageSets/Main/" + label + "_train.txt", 'w') as outfile: + for name in train_list: + if label in annotations[name]: + outfile.write(name + " 1\n") + else: + outfile.write(name + " -1\n") + with open("./ImageSets/Main/" + label + "_val.txt", 'w') as outfile: + for name in val_list: + if label in annotations[name]: + outfile.write(name + " 1\n") + else: + outfile.write(name + " -1\n") + with open("./ImageSets/Main/" + label + "_test.txt", 'w') as outfile: + for name in test_list: + if label in annotations[name]: + outfile.write(name + " 1\n") + else: + outfile.write(name + " -1\n") + + +def create_folder(foldername): + if os.path.exists(foldername): + print('folder already exists:', foldername) + else: + os.makedirs(foldername) + + +if __name__ == '__main__': + if len(sys.argv) < 2: + print("usage: python generate_vocdata.py ") + exit() + main(sys.argv[1]) diff --git a/src/vision/datasets/open_images.py b/src/vision/datasets/open_images.py new file mode 100644 index 0000000..f573003 --- /dev/null +++ b/src/vision/datasets/open_images.py @@ -0,0 +1,130 @@ +import copy +import logging +import os +import pathlib + +import cv2 +import numpy as np +import pandas as pd + + +class OpenImagesDataset: + + def __init__(self, root, + transform=None, target_transform=None, + dataset_type="train", balance_data=False): + self.root = pathlib.Path(root) + self.transform = transform + self.target_transform = target_transform + self.dataset_type = dataset_type.lower() + + self.data, self.class_names, self.class_dict = self._read_data() + self.balance_data = balance_data + self.min_image_num = -1 + if self.balance_data: + self.data = self._balance_data() + self.ids = [info['image_id'] for info in self.data] + + self.class_stat = None + + def _getitem(self, index): + image_info = self.data[index] + image = self._read_image(image_info['image_id']) + # duplicate boxes to prevent corruption of dataset + boxes = copy.copy(image_info['boxes']) + boxes[:, 0] *= image.shape[1] + boxes[:, 1] *= image.shape[0] + boxes[:, 2] *= image.shape[1] + boxes[:, 3] *= image.shape[0] + # duplicate labels to prevent corruption of dataset + labels = copy.copy(image_info['labels']) + if self.transform: + image, boxes, labels = self.transform(image, boxes, labels) + if self.target_transform: + boxes, labels = self.target_transform(boxes, labels) + return image_info['image_id'], image, boxes, labels + + def __getitem__(self, index): + _, image, boxes, labels = self._getitem(index) + return image, boxes, labels + + def get_annotation(self, index): + """To conform the eval_ssd implementation that is based on the VOC dataset.""" + image_id, image, boxes, labels = self._getitem(index) + is_difficult = np.zeros(boxes.shape[0], dtype=np.uint8) + return image_id, (boxes, labels, is_difficult) + + def get_image(self, index): + image_info = self.data[index] + image = self._read_image(image_info['image_id']) + if self.transform: + image, _ = self.transform(image) + return image + + def _read_data(self): + annotation_file = f"{self.root}/sub-{self.dataset_type}-annotations-bbox.csv" + logging.info(f'loading annotations from: {annotation_file}') + annotations = pd.read_csv(annotation_file) + logging.info(f'annotations loaded from: {annotation_file}') + class_names = ['BACKGROUND'] + sorted(list(annotations['ClassName'].unique())) + class_dict = {class_name: i for i, class_name in enumerate(class_names)} + data = [] + 
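The loop that follows relies on pandas groupby to turn the flat annotation CSV into one entry per image, with all of that image's boxes stacked into a single (num_boxes, 4) array. The sketch below shows that shape transformation on a made-up two-row table; the class names and coordinates are illustrative only, while the column names match the Open Images CSV read above.

import pandas as pd

demo = pd.DataFrame({
    "ImageID":   ["img0", "img0"],
    "ClassName": ["Apple", "Orange"],   # hypothetical classes, not from this dataset
    "XMin": [0.10, 0.50], "YMin": [0.20, 0.55],
    "XMax": [0.30, 0.80], "YMax": [0.40, 0.90],
})
for image_id, group in demo.groupby("ImageID"):
    boxes = group.loc[:, ["XMin", "YMin", "XMax", "YMax"]].values  # (num_boxes, 4), still normalized
    print(image_id, boxes.shape)                                   # img0 (2, 4)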
for image_id, group in annotations.groupby("ImageID"): + img_path = os.path.join(self.root, self.dataset_type, image_id + '.jpg') + if os.path.isfile(img_path) is False: + logging.error(f'missing ImageID {image_id}.jpg - dropping from annotations') + continue + boxes = group.loc[:, ["XMin", "YMin", "XMax", "YMax"]].values.astype(np.float32) + # make labels 64 bits to satisfy the cross_entropy function + labels = np.array([class_dict[name] for name in group["ClassName"]], dtype='int64') + # print('found image {:s} ({:d})'.format(img_path, len(data))) + data.append({ + 'image_id': image_id, + 'boxes': boxes, + 'labels': labels + }) + print('num images: {:d}'.format(len(data))) + return data, class_names, class_dict + + def __len__(self): + return len(self.data) + + def __repr__(self): + if self.class_stat is None: + self.class_stat = {name: 0 for name in self.class_names[1:]} + for example in self.data: + for class_index in example['labels']: + class_name = self.class_names[class_index] + self.class_stat[class_name] += 1 + content = ["Dataset Summary:" + f"Number of Images: {len(self.data)}", + f"Minimum Number of Images for a Class: {self.min_image_num}", + "Label Distribution:"] + for class_name, num in self.class_stat.items(): + content.append(f"\t{class_name}: {num}") + return "\n".join(content) + + def _read_image(self, image_id): + image_file = self.root / self.dataset_type / f"{image_id}.jpg" + image = cv2.imread(str(image_file)) + if image.shape[2] == 1: + image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) + else: + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + return image + + def _balance_data(self): + logging.info('balancing data') + label_image_indexes = [set() for _ in range(len(self.class_names))] + for i, image in enumerate(self.data): + for label_id in image['labels']: + label_image_indexes[label_id].add(i) + label_stat = [len(s) for s in label_image_indexes] + self.min_image_num = min(label_stat[1:]) + sample_image_indexes = set() + for image_indexes in label_image_indexes[1:]: + image_indexes = np.array(list(image_indexes)) + sub = np.random.permutation(image_indexes)[:self.min_image_num] + sample_image_indexes.update(sub) + sample_data = [self.data[i] for i in sample_image_indexes] + return sample_data diff --git a/src/vision/datasets/voc_dataset.py b/src/vision/datasets/voc_dataset.py new file mode 100644 index 0000000..133e5de --- /dev/null +++ b/src/vision/datasets/voc_dataset.py @@ -0,0 +1,187 @@ +import logging +import os +import pathlib +import xml.etree.ElementTree as ET + +import cv2 +import numpy as np + + +class VOCDataset: + + def __init__(self, root, transform=None, target_transform=None, is_test=False, keep_difficult=True, + label_file=None): + """Dataset for VOC data. + Args: + root: the root of the VOC2007 or VOC2012 dataset, the directory contains the following sub-directories: + Annotations, ImageSets, JPEGImages, SegmentationClass, SegmentationObject. 
+ """ + self.root = pathlib.Path(root) + self.transform = transform + self.target_transform = target_transform + + # determine the image set file to use + if is_test: + image_sets_file = self.root / "ImageSets/Main/test.txt" + else: + image_sets_file = self.root / "ImageSets/Main/trainval.txt" + + if not os.path.isfile(image_sets_file): + image_sets_default = self.root / "ImageSets/Main/default.txt" # CVAT only saves default.txt + + if os.path.isfile(image_sets_default): + image_sets_file = image_sets_default + else: + raise IOError("missing ImageSet file {:s}".format(image_sets_file)) + + # read the image set ID's + self.ids = self._read_image_ids(image_sets_file) + self.keep_difficult = keep_difficult + + # if the labels file exists, read in the class names + label_file_name = self.root / "labels.txt" + + if os.path.isfile(label_file_name): + classes = [] + + # classes should be a line-separated list + with open(label_file_name, 'r') as infile: + for line in infile: + classes.append(line.rstrip()) + + # prepend BACKGROUND as first class + classes.insert(0, 'BACKGROUND') + # classes = [ elem.replace(" ", "") for elem in classes] + self.class_names = tuple(classes) + logging.info("VOC Labels read from file: " + str(self.class_names)) + + else: + logging.info("No labels file, using default VOC classes.") + self.class_names = ('BACKGROUND', + 'aeroplane', 'bicycle', 'bird', 'boat', + 'bottle', 'bus', 'car', 'cat', 'chair', + 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', + 'sheep', 'sofa', 'train', 'tvmonitor') + + self.class_dict = {class_name: i for i, class_name in enumerate(self.class_names)} + + def __getitem__(self, index): + image_id = self.ids[index] + boxes, labels, is_difficult = self._get_annotation(image_id) + + if not self.keep_difficult: + boxes = boxes[is_difficult == 0] + labels = labels[is_difficult == 0] + + # print('__getitem__ image_id=' + str(image_id) + ' \nboxes=' + str(boxes) + ' \nlabels=' + str(labels)) + + image = self._read_image(image_id) + + if self.transform: + image, boxes, labels = self.transform(image, boxes, labels) + if self.target_transform: + boxes, labels = self.target_transform(boxes, labels) + + return image, boxes, labels + + def get_image(self, index): + image_id = self.ids[index] + image = self._read_image(image_id) + if self.transform: + image, _ = self.transform(image) + return image + + def get_annotation(self, index): + image_id = self.ids[index] + return image_id, self._get_annotation(image_id) + + def __len__(self): + return len(self.ids) + + def _read_image_ids(self, image_sets_file): + ids = [] + with open(image_sets_file) as f: + for line in f: + image_id = line.rstrip() + + if len(image_id) <= 0: + print('warning - found empty line in {:s}, skipping line'.format(image_sets_file)) + continue + + if self._get_num_annotations(image_id) > 0: + if self._find_image(image_id) is not None: + ids.append(line.rstrip()) + else: + print('warning - could not find image {:s} - ignoring from dataset'.format(image_id)) + else: + print('warning - image {:s} has no box/labels annotations, ignoring from dataset'.format(image_id)) + + return ids + + def _get_num_annotations(self, image_id): + annotation_file = self.root / f"Annotations/{image_id}.xml" + objects = ET.parse(annotation_file).findall("object") + return len(objects) + + def _get_annotation(self, image_id): + annotation_file = self.root / f"Annotations/{image_id}.xml" + objects = ET.parse(annotation_file).findall("object") + boxes = [] + labels = [] + is_difficult = 
[] + for object in objects: + class_name = object.find('name').text.strip() # .lower().strip() + # we're only concerned with clases in our list + if class_name in self.class_dict: + bbox = object.find('bndbox') + + # VOC dataset format follows Matlab, in which indexes start from 0 + x1 = float(bbox.find('xmin').text) - 1 + y1 = float(bbox.find('ymin').text) - 1 + x2 = float(bbox.find('xmax').text) - 1 + y2 = float(bbox.find('ymax').text) - 1 + boxes.append([x1, y1, x2, y2]) + + labels.append(self.class_dict[class_name]) + + # retrieve element + is_difficult_obj = object.find('difficult') + is_difficult_str = '0' + + if is_difficult_obj is not None: + is_difficult_str = object.find('difficult').text + + is_difficult.append(int(is_difficult_str) if is_difficult_str else 0) + else: + print("warning - image {:s} has object with unknown class '{:s}'".format(image_id, class_name)) + + return (np.array(boxes, dtype=np.float32), + np.array(labels, dtype=np.int64), + np.array(is_difficult, dtype=np.uint8)) + + def _find_image(self, image_id): + img_extensions = ( + '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.bmp', '.BMP', '.tif', '.TIF', '.tiff', '.TIFF') + + for ext in img_extensions: + image_file = os.path.join(self.root, "JPEGImages/{:s}{:s}".format(image_id, ext)) + + if os.path.exists(image_file): + return image_file + + return None + + def _read_image(self, image_id): + image_file = self._find_image(image_id) + + if image_file is None: + raise IOError('failed to load ' + image_file) + + image = cv2.imread(str(image_file)) + + if image is None or image.size == 0: + raise IOError('failed to load ' + str(image_file)) + + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + return image diff --git a/src/vision/nn/__init__.py b/src/vision/nn/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/vision/nn/alexnet.py b/src/vision/nn/alexnet.py new file mode 100644 index 0000000..1b2e8dd --- /dev/null +++ b/src/vision/nn/alexnet.py @@ -0,0 +1,60 @@ +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +# copied from torchvision (https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py). +# The forward function is modified for model pruning. + +__all__ = ['AlexNet', 'alexnet'] + +model_urls = { + 'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth', +} + + +class AlexNet(nn.Module): + + def __init__(self, num_classes=1000): + super(AlexNet, self).__init__() + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + x = self.features(x) + x = x.view(x.size(0), -1) + x = self.classifier(x) + return x + + +def alexnet(pretrained=False, **kwargs): + r"""AlexNet model architecture from the + `"One weird trick..." `_ paper. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = AlexNet(**kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['alexnet'])) + return model diff --git a/src/vision/nn/mobilenet.py b/src/vision/nn/mobilenet.py new file mode 100644 index 0000000..6216696 --- /dev/null +++ b/src/vision/nn/mobilenet.py @@ -0,0 +1,52 @@ +# borrowed from "https://github.com/marvis/pytorch-mobilenet" + +import torch.nn as nn +import torch.nn.functional as F + + +class MobileNetV1(nn.Module): + def __init__(self, num_classes=1024): + super(MobileNetV1, self).__init__() + + def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True) + ) + + def conv_dw(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.ReLU(inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + self.model = nn.Sequential( + conv_bn(3, 32, 2), + conv_dw(32, 64, 1), + conv_dw(64, 128, 2), + conv_dw(128, 128, 1), + conv_dw(128, 256, 2), + conv_dw(256, 256, 1), + conv_dw(256, 512, 2), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 1024, 2), + conv_dw(1024, 1024, 1), + ) + self.fc = nn.Linear(1024, num_classes) + + def forward(self, x): + x = self.model(x) + x = F.avg_pool2d(x, 7) + x = x.view(-1, 1024) + x = self.fc(x) + return x diff --git a/src/vision/nn/mobilenet_v2.py b/src/vision/nn/mobilenet_v2.py new file mode 100644 index 0000000..f685d50 --- /dev/null +++ b/src/vision/nn/mobilenet_v2.py @@ -0,0 +1,175 @@ +import math + +import torch.nn as nn + + +# Modified from https://github.com/tonylins/pytorch-mobilenet-v2/blob/master/MobileNetV2.py. +# In this version, Relu6 is replaced with Relu to make it ONNX compatible. +# BatchNorm Layer is optional to make it easy do batch norm confusion. 
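The comment above is the reason conv_bn and conv_1x1_bn below take an onnx_compatible flag. As a quick sanity check of that claim in your own environment, the snippet below attempts to export a tiny model with each activation; recent exporters map ReLU6 to a Clip node, while older ones raised on it. The layer sizes and input shape are arbitrary.

import io

import torch
import torch.nn as nn

for act in (nn.ReLU(inplace=True), nn.ReLU6(inplace=True)):
    model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), act)
    buf = io.BytesIO()
    torch.onnx.export(model, torch.randn(1, 3, 32, 32), buf)   # raises if the op is unsupported
    print(type(act).__name__, "exported OK,", len(buf.getvalue()), "bytes")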
+ + +def conv_bn(inp, oup, stride, use_batch_norm=True, onnx_compatible=False): + ReLU = nn.ReLU if onnx_compatible else nn.ReLU6 + + if use_batch_norm: + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + ReLU(inplace=True) + ) + else: + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + ReLU(inplace=True) + ) + + +def conv_1x1_bn(inp, oup, use_batch_norm=True, onnx_compatible=False): + ReLU = nn.ReLU if onnx_compatible else nn.ReLU6 + if use_batch_norm: + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ReLU(inplace=True) + ) + else: + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + ReLU(inplace=True) + ) + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio, use_batch_norm=True, onnx_compatible=False): + super(InvertedResidual, self).__init__() + ReLU = nn.ReLU if onnx_compatible else nn.ReLU6 + + self.stride = stride + assert stride in [1, 2] + + hidden_dim = round(inp * expand_ratio) + self.use_res_connect = self.stride == 1 and inp == oup + + if expand_ratio == 1: + if use_batch_norm: + self.conv = nn.Sequential( + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + nn.BatchNorm2d(hidden_dim), + ReLU(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ) + else: + self.conv = nn.Sequential( + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + ReLU(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + ) + else: + if use_batch_norm: + self.conv = nn.Sequential( + # pw + nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), + nn.BatchNorm2d(hidden_dim), + ReLU(inplace=True), + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + nn.BatchNorm2d(hidden_dim), + ReLU(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ) + else: + self.conv = nn.Sequential( + # pw + nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), + ReLU(inplace=True), + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + ReLU(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + ) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__(self, n_class=1000, input_size=224, width_mult=1., dropout_ratio=0.2, + use_batch_norm=True, onnx_compatible=False): + super(MobileNetV2, self).__init__() + block = InvertedResidual + input_channel = 32 + last_channel = 1280 + interverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1], + ] + + # building first layer + assert input_size % 32 == 0 + input_channel = int(input_channel * width_mult) + self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel + self.features = [conv_bn(3, input_channel, 2, onnx_compatible=onnx_compatible)] + # building inverted residual blocks + for t, c, n, s in interverted_residual_setting: + output_channel = int(c * width_mult) + for i in range(n): + if i == 0: + self.features.append(block(input_channel, output_channel, s, + expand_ratio=t, use_batch_norm=use_batch_norm, + onnx_compatible=onnx_compatible)) + else: + self.features.append(block(input_channel, 
output_channel, 1, + expand_ratio=t, use_batch_norm=use_batch_norm, + onnx_compatible=onnx_compatible)) + input_channel = output_channel + # building last several layers + self.features.append(conv_1x1_bn(input_channel, self.last_channel, + use_batch_norm=use_batch_norm, onnx_compatible=onnx_compatible)) + # make it nn.Sequential + self.features = nn.Sequential(*self.features) + + # building classifier + self.classifier = nn.Sequential( + nn.Dropout(dropout_ratio), + nn.Linear(self.last_channel, n_class), + ) + + self._initialize_weights() + + def forward(self, x): + x = self.features(x) + x = x.mean(3).mean(2) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() diff --git a/src/vision/nn/multibox_loss.py b/src/vision/nn/multibox_loss.py new file mode 100644 index 0000000..32049e5 --- /dev/null +++ b/src/vision/nn/multibox_loss.py @@ -0,0 +1,46 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..utils import box_utils + + +class MultiboxLoss(nn.Module): + def __init__(self, priors, iou_threshold, neg_pos_ratio, + center_variance, size_variance, device): + """Implement SSD Multibox Loss. + + Basically, Multibox loss combines classification loss + and Smooth L1 regression loss. + """ + super(MultiboxLoss, self).__init__() + self.iou_threshold = iou_threshold + self.neg_pos_ratio = neg_pos_ratio + self.center_variance = center_variance + self.size_variance = size_variance + self.priors = priors + self.priors.to(device) + + def forward(self, confidence, predicted_locations, labels, gt_locations): + """Compute classification loss and smooth l1 loss. + + Args: + confidence (batch_size, num_priors, num_classes): class predictions. + locations (batch_size, num_priors, 4): predicted locations. + labels (batch_size, num_priors): real labels of all the priors. + boxes (batch_size, num_priors, 4): real boxes corresponding all the priors. 
+ """ + num_classes = confidence.size(2) + with torch.no_grad(): + # derived from cross_entropy=sum(log(p)) + loss = -F.log_softmax(confidence, dim=2)[:, :, 0] + mask = box_utils.hard_negative_mining(loss, labels, self.neg_pos_ratio) + + confidence = confidence[mask, :] + classification_loss = F.cross_entropy(confidence.reshape(-1, num_classes), labels[mask], size_average=False) + pos_mask = labels > 0 + predicted_locations = predicted_locations[pos_mask, :].reshape(-1, 4) + gt_locations = gt_locations[pos_mask, :].reshape(-1, 4) + smooth_l1_loss = F.smooth_l1_loss(predicted_locations, gt_locations, size_average=False) + num_pos = gt_locations.size(0) + return smooth_l1_loss / num_pos, classification_loss / num_pos diff --git a/src/vision/nn/scaled_l2_norm.py b/src/vision/nn/scaled_l2_norm.py new file mode 100644 index 0000000..f31be6a --- /dev/null +++ b/src/vision/nn/scaled_l2_norm.py @@ -0,0 +1,19 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ScaledL2Norm(nn.Module): + def __init__(self, in_channels, initial_scale): + super(ScaledL2Norm, self).__init__() + self.in_channels = in_channels + self.scale = nn.Parameter(torch.Tensor(in_channels)) + self.initial_scale = initial_scale + self.reset_parameters() + + def forward(self, x): + return (F.normalize(x, p=2, dim=1) + * self.scale.unsqueeze(0).unsqueeze(2).unsqueeze(3)) + + def reset_parameters(self): + self.scale.data.fill_(self.initial_scale) diff --git a/src/vision/nn/squeezenet.py b/src/vision/nn/squeezenet.py new file mode 100644 index 0000000..a05e39a --- /dev/null +++ b/src/vision/nn/squeezenet.py @@ -0,0 +1,127 @@ +import torch +import torch.nn as nn +import torch.nn.init as init +import torch.utils.model_zoo as model_zoo + +__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1'] + +model_urls = { + 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', + 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', +} + + +class Fire(nn.Module): + + def __init__(self, inplanes, squeeze_planes, + expand1x1_planes, expand3x3_planes): + super(Fire, self).__init__() + self.inplanes = inplanes + self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) + self.squeeze_activation = nn.ReLU(inplace=True) + self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, + kernel_size=1) + self.expand1x1_activation = nn.ReLU(inplace=True) + self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, + kernel_size=3, padding=1) + self.expand3x3_activation = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.squeeze_activation(self.squeeze(x)) + return torch.cat([ + self.expand1x1_activation(self.expand1x1(x)), + self.expand3x3_activation(self.expand3x3(x)) + ], 1) + + +class SqueezeNet(nn.Module): + + def __init__(self, version=1.0, num_classes=1000): + super(SqueezeNet, self).__init__() + if version not in [1.0, 1.1]: + raise ValueError("Unsupported SqueezeNet version {version}:" + "1.0 or 1.1 expected".format(version=version)) + self.num_classes = num_classes + if version == 1.0: + self.features = nn.Sequential( + nn.Conv2d(3, 96, kernel_size=7, stride=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(96, 16, 64, 64), + Fire(128, 16, 64, 64), + Fire(128, 32, 128, 128), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(256, 32, 128, 128), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + 
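MultiboxLoss above pairs smooth-L1 regression over the positive priors with cross-entropy over a hard-negative-mined subset, keeping roughly neg_pos_ratio background priors per positive so the background class does not swamp the gradient. A shape-level sketch with random tensors and a hypothetical 4-class setup, assuming the src/ directory is on PYTHONPATH:

import torch
from vision.nn.multibox_loss import MultiboxLoss
from vision.ssd.config import mobilenetv1_ssd_config as config

criterion = MultiboxLoss(config.priors, iou_threshold=0.5, neg_pos_ratio=3,
                         center_variance=0.1, size_variance=0.2,
                         device=torch.device('cpu'))

batch, num_priors, num_classes = 2, config.priors.size(0), 4    # 3000 priors for this config
confidence = torch.randn(batch, num_priors, num_classes)        # raw class scores from the SSD heads
predicted_locations = torch.randn(batch, num_priors, 4)         # encoded box offsets
labels = torch.randint(0, num_classes, (batch, num_priors))     # per-prior targets (e.g. from MatchPrior)
gt_locations = torch.randn(batch, num_priors, 4)

regression_loss, classification_loss = criterion(confidence, predicted_locations, labels, gt_locations)
print(regression_loss.item(), classification_loss.item())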
Fire(512, 64, 256, 256), + ) + else: + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=3, stride=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + Fire(64, 16, 64, 64), + Fire(128, 16, 64, 64), + nn.MaxPool2d(kernel_size=3, stride=2), + Fire(128, 32, 128, 128), + Fire(256, 32, 128, 128), + nn.MaxPool2d(kernel_size=3, stride=2), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + Fire(512, 64, 256, 256), + ) + # Final convolution is initialized differently form the rest + final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) + self.classifier = nn.Sequential( + nn.Dropout(p=0.5), + final_conv, + nn.ReLU(inplace=True), + nn.AvgPool2d(13, stride=1) + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + if m is final_conv: + init.normal_(m.weight, mean=0.0, std=0.01) + else: + init.kaiming_uniform_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x) + return x.view(x.size(0), self.num_classes) + + +def squeezenet1_0(pretrained=False, **kwargs): + r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level + accuracy with 50x fewer parameters and <0.5MB model size" + `_ paper. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = SqueezeNet(version=1.0, **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_0'])) + return model + + +def squeezenet1_1(pretrained=False, **kwargs): + r"""SqueezeNet 1.1 model from the `official SqueezeNet repo + `_. + SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters + than SqueezeNet 1.0, without sacrificing accuracy. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = SqueezeNet(version=1.1, **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1'])) + return model diff --git a/src/vision/nn/vgg.py b/src/vision/nn/vgg.py new file mode 100644 index 0000000..255d8ad --- /dev/null +++ b/src/vision/nn/vgg.py @@ -0,0 +1,25 @@ +import torch.nn as nn + + +# borrowed from https://github.com/amdegroot/ssd.pytorch/blob/master/ssd.py +def vgg(cfg, batch_norm=False): + layers = [] + in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + elif v == 'C': + layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)] + else: + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) + conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) + conv7 = nn.Conv2d(1024, 1024, kernel_size=1) + layers += [pool5, conv6, + nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)] + return layers diff --git a/src/vision/prunning/__init__.py b/src/vision/prunning/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/vision/prunning/prunner.py b/src/vision/prunning/prunner.py new file mode 100644 index 0000000..6e6510a --- /dev/null +++ b/src/vision/prunning/prunner.py @@ -0,0 +1,235 @@ +import logging +from heapq import nsmallest + +import torch +import torch.nn as nn + +from ..utils.model_book import ModelBook + + +class ModelPrunner: + def __init__(self, model, train_fun, ignored_paths=[]): + """ Implement the pruning algorithm described in the 
paper https://arxiv.org/pdf/1611.06440.pdf . + + The prunning criteria is dC/dh * h, while C is the cost, h is the activation. + """ + self.model = model + self.train_fun = train_fun + self.ignored_paths = ignored_paths + self.book = ModelBook(self.model) + self.outputs = {} + self.grads = {} + self.handles = [] + self.decendent_batch_norms = {} # descendants impacted by the conv layers. + self.last_conv_path = None # used to trace the graph + self.descendent_convs = {} # descendants impacted by the conv layers. + self.descendent_linears = {} # descendants impacted by the linear layers. + self.last_linear_path = None # used to trace the graph + + def _make_new_conv(self, conv, filter_index, channel_type="out"): + if not isinstance(conv, nn.Conv2d): + raise TypeError(f"The module is not Conv2d, but {type(conv)}.") + + if channel_type == "out": + new_conv = nn.Conv2d(conv.in_channels, conv.out_channels - 1, conv.kernel_size, conv.stride, + conv.padding, conv.dilation, conv.groups, conv.bias is not None) + mask = torch.ones(conv.out_channels, dtype=torch.uint8) + mask[filter_index] = 0 + new_conv.weight.data = conv.weight.data[mask, :, :, :] + if conv.bias is not None: + new_conv.bias.data = conv.bias.data[mask] + + elif channel_type == 'in': + new_conv = nn.Conv2d(conv.in_channels - 1, conv.out_channels, conv.kernel_size, conv.stride, + conv.padding, conv.dilation, conv.groups, conv.bias is not None) + mask = torch.ones(conv.in_channels, dtype=torch.uint8) + mask[filter_index] = 0 + new_conv.weight.data = conv.weight.data[:, mask, :, :] + if conv.bias is not None: + new_conv.bias.data = conv.bias.data + else: + raise ValueError(f"{channel_type} should be either 'in' or 'out'.") + return new_conv + + def remove_conv_filter(self, path, filter_index): + conv = self.book.get_module(path) + logging.info(f'Prune Conv: {"/".join(path)}, Filter: {filter_index}, Layer: {conv}') + new_conv = self._make_new_conv(conv, filter_index, channel_type="out") + self._update_model(path, new_conv) + + next_conv_path = self.descendent_convs.get(path) + if next_conv_path: + next_conv = self.book.get_module(next_conv_path) + new_next_conv = self._make_new_conv(next_conv, filter_index, channel_type="in") + self._update_model(next_conv_path, new_next_conv) + + # reduce the num_features of batch norm + batch_norm_path = self.decendent_batch_norms.get(path) + if batch_norm_path: + batch_norm = self.book.get_module(batch_norm_path) + new_batch_norm = nn.BatchNorm2d(batch_norm.num_features - 1) + self._update_model(batch_norm_path, new_batch_norm) + + # reduce the in channels of linear layer + linear_path = self.descendent_linears.get(path) + if linear_path: + linear = self.book.get_module(linear_path) + new_linear = self._make_new_linear(linear, filter_index, conv, channel_type="in") + self._update_model(linear_path, new_linear) + + @staticmethod + def _make_new_linear(linear, feature_index, conv=None, channel_type="out"): + if channel_type == "out": + new_linear = nn.Linear(linear.in_features, linear.out_features - 1, + bias=linear.bias is not None) + mask = torch.ones(linear.out_features, dtype=torch.uint8) + mask[feature_index] = 0 + new_linear.weight.data = linear.weight.data[mask, :] + if linear.bias is not None: + new_linear.bias.data = linear.bias.data[mask] + elif channel_type == "in": + if conv: + block = int(linear.in_features / conv.out_channels) + else: + block = 1 + new_linear = nn.Linear(linear.in_features - block, linear.out_features, + bias=linear.bias is not None) + start_index = feature_index * block + 
end_index = (feature_index + 1) * block + mask = torch.ones(linear.in_features, dtype=torch.uint8) + mask[start_index: end_index] = 0 + new_linear.weight.data = linear.weight.data[:, mask] + if linear.bias is not None: + new_linear.bias.data = linear.bias.data + else: + raise ValueError(f"{channel_type} should be either 'in' or 'out'.") + return new_linear + + def prune_conv_layers(self, num=1): + """Prune one conv2d filter. + """ + self.register_conv_hooks() + before_loss, before_accuracy = self.train_fun(self.model) + ranks = [] + for path, output in self.outputs.items(): + output = output.data + grad = self.grads[path].data + v = grad * output + v = v.sum(0).sum(1).sum(1) # sum to the channel axis. + v = torch.abs(v) + v = v / torch.sqrt(torch.sum(v * v)) # normalize + for i, e in enumerate(v): + ranks.append((path, i, e)) + to_prune = nsmallest(num, ranks, key=lambda t: t[2]) + to_prune = sorted(to_prune, key=lambda t: ( + t[0], -t[1])) # prune the filters with bigger indexes first to avoid rearrangement. + for path, filter_index, value in to_prune: + self.remove_conv_filter(path, filter_index) + self.deregister_hooks() + after_loss, after_accuracy = self.train_fun(self.model) + return after_loss - before_loss, after_accuracy - before_accuracy + + def register_conv_hooks(self): + """Run register before training for pruning.""" + self.outputs.clear() + self.grads.clear() + self.handles.clear() + self.last_conv_path = None + self.decendent_batch_norms.clear() + self.descendent_convs.clear() + self.descendent_linears.clear() + + def forward_hook(m, input, output): + path = self.book.get_path(m) + if isinstance(m, nn.Conv2d): + if path not in self.ignored_paths: + self.outputs[path] = output + if self.last_conv_path: + self.descendent_convs[self.last_conv_path] = path + self.last_conv_path = path + elif isinstance(m, nn.BatchNorm2d): + if self.last_conv_path: + self.decendent_batch_norms[self.last_conv_path] = path + elif isinstance(m, nn.Linear): + if self.last_conv_path: + self.descendent_linears[self.last_conv_path] = path + self.last_conv_path = None # after a linear layer the conv layer doesn't matter + + def backward_hook(m, input, output): + path = self.book.get_path(m) + self.grads[path] = output[0] + + for path, m in self.book.modules(module_type=(nn.Conv2d, nn.BatchNorm2d, nn.Linear)): + h = m.register_forward_hook(forward_hook) + self.handles.append(h) + h = m.register_backward_hook(backward_hook) + self.handles.append(h) + + def deregister_hooks(self): + """Run degresiter before retraining to recover the model""" + for handle in self.handles: + handle.remove() + + def prune_linear_layers(self, num=1): + self.register_linear_hooks() + before_loss, before_accuracy = self.train_fun(self.model) + ranks = [] + for path, output in self.outputs.items(): + output = output.data + grad = self.grads[path].data + v = grad * output + v = v.sum(0) # sum to the channel axis. 
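prune_conv_layers above scores every filter with the Taylor criterion |h * dC/dh| from the referenced paper, summing over the batch and both spatial dimensions and L2-normalizing per layer before removing the num lowest-ranked filters. The same ranking in isolation, on hypothetical activation and gradient tensors rather than the hooks ModelPrunner registers:

import torch

h = torch.randn(8, 16, 19, 19)      # conv activation: (batch, channels, H, W)
grad = torch.randn(8, 16, 19, 19)   # gradient of the cost w.r.t. that activation

v = (grad * h).sum(0).sum(1).sum(1)      # collapse batch and spatial dims -> one score per channel
v = torch.abs(v)
v = v / torch.sqrt(torch.sum(v * v))     # normalize so scores are comparable across layers
print(torch.argsort(v)[:2].tolist())     # the two lowest-ranked filters would be pruned first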
+ v = torch.abs(v) + v = v / torch.sqrt(torch.sum(v * v)) # normalize + for i, e in enumerate(v): + ranks.append((path, i, e)) + to_prune = nsmallest(num, ranks, key=lambda t: t[2]) + to_prune = sorted(to_prune, key=lambda t: (t[0], -t[1])) + for path, feature_index, value in to_prune: + self.remove_linear_feature(path, feature_index) + self.deregister_hooks() + after_loss, after_accuracy = self.train_fun(self.model) + return after_loss - before_loss, after_accuracy - before_accuracy + + def register_linear_hooks(self): + self.outputs.clear() + self.grads.clear() + self.handles.clear() + self.descendent_linears.clear() + self.last_linear_path = None + + def forward_hook(m, input, output): + path = self.book.get_path(m) + if path not in self.ignored_paths: + self.outputs[path] = output + if self.last_linear_path: + self.descendent_linears[self.last_linear_path] = path + self.last_linear_path = path + + def backward_hook(m, input, output): + path = self.book.get_path(m) + self.grads[path] = output[0] + + for _, m in self.book.linear_modules(): + h = m.register_forward_hook(forward_hook) + self.handles.append(h) + h = m.register_backward_hook(backward_hook) + self.handles.append(h) + + def remove_linear_feature(self, path, feature_index): + linear = self.book.get_module(path) + logging.info(f'Prune Linear: {"/".join(path)}, Filter: {feature_index}, Layer: {linear}') + new_linear = self._make_new_linear(linear, feature_index, channel_type="out") + self._update_model(path, new_linear) + + # update following linear layers + next_linear_path = self.descendent_linears.get(path) + if next_linear_path: + next_linear = self.book.get_module(next_linear_path) + new_next_linear = self._make_new_linear(next_linear, feature_index, channel_type='in') + self._update_model(next_linear_path, new_next_linear) + + def _update_model(self, path, module): + parent = self.book.get_module(path[:-1]) + parent._modules[path[-1]] = module + self.book.update(path, module) diff --git a/src/vision/ssd/__init__.py b/src/vision/ssd/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/vision/ssd/config/__init__.py b/src/vision/ssd/config/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/vision/ssd/config/mobilenetv1_ssd_config.py b/src/vision/ssd/config/mobilenetv1_ssd_config.py new file mode 100644 index 0000000..af5137d --- /dev/null +++ b/src/vision/ssd/config/mobilenetv1_ssd_config.py @@ -0,0 +1,32 @@ +import numpy as np + +from vision.utils.box_utils import SSDSpec, SSDBoxSizes, generate_ssd_priors + +image_size = 300 +image_mean = np.array([127, 127, 127]) # RGB layout +image_std = 128.0 +iou_threshold = 0.45 +center_variance = 0.1 +size_variance = 0.2 + +specs = [ + SSDSpec(19, 16, SSDBoxSizes(60, 105), [2, 3]), + SSDSpec(10, 32, SSDBoxSizes(105, 150), [2, 3]), + SSDSpec(5, 64, SSDBoxSizes(150, 195), [2, 3]), + SSDSpec(3, 100, SSDBoxSizes(195, 240), [2, 3]), + SSDSpec(2, 150, SSDBoxSizes(240, 285), [2, 3]), + SSDSpec(1, 300, SSDBoxSizes(285, 330), [2, 3]) +] + +priors = generate_ssd_priors(specs, image_size) + +# print(' ') +# print('SSD-Mobilenet-v1 priors:') +# print(priors.shape) +# print(priors) +# print(' ') + +# import torch +# torch.save(priors, 'mb1-ssd-priors.pt') + +# np.savetxt('mb1-ssd-priors.txt', priors.numpy()) diff --git a/src/vision/ssd/config/squeezenet_ssd_config.py b/src/vision/ssd/config/squeezenet_ssd_config.py new file mode 100644 index 0000000..531dcd1 --- /dev/null +++ b/src/vision/ssd/config/squeezenet_ssd_config.py @@ -0,0 +1,21 @@ +import numpy as 
np + +from vision.utils.box_utils import SSDSpec, SSDBoxSizes, generate_ssd_priors + +image_size = 300 +image_mean = np.array([127, 127, 127]) # RGB layout +image_std = 128.0 +iou_threshold = 0.45 +center_variance = 0.1 +size_variance = 0.2 + +specs = [ + SSDSpec(17, 16, SSDBoxSizes(60, 105), [2, 3]), + SSDSpec(10, 32, SSDBoxSizes(105, 150), [2, 3]), + SSDSpec(5, 64, SSDBoxSizes(150, 195), [2, 3]), + SSDSpec(3, 100, SSDBoxSizes(195, 240), [2, 3]), + SSDSpec(2, 150, SSDBoxSizes(240, 285), [2, 3]), + SSDSpec(1, 300, SSDBoxSizes(285, 330), [2, 3]) +] + +priors = generate_ssd_priors(specs, image_size) diff --git a/src/vision/ssd/config/vgg_ssd_config.py b/src/vision/ssd/config/vgg_ssd_config.py new file mode 100644 index 0000000..1358053 --- /dev/null +++ b/src/vision/ssd/config/vgg_ssd_config.py @@ -0,0 +1,22 @@ +import numpy as np + +from vision.utils.box_utils import SSDSpec, SSDBoxSizes, generate_ssd_priors + +image_size = 300 +image_mean = np.array([123, 117, 104]) # RGB layout +image_std = 1.0 + +iou_threshold = 0.45 +center_variance = 0.1 +size_variance = 0.2 + +specs = [ + SSDSpec(38, 8, SSDBoxSizes(30, 60), [2]), + SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]), + SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]), + SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]), + SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]), + SSDSpec(1, 300, SSDBoxSizes(264, 315), [2]) +] + +priors = generate_ssd_priors(specs, image_size) diff --git a/src/vision/ssd/data_preprocessing.py b/src/vision/ssd/data_preprocessing.py new file mode 100644 index 0000000..d9b2fe8 --- /dev/null +++ b/src/vision/ssd/data_preprocessing.py @@ -0,0 +1,62 @@ +from ..transforms.transforms import * + + +class TrainAugmentation: + def __init__(self, size, mean=0, std=1.0): + """ + Args: + size: the size the of final image. + mean: mean pixel value per channel. + """ + self.mean = mean + self.size = size + self.augment = Compose([ + ConvertFromInts(), + PhotometricDistort(), + Expand(self.mean), + RandomSampleCrop(), + RandomMirror(), + ToPercentCoords(), + Resize(self.size), + SubtractMeans(self.mean), + lambda img, boxes=None, labels=None: (img / std, boxes, labels), + ToTensor(), + ]) + + def __call__(self, img, boxes, labels): + """ + + Args: + img: the output of cv.imread in RGB layout. + boxes: boundding boxes in the form of (x1, y1, x2, y2). + labels: labels of boxes. 
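TrainAugmentation above chains the usual SSD augmentations (photometric distortion, expand, random crop, mirror), converts boxes to percent coordinates, resizes to the 300x300 network input, and normalizes with the config mean/std before producing a CHW tensor. A small smoke-test sketch with a random image and one hypothetical box, assuming src/ is on PYTHONPATH:

import numpy as np
from vision.ssd.config import mobilenetv1_ssd_config as config
from vision.ssd.data_preprocessing import TrainAugmentation

augment = TrainAugmentation(config.image_size, config.image_mean, config.image_std)

image = np.random.randint(0, 255, (480, 640, 3)).astype(np.float32)   # HWC, RGB
boxes = np.array([[100.0, 120.0, 300.0, 360.0]], dtype=np.float32)    # absolute x1, y1, x2, y2
labels = np.array([1], dtype=np.int64)

image_t, boxes_t, labels_t = augment(image, boxes, labels)
print(image_t.shape)   # torch.Size([3, 300, 300])
print(boxes_t)         # the same box, now in [0, 1] relative coordinates (possibly cropped/flipped)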
+ """ + return self.augment(img, boxes, labels) + + +class TestTransform: + def __init__(self, size, mean=0.0, std=1.0): + self.transform = Compose([ + ToPercentCoords(), + Resize(size), + SubtractMeans(mean), + lambda img, boxes=None, labels=None: (img / std, boxes, labels), + ToTensor(), + ]) + + def __call__(self, image, boxes, labels): + return self.transform(image, boxes, labels) + + +class PredictionTransform: + def __init__(self, size, mean=0.0, std=1.0): + self.transform = Compose([ + Resize(size), + SubtractMeans(mean), + lambda img, boxes=None, labels=None: (img / std, boxes, labels), + ToTensor() + ]) + + def __call__(self, image): + image, _, _ = self.transform(image) + return image diff --git a/src/vision/ssd/fpn_mobilenetv1_ssd.py b/src/vision/ssd/fpn_mobilenetv1_ssd.py new file mode 100644 index 0000000..548f001 --- /dev/null +++ b/src/vision/ssd/fpn_mobilenetv1_ssd.py @@ -0,0 +1,77 @@ +import torch +from torch.nn import Conv2d, Sequential, ModuleList, ReLU + +from .config import mobilenetv1_ssd_config as config +from .fpn_ssd import FPNSSD +from .predictor import Predictor +from ..nn.mobilenet import MobileNetV1 + + +def create_fpn_mobilenetv1_ssd(num_classes): + base_net = MobileNetV1(1001).features # disable dropout layer + + source_layer_indexes = [ + (69, Conv2d(in_channels=512, out_channels=256, kernel_size=1)), + (len(base_net), Conv2d(in_channels=1024, out_channels=256, kernel_size=1)), + ] + extras = ModuleList([ + Sequential( + Conv2d(in_channels=1024, out_channels=256, kernel_size=1), + ReLU(), + Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1), + ReLU() + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ReLU() + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ReLU() + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ReLU() + ) + ]) + + regression_headers = ModuleList([ + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + # TODO: change to kernel_size=1, padding=0? + ]) + + classification_headers = ModuleList([ + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + # TODO: change to kernel_size=1, padding=0? 
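Each head above outputs 6 boxes per feature-map cell: for a spec with aspect ratios [2, 3] there are two square priors plus two rectangles per aspect ratio, which is why the regression heads use 6 * 4 output channels and the classification heads 6 * num_classes. A quick sanity check of the resulting prior count for the MobileNet config, using the feature-map sizes listed there (assumes src/ on PYTHONPATH):

from vision.ssd.config import mobilenetv1_ssd_config as config

boxes_per_cell = 2 + 2 * len([2, 3])                   # 2 squares + 2 rectangles per aspect ratio
total = sum(f * f * boxes_per_cell for f in (19, 10, 5, 3, 2, 1))
print(boxes_per_cell, total)                           # 6 3000
print(config.priors.shape)                             # torch.Size([3000, 4])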
+ ]) + + return FPNSSD(num_classes, base_net, source_layer_indexes, + extras, classification_headers, regression_headers) + + +def create_fpn_mobilenetv1_ssd_predictor(net, candidate_size=200, nms_method=None, sigma=0.5, + device=torch.device('cpu')): + predictor = Predictor(net, config.image_size, config.image_mean, config.priors, + config.center_variance, config.size_variance, + nms_method=nms_method, + iou_threshold=config.iou_threshold, + candidate_size=candidate_size, + sigma=sigma, + device=device) + return predictor diff --git a/src/vision/ssd/fpn_ssd.py b/src/vision/ssd/fpn_ssd.py new file mode 100644 index 0000000..f301270 --- /dev/null +++ b/src/vision/ssd/fpn_ssd.py @@ -0,0 +1,143 @@ +from typing import List, Tuple + +import numpy as np +import torch +import torch.nn as nn + +from ..utils import box_utils + + +class FPNSSD(nn.Module): + def __init__(self, num_classes: int, base_net: nn.ModuleList, source_layer_indexes: List[int], + extras: nn.ModuleList, classification_headers: nn.ModuleList, + regression_headers: nn.ModuleList, upsample_mode="nearest"): + """Compose a SSD model using the given components. + """ + super(FPNSSD, self).__init__() + + self.num_classes = num_classes + self.base_net = base_net + self.source_layer_indexes = source_layer_indexes + self.extras = extras + self.classification_headers = classification_headers + self.regression_headers = regression_headers + self.upsample_mode = upsample_mode + + # register layers in source_layer_indexes by adding them to a module list + self.source_layer_add_ons = nn.ModuleList([t[1] for t in source_layer_indexes if isinstance(t, tuple)]) + self.upsamplers = [ + nn.Upsample(size=(19, 19), mode='bilinear'), + nn.Upsample(size=(10, 10), mode='bilinear'), + nn.Upsample(size=(5, 5), mode='bilinear'), + nn.Upsample(size=(3, 3), mode='bilinear'), + nn.Upsample(size=(2, 2), mode='bilinear'), + ] + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + confidences = [] + locations = [] + start_layer_index = 0 + header_index = 0 + features = [] + for end_layer_index in self.source_layer_indexes: + + if isinstance(end_layer_index, tuple): + added_layer = end_layer_index[1] + end_layer_index = end_layer_index[0] + else: + added_layer = None + for layer in self.base_net[start_layer_index: end_layer_index]: + x = layer(x) + start_layer_index = end_layer_index + if added_layer: + y = added_layer(x) + else: + y = x + # confidence, location = self.compute_header(header_index, y) + features.append(y) + header_index += 1 + # confidences.append(confidence) + # locations.append(location) + + for layer in self.base_net[end_layer_index:]: + x = layer(x) + + for layer in self.extras: + x = layer(x) + # confidence, location = self.compute_header(header_index, x) + features.append(x) + header_index += 1 + # confidences.append(confidence) + # locations.append(location) + + upstream_feature = None + for i in range(len(features) - 1, -1, -1): + feature = features[i] + if upstream_feature is not None: + upstream_feature = self.upsamplers[i](upstream_feature) + upstream_feature += feature + else: + upstream_feature = feature + confidence, location = self.compute_header(i, upstream_feature) + confidences.append(confidence) + locations.append(location) + confidences = torch.cat(confidences, 1) + locations = torch.cat(locations, 1) + return confidences, locations + + def compute_header(self, i, x): + confidence = self.classification_headers[i](x) + confidence = confidence.permute(0, 2, 3, 1).contiguous() + confidence = 
confidence.view(confidence.size(0), -1, self.num_classes) + + location = self.regression_headers[i](x) + location = location.permute(0, 2, 3, 1).contiguous() + location = location.view(location.size(0), -1, 4) + + return confidence, location + + def init_from_base_net(self, model): + self.base_net.load_state_dict(torch.load(model, map_location=lambda storage, loc: storage), strict=False) + self.source_layer_add_ons.apply(_xavier_init_) + self.extras.apply(_xavier_init_) + self.classification_headers.apply(_xavier_init_) + self.regression_headers.apply(_xavier_init_) + + def init(self): + self.base_net.apply(_xavier_init_) + self.source_layer_add_ons.apply(_xavier_init_) + self.extras.apply(_xavier_init_) + self.classification_headers.apply(_xavier_init_) + self.regression_headers.apply(_xavier_init_) + + def load(self, model): + self.load_state_dict(torch.load(model, map_location=lambda storage, loc: storage)) + + def save(self, model_path): + torch.save(self.state_dict(), model_path) + + +class MatchPrior(object): + def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold): + self.center_form_priors = center_form_priors + self.corner_form_priors = box_utils.center_form_to_corner_form(center_form_priors) + self.center_variance = center_variance + self.size_variance = size_variance + self.iou_threshold = iou_threshold + + def __call__(self, gt_boxes, gt_labels): + if type(gt_boxes) is np.ndarray: + gt_boxes = torch.from_numpy(gt_boxes) + if type(gt_labels) is np.ndarray: + gt_labels = torch.from_numpy(gt_labels) + boxes, labels = box_utils.assign_priors(gt_boxes, gt_labels, + self.corner_form_priors, self.iou_threshold) + boxes = box_utils.corner_form_to_center_form(boxes) + locations = box_utils.convert_boxes_to_locations(boxes, self.center_form_priors, self.center_variance, + self.size_variance) + return locations, labels + + +def _xavier_init_(m: nn.Module): + if isinstance(m, nn.Conv2d): + nn.init.xavier_uniform_(m.weight) diff --git a/src/vision/ssd/mobilenet_v2_ssd_lite.py b/src/vision/ssd/mobilenet_v2_ssd_lite.py new file mode 100644 index 0000000..c68246b --- /dev/null +++ b/src/vision/ssd/mobilenet_v2_ssd_lite.py @@ -0,0 +1,71 @@ +import torch +from torch import nn +from torch.nn import Conv2d, Sequential, ModuleList, BatchNorm2d + +from .config import mobilenetv1_ssd_config as config +from .predictor import Predictor +from .ssd import SSD, GraphPath +from ..nn.mobilenet_v2 import MobileNetV2, InvertedResidual + + +def SeperableConv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, onnx_compatible=False): + """Replace Conv2d with a depthwise Conv2d and Pointwise Conv2d. 
+ """ + ReLU = nn.ReLU if onnx_compatible else nn.ReLU6 + return Sequential( + Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, + groups=in_channels, stride=stride, padding=padding), + BatchNorm2d(in_channels), + ReLU(), + Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1), + ) + + +def create_mobilenetv2_ssd_lite(num_classes, width_mult=1.0, use_batch_norm=True, onnx_compatible=False, is_test=False): + base_net = MobileNetV2(width_mult=width_mult, use_batch_norm=use_batch_norm, + onnx_compatible=onnx_compatible).features + + source_layer_indexes = [ + GraphPath(14, 'conv', 3), + 19, + ] + extras = ModuleList([ + InvertedResidual(1280, 512, stride=2, expand_ratio=0.2), + InvertedResidual(512, 256, stride=2, expand_ratio=0.25), + InvertedResidual(256, 256, stride=2, expand_ratio=0.5), + InvertedResidual(256, 64, stride=2, expand_ratio=0.25) + ]) + + regression_headers = ModuleList([ + SeperableConv2d(in_channels=round(576 * width_mult), out_channels=6 * 4, + kernel_size=3, padding=1, onnx_compatible=False), + SeperableConv2d(in_channels=1280, out_channels=6 * 4, kernel_size=3, padding=1, onnx_compatible=False), + SeperableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1, onnx_compatible=False), + SeperableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1, onnx_compatible=False), + SeperableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1, onnx_compatible=False), + Conv2d(in_channels=64, out_channels=6 * 4, kernel_size=1), + ]) + + classification_headers = ModuleList([ + SeperableConv2d(in_channels=round(576 * width_mult), out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=1280, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=64, out_channels=6 * num_classes, kernel_size=1), + ]) + + return SSD(num_classes, base_net, source_layer_indexes, + extras, classification_headers, regression_headers, is_test=is_test, config=config) + + +def create_mobilenetv2_ssd_lite_predictor(net, candidate_size=200, nms_method=None, sigma=0.5, + device=torch.device('cpu')): + predictor = Predictor(net, config.image_size, config.image_mean, + config.image_std, + nms_method=nms_method, + iou_threshold=config.iou_threshold, + candidate_size=candidate_size, + sigma=sigma, + device=device) + return predictor diff --git a/src/vision/ssd/mobilenetv1_ssd.py b/src/vision/ssd/mobilenetv1_ssd.py new file mode 100644 index 0000000..2d7115b --- /dev/null +++ b/src/vision/ssd/mobilenetv1_ssd.py @@ -0,0 +1,75 @@ +from torch.nn import Conv2d, Sequential, ModuleList, ReLU + +from .config import mobilenetv1_ssd_config as config +from .predictor import Predictor +from .ssd import SSD +from ..nn.mobilenet import MobileNetV1 + + +def create_mobilenetv1_ssd(num_classes, is_test=False): + base_net = MobileNetV1(1001).model # disable dropout layer + + source_layer_indexes = [ + 12, + 14, + ] + extras = ModuleList([ + Sequential( + Conv2d(in_channels=1024, out_channels=256, kernel_size=1), + ReLU(), + Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1), + ReLU() + ), + Sequential( + Conv2d(in_channels=512, out_channels=128, kernel_size=1), + ReLU(), + 
Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ReLU() + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ReLU() + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ReLU() + ) + ]) + + regression_headers = ModuleList([ + Conv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=1024, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + # TODO: change to kernel_size=1, padding=0? + ]) + + classification_headers = ModuleList([ + Conv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=1024, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + # TODO: change to kernel_size=1, padding=0? + ]) + + return SSD(num_classes, base_net, source_layer_indexes, + extras, classification_headers, regression_headers, is_test=is_test, config=config) + + +def create_mobilenetv1_ssd_predictor(net, candidate_size=200, nms_method=None, sigma=0.5, device=None): + predictor = Predictor(net, config.image_size, config.image_mean, + config.image_std, + nms_method=nms_method, + iou_threshold=config.iou_threshold, + candidate_size=candidate_size, + sigma=sigma, + device=device) + return predictor diff --git a/src/vision/ssd/mobilenetv1_ssd_lite.py b/src/vision/ssd/mobilenetv1_ssd_lite.py new file mode 100644 index 0000000..bb18350 --- /dev/null +++ b/src/vision/ssd/mobilenetv1_ssd_lite.py @@ -0,0 +1,80 @@ +from torch.nn import Conv2d, Sequential, ModuleList, ReLU + +from .config import mobilenetv1_ssd_config as config +from .predictor import Predictor +from .ssd import SSD +from ..nn.mobilenet import MobileNetV1 + + +def SeperableConv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0): + """Replace Conv2d with a depthwise Conv2d and Pointwise Conv2d. 
+ """ + return Sequential( + Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, + groups=in_channels, stride=stride, padding=padding), + ReLU(), + Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1), + ) + + +def create_mobilenetv1_ssd_lite(num_classes, is_test=False): + base_net = MobileNetV1(1001).model # disable dropout layer + + source_layer_indexes = [ + 12, + 14, + ] + extras = ModuleList([ + Sequential( + Conv2d(in_channels=1024, out_channels=256, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1), + ), + Sequential( + Conv2d(in_channels=512, out_channels=128, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1) + ) + ]) + + regression_headers = ModuleList([ + SeperableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1), + SeperableConv2d(in_channels=1024, out_channels=6 * 4, kernel_size=3, padding=1), + SeperableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=1), + ]) + + classification_headers = ModuleList([ + SeperableConv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=1024, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=1), + ]) + + return SSD(num_classes, base_net, source_layer_indexes, + extras, classification_headers, regression_headers, is_test=is_test, config=config) + + +def create_mobilenetv1_ssd_lite_predictor(net, candidate_size=200, nms_method=None, sigma=0.5, device=None): + predictor = Predictor(net, config.image_size, config.image_mean, + config.image_std, + nms_method=nms_method, + iou_threshold=config.iou_threshold, + candidate_size=candidate_size, + sigma=sigma, + device=device) + return predictor diff --git a/src/vision/ssd/predictor.py b/src/vision/ssd/predictor.py new file mode 100644 index 0000000..a37209e --- /dev/null +++ b/src/vision/ssd/predictor.py @@ -0,0 +1,73 @@ +import torch + +from .data_preprocessing import PredictionTransform +from ..utils import box_utils +from ..utils.misc import Timer + + +class Predictor: + def __init__(self, net, size, mean=0.0, std=1.0, nms_method=None, + iou_threshold=0.45, filter_threshold=0.01, candidate_size=200, sigma=0.5, device=None): + self.net = net + self.transform = PredictionTransform(size, mean, std) + self.iou_threshold = iou_threshold + self.filter_threshold = filter_threshold + self.candidate_size = candidate_size + self.nms_method = nms_method + + self.sigma = sigma + if device: + self.device = device + else: + 
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + self.net.to(self.device) + self.net.eval() + + self.timer = Timer() + + def predict(self, image, top_k=-1, prob_threshold=None): + cpu_device = torch.device("cpu") + height, width, _ = image.shape + image = self.transform(image) + # print(image) + images = image.unsqueeze(0) + images = images.to(self.device) + with torch.no_grad(): + self.timer.start() + scores, boxes = self.net.forward(images) + print("Inference time: ", self.timer.end()) + boxes = boxes[0] + scores = scores[0] + if not prob_threshold: + prob_threshold = self.filter_threshold + + boxes = boxes.to(cpu_device) + scores = scores.to(cpu_device) + picked_box_probs = [] + picked_labels = [] + for class_index in range(1, scores.size(1)): + probs = scores[:, class_index] + mask = probs > prob_threshold + probs = probs[mask] + if probs.size(0) == 0: + continue + + subset_boxes = boxes[mask, :] + box_probs = torch.cat([subset_boxes, probs.reshape(-1, 1)], dim=1) + box_probs = box_utils.nms(box_probs, self.nms_method, + score_threshold=prob_threshold, + iou_threshold=self.iou_threshold, + sigma=self.sigma, + top_k=top_k, + candidate_size=self.candidate_size) + picked_box_probs.append(box_probs) + picked_labels.extend([class_index] * box_probs.size(0)) + if not picked_box_probs: + return torch.tensor([]), torch.tensor([]), torch.tensor([]) + picked_box_probs = torch.cat(picked_box_probs) + picked_box_probs[:, 0] *= width + picked_box_probs[:, 1] *= height + picked_box_probs[:, 2] *= width + picked_box_probs[:, 3] *= height + return picked_box_probs[:, :4], torch.tensor(picked_labels), picked_box_probs[:, 4] diff --git a/src/vision/ssd/squeezenet_ssd_lite.py b/src/vision/ssd/squeezenet_ssd_lite.py new file mode 100644 index 0000000..2533597 --- /dev/null +++ b/src/vision/ssd/squeezenet_ssd_lite.py @@ -0,0 +1,86 @@ +import torch +from torch.nn import Conv2d, Sequential, ModuleList, ReLU + +from .config import squeezenet_ssd_config as config +from .predictor import Predictor +from .ssd import SSD +from ..nn.squeezenet import squeezenet1_1 + + +def SeperableConv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0): + """Replace Conv2d with a depthwise Conv2d and Pointwise Conv2d. 
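Predictor.predict above preprocesses with PredictionTransform, forwards the network once, filters each class by prob_threshold, applies NMS, and finally rescales the surviving boxes from the normalized network space back to the original image width and height. Putting it together with the MobileNetV1-SSD factories defined earlier; the label list and file paths are hypothetical, and src/ is assumed to be on PYTHONPATH:

import cv2
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor

class_names = ['BACKGROUND', 'bottle', 'battery']              # hypothetical labels.txt contents
net = create_mobilenetv1_ssd(len(class_names), is_test=True)   # is_test=True -> softmax scores + corner boxes
net.load('mb1-ssd-example.pth')                                # hypothetical checkpoint path
predictor = create_mobilenetv1_ssd_predictor(net, candidate_size=200)

image = cv2.cvtColor(cv2.imread('example.jpg'), cv2.COLOR_BGR2RGB)
boxes, labels, probs = predictor.predict(image, top_k=10, prob_threshold=0.4)
for box, label, prob in zip(boxes, labels, probs):
    print(class_names[label], round(prob.item(), 3), box.tolist())   # pixel-space x1, y1, x2, y2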
+ """ + return Sequential( + Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, + groups=in_channels, stride=stride, padding=padding), + ReLU(), + Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1), + ) + + +def create_squeezenet_ssd_lite(num_classes, is_test=False): + base_net = squeezenet1_1(False).features # disable dropout layer + + source_layer_indexes = [ + 12 + ] + extras = ModuleList([ + Sequential( + Conv2d(in_channels=512, out_channels=256, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=2), + ), + Sequential( + Conv2d(in_channels=512, out_channels=256, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1), + ), + Sequential( + Conv2d(in_channels=512, out_channels=128, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1) + ) + ]) + + regression_headers = ModuleList([ + SeperableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1), + SeperableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1), + SeperableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=1), + ]) + + classification_headers = ModuleList([ + SeperableConv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + SeperableConv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=1), + ]) + + return SSD(num_classes, base_net, source_layer_indexes, + extras, classification_headers, regression_headers, is_test=is_test, config=config) + + +def create_squeezenet_ssd_lite_predictor(net, candidate_size=200, nms_method=None, sigma=0.5, + device=torch.device('cpu')): + predictor = Predictor(net, config.image_size, config.image_mean, + config.image_std, + nms_method=nms_method, + iou_threshold=config.iou_threshold, + candidate_size=candidate_size, + sigma=sigma, + device=device) + return predictor diff --git a/src/vision/ssd/ssd.py b/src/vision/ssd/ssd.py new file mode 100644 index 0000000..bcdbdfd --- /dev/null +++ b/src/vision/ssd/ssd.py @@ -0,0 +1,167 @@ +from collections import namedtuple +from typing import List, Tuple + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..utils import box_utils + +GraphPath = namedtuple("GraphPath", ['s0', 'name', 's1']) # + + +class SSD(nn.Module): + def __init__(self, num_classes: int, base_net: nn.ModuleList, source_layer_indexes: List[int], + extras: nn.ModuleList, classification_headers: 
nn.ModuleList, + regression_headers: nn.ModuleList, is_test=False, config=None, device=None): + """Compose a SSD model using the given components. + """ + super(SSD, self).__init__() + + self.num_classes = num_classes + self.base_net = base_net + self.source_layer_indexes = source_layer_indexes + self.extras = extras + self.classification_headers = classification_headers + self.regression_headers = regression_headers + self.is_test = is_test + self.config = config + + # register layers in source_layer_indexes by adding them to a module list + self.source_layer_add_ons = nn.ModuleList([t[1] for t in source_layer_indexes + if isinstance(t, tuple) and not isinstance(t, GraphPath)]) + if device: + self.device = device + else: + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + if is_test: + self.config = config + self.priors = config.priors.to(self.device) + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + confidences = [] + locations = [] + start_layer_index = 0 + header_index = 0 + for end_layer_index in self.source_layer_indexes: + if isinstance(end_layer_index, GraphPath): + path = end_layer_index + end_layer_index = end_layer_index.s0 + added_layer = None + elif isinstance(end_layer_index, tuple): + added_layer = end_layer_index[1] + end_layer_index = end_layer_index[0] + path = None + else: + added_layer = None + path = None + for layer in self.base_net[start_layer_index: end_layer_index]: + x = layer(x) + if added_layer: + y = added_layer(x) + else: + y = x + if path: + sub = getattr(self.base_net[end_layer_index], path.name) + for layer in sub[:path.s1]: + x = layer(x) + y = x + for layer in sub[path.s1:]: + x = layer(x) + end_layer_index += 1 + start_layer_index = end_layer_index + confidence, location = self.compute_header(header_index, y) + header_index += 1 + confidences.append(confidence) + locations.append(location) + + for layer in self.base_net[end_layer_index:]: + x = layer(x) + + for layer in self.extras: + x = layer(x) + confidence, location = self.compute_header(header_index, x) + header_index += 1 + confidences.append(confidence) + locations.append(location) + + confidences = torch.cat(confidences, 1) + locations = torch.cat(locations, 1) + + if self.is_test: + confidences = F.softmax(confidences, dim=2) + boxes = box_utils.convert_locations_to_boxes( + locations, self.priors, self.config.center_variance, self.config.size_variance + ) + boxes = box_utils.center_form_to_corner_form(boxes) + return confidences, boxes + else: + return confidences, locations + + def compute_header(self, i, x): + confidence = self.classification_headers[i](x) + confidence = confidence.permute(0, 2, 3, 1).contiguous() + confidence = confidence.view(confidence.size(0), -1, self.num_classes) + + location = self.regression_headers[i](x) + location = location.permute(0, 2, 3, 1).contiguous() + location = location.view(location.size(0), -1, 4) + + return confidence, location + + def init_from_base_net(self, model): + self.base_net.load_state_dict(torch.load(model, map_location=lambda storage, loc: storage), strict=True) + self.source_layer_add_ons.apply(_xavier_init_) + self.extras.apply(_xavier_init_) + self.classification_headers.apply(_xavier_init_) + self.regression_headers.apply(_xavier_init_) + + def init_from_pretrained_ssd(self, model): + state_dict = torch.load(model, map_location=lambda storage, loc: storage) + state_dict = {k: v for k, v in state_dict.items() if + not (k.startswith("classification_headers") or 
k.startswith("regression_headers"))} + model_dict = self.state_dict() + model_dict.update(state_dict) + self.load_state_dict(model_dict) + self.classification_headers.apply(_xavier_init_) + self.regression_headers.apply(_xavier_init_) + + def init(self): + self.base_net.apply(_xavier_init_) + self.source_layer_add_ons.apply(_xavier_init_) + self.extras.apply(_xavier_init_) + self.classification_headers.apply(_xavier_init_) + self.regression_headers.apply(_xavier_init_) + + def load(self, model): + self.load_state_dict(torch.load(model, map_location=lambda storage, loc: storage)) + + def save(self, model_path): + torch.save(self.state_dict(), model_path) + + +class MatchPrior(object): + def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold): + self.center_form_priors = center_form_priors + self.corner_form_priors = box_utils.center_form_to_corner_form(center_form_priors) + self.center_variance = center_variance + self.size_variance = size_variance + self.iou_threshold = iou_threshold + + def __call__(self, gt_boxes, gt_labels): + if type(gt_boxes) is np.ndarray: + gt_boxes = torch.from_numpy(gt_boxes) + if type(gt_labels) is np.ndarray: + gt_labels = torch.from_numpy(gt_labels) + boxes, labels = box_utils.assign_priors(gt_boxes, gt_labels, + self.corner_form_priors, self.iou_threshold) + boxes = box_utils.corner_form_to_center_form(boxes) + locations = box_utils.convert_boxes_to_locations(boxes, self.center_form_priors, self.center_variance, + self.size_variance) + return locations, labels + + +def _xavier_init_(m: nn.Module): + if isinstance(m, nn.Conv2d): + nn.init.xavier_uniform_(m.weight) diff --git a/src/vision/ssd/vgg_ssd.py b/src/vision/ssd/vgg_ssd.py new file mode 100644 index 0000000..21ff264 --- /dev/null +++ b/src/vision/ssd/vgg_ssd.py @@ -0,0 +1,76 @@ +from torch.nn import Conv2d, Sequential, ModuleList, ReLU, BatchNorm2d + +from .config import vgg_ssd_config as config +from .predictor import Predictor +from .ssd import SSD +from ..nn.vgg import vgg + + +def create_vgg_ssd(num_classes, is_test=False): + vgg_config = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M', + 512, 512, 512] + base_net = ModuleList(vgg(vgg_config)) + + source_layer_indexes = [ + (23, BatchNorm2d(512)), + len(base_net), + ] + extras = ModuleList([ + Sequential( + Conv2d(in_channels=1024, out_channels=256, kernel_size=1), + ReLU(), + Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1), + ReLU() + ), + Sequential( + Conv2d(in_channels=512, out_channels=128, kernel_size=1), + ReLU(), + Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1), + ReLU() + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + Conv2d(in_channels=128, out_channels=256, kernel_size=3), + ReLU() + ), + Sequential( + Conv2d(in_channels=256, out_channels=128, kernel_size=1), + ReLU(), + Conv2d(in_channels=128, out_channels=256, kernel_size=3), + ReLU() + ) + ]) + + regression_headers = ModuleList([ + Conv2d(in_channels=512, out_channels=4 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=1024, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=4 * 4, kernel_size=3, padding=1), + Conv2d(in_channels=256, out_channels=4 * 4, kernel_size=3, padding=1), + # TODO: change to kernel_size=1, padding=0? 
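MatchPrior above (the ssd.py copy mirrors the one in fpn_ssd.py) is the dataset-side target transform: it assigns each prior the ground-truth box it overlaps best (background below iou_threshold) and encodes the matched boxes as variance-scaled center-form offsets, which is what MultiboxLoss expects as gt_locations and labels. A small sketch with one ground-truth box in percent coordinates, assuming src/ on PYTHONPATH:

import numpy as np
from vision.ssd.config import mobilenetv1_ssd_config as config
from vision.ssd.ssd import MatchPrior

match_prior = MatchPrior(config.priors, config.center_variance,
                         config.size_variance, iou_threshold=0.5)

gt_boxes = np.array([[0.25, 0.30, 0.60, 0.80]], dtype=np.float32)   # x1, y1, x2, y2 in [0, 1]
gt_labels = np.array([1], dtype=np.int64)

locations, labels = match_prior(gt_boxes, gt_labels)
print(locations.shape)               # torch.Size([3000, 4]) encoded regression targets
print((labels > 0).sum().item())     # number of priors matched to this object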
diff --git a/src/vision/test/__init__.py b/src/vision/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/vision/test/assets/000138.jpg b/src/vision/test/assets/000138.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6e4746ef06a4592f9880666b6d9d5f3daec98ee2
GIT binary patch
literal 87499
zcmbTdbyQnj_%0ZVdvPsL+>28nXj`;EaVyZ`QrsaB+?^tY0>z5EyF>Bf5ZsF=Erb9e z4Bzjrb?2X%weHN$+V46!*(Z6QY>RP1H1t|L;J7% zC!hUSFwp;#m>3x77+9ECSpQS8adEJ)aj~(maPV<(@%}4MR|NQY1pmGH?@RvIqt7tW z(J}F`v9SNA$^R$u*aslNL3@vOg^tDyct(PTPJ;G01Yms96Z3!6ds6m)6|`sQ7?@bt zPio*lHE4eRq&_+@ikgO&jh%y&i(5ogO#HQkXVpG$;re|bkeakK^DlYk1T2@}s(Ad=6(%RPEF)%nZJTm%gY10puQiMMOYh3Vc|ZDN69L(!v^~g+W#Q?KLZx} z{|njw2KImBf&d86(VjXFodh5YcnF1#WVIMfkN0w~D@i^AI<~PMd{ABx91K=ZK>LqGiG0hKWWP5a(ane@Ccp+YBp;x0H|L6VsQ`C@4w*wSzIoZ+P4=yKb7XC3oymU&I;2o4>^O4lNp|FI$v|Qyyl2q ze7a}#CF$weEsBq%lC!XgW(Us@6L9J9{6|puD8l9pDGCNA!!0V&R#_ zb9@(@n}Hf^o@wk%_yVlY3;?9WvS!?y`GHQp@$jm%ilfY!Od$HmHZJ`&y{>+v*uLvN zwgM3@eImHLQflnT?Xcs8frv+iDnXXmMQBDL@#zSc&&|dM8EsXr&Y2%Y;2%HqE&pn2
zQ0l&=akAUR;JV9@t~VSJNVR)qL=evkyTD}2X5fJ0iFZR&^2lrrk<#WUb{-Gx5`|~_ z2*4bDDCq$rdFF%YkTUb15MY<`k7{0(YY1B-$MOrltbFDjx%_%Ac8b)}LoznPVev z`h%qB6a* z0j?z8)(dztK93+qvSLs;!T^SEyj)e)r3V(d(=QJw8T28J@w7Ja`QCtK=d8_KDa=LG z^XX;l%YL-b_$8O<&~8=7{YSvgSoPr7JsyRk``WwOU#Qsdur}sgbNWX>KV7e#srhd0 z*-<%299A1|uD=W(CY{sRJK66c+BU|D|B2WlC>U{kuZ1Lj$nmsZ z?0Mx2pMRhFmd~-2|Ja4!{}E6bB6KDs*cM#Wi3hj0u-6E!*hn+=XB0QV*Uxx4+YIrZ zFaNU;DHojD2M}`iH#kxnxkT{4+vgII#R#I7w|2}h^fD(`waV6dIB1CoEx(*hxWEK` zhApG;2Jhd4wC-)vb}tzak*Dw5;>O$R=M+pJ9eH>n0^FC{L`1uNR}wYc-==B#7v|IM zGdCXrW4Q|rydSU2?P{gX*ONBs5)3r!=eP8_dm`z4=L6Q&xT_p`N@DsLZin5pgBE%l z-x@dCX`_DkV4Ez&OBuJuo(%<)%Dooy4>O+;y*XzA?@5Wx@pYMaaJFh@s`RA%#z376 ztpJGD3@FC7d{Gh?{CqWq>#;sKo6m7(ETml(ajvSsEtQ_7F8N6G8VOAPW-u{=<0`!d zHTMW;Y>hZWZR~0$$q>|q^Ir=68-k~V3NF}afbr%K<}mP1)JQGy)NX!+jMN>a~rha)%n1C+hI~OAVBef;29764T`W2(fPyo5^e%( z4J~Tp`1l*?@9d>K#+t=nwS5oF>&xQMtg`78Xixw6&I_A7Wh`O5d`DoqFwI3H>dzGU zVzTAoue(^&R59s*A*cBTL(`BDnmWgEjcX~#sw z)V&;%fbn!aVI-u@;b;RfE>}0;X#eCDxlmL=^5rP&AS6nOY-4X-#^o ztBdLe`tu@ul1BzhqMUqAPE39^HO@A=iPY(8!kvP66#&*$Ie=9*vihN+cl*?QL!Z+eCQFN9uvaP38usT;#yI6%pKOz%Pjjv6T$0qetYk6 zY@8H|k_P$pFi97h$xgirMZSfnqdmrN|gzW3)kDVA`JV{!%t{_>Z53}%k zCvnE~cP_JY$DoeO+cZ+Z*w0Mr-JEw{_Gd6~Im!!Txm~=a1bzyR6|4che@b~%5MGHY zJGN?rRb!&j(_%fuE*8mpUvQg<#nPH%Qx_K5Fm2Ov6Mof8$^dd^v;>JPfuu!x|77QO zcdtA9Q*eJdaJj#R`^a2oUo&EEsg)sYX!vu;!nIysuqs$4gSS4Pj*I?cnFX2r{CK%W zZ_sc&Px1%=+I}bt3Zo1Y{nLZiW7E;q^c6X$$`1fqPJe7Y5SlXM6EDBiRT(;G1B-3Y z=czsmYAY8xR+CS3#eLyWrA^sH|F<~Nf3NnsWS*ab!*`~hqEljSgVNd(b0%xZ3*(;n z5%59On!b8)NRt4)_OHvntmFhtu@)=or+Rqm9yB1HWZHGBcRXWSNk*S3pL{8 z-8T!CQidpO>_40u0}cn5!ssbV*(&>CdYcNuGt&Pc$&-?NI##P0zyZNDWpjy_y~Z^* zSF$CdH8}JsYTt;zwy|O44(WvN81TxWFDw!_Rm3-bB2u}k9eV^I_>Pd2ts{(>J%WrY zIdBt1gx}7FrE0X=-VC)pCgy^{E81V#)>TzF(IXL7xywfr#SRlK9|oKf_`W19GSVYz z2N|fkTat!0+2=jS_`l+?EHraE<>};!@g|n^ZZplxthpSNj6y6|nJ-j;!`vr|^4PDL z?h<)!=@Hd1s?i`x7-cOCe<+A|0r_T^E~g-F%E9lI^98E~)nT@xJ=6+91>A28w%b~b+8xPt zDMRWkHoM@-RHQmUh)T<*yc|V4DaZgN*fzUXr0y$oZrjg$UvS7@hoHo9mG8X+Juo6zDtON2}{?2 zP`KLvFiQq-NT3f&EiTy7Id9~X?d?H`|wo1BUu@^ zJb0(`FpND6`#}>R$>Ue9_L{wBB=4nnO zOrxw-1}C61BYng3Tn0tSXid~kQ2zx@&O#9f|JR=ADHt7v>nk&1`*LO%51jIDbtXFEJEtbNs4-c_HLtxD;0I?Ld=^T46AB(h)E)YREL zzj-L^!m*mi#b#0DSwff3r_4)NVlAGe#56T~0aTDr1 ze%DJmj`gqxt!9@@bwQ|tkrEVD8k2vw)>S(EaIfTpb2>gZrUOHCsF#QC>k{*9DBN`I z6|Wh3l-Bbbde22+_i!cXQOp+*8I10&>6;Pn4A%_@|Dfo1)43Q}+9@W$i$M?EtfmAS zS>|*+MaRem8BRyZrJ}&yB=Xaa$oldVDnikv^2#mXzx8$&g1_y&D>Jp9uy}Upwjh?f z|0=V9rf?SlQcFiGCQ<+^S*X%$H09em74!pX&4J6cqR~8xt|WCBe*#vy(=%Sas_F{5 ztbNPq1q>Vgv4IRd?doX_LJIduTW0kQ{(LBrq){TFu1m($b?a&Swbw{Ym2ZpU0#e6A zQciX;ikX$k6VS>lyP?e^baC1bQNa1M;7pUgCb+U1EPuVh?k>A6lWx8P9tKBkQSQZ_ z%#4Jp&0~M#Yl60=W7Fspyk`1t%aMJhCQFQv^rIw?9yfgc!g7MxBQZD-%>9qGMD=5% zzO?e|g(MMUpytTLcEF8Vw^E;h&neEN#|iD^p9$EsNhJsiU~un#Lt)lq_~eFOsOl9qXTWrjV%aA$VdO zpu`x}#k0M^O!$^56?J+K6!FGe<@pK}nJj*uOAeZpgU}f+sypOwzWp)Zy}a)F9O*!n zI-ZC3kSTJj@G5)8=VxDZw4|U=qZ6Z9^H$I_xaAQrnjf}&m%ALbKM%xrtWs#Xarf0t zklFoHz(!FVX8(=6Gnp7r&l8_L*A5dO4CGLDVlhC3IhJna=XbLDb^F#U z9obZWB^%khU9#0oJa`_6G}>lNL~%v#O?q&fo(h@1`0Y^~OUfdt z0^r@tJ7SF>Z%NaWeW1H4!ti)NA_&O7Mh0bETBX zIor!#BuPGe5V3V{;%l-?SqiHiJ+7T8bX*cRZY)Xf!Y9{;WLiJN`Ay%DHmxb`c;3A? z*UJgsUaEcmt_(?y=qz4e8+-APpD1$Jw)IO;Cbt(d_tZYA!58#3!{T?K)Ri!fjp(U}-2FE+!#g#O+Qg4N}p5c!<7g zLn{BiXljq!Wlgh(|53VIOMz1)qfN6Y82uX21udq3ROdFQYrWq0&*IGyn*2Ny6|s2U zVV>*#YOgki0f4Z6GSLU*VT_~3vQcN^PWR63<%9N=Zr@$fzuK-U~Q%aWY+-o{@UA>J<$vrIp^}?9Ct!ZzwMm7 zUM#C=Jy*?67nsHgPF}p>Hz@iR+!X5J<2JuTAOU zdvXq^&G>rej#?kS_gYS{x23EYp5zI`)=fxs|IXOkg}1XbOanMBX_0~pAj{pK3k*kE zi|?UhG$c{p7I?`Xxg4TY***{5#)JFHPBGnKPFw5bYz5G}+;4)CB@-

5{aY7i8vF3GU@ci zIPMYNJJ0OaI4?S*!rwUtiQB3)UrJKNjF<<}>rw$2wh^*iI!QY<-dk#oFuetcX!o~y zJtY9@8J`pfuy0s7xh&qtqN=Ev!16)c_$azL@Y7SUkq)x zej=x2(-p=wq_f}TDT_^ZYre%(d_72zq~ZEbOt#Mcu8WF~;J0$s(m&DiLF!^mVhlLP zPdV<%Ql{O;xR#?{nWbrDGK7UxuxpCOf zgC^1Os^OdBa(x79!lB_M4{&K&=h+ym;lr0!w?-O@e@RF(@$Jhn8O)%8d|VVANOqC$ znvj33Z;2}T#5;C?=hk_TDs`QGn`qf@C3Kw5js~5Y@onGUvu=7}2kZlh>USno5*%Iu z>~|%FVCggCU)Mz}3%Vww1wYy-<~=43SoyJ49R+1oKZE7fUrnnNubf2*hxDClb$XKv z+XmYg2Malv)Z61sHR%e8n>!fMz0jl}6WhyFQJA^J=M10z5ebr-dH@4cVmuf!Gili*H8TUZiCw#ldp2A3RL? z{bvC+GB~!`-b1Mc1bl;nmV6VmAs^eC98hfcMQTw)IPvU3J4{U741lycCEPU^= znG!$jA#=#GJk)16r#AxPmDSNUdsgl2rNjHB;?S?Fo>uKfceq0=h#I-$Dxhv{+${P0 zFz5I2!1n1L_r0WGXY+I%eCVMZ#CA`In(PsN_zARQP}9WyGXaCAP)E=PkV#oAwfE*D2o;k>HrD-pLG0}p?wjy zYn=dtf9>~Nj{oeCCpx|O`0Lr~`9_m3@iD2#`Ur!iKEV=s-itA(8|##-Zw$-! z3@`K<6vURQkuc~I3aD3V-;#;usW``B z&#L@x8&3 zPiU`G?F)qXz}?UComcn>7NzF7PnlDTGA|vL2TZkg6m3lCcyJ*008WKQp^FiP?J%=a zKN#^Y$9c*!nK+omxH)xZnB_U{lSqZIEb*xPj`t|4lcz z9LXm>B6jy;NT}yq&{`@N4YPZpgdCnc-Z(6;01-7T3pF=KaA6fdOZ&~x5K6E$6==Ju z3?b#5lXyQo)%>H9BduKHnDmhQFDvfgM($d7DpB}S!aZd_Os&612Kp4@BJ#x^Krt?f z#9Y-990$=tw^p1}0iD|!hb+7mRx07{=$)6Z(A+%`6R@P6sA!O|eaVDZiJFxt&kNN+ zvE^~@H-;zfs}tO^Y|440eXa`S^!v7d+>K%11H*IIMS8uQWKIKrAugCFj#MjaCyEy# zXBTPr7o8{fEE~i~@*EVNZKdz5=S)m~tNBp1`ay3Z2{w{lXKn4)M!iA6#5+Nr22*4G zb6UJ$)y#9$I=E>nYE<2L{1RS*i1EUVZjF?(h4mCOnWWYi=k$GhAELtBKCSt>mN91- z#kkM}K9}G(nQHl96{<~8|6T_#+$w$aD>?m6Xrg0OT~Ax&ss5tZ5wluO`nOGezE%$& zuDu^0#ieEBkIm&Z@`1H%4|bq#qYp*oKWg%NvJLO{}^Y9XAW=v_~JiHB&^2M4r z_|?!073O*Z<&Q73D)$NYLk)nIuNjV@M=1QMr|ppl;gd5_o0x`(dtV>9!%+_tk8<*o zh|kys*4(`PHc_a_59}KXEeKgN&)*_O;_md4=Gx-3_UM4o zBlNcCxZ`0r*nQ>*UKb=;BRsNkE`Pwh>1RrznIj%ieu?iEiuK?7;g(xR*7JR-i(Bui zwN_Bu3T3ku8!lWJxVtE5nd7gV%SKx}Oi!N;WA0t3v9r!Q|0j{^Ri|*ss6oUd3EZ1y3OCuh6^kV zeONXeA!#A!AFl+?Rf2<3g##)8xph5gb;u86CqQ{DmM|-x_gqVqf#YmX483 zo_A##4bdtX?HJ19&*VP6;X%t&o|Z>%YC?Ee*KqZA!+yJOXUdeejU6#OI1X0qP}yYL zR;#O5{>*;;6bSjX+`RS*8zH4U0mR%7Ew?7NvqVnSM)H!HCXu&xnT}QV3$UY39Q7+F1e99b?aI}?o4k{O55O(Ed+b-a1y|G z{B-9aq3Z7Yk{0A=c~C}me)D*wJZWMv_g5)RAM6*7gFqFi!H5iI&jL_lSDiGFL#xRm zQ*8P9PIb-%n{J|5!kTK{>I`)G>ZsZnggK8?ax0&J<9gYDD{h~ z`+gyYl{ILr6w_|&q5jJoo5Bx8S5u7gjhaL@_u7jE#6^Ev%9{_g+y8y!_f`~6J-@<8 zi$xf~TM%qu=-%)vOOL_{M_!kMt$|+^7U_oNtVH1gEP2~Ws(1KseW-J8DUx{(z&oP^ds;J2$)IK>~=_4>i3zkAT`=PyutpL4&*(kAOjj^zB;Q zCvR_vinwO|qdeQgDR$=)KcY35DCSGPzs<)TO%e{%$s_Dppg44JfL2!)rZt&)FU*lC zMVaJxGQ#BD&br?eF(!TbYB1H|)z3lV>`eDAn%eT%78SmH@jO2o@!uHm2E=fS6R2o!_9F+>s7Jxw@HfJLT{`HLb;bn zTRB9U%&e;PWNdLxW{R6@F+dp0m$a8d*@S7EQald@YVUaJ?yJm%N}{hNM8^;_J4t=k z-LyUR{B>~emUEs}6NlAUZGcps&MB&6J`<*4_^NWg+c7zb9oKQO-p z)hKa{1DJc0@-hE7LFb3=G>0^CgXa9>iJi(>Q8b;6%WuqC|E%o@_`Nr>c~2mKo-P5* zeqcn{KLW4~Q7a4#h>#Mb1Y+-A$QLWXae^&-(4=9(g((1`GN3watb9|aPedVSB-AO_ zS6k9IrkKeFKV82C1yMFTY%U2RI(Y0GkeZt*%-P!8o`=2MUu3ucXolZOG-mE{oE7Xw ze>v)Yt_l9(2*1I9KH@{(u8i|~W}+=sH`rr1Pz5@H!hdy71X7HpsR21wA_ASYj5pO? z-g(f+D|*N9W$STZ(9?UpnC5BG(OUnfPe#JN@M)vd@s=Kpq}PHjnaA`(mA7Jlj|sG{0V-Rs68% zEi+#;-^)LnxmQB<_0o3nZEq!PQZ2n)lp#SRQpRa6?bt7feRCZjVE=SW@D1HXPbP|S zyfqG_L~d-l)LuSLxoJ9? 
z`lVXlJ~Q&n=yn<~gt zw6ej?nkXZnh`dSDzOPn=8%) zW5KVu~++h)HX&&vJkAqYZ%sAXrjD=I${UU2mFLcM~W)6eAxhF(0w>;z9g%{7##Qg_;9BfD49g5rCq8`4dYr zp~vRSgasOytF0~aAVZ7ASxO79j7SEB7fKfvHdF0cMxSVFzK1{IWTpltxjklW z#e288f=HFaGQlo-jw>2c6&O8czl;0%jgzHowQnVZa)SI?yAm%2oI$c&CSC1aPul`e z+RgRCV7lWUi|IO96BZK#ma^=(pKA`6qFr&)^}-`W$aufF*{?}j@kym~U69`&DhqCI z#hCT}E|gXpn0t?2uRZP62Te;NkAM}%?TDo4q9&7-9ui+STS}qZtQ{a_J&x`p*fLU$z~v?fy&rXwd~XlMXS(jKT%d* zO@CF}oKda!_Hr~5MOox3Y9Yf)ta;aT<0cm{bzbHD1yjl&1j7k}>z`s(7}3MDGB~Q= z*vv+M)uENolPfIET25zVKoTxC6O$r^=C2a`&ss`8rTw8DntL&{0a1oKA8mxPuAauA zT^je$cNl_{Bu@z5`@m0mXVa;*ds)Y2qW6Os7N>k|-a<3a(e^aaN4cwe+MTR-b6ITx znNLif)@4G>uTh)ZdAHS<&s8%d&uCKsim%?hX8bq34H`tLnO2xoY)Y=A}=ABo~Nt14}13w(O1=4wfw+0sG4PvQX~L zg_SPrm=kQq2v7MO!JzCc#*`uy*I_zfXA4$vs&~U-9!@9LvS4giWz!q!)Bv8<7UP?|TJ3+L=zbq%7LSTC|Mu%GHMm zmGD_K(`BOz^56(NeHo{)lZug7{BBvaQ25Uy8K>uy; z_i#iFcaWC(+1&G0e~0q0;2Jnem2NNYO5#}aI}}>Z#N;Tpck7y6SMrQ%eM)tcnkO28V#2r#Dvpci{~S$x=3z2ZnR;Cd zLG;Hrc}z{HX&8OBD%imu<K{ zBwJ&F#*l>5e%cIP;fQd7_vZ;(zO0cJDo|pG2b>8ubIx}!WoJ&>#X;Bx8ypM_v@Si- z<`p~^ut(=fXyP&H^z{pXol(u^PGQ4bt=*Rr2XU=lP>C&3dYfVZd=aR?^$`?GtlE(H_ zt%}OoZ|zF#x=MN6s8fXWPpQk16cUoI~X)GR79t%%f+zAei}cz4KIsYzfq>kaal6OVaCagP zx2J9aWqV%c=>ynIW$!dle3Ul#Y|4+zk?svpTmv^{WVHmk zwkF~5Ze`t+1W6}W{}(Y5L)eXOa@?r4;Whb8cpeqZC9dl;Mjm^wSN%e6(`nWP1-u3s z_8U}F6K${%@0QZp@w?0XhkY*v-_;h0VFTpkM!ndq@ASScWtUTn_#GFEi6R=1ItD0M zaG%`pC*H3_vO@c08eJC}mC(DxyySK2D3u^?7-`el*wM7!^W}--GA_S;PN#vxEl-19 ze%6Hx(*tS}7*h>vMrYnREpUCSs^-hUaYl13|CTPV`z7%WzYl;$4exp@DW9PE#k13a zAaSVEi3r(o2K;x=TNjC^&6Zp9uJ28`wvbpY3oC=k6It=H1o;-a*gs!wHmKd!L?yZk zdE(Zg&5Ppm>FDM8EHpbg98PHFfte{&EJ9ZavZ!9}a8KPA;2x|=^(;PoM`5DodN`qv zfcV1&6t}6bm+fz-A9H+7jL+5Bx__3j+q4-T`DTC{n-n<5cun7@o?CsQ_G?9w3J-4) zf+(Jhqbn$OFvtD##U>}23&;aIHLh!`PFFkeFNooh575J09z?PH2yoy$zx2#9=ubWt zKIP+UT!)w{8-AHy{xF8BL#~BkY9k_Hbiq-skHPeX|B%DPm{BB{3hP4 zR!y|F=A6E{_U!&%4PmhB?_h=lGm9#*o7-BO@S@)&k9;_4#si+33;9tR7TaR*?s+dt zaGw9uZ%yd^tk#+P9C7pkQR%nyih2aSHeQBq2=eyKFe=@q#U~qb$ewm6FhN%^Bj;E9 z`Dp@l4w(}Z<$CQ&O-N;3l<)++Xi}&5KATDx#2&L-er=h%HQWS{J9;AA8D;82&lim8 z326lDK8-6b9p?|`kF?vG&JVV=ON*Hw-qmV9d_T1)+#{dgxNOPhD7ZARL%A(#L$&*sCj}<11x@Tt`HC{MYaA1pcw`_75tqa*t1Bpc(ak>a~_7uov>> zm!%l{97w;X-_v1oPZvohJrSM4O!A79fC<}Y9IbyRQ5L&*yH2)wD-LYWh~NY#T|NB+ zy(C6JlAZEukhG=OPjb8U)!~Li!-M`?+JhQ}2C)}<%CE<2ec1Da1)Jzpam^jQjg5ln zoI!^HnwY`PT2C2BwV>?QfY$xX9*EBh%>10L5TSTNh_$9oy0K+iB}QrHew+4ds`>dd zCDv`LWGPw)U*bL6p>~G7;q12G=Onr%8F{Cy2Pz}93Y{O0=s8(!F9{5$B@G76(dCQG zo>HFLzuvVw-jDGMGbf(e0*_vQ39TTK;K;tSgquASoS`@nSdfVkT+z&EzCuAYC!Y|u zefsbYX`!k%zL1-c5j2c@!zz|RZJm-@AE%^e5$5)Kij~c9l^io-geJ7LPY{0vB!M|n z;J!cR;SW5P$7T#tpGuVWKkT^YGyU97GKTh=6)m3gdJk3=>BLscKF2ondCi{ytx zLxl_#k_*<7Uwg8oW$zzZ?1{Lzrt@WB?`X;3j8^=76PyD0`2BBgD;`ufBE$bx2(nvN z2bz#ru(M(Dm(vT!3*TDB^PeDI!Xa8_OBy#dy$E0P*I7;_X$u6+N&RX`T@bX`(QzL(A<;C$D-54ZSuZ#;pA;+M*8x7{}c)Sh0Nh5Ei4N~$&rEFGqb6FMQuvRD{zWHwcx7z*BLk>Vza z?6W8%!fWy0!bZ(WMh(8AQTgE;s=}mW(U7COr$hexn^+}A-|PL)9iTOn&=SyEze*Xx zcF?#ss=18D`DztU-6ftfm6afo98CT@Sx9n%jNy%2_0DS~S0!RXVWG=}+d&dISLN;V z#3c!L`y1T$D`EDVYO2JCB23+7s~sxJ><&~5@Jx)ouqxX-^XT;S_O5X@h!bfxC;F5g zIW>-oxNrt0ykiZUHYxdP+%)7@_c3zJ7a~13GGtXphu2VFn;FFV5T`rKI?Gobsn<1O zWg{V_GLSaPi_7{52skxczJH4_AIK;0^q%8&Uf;@9soqzb=08unsrwi7s>b*WmYL?wjXG90vUM89m zK$pY2JYd*^-g%sJqBjfYAn7s(61M3AjAS8-m5<^|1Y)6j$)TkZwMAe%@M_c^V|ef9 z4FkAR=3uMj;WwvUYG)+wZnxxHyNfeQSdEE#heH34u9nTSQY?wIBA(zGf}yZW+s=X0(Dj;L39^-#Hc#_nuMhGOQ8D)0bskW~@;K=+W}Ym*=~ z%GfwO5(h);S!sbFe;?~G5y*P6Q(6#9wK3hLpEw@48IHZ7JzzjCbp%LXt<@y8uSN6^ z(6}c!i5zsO+p}hxvd!zQT_Sg%7?pa6dARRGn%5(M;N+&!k%0xqD<}bgTYOg zw$Y(zi3S~soF=cg3-rwOYuyonP|ZD}5Q^UDpjRNv9J6m9(!QYRnj4w!^0~HLehc0$ 
zOMkI8QWZUBFAeB+aWSa`C6sUfnb$5b)^fFHgQ$&9_2$m^bTCeCoeSQJ2mQ{kZ%wj+ zijTUWZ~lRNx87-5C`|57EX)ZJGEobrXCSuuh#Z&vJ`>rCj&!3ce4&>QLW#EMadz71mQf+a=p{m9$h zg%zputNkLl$84j{#SPJl`xJTP!oqOD_+$>!A7)BJ)Se=44|5C9g8_{mM_Sz+-E8%9 z&m4#GJMp#a)7uw&`RGVR#GAb*hE-TT8IUS)XA1nTO$^eLRIWf7;1{I-K0f))OQNch z^z=akrO9i2qmhCkM20Ts6P8po0hxQ^Ub{OAVmi`{BInUhp zx8H2O4Yk^nF%%DDPO8i>efH<9%_z#lkE3ddl>STfnl3i)#~sXc;dF*fQPipmg6f^T z=K92C-RSG@ei-X7Q@cD@D%iO(S!)YO`t7BJB*RCj6mBRCn6`i6Ds{Xu@Yv( zlA+`(voGcU6sra}GXrXt^k}TDb<8a}kdz!KJm~^EeMn^7Q^-&$KJWnj@7_*ne3!~LF?C;uj*^>GSwx&poI}SL)g;l`6}sDc>A@#u zi9LpM*&)LxJKr*cK=~!&}JJ_p%`NFYG22QATRm){OsH7?t zmn^w2gD#lc`?~fs;^kBDTo7ak!-y#aSs{3K9O*{ZKP`yZxme9#EYMrdkUj#6#M}Yt z3aNr^q-aR5MHHoP+Ax@ca3-XOWehvC_RDT}z=q7_RED0cZ05cIn)RpEX#({&f!jFd z2P$Ycd(XU+=Y*4llV@A{QFHUX)|)_5K*QvYvVk5pQ?lA~6HEZ%A7di=(eZmsdd>O< zXSl3g(<=m@Y~@ z@hjcP5I~jNpZsE2wUx0s)PBOoKZz?s&(7|_|KSl(a5v%fG*-I?@( z1_CFPCp3C(7-#MVr|||qpW;LH54k;Tr?@*?I3Tmx!-b#mKw7W$DSHpel-$`-*D~vm z9_``;9^VcAo!-93yp~9j+prz8;~qmRmO2 z=ld!O#e)n&I5|;;ffPkwyi$+6VNqH-$lIiIj6gq9AM`2?lQ3dlOIVBHF;NuA?lKxQ zK;x#>jnnMaJUuG4yz4hwG8`Nlc*pW`K3QqO?Y&mll%mtg?C z?e11#?Vw!h_})4}1?l@39bR;!>AHg7mQb6g?ERD|^(8~$QSDCIP+QLiafrAp=NTI! zBFEXyrC#b8*4v9Ye=6Ct!GIRm@#t{tydM!so0SfbmaWZA>YR+rQ#yJF(%Y>?a9?i? zJg9?S!GyOT?i~~;GN*rE1*%>^bS2PI)qG7gC#j#S*6~E@0ku@La!h6Z=1P=8d)v$t zG#>#twTUPOkZP=Y@+u-ylfWUBECepCuFpx+o$<$h>O8V0X9(I-P{I@OvN&Y-hCs$s zHqshdNw=32UW(|1nheR1&Y6sNEp-9skA-|fQM^FHykRy$MoZ#msXp`Pd_i0wzBw6o z<31L352fbCaMn*lbBYaWH1yjA_C+l93Q`8)XL}xti>j07GW4f~jt`Q^RLqUep{+{_ z(jUmZeAImn9Ig)ayr$SikuT&cnPmOTn7e#8fVM#qUQCR@{@$D556c3+r?^22B@#}V zmq7BUYMczIJA5_6;$%3kUk4v|~0;M^YHyZ-^j*8iLs5}h`Rl9_v=cdUZs%a8}k ziI8bw?Yxy}Km%Yl0KmeiAHu3W zRbNl3CxaV{_A8Pf6Z{wTvFv48ch=|aDtrs^eZS1b`1A3NJSXAntwQrrGhEEf=1C6b zd0VRxMhPeI=sFBn&Y!aP!>w!fiui{g$KMlL%YUa^Y7$4_O)_amm#AGr?6T^U79bd5 zba4|o*}Wv4jv|QHn|{(i8FWvJ-yF1Gf}SS$k!Rt(8&YeloqJQh)#J1=X||BO_e%@A zC=)bjV_b5(%k#3lcm4`f{{RI#@P?uA%Io$*{f~S{V{PH9%`QDZ#-1+Fg9vXdL~60< zdSM$#vQ!Eum|)y8h*S3}?J~=-H7iLwX!@O*#$!(p4`mJee_Jo>vB3WT!B;7FiE#@s23BU6g;t0!$n9$w3qHAZ z6}$_AEgIRUEJ@&zE077mILP4Vilsk_wuz?EY}Uc$lYFTQ=L#2Us3)DcImZJ&y{g>Z zdDjYMZR98(q?}}*&%J$RDp92c80>s>rspncS+U+J@!ZW~x(=IevZN0tCc9NHwSfTw zFUYDCk%A6JGB_3NpRrHHFCSg&ll(r>J|bRP>X63Qx-Qye# zcs$l%tDS|UlXD(8QS0^e`d3~1KkJ%jfPOLff5tk++_w6Crm7>pySD_%Z3Fy`GJ%rC zH+h);ErfTd#nFviG@_QfuV45kxEw`FIOt8v%IQ6Arr(DD03-D4;FrTcj2|3)MWx*Q zarmuyt!Q?QAN)yeT3D^;P{4$k6PAn~s5o!BNm509Z~I>U(!URXWl!0MS@@mfE6BzC znswHh;EfDF?{%G7?b%|502x$|B3WfPSmR8R8IfDTz9M{h@CWRZ`!{Ld9Q<>tX|qkJ zs{a6DX_`&^XZ{i^h2>3~u0e%?SsG9oV~z51+f~0WKWJatd*avaY4K9y;(x_`eQs{v zKeOr97OKf*rdtNeTH8cfwnD#oiObC2GqW+Tkiq4$%8PnCJs7hKd<;# z*`;&gn@JtVfptASb&{5GnJsT+{{W9do4kqDgU2Wt!+gN01LoSral2`+Z_?Ytxo>A11`i$RK;B$O{88G5p(E2#vPF4RMrltP?iI$%Y?j)1NT4s?n8h*JcoLh%vo(r%5X28Z3)Uz@v`DIeWfC~86 zihKj4d|zEtTKiAUcd9!qk3W`Ul~onl!vz4gFk8MuV{ie!oYH(D@P;1?ABq0}6m_{% zPqBh>$5OXa%PTZKSdHTW_b~wBS)4BX5DBcW_*UNwt@JCO75F#c)~If80E*md(@cwV zDV=sEE}diC#Dbw(NgHqiJd=%S*Nru6Jvy9tj5MlMa?@Q*czi=O@4@+eCF3Bs7p)Q` zGvCA{jw`1_vB(>g3zT3BWGh?^m{P#j&Zl)~F|6qDLpGbI?Khf6pbYmGEQ_?>!dx_k zN)j2Au|ULO)P)*82mO@%e`}#?a{NNl?)6P$Rk4!PX_{`Rwz^%EZn?y>O||X_aHWDq z7BabvGac2|=)VKBO&?9Whs7QshBzR#j!UTEN13hJ4rE`nGN$5F- zy4{+#JP)U2YqvEOp4Npy|LM@%VT@d65* z6?J7Ge5>{^?C1&ppTQ2`82@`-QK` z-yV32#y%2)FNz-zG;4ng-Rbwuap6-9zn2(xCA`_AUAu#vXK~y^5^cN5fBr{vq);xu@%z{+X%xqgm9o2&}v>q1;0A zks9F1B#rWBgI_IcT+SqBxJ*$Ku!Rx#17lXW~Ag z@aN-hzo7UtM(~7y?Z{kEB zXYh5Uyf&7xm(+DDxrf=+^Pjkx*bV?yKQRly#})k>{6W?{1^YaFG1ojSB-1=uZQ-94 zL8th)UAc)G4;*;@QmuJy5MadEX!>xTFuQgw&BSZB&4NID_WuC>9o_MlNggElm-`oZ 
zCTpTtLvs#>ws$Vg^AcvVXbYX+#7V|6#z?Qxa_C1a!bXIe=C#}YyC0Wkc&XLHOz@~9jxe$BQ#M?^TNVSxQgf%18Ei4PZFmVw|{|@ z@Nt~fljU8vdN0@fj~(!D#~n|`-W-+uL#8&7;;jsY)U^3xV9ao#Sp#Qsxn;oWrGO^| zSo328(GS5D}wZxI6FTXwM4^w802T3MHDvd0TCn%a33s4_;SfRy~QD+9Zcv&6g!p?GF1N7D4GgEV@t zCri1MB8gR*lsIga`IM(TjF`jV=(#!D<^6Q*PB#%+m+tR>?|FZpf5US>;YW|W zEvWoS*FF;K8f@R$8k{6q+Su-SgOej9W0Jr((4Vx|!QE2(`y=9q zhI~(G%l0)E*P-zlfG}=dRIp&~M{(uN7FD}^-{PNvlWIO7wAFk$bEsO}*+{Z9mc>oN ztS)fM;00-sP&9A5YZL*BmIl74_(S4U)w~HNlkpkgOFNm8&r^xx%vxTlBB-~Q48@{` zXO4}7+QZ*s-MBr+!NkVGz{=ebOO+QO-9cj>Cd_}glc=YHZhHYKt z5z8D0jk1R&W{Y_$12z{ew2bYp67TkG)jkJ!_g&X?%{6TF`}=llZ9eWcJ7tPMUKCaY zkkJ`8MsjwhP_pM9$MDBYy>-+yFAGif4-&M}%ckkFsEWq^Cv*FtkGW!ylB_voD%%-T zA1wYE*SsgLTHbg<@4>NHMQ;R=e`i|7Fc93aV(_GHa|4DFGC`6CSi;#GMsbx&Qnj@0 z`kfe|8Eyq^_(Eb+FXqiQzr zN_;!5L71gdr~|<$0Ffe+@kopcmD<2zS3ZWah2r%*nDWA^QdjS9(2wJnjQmM{bk7cW zvQstYjbrvZZdVHy_i)+oyC94bJB())=aEYtt+9xfG8WG4VDnoa5d2xGc=N@Vekj-8 z6MLz-xw|&d2;%b{h{R~4Y9Igr2LNLgl+mTTAbC8rM#+*z+6nX->VKVie-m!U=!*2I zMmO0ce?cGcPEB`7)W6`JpA7X43gRObm&5BqBvPki#R(6dGmz-uJTAJW#M922%o;z2 z1=K^#Si*peLrJxet17D&C!xiBW-5%ZnA(wQ_xzr#(?i_BMj6HipC#S3vflB!ezs@c z8YjkW6IZj7QMr+B5(#BU?ko?V9l%3^(G>@GPC(8GJ#kZb_u@U3wx1gPqs&JNvAkpg zThs6a6ZqGg>0h-c!9N~Br}!Je`Zl8+Fsh}Ye+ORa@mY{C5VX*uMq7{+9&x}lc%Mtt zVKVAkj;A9wKr!#i0tNvCj2vU9<6M)kI8$&==CrVM)pX|v2D&qc_?zNMHC;05=I|DT zmO!jQ4qJ>LY#x>K@9iP~00ol$i@#%kfnoIT6I*!O;w!}`*?bkJ&fa`COZ&B1*_5rM z0aaMGH#+2~a>BY_+Rx*DouX*X;eQJ{{kC#pwT?Vw%0c_QmnN zlHCQh)2?kTB6A|DmWs;V4ohR92j80L;q1B@hBd0K*(>SR%@}b$YgZ-9<5rY!Cerk} zZvOy>rq{aB)%dOP>sa{1@ef@1&+%8pP~CWw#M-p_p0hg6p+xerB~d|UR8`v=uVqCY zYn8hAxu!zq>dI+dzUcCx*ra=wKVASe%;{bb{?JB~N{8)?fMrtM%JOl`ZR8JbJ?o&- z{ubyn&kQ~(@hFz&X+p)N>6dXC6dz^t3gghIHTo*TVdO3GBD07F#I#{hr>S%P2y|64e3zIv6qc6BmU63Wec3D zNK9#u@3O8jp4hHK;YN!$#D5j0sp8#YONea%DHP4PzA{1pcv0No00wi8Dxnx_%Cs=8 zi^aIFZ!62c^|#3NzlNG;h!ev$m)icjBSkJ(PSC7m0d|AHZG(Z6$P76o<0R*|{jhvB zuKWi0uNT7mUx-?sp*@a?W^e80Re~rkWPxRx-X)Ckk-L=}n3mg;PfDTiCriHgk^4e; zSHh9rAhiD4vO{bn9!jB^K^p-1#y!#B5Om(Z>p*1;I}@}r9QPoP zrF;JX!;ji4;y1!S4o{}&8oCyk~&JpZ9%1Sjv@}kUcijVa#41;Wluazt_Al{6l0fG4Q_H_8stNaG| z^J6?ZjBr|AT}NwoaTpK&j`AUGtl2882GU0$Gh`fK{+s+_ve&#*@Gnu-{51BLSNSlTTC;-dhGGUPzU^k@t)sZm&>Zhn_O{ zZ=-4+8u3Pe`sI`ivuUu*t#NU0ZX*G%tfy9Av|Hef#1@(!;#2^)ie7vVpW-BXPN%Lx zcc5#wR#57G5cq|pI2w+a&6bDvtFbPiJH}8Ef=Z{AkR|qSfqxjhS>s<1{{X_Y__3?Q zCyabGiS=78El4uJNigf0Y^tgn89-%pR0UlOZlS?hcaohMb4BUfxApm$MpSFk_EnYp zJ)h6F_1PoVJ_&qT_@AYCHMEED?ziG-tuGoIT~gm)Sf()rO|je<<7JjrA1|5mmO&on zRs>N{uk6kHGW=+^gT~(#G|Nj}Vc}S1n@RHoH>kgI<7`Z!3n>7nAxiC1P%RZ+(+>4( z^8WxcHHEFxYTjx80P-*MKF_kgwpk@GOg_&Q+FaW({{Wv`rQQhod9wci-yYTG-?QvP z!PkB;_(O9fM0jt;H?3uChYc2;s4>5RNaFxJ_pzzyRvc$4K<#W^+8ed7NtLaS+BD%J z5LMx2`(3L7I)e!1v1yqtQ@*c-Mq})IYRegZ>tLFx0$Z;>~g^I}1B@ zdF7T&Wb{=4a?Helss>3Q4z=+0*ZdXN;hIAY=lmgFCl{>%H&BS6Xybfx71~v28OJ45 zobmz4uPyzdzi!XjyY`RxP2w*Gd`IwqjuT(I@}=;`u8`V2zL9YYhg0PXfLuw;axu$n zaj*@zZy{xP%GiYJ#iZV+)&Br%<=9LUg$btOw7vCzcJ2QFNcrRThWOE`{C@bOb>sbB zCbfh}Q%bX$fopifWDLL6qS*ZUt}1mZI+NACS+YY#aIe2` zCj<`FZa*=L0^E~~j{cRCZL3Fn3eKTRq5a?or>W<*D_sl5o5nhD005e{tPqlx!mN?W z8FkzP<8Dt%NH4Cm`DSS1Rt1T{Bz{D8?@xp(0#9Znk=*-IOotf?7zfE7tKaeOQd7Dg zFNrnp5Zl;kTBfseHI9`8km*)ZTr)#s6Ux&_kt-4-R*~LDRlqF8k0&*&qre%T5SbeQ zDieTmGD$s#PBGNu)0(NE=__+N0S3ZDISq`A^clu8)AOzA?h-3g<#{*~!I0=d(?4dV9IB_{IH z@Ak?1J zT;6JB8*Gbd232;31zKrYP%t4wm0&*i_&577`~&@;R9*OEO^Q81BHG&SsK0L1SZ9^< zNLb(pjEI7a_1%i(#AE2ue5+UG{${v*9cryj=)BLLd>i{c{8`k#AH{7RjTVlP+_m0; zb*gJAX>gEC!d<%?Ge+u28Ds^T1aZR*dVhvK4tSBFeQRCTv{~nw8XN6085(%!P&4MC zL|ufrCuhyHu_FNOuU_$e{()oRJMB}$I)oS3Xus&zs^T}hnV2F*9Box4m;g_6)by@r zO895-YUUF^jr?_>+D9NewAA}W)X@*&D>^DKu^4QeZ9L+*sXS&bEz^$7>8(=6&(GSrebG?4<#tDJ&s 
z)x2f#Cr7g`_S#>UaCb#xswKgYM6A0%caxY&=yrfla&wCFPm3N9@kfO1Q(C^%2BoLK zoKzs}$(V8`a;1dn`HPN}HgU){&#wtjLMROgbVSwZeF zLl11yd@S&$x$#?C(fl*u&kWydH?VJ(*GkiEjPksSHip=p6?r9?g&(flK~m2mg$mq1hd*l%4gSIZ z0Jm4de~udW!@2xPXQ})WlS=Wb=p}n=W^diT zrz~KymOG_o6F%4yG(Qn|AN~sM`#r<^UU;KL_!;4k4E!I^d~xE-RPf)$-BaOu++O$# zNmv}s9fp+vmf3G5m+wwD29`#Z(q-C_S@^yCPFZ|T@TmAB@Pkg)yfyHKEot>FLep8) zTK8V@uBPrDO=rXxWs6>3O5jQ5?lHtdsN7W=KQ{rE;j-yUyq`4P?c}_--?{2$*|sk= z<-^7dl{xe;9lb@pr{KH-~-|Xm|R5!XJlPV`#q&bUWFY&wUd~zUNfB zX91df$@4UherCcc!QRfzKUvnr%EdN~XS)PP9QUDD*BC&@kHhe;7SmJGwafdMbSsjU z_EDlV_eE79+_}aS?Hg1B-0}b%*F)gVPs8#!mrl34)MI85i=u`#xR4wah+wmjK_q|x z9Zza2N>wM#B51}mprDSb)_al_Cx+5FNK!->@&ULW#gn%`kmkH+_M(GI_;IS~{vwJy z*|hzCQG0iS$}*O*U$_#pPRQsZSrRf!5w(gwPDmB+A7#^Jwfi*M6|^(D{_!TcZR6Jt z2?}%T$GvfX8T<`l@h{=-v+(y=zPhm*g`blh+)hNc%orHvQl~qkaI6SCu0i1Pu+_bl zRQh!^txuj7JZHmy3nloE;eAH?M!nIIuQb;4VgAs#-Xs~=%;+VEw$~#kj22cHCmj~G z`x5*N(&4buKjCFuX;&gAdpK?3j`X{>8BlqUJ=1LnTZI_d04udv_z%MW0FPf6G_Q%e zx9suo_x63&rqzTZ+V1;I)S{l^-^;)kP(^mj3lqpv=V{%87-6=(`@=eygZ>Tprs8d5 z_GY@&yg{m6OEu(PI{1@nmn;#1X|7jn@hc%8{0layA{ipGZ<8Hg4cq7BNmUF=-o8xzeDYYFvmPnq~;?PLK))ALq-n4FaxLA>d zY(1aBpB214bY=$j_gB0O8pFdn5js|b6DuN;qiJbLbGc+HT!_>vs5l%4?RDY}Lrm~o zelfZ5KZPTf2cAi_DYU;oPqIzmeC>E#yDO8lM1jjQ5C+p*Miu9y``_vR01RU)*g88u zlKox&>lpfHhrAZjuDPQ8SC3t`&^08rit1RTo=YOCLql>(sq+*NT2%x&4Ix!#Vh@Yo z4)w`zCWFJe^sTD0#1j5{g~qLFs-tMRSMqP;Q;oZtDO9K)X)29b{{R7gHd=f^TWw!b z(ykZ9ntq6{;w2g>OMO980xdxTAllMKQQF;$I~ZM;a9}Iw%|B1J)vfgJh1Q-Cd#gQG zWV@3~vW;ho%+AV-aH`-LBQ6OK8!Hf>DF=hoC^$uV9JJ~uQJDu3AHgb zwY0IxIA|ti6Um*$vVo#@11zhR4U$3J%-ko%zX$1-@oL^NwA1CVzj&m(l1QCGNh>K- zU{Hl}P_$)$B(KU@zFogSXjgs!xbYm`8PdKU>sNyISzO-kS?vn{024!UMs6d?{xBs` z!7NENqpoV+75HeSH2XBXzw_DSyt)fKdRgFz<0t0xFa}E!SEhTOTxL3!7M-WN*(21; za=Q4s>TuCTrrZ1ef7R@KC!_w(za9K$(9NOzJJsN|pA8PDs@XN2s8GCONkZ-cZ~#!* zz`!*v$Lveu>&XnhI{2@qSi}xqJu)e-<#o?Fc|5gV{W$vKzN6OuB>1x1TA1p7B)hnf z$LB21F=i3B%Ik*C03;v9tBBV=B5HH^T5aMqX`@7p5KA`Vyb=#Q9y5>zab6TL_?Ito z%jJL1{ExE5;PCaJ)l0tm+w!ygPbl~~sYm-0d|A@ICwv%PQ&`lZCM{YE+oVYDboL|8 zK#HIaipp59+^TSKU#%aq2gDDEpBFBZ#9s*hCN7ntYS_DLSuI419E@<_C~e7>2P61f zJoK;2{{Xe$+pBAnM;v!o^LYs%MFd6$SdE}#u>AdN(!XTy9{h3mYvKPi4tH$*-H81qXROJ0D3(qnmAR z`m)D@;PJo2&k@{wP4U&cTVWV!`g5=*+BP3CVmLA|192QE)EUkOuYo~edtzXWFmcsLlV5l2UrZeyQ->>+1Y2F%t4S0UVR!4-Sl5P3982it&FRe$Jl}weJ*tp3}xq-q?~wwD&Pv+erWiCwaoCz#KCD z?tWF~zT*dvqlousq`wA!cbMf^tP=K=z3BJ0tJll=(DSc~e;YL)7wL18c{~G_B=#o&XRb)BKZ+g>9s}`g zx&YKx>sN+Q$+_8#z@t z+~h7!0_5Nf=NRi=jNBF4Yf`#I{q2>5qz;4cE`4=kEpvd=UU!?xml;T^USxSiPtBMXoT%M9?Jx0md_ ztN4$?y1tVpis^rC71hPI^Z}CuNMh_p-L7%T!+KZ2QQ{fY%z2z`2cq=uexX~0X+tXH zQvKpr`S!PC^F=K5sFMvLJ0HxCZU@YGY-6Cv1b_AEZZ+$Bt8(+s0*D$nViiX05~QA; zGmmjxw7R9+*s(2%Rz`|M2ZeQF3Xe{o%DLYbYxC*$F~zyDa1crhe5hEq);{?d_Q9{S zs}G~-TNbI9|?6`V&2DA(e(*l&hBMl_Eovr6HDf%83QOpbqoV+g_s3aG^UhY z-pr~~mol^1`a9w0?DONV*x%yDsp0Pe-aY>SfVBvX=AYpGLr$9F4J^!?m_+1&!AUAM zwswKNOO4f|@h{-D<&Kr3{A2iK<2#)W;jUgQT~kI2adV|S;3SO!7-=IQ12jw(-FI(O zBzup${9Dj9--w?J?EFot_v&?Y=ko%fmWVj)UW^9N6miwrL)rabu^!CAOs~&gJk+COs#AqC+IU8~@jui*W3YF|%+5C-EF|k&S zTQBR#@BaW`?}%O|@jvW~;X5xITnYSH;GI76!G0|QP0n@C3f?`NmOtK_#S%_=B0<)- z{{U?bRxb;DB8TGbl(GGzz`iZg{7a^^lPrH}jV4iX0v2W7(%T!z!h%bA;9;J= z+gtcqul!f|gYfrT(hMFh@phNukL;`4a~!&ExHNRI)Yt%7T06&uWWctN#EH-r5C%8$C{G;uh!>V`HPI3IGFZ1;z%@7&UT32IX6tO(gZV zUqb~dv%~wb=C@v+w?63A{{U*wgkCALy#D}%BUIFNTPtWDG}hwKu1$rv``E9_ zT~qd(*Ze8tbksfyc(>wqjD^)FHu6h*XsWIA2DXu=jbmK>&@!VS=fBjS3I70WAB$3W zPSaHJ=fjI%7->>O6aEtq32I7iA>YF!_KhwUK)ju@q3jWU?QZaN=xY^!UCz8EYIR$T z<7qCg@ACfuhVwn=Quw8#&9ANBhP;12h2$|a!=d<(!;y#x409|psS7wcIV(GE0D|0Y zKcC<5T)&JOEysp@8SwAIv1oRe`n>b%J|no$F7X}QcH%U)SBa5b4*8{hxnymM%1+#m 
zXZWk*ZGTOX{8Ogsmmk@BjNWa$`ZV`0A_Z8fp4#IL8C;Fb49YM;kr-rrAO8Ra7VxLU zi_LdK@usOHmwH4t4`ME3jegGr?=#JS5mZCw2Lveu5~PQ4!K=Ry2X1AtLiMG0xn`<4J>IbLu{Oa542{6k{gFfQ0?c)~CvghW>`HAb>Z3xWL zNhaL3>;s-_p~YPBV_ICcW|WuKkb(q-j&L^QW~yA?%`hQA89fg{O9hJ)x@_mNj2^qP8b;-rn*=Oqpu4#kRYJ$64|n?|e#L(qKW#rBH-Wwq=x8kM84kbV zJwaJ)beP+tO3F7pi~%bGW!tV50b%RwS#a6%8!=7-qH{aeCBVS;j-AeardfpUA*7pmiTbvmDkY&=Z%5w*bsb3tsVQ6m^3 zM7!H)1&Jhz_}BglQ~v-2k?}w5iKE>*s$r=6p|jld&R3IL|#RBUijzJ81x7q=Eq-coH#?dit8Lq4=74;QJP# zJ26m9PUooVRG(hp{vMTG+4X6pZmYDcL)ScK_5T1luh7(Ox%oS*&l|_7`%&PNqxfgS zI!A{5KWP%__ButirHm_ps{%r*m;ht$jOV#xd)CFR#pa?CnC(_(NOq4y@66 zhR4JDeAhCd0aUXH)a+f(GR?I?LUW4wNy182>U~0zO8pLh#ri#-lYY0B@k(* z8$_$~05QPi0zJlkX`0WACQT(Zo9oq_I`1TIZuokT8cCYxOg9lCuJ%(XiQL9G!Db7$a3zLDepA#uX?bJeskK?G9ynu& zN^Hw+D+8+lq~P+Tk@W*4=QZ>>C$vg%lD5apXWq@q6N686-?6_J#9dic&9q#T7Hj4=kjbN!~(ejVIJ<3A1C+z$_3$RvhKP+jy}oOH%ZJPq>| zVUf3TFarea_?yN0w}X6PE}h|f39m+?Em}LdZ^8)FZza_2ZNXW7=_)>P^BmEy&!_m$ zLe)G8t4u$$^-Cs4jWZ|Nn>jM3>=G5gW#<`CIOJEeUk?iL)p|3=!`7)nNvG;?-VXTn zr+A}F^UkFVS5~q4lZYQN$chQ`5a*{~m+vXy_OH~>*mJ=)AG62okKs)tOVTYg{cBv& zV%2<8e&CzCyPJ?_5t2q2Mv|f`x7}0$Lgk74Q_#K*&tdW6!{M&6u1^M|F1cf4I@w!o zNkz=cigL#cjS1hLFsueiQ(vk70JbN_y?ajZrj_uYz&4jRy3dOo+G+Y`n#4@-@|#rT z6%Bj->697DE;^xKG zwT+6nGf5&~E##a8iYZC9c|hF^!5Np~&-g0ZXaoKVLGc5@-V}M|(tH(Tr}$#dN$w1? zyIESovb>IYc^cwL+1T>V%uX|k{Ym|i{vdn;{{Vt}e$Vz^3h>#p@t2NfMZEE*ucna| zgu0WDnk1harG^j$C=qs*LczXbYvZpH{?5J#@z=y35&TrL_#>xknzxAcd%aUy@js5Q zUhX42^E9#)gpnF3X2Pn*NL5jiYl5~Wm92eW;Cfg}6OCSGv|o?N_?zI@?8WgH_NUfw zv|j}+g_e_~L=LgyJ!a*iv9NYhV^(BVcbSOFMH;j5xb0QlU#ByE$lnXK-`T%O@K?dh z*dp7$gHBCqu+r?K;YqH`8k^Wm%^GPvfNemi`uNRGUs)O*Pa*Jc*X_v%p|_;BlJb zc#4;@S{?JH3X1L7_dajZyc6P240uoe5>JhO74c@1pth?c-Xi#7`dMydXWG7M!iD3A z00TL}I4Zd1hq!od*T$CCFlwG1weXGZmoy+-jV4=|B@i6!RCxhFFn;FcUZazm*6_EB zFEqHV^y}{xT54u{+mj8|fpt}Z+_Y-pfXPtBNE~2~N_>LbM3-%u@*t8-PYN6bC+{!= z0CR@N9E0mwPOFnD6qF-$2^%~!9Y7GHZe5rS8_ZKPI}%u6 zD#}WXZdD&GY5v$hv=75?+3)tJT|WNoO>2Lk>6cG`1=7m!%l3aHtgzq`8DmBb^9%(c zayjMgel>o}-a6GS=h6H{_A8j?S(@U~`D2cG0^|A4_ zYR}s=_PNr2&R+)q0B#9Z>s9c#!_y+$dAC;nB=JtTiDf7R5h_6%NpW+xEx{rLEOJ8- zYY#>EQ}MUsXYC_r;a`it7c~C>62YbIxRO06ES9mx?#}NF42u+da5F5XNf?4xAxkH! 
z_=Co_{{R)Vc9Y@`vf726rLz5(!@6dnD@Afbsv^ClG6Z#85(JD?l~PsPCb(Y?_>Lb9 zc<)N^e}OcJ+f~%(iS*qmp^gYrPy-n!D2@BI05h|b{;gGVr)kZp-QAs3-59kc7yfV4 zvGz}hJT>913*iOiIvtI!l>%9;cYY$%p{^0ADrK4N}v__-R(;Vlo~ z_LbsUR^!B)jpekLws#M<1(5lgo~1B6IJdU9WsR8s0Is}J3kbrGZShC#ci}IF+V-39 zZ&zT_K=d2-_=PpDy%KiUWT)$z5ey4Z4n32~EK>+?|HR_+RkBWRH;a}R5K##||wXT<;`0ClTJFQmn#Bgc% z(lY&?S)AZ3UuZ;)-0+ETT>PqQguAoGt@g<|#2rAu3qOTqmk#RcOsWX$NMtKU#Iq*pFVC{(UfYg_ znkj}J85H4^dv{K7dRLEEC&Z~*oRg2OzV!b9 zOP!Q+*eL$;n@`C$pQBwpY<2!D_&coC7B3YbfGGC71*B2A~>5_mhOQpUY+7$Ts5x zu0b8MT~L=Uq*aGEx_r_5RQOlK@oHAu{{VILH_{QTlPc!p>)rPJ{kDS`$PEm!+tHa zy0^RV#jk|*6tPGxwvR7REb+0PXyy){SZMsPq>@XiPfrz!RdSg^PR~Q-@c0LW&!JWd z>s`CsxA|+oQ}-9)XN3MF*!XtG!(SKlEe}%h4TJ%$JXN7M5l+ksw!q4=Tb5jAG72)} z25e%yll}@(`#K+s{{RJK_^sjl-D^eFJRxs!KCfZn`?C<#ZPo|#;*cmT#7DW%t{9M1 zwjSu(edHNhQ|q*V^jW>Ydk`Kb`$&UAED_CTjX!r=Ipx-N|zS zy^WQh_?fmM30#nkpv=8g1gW{7ng!14!EwUN#UY>a1uE9fuSJL89mz7P1sYC0TZ z=H7d0ZMA(SNtq(lY~;ecF3ZY%qbnf!k(4mV+#BZUCv{MzN6#3@{{TNhUibSA*lD*~ z_3i$xGsL#@TeZBf?^#kn%tJ=VcV$57j&d+T03GT@a%y?hs&iCl?dR<2`%CB=-@}U- zwJ9z&Uk=|}MEXvvG=6R2wo<7avK#;|t)(7Ak;AgX^6rqA-uR2*E2*z0)(yUZ_BR$O zES5KE8r#b{n2c&zov|yaCNj*bK~!c&P+#VMglA3obg*kTdX1ENZn+vuuHG%Oz%+a3 zCNX@GD<~R$+WB$aDy|`yWy);|%O9ouMfkzuT@O{#^j{ZU+#d~VS1%TUuOq6-6z)8m ztpeq~P?xCX+SQ0FFlA6fZ4KtCE)_2wpY`|u09qs4jarncZ~cFleg6P6-Sizp#2Q|e z3=@1j)*W3$kc*8KuF4~F6gv&r9Pr$Op4g(i7sLMmv>nBkpXB&|;*HZ=d503|o>Wju z-5NxU;77QESri3UEC3kA71Q;4x6}IkNLh;Pm)w3|d}H|OG|*_)J`ww5HexB>>fLn$ zyC-&!Kf6MslBB6@vhQrMEJ%aHzaBLY2_oup>bm^!#HtKhMcvd77*!l$9IF5b+`F)H z4{==I?Q^1NdMC%v7JMSmbUBk$(Y!(vMZC4T5=Vb-(Ik-qkAS6>!z-!A7%5Yo)#R4` zC)Do{Ca94~9A6?bN3_L^sbd#_>u6FQ)BRJ_LuQKg?S&#JUa9eKwNXQ zJg6OIA21)iUNf9^$Bg(A>%juZe$rnI zv<(aGGwRy3a-w1@S#+4tM>JD&*t z)Ze!U!i`CFuN_UHc#B3ByKgNSVz$~NWBcTEi9*ZB1e9c80NazA*7%F@SKu%0AK=Ye z@4*_4wwTwNEXQ5&KD#Sh$n1#IJ6fDFnL$$`v`k4UwWG!{^G}F>Zoi3brqAJ@4CxwX zlcmXUWr{nECOfGBWD40bZiwzbHhjE~lw^e!L-n%qp`fV8vNR z6^1|{V~h-A0=0}eC3T_AR~qWk_Hum7&vg8r{{ZG-yY7J)5tYFwft>UGX>F(Sq;0n4 z&T^z42Li4woQuL32qyyparsox+Fi2b?8k6AewBAT_`%-dTg@X7WN5%V;d_2HW=SDX z36TQeo?58h+G)j!mi8#*{w1ZKcf= z+ID4oqhbJQBvZTY?Ie@*0=?Vz0sV%4B!1dI6C1a;wCuD10>QCH_bEP8D??;XFQSu zfE86o%jenZ%i{3#9qPaQm(BV1UpLwPrws7Z%p|dRrD`i>vzzkk%ia8zw`#}i@9+=d zx9r*Qr%(84@XNz7X_{7@3wdxxcS$_0_vV$(8d;o-w2jLgbWo}}{{V}gD9|kClUMOB zpn)yWl@d4sMh_!A1I1nV{rf`rd*S|(CB3xh(;y%&di_7fza@WfulOkbrK##So(1qm zvbTDPER)L{Zi2*j&u5d-dN1*MAB@ZLtWITHtG=zj^FI4B&hXhy4ry6iM3>-_J74%H zZ~PQPLGb;p=Yu{MYmF|oJMD!dKx2hSAk62m1MaW!G3S75^M2*8wTr2>Jxc0Ji;Ia` zS?8Iu(z_h95_*zOb5ELEtz!CJR`%{&n~B`K^UUPDjCzt!CalF}wN*kkSpGw~Kj)hL zH!r|Y%c$~FTJ?R;$n#9k6`W!{t#_i?bVhnW`|w;G6#d7VdmmspFZsORi_p@FyJds-N5XEfv=&`d20{;Lr8ZN)Bco)XHKaD&)c{Q%5rs{Cs z*xg#N5=AsJtcdB5LbzoFaCyfS{VsmZ9|Fh4uZ;Kq0JZnT9|(At!uqFCWr6+06MSlHHpvW+fHN+}am$^?@ zt+e|qr(WGXE_%;}G+)~XMwn~*R+;-hcz;oiNsbK{;l+P4~Zt0|H#Joy%7W%C4cv+I$B6m49i!ioXq; z#@`P<1bDkt(tJF-?lt(d85YSS4e}E_n^A*u0;@4#-7$=QwRji)2yOcg=(>bAzYuh< z6L|LOBL4s{iZ$kr*q2pT1`9QWGO!47e$2n8O#lBSAZ$;&8Z}-^v>iju~!BdR! 
z5=)t-W#`rD{%7Wg#P9ee-|cDev*N#j{6XU@ZNs>^YYlzMZFP1)ZJy>-dbDH>f#2!8I z9D1yJ?}KHIV3CHAV=7Jq6*$=LIOC1Lb>g_O_3PKID7_WV+_r@ZICxEJ?e}^fgQ5IX zwD@(a&Hb%!CDp8sq{df(zeD*|zZZVdx?hRylF!4?T-)UQzI@7Y>3{$j8Sm;ViM;W> z{{W7y5*>M#88MK?B~V)#^vSMj{{T$2(kCtU)m$(te(5>qKj+rCBTg;d?s`(3qO7(( z3&HopPMTdx>DlFAOl2ILDg1ajt+_l!rBA2NZF*Kn}qg@{`V{T6hJ^ui}{{Z^WX6`R&lCkL;Zlz~qDYUhnj40&A z6MfKn?aAru^yq6`_{nZye>?9Hf=hKh{{WEs*Uws)#q*`eUR#Z;oP)cZlg}NqkHa;S zd;3SOpKg*FT=|5A+J5)A&$UybDG67Kc0JR^zYy%~ZP-gVCty`VdJ=l`_zLpB4S3(e z-ZuEfrrCJD^BP61!%J{U7;zp61fIq4Ki~(~HRjsa#hpvXvaX+@+~!H&4%!sEmE>~b zYd%{e=70Dshl4%{$?-?R9ys_veKXu^8XWLz<=b(dTWM7ylpg0~ramw+3iq#%ZM-=m z%q?TKGPyi3WdTp2Ae?p12Q~V2{{RID{h9nH@yGUj@ehu?Ev6;5x$xTF*IQpEKlPeK zk~CM)kTH=MSgr>gc~ax=X1)OMR+pq-&a<|O4a64mY_pDecEz}V6XYmjPDlh}1BEB% zuWmIIXI~_RD{!_=fJ=%{bp@bWvo$&jm;f0py(VjAti}4G+X0 z7Cb%T8yWl;@jt{icb2Z=*6eE17Eu2HDOjRX&B)-AcK%te=`K7kBX02?w{E~T#f~GG z*BlkW8Ejx=a7I4sb*)bZY90;0y1SP|(*>QatPs!U`FOax-#W&u$Eu?OaJ+Un?OA-8CH;&0kP?Cx(z$E{ssj@2!$P1c_S6CO~NB!U8xpzX=d(VX*+ zufQJ@KWLq2#u`+**ToB85NX$UN^h3x$yQZ{BG(P%?IUcA44BR{^JHLPL*dI)JXcy3 z)u~G{0ghZ8G%_i089@gmLn&d&$%q`}*HPea499EYODN{DSdu8k$~%E-g`4Rcn-xKfq3vFcIEsm`oi*6nqD8S2`d<-9P*eQJw7m3+C5bRmMq;~)?- zz|Ie*J&kfxUcQZ}FNHOGh8DL?jV9%}*udmM-s1qP9=O}-PrKALUj@9^cJ^;`rAox= zT3Rp7Zy`L*0PB;U_&sI_aI3R=63i(AI zUqkNHxt5!*pV#z`PWxKl;r(*bE3}py8^)H|qi%^H49kx}Ch?Bh71bHpu2T6h$#HIy zA9P}qIYT+*5tNjV~39`o+Dn}o$^{STe2@n~G zEMW>%VC1W9=uf3h6yXT#OcPFCFHl{jA3Cbzbp z*G;s`1TzE!ed06hyj3}@&mZd7d+YC#I6szw5gMn3-l0APChVxZLg zRioQzC;k!iX=IXLDmf)W4D$2H2auza>^U_{#k$GU^mVqikgWI9i(7rYdhTLAiV4rs zx#%x#-ui7uNdnqwkeK0ZoCCp;$Eha(4&(veoM7ax6)Q###)iM)g}Kt>u>nj%U@jzn zlFGw&(duvs$3VQC3|BGXe~f<>KV?sfiSa|=r;X;(^?$IKU@=7-q-&fzg^^en+nuG9 z^(Sb+19lBdSJssoW18O8V<8&jZsMfms`mh%OB@sV0p{NqNtXUyO4+wVYy?1L;Ii^h z9lDJ5uR{Rg^@zf1>#BPmHfW^quBA5Ax^!n@;~$KF6FxF{o^KWWSMm0*;%!#+2}Y}} z-@VMBg~EW$NkOy$wQ!?oz!e6csY!Es6`4oc;~`jcwnZm{&(|FBkEMCfhOhqs(3;BH z>C}z47KHJ@JrB4ek@c>&*8JQJ(6b?#eEW7@0FZs<&usL^*1p~`jAD{jN5QJDZMN>s zKZ<$}{3X8*^=%ApGD~c3?1L}_NPxP4Ipmae$NN?C`bRa^i$0^hN7>{KMtM8QUI(Bf zpZA4*ccfid>XvX`7#1~@WD;_7oIXkKoDw_p>t7;xqQd*a`rfPIdCL6Yi^2MZ{+nxSG`HG`g)MEP3Phz?k^y&A z2b=)P!{+D+1bCK*r#`22){C5&%P7ZE2OqB$>3#|DqUhE(THd7+%WETSnpl@6AjAOK z>GH53p2oPbEpk{`D7QXi^w0Ki@aKs9WqD=duZv$0bZ-M`ma!3erbjy8++Kw$3~OkR zrIZ_h3hc~xDPe+1$Es?77ykfcO%LJDyYScHJRTtN^@W9i{@T$lqie{otbp2K^Bsaq zBuRyhgBOeGQ0M;r71*+HtpIMLCI4aLJV=KyDO<8wF)b6Rh=m zF5lPcdQ+;Xt3iF|*ZvmxmEa9O!v@!v+*KV=ZRyuyH*U0LRcc&=RnM9)j*`l_# zmvS>4xNPJcaQfzxl-m^Q%tSsShK|)pKkxu1QMptYj zZcTcBOYks+=BnO0t^WY8neuLftn1zh(QftKKUki`M`+C)hTTk3$%0WGN!rAm0L!(3 z!NJBVhJ)dMkH55zuy4FS;LjEK(@zXU&E zkJyLcjiMihU$Zxb?4CIYI=-u9b!*u$-R8bBoa6(Q!1MrdUei9O29s<3nWx`RYi$_+ z099F5wo$0|IGdz?dVDJX0E1fX+f}o?_|5xez}85?uk_Ccct&47 zRR@%g;_^fhla3t1`0KFrKV^6u{s^)E00hMNWej6X{gZUPUU<|Yx$$lxNk(Ln$15xCf4LSW$St_N27& z40=|brfE8y4|1?I#r(|=%t_!CR$|x$;X`}iRMnIyQ*|p%MK12fm}*tARaGh!+@$&? 
zf9ZLfS}*Lg`!IYmkuH80{>^>^@YL|&MK+D#ojMsq4@m?*YaUM}iRZ0rrlsQps5X(M zc#`5pB&?0OfQ0`5bOC_J)bw75CyLo$33#7Wfgtfbkz0i!u2R|}pfMc_WS)bd#zr!E z#V)-!hkOI!me-_~AF@Lcc?$fv13x!C#yVHX=A20>LD9f({LlXYZBJjD`7Wp3 zr^6o&&wZ-+hTg*37>Rl3l~iy$XQ}C2myLgF{{V;|7OiE_e`T+RTCa`nM208SEC7p3 zwv(arZ(k8{&O#S%+d-}$!@uxPZ;!q|)FkmA?Mbd^_s?|4%+oBC%L>Lp%sFiQtNr1I zCz5y@e7vuRaTuBVO7}_X+ui=ZHhY=Z72xnO_E>vLOGK8-?);bFerW#OKk!n|75rq> z?))Y2f?K;iA>E^&T_KWtk(@Au92nH}E=J?nFMpG5S!5e7RN#LMoqFWwJ^8QbLrebv zf>?gXHt-`{djs=5CFfS+pn>KIBGT1uTIWPV$V%&}PN?yYxs_%B27cF#f6rNq!h zgX)rzf%FEf%WQ1oG6(}GAQcaTlhYaF^%eTD@xT5EbNd`?-Wi_v;J=N$P2mkrEy5{o z^)KyPBH(USc$pGV7z2h`GBJ!dL;3mpMSj9Rv^VV5^J?BK&`th{8+@K9@Z8OLVUD26 zLWwQO2c*cJ%+;)^&1=wzSEnr&hmBl#g2vs-J(bWP;1)muADuF7ZB_v`oo@1y4oZ)^ z>~YAYk#3ava^bfqDpR;W#kBB2^&M)B)DlB6k~l50I4lbx`VpK`(5XskXnKF_CohQg zZ`+UIL+CepBpQrZ$%;FG||C;46QuDN%i_^QXmv0T~t`eoc#P<^T$@jQ{pvMQAW<^+HU z?gevr~T5KOekHqG)P0Ym~n5noqhbDc~x`;&Rw&Hxe2shzhHM zLpC=2cNbx5&C;zEqSNO80MGi|`@RIlQst{bzk2-Mt3>|*tM&3fM|?jMYL;+cTicNv zpl#PD4YZC9Gg|X&9v#qa-fO$aiB%azBOv;HeXH|J#ozE)t$yC-Eid7x!fzC7x?AK~ zT+XprMRGQm51HIZQv<0{kVma}=Z^mX;HMw8_r(1#Qn~P-gLID#+1Xr414(hIwab|h z4cHMN2LzGJ43K>*p#Be~o_>z9snL`@`Q6yk&K+NvG;^MGyoCH!G0LsG|h%LC$!{2EK3c z{IY3(6K)(547SkADiMIk<{OASX9FLBufNIoYFIj6-gQ0cEtbhYxcu8a;#xS#{Z^GG zqX)Zsw!J+3+4}AO00kWVwfq75O@7RO0)J^;J72Jm#ySs&Ch%XxjS5TX{{XZ!>)TSS zvq=~$AdT54A;1be;4*Vx8L#aR;$2Gb?EW6{MDb$+;iR;ZaG;ILxVB1yFnBzS9-PGF*F3FRtEV=dc^{ZSdx%(@b~rya zQONoX_OEVPTqOCWBj;n8&I^_?{7+}qK0IAn&-Tr8;_jOp9Bqy%!V~H8lb=(Xq`$S- zhHW3qo5og?GLx5%~aHUFuD$h738aMNx?`L z0L<<8KTgeQ@1(GwwfPADLpeO`-9=rktX&knScGZyc zI`Zsm-u}_n_mG`eSkm=5pi*|&TH6$9Tl=pgU?9#jv@xtLPvS?6yil@hy3dEM3@D>z zoD+|;+r^){6+UMpsodBk^zB(1evcCPL`upJoe+^#`5~7eoP9r0U5CO;SP%B1!g6A@ zVdRAYPfuPj!xhkJ+Mf)jyBhkJm_&N<<>C)Z@g-jk&;LOT$axub= zK|M$wnXh)V&^%q?Td`?%sA4Bfvlhzj&j2ns0|%!aE9V~$_|M|E?CAOyy{UL@F8n#I z>9JnuI_8SoqG%p9VvhrDWONJz1uO;$99P?avv-NS7xCXkvDMn;FQ&DDO!3VdMkfH0 zszx%|z#lIJ^sWU5Ptf$GRaHy5(EiUp1kt~>HM`GthB75ppsx8i?;TAraOZwc$Sx;(Pu4(8rxi)x&Fp@ITP1B10j!Q_@m+rcqRzMw%QcKQr8J9Y7_pJvi-Eye&24mOp8P zG`7znHn&dsXJWhENf{)90UQDH@z)iX;VZk_KNeYd{>>R<)3qrq^+@`%!pzxh550tP zI^>lHHFZ7sG*uPJS`z600O6nAsqH=w)lH_esA<}b;E>$e+skStn`n8h+3nk`Q>ku987b{C9-DO7KHJhxt}Bt&)2Fw4m*N#eEZOIh_r zp4lV0^5vCoZ6lCIZX-1T2B;t;xiwR|sRjY=)d zw&i3a(6A?W;yTwYCx$f5A6Ay;*4FagJEq+&yjKcs=4|6}Qa2=nuI0h^Jl9`o;h!I^ zn6bUlbqm#vb8mIEl3kg`)+6O&J;2T@BgEFXnkBP;ZN;hD#4)+1wZQfJ#WCNleC%6` zvGt8Ya9&4>{9PU<@YI@}g~yC!)O6{zTd%Xhdk^|tMbKePi-nOC0yBkd;~@2~Rq&Rr zbEo)H!NM!Wa5plpC-Gz15{seA$_+4?6v(sOOK{A)h#`XzM<9X^W5DTL(P^HGO*1|n8(P)%2Glh#4_w=bvSPQA79boA z#~@>|$G0_q?K@WtfU!7LehB$&ms`?rWwHL=pKEfjA|!wwPbJ1{PRGSQFN)bN z^-G&z4oo|321v#?5J?&QD&D3ychswM3&A(rXFGPp*i+G4IQ~HWGg%%Pnmg@o_Bn18 zu#P7U##;>JjQVFErD$uOIq?O#GDSVBq>en*dsP`Kfuv z+bX*dd$H?^v7+wRC$7YjY%Pg`;#2M$?@*yXvP)iVhiI+ciseCPx*Ic-hT=Axv zU@dgio>{E!?7=}Pm7Resals7SgOH#exvUf7-B;qDh$k9mt$U^DA&geZr&|FL3)Qzb z8`$UO0gf@q&2}hRS#;`oZm%@CrPb~IblT?kjs7rrcf;|uo8J|8B~~buw60H}0FrtE z+t$4A;vb5&>+JP(e7Kll8b^!UA$08)bIVUW0$G2M-hddMEDV}{7MoBJ? zOWn;Wl0tn;Hco#o^~`wdUeqVj?u;c2*Jy3+@yl*ta&zbbCbgp)P?~U8^)bX$t5Ta& zeKud`{dwHv>@R21uHzAJk&%RuNW2z1bGJT>pr5EdwcF`8!s6OlG~mhVT1W}ukcLtZsQ~r zoMtP(abQPI7+n+fI}smm@k~3f-9W z<+IggE)Wl#aPJm#u$<=r^NxKDKVSHX;dy0`PWxT+T&jRzu!UqZ{J7dtm6x$O$2i7n zj+vue>+t2vTg4&~;9vJpq9Ob%WB#iO~ z)5UstYHl!BIqOdD$jsBcE390=Zu~W@c$Yp!zL|J|ytpqwEYV7Z zl(b?OD$b!kaoF z*ZeEuJxV)k)LCuy4LayBbzzXjNaP+pPtv>|*Tt7QJh1#x@PtcsYK&W3(YXy2n}Csm zH!8;jkbzUhdPNN!9ljJXKd z?WY9JDb1eGdGR~O@IfGs-$n6duHT;n?6*ogox^gI?%PLg)qyw0JqlZ|vdQBuPr|p1 z36D?lRj=+P80A(*%Cd}}QI8el{{R{MX{6cR$)xxTQP88+Vi8YhZ1sD1bs50rq!E%! 
zLfj|}q_;ax04K^~{iWcyzmq}uv!dMHz`K?1Jl5TA#BDi8EX{xbIc(qogNo>k5?UBJ zH=*^nfW8cPBjZQJwY$Hyw1DZL-0Hp}@Rqz`D;faouOwwUPvW(WyZC^^rrJjypcd{_m06_npkPT6#pv@uS(9Nk28I5DK!}W7Pi;B>vG!sP4V^5hW;N95%?-yKU49?i1hP4jm6|l(J$C; zFDAAoGRRDRLPwNlQgT5cgIq4H@S|P*pgeP{Y9AGRQFUkF{Wnl+J5LE|aw?5RNPt<5 z+~*~tGHwDS+qqQkat_*;#r`f9y8eaY4-$BWTkQkDT6A+3@!LcAU*Q`)BgK{)jf|UGYy10{gs>ZjVEz%TGfU%99T7U-t1p^hzc%S1}gtRduc#BEBu!yD}d#DWXVD$a%t4I_8 ze~1CkYVaL<_L{ne*>xQwTARq20~nf7Eu5Jg46fntjmmlC3gY!&+NR#dQzwe_L8TW5 z7ur0eMB^D_hB+T72WUR^_Rzz=r{;351#W4Q>X!v(pwAJiEz&d*^*iM{& z?1>cr0K7YNtS=JygHG0wm&3j-G9r*BiY-8$hqy6;yFIXdE92i8c*o(Ni7%H~@c#gb zua{6RqC4#p7lP$-SCb-zQa)kBG04vq=AJe9)A55$SG?49X*Dqz#g3~0*{IvG1fMk9 zhDT5U`@}KMI`LgG!>b(brn;X-_{Zbth`bfATw7e0zqbt{No%!ZlrHQQ0D?%yG5kX# zp~Y~XF8HnCsb!DEnjQ7)$9kmNU0XAGsB(A4;EVO-8?cDY$#3Zw0+!z zkarvt=~_j`3r)#s$(8>AZ2tg-8V0LlCHH{uB(iT0S~c5vZtqQo-Buf^w>sNAU^6&& zGKd>?)RdGepe1~nsJzfAi-4miBy-ljvhg;ndEs~#+g7=``(~$Zf_S21HQdNpg^7_r zd6Ge49@Qi;Y!zTnnD{^T9{r`hI7@NkjVIy!nn#DEx@K)xU2$y;T}T8FS({)ag;EAN zBE0JU&JyNs*E{Gc^<&8t+2uM*S;cY zHy2uFm@iz(rV$kD8YJ6P$ozSVLMg#!X%y`MsBdn)@MekNj|}M=NznBDKU?ty)$OD| z+7{^{l-q`t)RG8|m2%9)fL9-PajpqgrBQO*nOyfNU~v>D%%tGdSDWo+XV=v6U)jg@ z;_yD1Z9R|eLvUlB(@2eUd!08#k^a`eVb#QYiJ0w0Q-H0w^f|AlJbUABgr5LEXH6f% z-aOKqPQSZoC)FN!u<0by3z?MTmXKtiQJgXHRFDoiJ4QU5cMd|nV;^LkVD*!}o=Gpq=6l&+8C1h$)B41e>a8NI?(eH|ceT1J zS?Tj3wDD-Zxc<=cgi>F;_YPe`uxEC1GI8dM@ZwdfoF(`A}tz#rA|u{I&qx)AEjTMCGF@-OE{1T3++G` zd*Ee=?s0>kU!MKYoi>LOomk>9mW}l`=InS(P(-~dBae4!0Jl? zdW`;kxb>rPmSNLk(EK@js9VRfWw%v?vB60kg;vL}&A_c)GCd2!S~OQW#E%`W?=CPy zd6yC&iXcd3RXHrg{LD$o7^}UsTZyZ!b{lDRtoWApO>_%Q5rFhiD&pa9NlU(2VEb73cBH zB)+)3ibs%L-AOrfoRG|R0zEKmZr@Y4w6%&EWhwSk=PSfMaK!Bl25?9m`+{qhUh+0R z=>41S^dH(Q_HTdL`^6f$(Qf=bb!z%1xdeON7c#Pvna%)dVr`^uR#HA?8R~o;`y&3; z{{Rj3WlsovMmG9Q*$CRcmeEY6GtVK$V>rqAlNs+{BK$J=#o{l8c2W4Y(@wdP=HlCA z@mec0XL^NT0ptUNp1V#(d-uWb_$gP#KluioBU+!xjZDjLrzl{D+791;l~ zE6uA18jjmD(xWvxX}!_*hr-VeNAUYXgTvE7d3AG_p8D$MefKwW$is89B7NVNfpV^ymRW0@lL~LV_u6C(8+nuKz9R7&>BK@vD z8~k&%T_)~hVXO=rf9(|VOs|8LzyRYsaxzbBn*7uKx%^S3=%2TbkF^`CU?I2h1Ssm# zg=rAkTZV;!>5?(IfeLVVIRp+Pic6zBqu}w>?tCw)cu!GB4{3BFlG%_6Bl(HQ z&pB372*~+M4r^QCK9GDj;(PrcQ@2@QyO1NXU>65FSPX#21YrLFv!7gcUl-rQHoD!X z#$SgTLrV^+Xjfm2WmgX<3%oc}wH>g8vWyl4a863-x4su`9^Y5dRy(_kTc0v{r3@NK zmAM4~7%Fj<8OU&3i5nX)-bd6qy^P~+d4I3R^E7Y%8%UB{Xtw&9iJj8e$npkL0y8qm zBsj~($xwQu52bx4`y_l0@n7v%@nZYoSHa0_Ai2{mpp#p_zrG?H7^E(+tgMR6?!_Fe zD7gVk9iW1KXVP`=2x~SPr-Xc4s6{+ZlBm;e1GK0ax1b1D<^Y@m*gY32RZV^I{{RN# zcn?qTt*69Ye$vrlH-09ufka~p7CTmO0X*X~k&aGl=d-)e!#TIA*?P69_1PU)UlSNX zX!m#8$=O@f`a{Ft@IYUP`o5mC-Tu_x7lvY)2_J-Rqu%{rgQZ{b`|#!vVnPwl1f z%Se}1_;cgW3V6aO)aJ`g)u+?$Tn@O3@qDHyo;=a`*Xpl?^&Hxx1|#Je2iMlBd`Z`q z+eVlZoW$Rqe3Ws1<@%pWmM7ct-%mt;D&Mq+!rvC${9x8}ZF@$aLDalIFtog|dr@sI zwaS9h?~DKzOqC2k`GSLzFizLN&x8jrJXEcLn7XsJY9 zS{dYp^$j`@_ZH+489;ZAcHn2PrEs1h@xO~S`OV|llL){vNUQ)IIUscH?TYkxH0!%5 z66R?4<}H^op~(x>5$Z>`HCx4x1X>J`Nwwa1!l-U{1A(}M+aJ=LCX=*K zufMkR+j;Dl{yt*LVWb?xv)Eo*NT%Pt95jb&Zf9r(%mfn6_!hHRm< zjJ9Dwqn=02pTe;|IL`?;f5KY}ehCup|Tq5P}pH?xj%;P*ZF#dM;=<4htlf=GEG2n2)b z0T~@T8rH0!s&dCYnkT!yk8DZHZ%k*7x$oceuRie%_ZsAl zsH`R!OL-c40gQ&u=?ES6gO5RvTJ$Y`Xzr|<)_9sITbT$v1qV3KYUAQbHOv11x5EzG zJ8Z{p2S+%=F&>;?fA#2tXs#J2@NT!_7_B1F^vjExgWKGMFJnVs$C&L0%O6l=Y>mz81~ZRb zSD!pHoUF~^RZ@B#mYH!I?A2c1%807|=l~b*CvGwdk_TRg)~@Q_G}3g`j8CX0z@ffp zQWRqabHO>_;GCX$&MVB}@kfllBh1j~Rst(zVhzi#6aae>%N%2lgB`1XOZaJ}+bT(~ zOD>~09d8_!81zhFemOloYqmNhk%u=ln~AVKH+WzYEp4vnT>bkjYKqxC4yDG zSrE+%@oOU}W-wobYj- zr0{vJSI7A9y_Tcl9~x;lcRmi+%G?{h7FJob%Yn>ya2|ZQP@TYxh94^{D=d9><=%kLO-r;9nWt&!*Ve{E1Rf{RbGBt%4Z#@vz#M>8wwz=P5Nqsj9QZt1 z_?FCz>$n(AGO!67ESZcFSSe7zs4KT?23^_Po_-4by?hy`_*dgnY8pD(+>2XaW 
z*jtFA^VGC%LM{Rl6&Qd3`SaeLM<^?zB9nG!)}QcCZ;t*8*1TcjTaN;2cP)Eqr`X)H zO=smSN@sL^fVnNhsA2+xkPWd8)=$-{*!ayyF+hxdTw<#vqB_UnL{neAci9y z85vnlcJ(#*xBmbH(DA$;75%1dbSc9KyhozmX{0OgWC|`KBzO7XQU3q}uh&n7{xEBA z1Zlq!Y~Ah?LzrovCAL$O_IATTagSj5KEV1{D_rj9RAk{BC#U=q)wMr|{x9%OjTXD` z6H~F)G?^r6qtb4Q+szXk?l4}!iz2gf3fwHp!y&k?Cs*;`f_3Q*hw&4_PowLeS_tjD zO9;1(%N!|7h=I42a(K$F?xlK)?Bekjx#3l|@o$1IqSQ2-v<)Sxlw;<|Bpx}z2R(XK zPZ#_^9w^d{#h-%XynA&5>rJ~mSRuyX0R%P(Bolx~rF2nN(8i;0R(^kcP56W3UxU9D zwNDBBHNU;kwJ5J{R?ov8GPjZmH5*HLOEt_gMoqoUY^;m{+IF5d?O-!6KWfj1T9p1@ z#}9^D--7SoIM#eEYZR7pHbD$g7kJ%)xT|1{@H2t*C+#DxSoniVYfWpz^Gl)4c_hl| zVG^ARbXbB2_Mb3HtXaX^hE`=HsTJ}+#jlKh2KXo8TZ^4hZFPN0>AZ^{2|FoqCO3J# z*=>rEBEC5e6nxvZ^f@T%*@{=uBXsyT;FrW(kG6b2`1@(9NZ>lhcc`=zWMdf)-Eap_ zO2P3j!~Xz?J_JLk!|{_$(yd`7)wK;~BM=zKY_>9Ulg3X<`D@2MCGlUx>!Uor71MNh zSe?*lntnG7^e*mP{TBn;zM%btf8d$E7WnU`LGgR`kG*{#!B2W??8D`?>(u3s^o}0$;^)K-PtBXddX}5xmRq3!Yb`0* z8C$oQ+fyf#jFvq@kBpzsCan60e^zLf`utkqz%gW7BMO>xm$^ype|BBM?fg`$h|8hyY-P0=&6qGOFyZ zk8YMr>?6-nEjz#R-|s$iwEc_xFE5DnzlGlo;~pB&Zy9CRbo~y=j-#tvrW$s+y^#5v z91Xr&l~5ZjP6jmp0Bns$FNPWnce*;qat@1eVWVgUQXsIE3CkpS#$$~Gw;wEOi+~T! z)chLId?Wi6_$SA|6n;ARQ&h9@MdXWVp*M)5WofKRD=aqi7m&*&lJ1gaT$NZ56N=^h zGw@&H5A7KSzvEvRS}v7w;vFPu-W`Hi8d)^0If_dw3}h^_B!Srd<}w8&gaWc@I-Jks zcS0Q4Wf;Eg+hmr@=K35yoBKiOAGEi_-7ofMU$N=BmaD4UO>?Mf7%>Q=Xf8s-kC|qR z?2*Ppe5|V2Icy(v{iavLKeM0gr{ixMT3AJShtVU>9Ns)Om0MRVlE2|&@P+PKEk&j5bCYp2t<%*%;( zd0-H|$ZX{F_8_UL0nISwCL{jyP57G%r-ZsF%ym3qz4M8`@jLXpP=uG^bZnCZE1Hs z#BjzLV6g6VMWW8p6{w(;Ttg7|BqK z$h(LDg(H#g-`^F}_fDv~l8i72W&k~rXJ>x#JUOW5YMJNfk=h%pCb6e{!qMcRGfH>nA^aIy5=6)g9KjALc#1PM$C`Mg|Ldr)ZkM>45 zs{SDHoZ7*b!%H4q5^+7qTr`6mfO`YZd!Cgpi)$1pjt~PU03YrQ5anvq$c=jfcmy9o z$*0-D4vC{GO&Ig$RU3H9k(_bsgZPt9EN8xc$rm0J)xIg{J{a+5fb_o`>GR6AGWcgy z@j%g*_{$NFIECvnt)2>dtkcg8;->3<3Q8{wbr zO>0`XYj|QuSkfy~8Eb}>+vW060cU36Kr9a80sS2CukBs&%ft3w75$RDBja1IiI;v9 ziHi7x#$V}QVm?YoH`$(bX>F|)Kr0(D7-lC0_JTQl@s(FZy8i%>rH_sd4Q~5?nX~&g zd_4G}@gq#sCHRN?JNPq7)vfIWw%3iTCB4H=LE5I;HFE$zcolKAhBlQr9~t~({{Vtk z{{X>5{y2P8*M2eUKMU5+QSnEJX1IG}uUN>IQM+vn5lS)gbx{?WJWM?DEkOkkB zIM@Ra0nbG^&TH6)NrZyYyZW9}&T&;Gs#ZTEyjA}I1a|$ZyaV9xa01FG*}Qz>F3PG-q}d z94X}R1~-qGDgCFo@ou*Ac{lqm1;T*`lEqN%P0g`b=YYTxK*kS#o=Ndn;XlG}j263m zN#RUghT9GNBkZ#S#uT`1%rTIm#?s4x2U_99^876E9=AEO9xS%7X;Tdin#jg*qlIY|Rb8>3OLQk5mHJQr00a=xWAJCg zZxLw5?rER?6B%Adxr{3eTU)BA!6ASHgP!KRui`K4SE)m$+-p7pnr&(uMlrHmYV%rL zM(Q#aNQ|3ZfFm58-9~y>r+>jJbxlXX)|$STb}odG_>y?8W4eu!NTop#Wne%cjFE$m zfcLM>bILgSSXoBATwAvNetUi=>zNf?w!R`S3pFb)oiyz2Z@B$@@HGBqf)?aiiP_q-7Gw?i~@ZNcqU;xHuxSyf5)D;g^Q=@u2vx;pVB~mnZ#B zsSU`ta!5QhjfEVX07wIY#dJTlxBL_P_KN+8ej&r~)5Kp8?zMe3Pc~l&>e1Y-qH1=^ zLa~M9MpT(WQ7yAK%1CUovbplhUlM#v@YTy{U+}Yam?dGBu+^;Nl!54CY^=-g6F2*{ z_cWo0tw}m^O+Kl$`aj6|nw7B>s{ZmVKFufhJFmg{o{jc!jNT*gKZkUkVkwg@EpH@^ z@y;+qGO+r9ezlo%@v?spxq|-y;a;_GF5*0?Cfyl6%&0&f#EQbuekA-D)$WqV#U2~6 zu+)ePCEb({rx%b9nSertcM*^|z&-1&Yeai*wRl6uo+M-${mgobT!ruVXF|k(z`q)a zLy_8P{Qm&2(AqU!MS4H$6@Fz2UYRcjWm`i2gGO>92oU|5=alwyk&-Z_Qhsu9w7Le z<7)@gr|~qF_-w^>;q68zE$ryqWXIDS)v8zcY?sW3SucsoPv!a@wcBZa3)3WaxJ7{# zn&RS1g$%glbps=y%JpaL%aGXPh2JJpj*hQ|kW! z3%(}mnwW>gTBYWpZVp4aY!U7c-9P<)Yo&+bJ->>zPY-D(-R-Wfb-ODD*yRZlOo-f! 
za8#a{AlGdO(xUF=d;SNLi>qG|3q~#}ekY;$Q$z68#+w63HOSdO6M0vWGUuk^tbk{y zLzCXNu5A1}Z4($ZsnFx)YugRnb|=ezzPxv@*x!R6@J%1sOIYwdy}yXRW*-gC#x8C% zbri?_t?4qK*14aJ-|$E8*cR&6Yo8AIU&THgmtiV&pBPOfX~uKq-7KJw#=J@96cn!` z+J+LXlw-O0&8~P#`^2-DHGdcB;>;c1Ob|>7KH#o#$o(pxi7vby;2j1_`-|fwR?0tn zBs(KUJOxzAGCAz01G&f*>0j_s?}YOH$6A%Gx5KX;e`~?0UHKNVYPxKGdSOczB?wml z4!cJ^tK~BJYg1gqZsf-X0zx=BDu@8c=uR`+HS{@l4ta$&QZL;6Pn*rN z2xXOJMW*UoE6V9m4;a6;{D90EZiux-}i zr&!Dkk%hqw02^>r3Mh14+^#?oX^Nhh!eA5MQ$P{izQ@hdWv zax!-0kMCq;VD;nluco1er-hB7)cIQ2>iEegRqe9qzFvJzO)UqOvR=&6{DL=9LWclg zWDZ*y?oMleOVmc6G;eDb(u4&F00ef($3h3wkzBQn=HdcB9PU28+5Kx`(s*Nl;zj5I z%VTe%llgJ}$raLP2hi%Zs|WG+mi`}U7A&}uH;e`d*gR)}&T-eA*8=e9-W9r&MAIJP z=Kg0fc_nbj-46qx9Z5Yl=e>10POo_ly1Z?$sK#8CA$Y*!zia(jyPy0F%Ms;YM@U*0VHvkZ5+Y=+;r$S?SS6k)o(b*SA6m#&g&FgN+EU zqHsLe_A;F1hy#vzJc0Gc9G*!O+g*E0)CZS$Y2-+N@BtX>+o3qe9-U1~bzq2|ag4H) z^16UVbKLQf->}H;7J`O@+wF*ue677Uqh2 z4#@e@dXKGr)&BqltoV_2s(d-|Kf@XB51FE0MX6Y+W|d`7(wU0FNeM{k6ZBV zy`&6f5u*UiFiMm6a5>22asL3;uOax|XQ+68#Zma5O!IuHBsgf;{J8b!uLF_kQ`mU9 zJQd>9)K*yqjr59JaR5az!8!B{3C??udh0$WX%_w<(l9}GxQ}{CzW_O0=RLX}ykn+o zvYJmuWj5of#B0A1WbrqLEN`SmySYonj7=)UB#b(`DuT=j8;(d-C5Xceay~Wvx-|H_ zNuaNZAx3efY9vE+z~*SAJ0xOHT*wsXp~t;@?e4Mv00^|#5bIII7S8H9ZT`{}h{`Zk zWhXmNBxLpD9eCkCZK<^VOW;M+_R%Yu!UcHa3aBDiLeYL8X`zhzE~y3LC^+5axN^Tj0UwQi)&9$W5VS2HQMA=Ar4#sf#O?i=e9tCO zB%n=aG3|^>S3ZsGEAZtd@}vhD!NJdQU!~vhOmB$R-xhyo>z{|()xnl+CfRJ5@7h3# zkCMP=kKH_LpIFbOb3=O@r=j;>g;MX}$Bi#O6nL89q|vW~&7xek7+{?Fc{vJK2abee z>sS0%mUd|Xjn*0b@{wJjz`-MEI32+UJvgpU!u}(=`0em|PYdeP6XMSYvczWS(`_HR zn0IA$7$>u1wMF6Y9*-0FNxV0##j^1RKYBnp1g-{o9E^YZtEM_xnai;Uiel6)b+w;X z@IbtK`4TNftzh#8-zHz*O4}6xOx481=mRM$v zRb1p`Du;42%7z@*#eeWtol8%%_;uj#6T(jDd_^Kc(%2aSm$s0VVEaZ_VBnTq9N^Qg zxv4QwY89g{mpp?;@E^nB;N45&kH$ISm&0BkvPtFBG)t9+dzO*RajMG_4W*pz`GHmn z+b|Tm`d`D(+E!nP9}N68r+h!~#jG?(S=Z|paJt~{?Fb$*Zc!@ zbEN7*^2To_Rncx_RzVHJBP&J- zSlLTG%f+Z(c#i7o>rlUu_}nIEY{ozVLo*K8Q}Wl%+6RhtZBt6pwBHNr(ZusW-b`^v zxu@R1q=#z!!Ge-kjFue$UAaFQzA9=T2fRaT;q4n+ySmpk7~_{x(&E%aaN0q$C`_@i zaWiKs0aR533cv!<7{U(Ab6M5IVpEixX?@;^`r7xyJ`C|@gCO{W@T$jJ)FI4wI?cY3 z9ERPl!|}X%+S2#|(O-MR9F*xdbX*5UM2PkDG8{a9LS% zUrPKU_-1}2Xu6lgPlf&^xK9wAkg1k1UN`eC?AL|_0z)y|jPkh8 zQ}7S`6pv8<0EFYmx;C7;q?1D%YPSm>a164^2_%l02&g_@$OU~*`*Z%#o*MW)tbf8c zeW^`v9m-lsV-@Qc5k~88G8ZaCBe$01`=boxfZqsy&>s-t*Zf2O00}bQTu&TLD}SZL zP=sbw!i;uwlyxNS+x^V^$j9Tx zYAbud*G(cyfwHK%0OaHkKmi>353gFO;x85JULu%jdQO^DmnY1XlmJOM$Kj8swP)D) zBgXOC$tr7h>9!<09XAuSo&f;laDT|JUheuIHPcf=Q_=qbwlzeB!*4R?V0kT$m>%R~ z9+|BDKf@A7cosODK4D&Qv}cjqr}^z&CxGI*lfw48$BUtjC6(kbn_>$X_Z0=$eb7i^ zGueReT@{v>qu*MK>wCDPLQYkBf!_nUBRqEJH2G5}n#VP#+KG!GAG^=Zk&vSp=j)7r zz(}nKj? 
z^BGvV>6RpxX7m{(`=0)s?~Ob_n&tG`b&TR@qkst~jN~~`G8eEJ&m*a*JX@~EVWP*Q z>EwxSmp3I({IVSf2l$9N9*gf-I%Ud71~NBvJoOmGan5gJpt)fhyMT%@kOGl_YoV}< zU@?ZMls3nk9w_o zw@avfzVc|^Ey*mNO~ObMBjr}uTVVupt%lD8X1dJ-;V;H5LN~I5Ky5u$kD4y-OkPmO z-gyGHae{ETQ_gc(};rb(}$AV%Kpwx@WcC2oZwj zIT)~I#2qpPeS`2<;J%me)58}bC0Et;lN|bvs_pyR#sGk-4>8-TebqgF^X_gJ&%b3K z*vrDdvqaKL*ROr5U&6B9>TMpynOS#58YaYL8(fy$s>{li9V_0oIN-dW8by!jf(v%G#LOFf$}&j`Im>4qe(vs>HG;a;r-rq8FM)As zXJs|R6~kqhaZ+;32*_qUb_C=K<$N8g>bBMvI8TR+s5VXB{zCQUAS1vF4>eC0 zY(>=eswD`~2*lABRd9{`ETB5Csm4JB*H7Y4A9!EGT3lN0tE$@R)~$F%Wv2fCSnkas zPzTJQh8=VB5!ak?pAqz3GsQYe__M)2L6adGD>m~yXHlzqcHC=pBv!6Ax>@pBn=w{ETz?)0)M=+##u&w z>x$mfJZYkM^WcoX4)vMlN#NaXvmjS!gCE_<4^=ARdS`CHKsc`hG{D0!!cK)wzk}%i z0IrAAMJyU`+&#M%rL687j*S5{?-2gvuA?8XupM*{{RkrZ>M>d zwnpQ}{3O3sv|-@qa|o^#{`*cH3A_ zmvaMT$8`!gB&3Qm2UTFoPUSWI3`Q+ATOdlSDO{YMNBQEuKK}rMoBqIhXYDoc@B4qk zJ{Z%kJZa(hq?f~UUdE~}RQ&Q-$tf8~*?|&R>O~6?kGdV>P~h~#D(1f4 zok%#^f1SV0KkHNED^jV?%}-C2*XEjjpK(S10BBDG#Kt{)Q@_>_pyD46Dv)v1uzjbF z4hAb8{{Z5T?D?vnIlMcmSxR{i7M#jgAMPG&o7ZgM(KT5n@h9y5uz*OG>qn8CM0xql zak*7J4lo6BI=#L1jl6o!jpAEt`xY4@hpy|=$bf~2+OSeV?pT5CR=;FF7I>TE$L+iD z$HLclGYb*pOFNXa-Z6+@&n(0Nk_36@ILCVNElPb)UdV>Xq-;Sf>Oo=$1e20~6OMXU z*Wd6%AA#DRjsE~{n?Dg6G~HN(k< z*hwAF_;jwz!;r&aX$PLj8DLR>F@x0ddirO*d57&?ujsOPF72%^5hso&ASuq`2l?w? zmsgsWiJz@uA!?Us{67Bxf}Z%_QTUhpP22cp*1_*#(CnGr9^^$5%`;#ZW6lC6MFgGI z;xFKbyD`h-Z-@}aSOmAz>?9Dvy@HJV&EK_0`%QSt`&Ip>{B7bBrrcZv*JQUzV+=wp zAV<#AjBrmQzgpvLykDRVOZ#s2c9!S>LNuf*?Z6lvk3F;8=dFIplfs@CE~QQX04n@- zKNZgE%Nd+ksNdRaKx+KB4g*eNsOv&Cx~xP@o1K@=sijInFWbT%NsmcXCU!mTq|*VCUbV z?_IO07qREKMhfS31=h&}s^k;L2M60GuSas*01OBt?vf6EfOID~$3J-2BjJsF-&vyE zAKoVnN$N*(Kdp3jvom2yW5~lO!2`diP=8TZ7j#yeqodOGQDOoUxDYdj`@r|f>Dcq0 z82VPmfvEkh=eM22aUztBnaSIbKy2W1$T;^uO5$GOWMY!Ca8Dy3bKj0|a!+m>+P8G+ zmgdPc*tSGlg6{ah#u$-<@0=bFerY|;mF=mqHKaPdxxD*6+o(4NF^&k}=g{O4j+g_J z#UtOFTR*bi&g`v#%)N1r4hNh1u{Gb36y+M_c7Z1WhcZ zJkU7a06huF$?JpL6jXaiE~4g_W5;W-_zmKD{15Sq!u~QxDIJErYb4MA08ISV4nGtm zS0xNac~&6mrH%n}fIg?SX$?C5J3(7F~&KpTe0P>?tZ3g_p|C6Jl6Bs zv%;jr==dZ$5~r!pV1K1t_+#+~@LGSuBd^{(ip+k)GZI(~^vUWu<$n>ut}EexjN$RO z!`&Oiw-W%`)|G2=TO%Syjdz2SoQV~N@2@nP#8;XX%3cX2+T= zO9j5CZV_&IA}N;!ISTMVR{)hAS0e{C^k$3Vbn!*8)Ab}_IKdOL<9H*Hk?Jrr#M1!86>bwDA^|I(aU5Gr)#q zU^|_wD93%n(y~4_d@_s2{tvs>v<)nobQm5>dx+a`c!L&ri6oq0mR09#0tN#b^AFi$ z#x39v+9Sb!D`_^xZ{jOuxRW^B6pSL1{{S3z_$2G?MB>0UV!A}%l{5J8ql79#5@-h`?EVgpC2J9cY zr;d9bdE&aQ%KVIJh|~FnHyXFxwr=#u%^h1#&|VLOWN2f5Aq4 z0jK!C_FnNM)6Z*td7{T1{MWX?VJ3n_K_DssB&*94V1LU+JYv01;ue#r_!q?!TFWVD zp=O&+y~!ebbB1PI95LDv!1YGQO2qx3{2w=rJ|6gv%6&osrR%nLmd_ra8F=7|ba0b0 za6^saMjzf>t`A*mM(W+soEmzck{TAJc?H3S5*V;l0n`@CoybAzrz0E@&MN#Gy^QuG zEx7X*GliJ%`L_~SFv9`?80U<4s-7nB-m&oS#rNJ4@e&B{bj!wqa}gz2so}vO5~N|2 z4qGs?}9yR$Z^#4qfz@K+J9%S+E&}b9wJR6$9GK? 
zl+dJ6>N866EHSEZ8B}EF1OxX*;#o-p9WQ}C7JNwfwXf>`01`%}ZK1rgopQBC$IJWf5RU|4`p7G<<{TV_4t~e2JrU3`(Xav+HQmKLqOB6w989b zd`stA4fgu&rLxSnb1u?R)t=@k+%J1_uB!WmeBJwV&1>-Y;;yHxIEi%4K4WPl(ZtOe zyP93e3W7dk<-){~oS;*YxaaFH*|Xziu8aFX_+wkv?CkBe%gen}O}=Y(m0-N{~nUGtT5v`Czik^4IOtrFfU)pO0+53H(3MWWLpPzl~S1>HZwjqcUD!-Ct>% zRCWr1u)L<^RXhiI3Bswa{53|OHF?_2*N$veflrf=al$+b44s-S63Rss-f>yRU5b%LefChQT zuVIeo*01<~#vTnj?5%Z}-CL17Pl1v+3Qs*d9;XEK;t{E75?)VbYpq$UE*tF$c9N=c zTXL0Sw31IPf*5r9PI}u-7$ojv-@qrhX(C0IL?Q0H**2_10z#5_`A6O#T;l*&0po3U z&r*4HSuWkJ7TKYKW{8$`aNFM=gpss$AOTl?B6!11DL;nvtCJ0#s9}3@HjpF6NCmo< zBoaAn?e1%?{hB{#-ACh=wQu7802NwICXEV%rrBEEVny1(EV8N)=PQ%IAP&9jl64Z$ z-Vto${uBH=@mIz*8Xt%JJ9L^|hzT^;mg=$}nLxo+1q(X`;hC~RQ)qe) zvjEhgf>pa{teIXvv;-h++V7T+CBmbGC5o$kEw1>I-}YkoMWXnt$2xV6pL!r^^{YKL z74n7ctn219N)GoJGP*W0$jIACBv+j}v5T_Zp7s|6>%~fzeRVG3dr0){a!9P> zv4#+$Ct$HdB2L9^G^s3j{Tp1;G8y^?}@B_B+a91lG{ao zvD;}rA=RZ=pu9q^;i3R4m|iAUDy+K;<3BEbg7`P^BSZKZsQ8u!yPL!I+J2P2AeU2( zrI0#{i_f&SA2GzuC<;^tX+!5fDBJWFnGUJp4+-cx^}d+8Y;arKM`foOGF-QtK;{{V*kP2s<^{{W#|>H0RAWWkkWxw~h$y1JE$@W~o5 zj4nB5k+OG+!W3&_aZ!hR8+&X%4+AN!qkVg|zssTVx$#b>%XQ)(5M0l7tm@YdJ;$8I ze6U91qu*)yzL@P=I&Pz6+Ev6iPqJ2d9u|<28zX68RE&~N2Tlet-nq-9iu!LVc_$3e zAeYTNZ{57G3P}KxNdq{=ZfMdAzY8_Akr@gyp<yDTLwyZ87mrW5$%jJ16pElk^ z8^+U|2jd4Iw>cQ@O?ms@T-*3}Q)_v*q%%uDmf#OF_sHqUJFuac zb$H6$L_DvxNb2B|x%YxeP{VF^fsCg)#ZNZna_Evg{{Z%!@q~UP)00BD^U^yE#SDH~ zFC>JwO{W&&DjApzFDGzOv=T?5{?8}imc6CjTE?nEcB~bj4Jai4U*EcAXg!zC0Uj;yr}@1;pc}tl%0=5mZE};RnJfHb)JFoSK*cJ zt>KxilT@-pJQh;QBZ&Db%<7HRi|#0(o?X;uo;*9@_4c|vPpfLT)-P{ts9s3jvzY@f z&UxAiAY|YIeznu;pS1q~!&$EF{uz8Su&}a<-syzVTi%Q4r70l_(_N@5G=QXU62-Mp zvu8OASSxwqzxY{FUHA{gc5vy&S#2-vb!2&A@|B5jCyRE_a0VR|sT{W%tff{noz3A& zbm4BNlK#=~-)J5_w)lbY(>A*Mq%R!X)=ly6+xB^J$pjK}o_47iHT3@g>_7hi1ps^7 zyMGDz%g@p^+eJZpre8MJMPM+%AH1Y=J-Qqbit-PQ-UPVS{w3)8CYh|YjpeaIw0l{O26vobbCAk% z8zghZc-hWFM>C}!9_~ZLSd5~wgmsTpe?$KO0zN(IUMJJwDX1&%01&6I<6hG7 zX}%)T{LrEDKIr%V06pvQBlZ;k0D_fxx54_Zo#0=Mc9&XB{H0RXTezMm?#_5ImIDpX z+&uHfdw#?I%-^(DkK>&V)vfgfkxXNq`R(uDrG7z~#6eh{1wCg34P+9j5f3W(%-WtgB0F7)1lq0cx(Wv;#d)IJhR5j)Ib$mAWWb}$9l$V z@?XQh6?{$Ni`$JJ z^$fi^HZKnbu-LhB$L!0q?Jw^6^y{YF*|afoe_6t=X?>_{*TJ#)LACg;ydt+R6~?D=6_vOsLkF5g zjawd}oM0T|2Rxit?%(Y1@W(*-Ver>S@K=Xy)(Z_QPKM6PAiX1r0*&AW+wMJn56cyN!Qvj!T6(|oS2~Sa?&i|Vi9d(0r?25(GXB?oHEk!t zcei(wZ85`^A-KWFCnNmvUqyK1RkzcvWPRfqBjw`*9V_#b{tB(}!aocADb`}Oxy1Jt z&24NIvR+9(M8F+RO@3j6rBfGic_%)@pGy9L zgd-Rz^hfigl;?TNcdy=gZlUnT&IDs`smvI6F{lUE0At^eQY!~j@W!oTxLcn#G29ME z)P4u@HS8BUU6L~>w^j1j0UIQoa&SBU00_tXUe$%Gz!eBfR4alB1E?7p9kGB;ev~?K zIps}sN114svgw+R#&QGjcq0d~uH#R;^CMS`HyxA==iAiv$G+dCb6zXcon&bODrDsC z-~-njeLqTOi{hlShwQh&g1s0X4so8HI(7BO99D7n9hsbc$7XfkYC#}xFe>iGLxezZz`i}sOSPONcr zQNRAl^KH?stghhY$LMN zZJ3gB@lA57#R&xM1(iu)4sxnjI%>yjQ*Ty$v*3S#Um1QR>p$3E4n7;}J|WZ)^=Ki_*&_FSMg)^SNMzIjU!RL+ii2J{4~`qEG^89@}fNG zFu5UA5?dGpIIryw;kUvsg}(&69}j>&3Vb-wydk05j9Y0Q7}DXew~qrHBnqpaatS@n zVSIe>^7xP7&x|xZUeXKeZ9~GEloILMlr3>@bRO1J+bm0yBz)w632b3OVk+fP#cFw8 z&rJ{Jm;MPo`!D|4AG9}&Vf~c;9RAKXTIHsfZynC7LMALwc*<9)Rh;`BAEg_SCnlK#H)PAV)ECCrHfr|XY_>2Dl1h4qX;vHjH)c*ix z{{R$e{xk5dkFDyKI%USCel+&Ggj_UHw351r9#s*jX;W&m2LLJD2krNdqttJ-$*k@q z32xE+_VE+@!Q4A=SbwrGr?@rw%lmKt0Kr=PANxA%H-EFw!_SB9;=dhuI9tc9+uckp zEVO|nNjV#?F;bV$NvBaJWs29 zKKSGC(^v4mv!q)uou+D%#j(pNFv_7%`a(FtS6%ABWqAi4Euj9wx=)CFB?Zo(uT5vH z#*FOBFcLz=&=t=iK~cDJanNGEr~RP+0BbLZpS3TEBk`xj{{SD2X71X}(#|sy#c!s< z@8(SmN&FFs3ldq5PdV#cZ^OSB+uwMk+v#w;_EJY25X``crjRodz!9CeI0TQI832Gt z*A*$OKQ_$^15TXrN)pvG=>GuO)5O<0_k!%sxdJw&6iN2-69h@OYmhqaEu0Q`J-Mw9 zir)^bd_kgKg|_)-Eb--XRFx!Pk4z83xNia69|`#C9V+tYWH-sYBR&Ce*m3KD$?8u( zg?mn~_DMADmh*YAc~Q3Ejjk23!Tv9oBb@X-@m_`&qOFIF`=gH+Nzla39hu~R4Za}9 
zs%dNRSK^hyTZkC{0AOmfkV2^d7})w8V#y|248-ZYX0=KhKIrMD&>;1bES-JhKyk$18r^f`+ z-a+z19@ykhvqlm`1A?rfO0fIf9_GBmO=Y;!=hNm29xibw1de)o5P8RN4SkFM00lVs zTcPNu!+tQewRD~rpG}%sr`;s+5$8h0w}$)cxn|Dl3z{n4hAbBn+uY@bLlPZ*dm7$?;ctMErg7&eibp|%pYH$ZtdDDX?&TLcN96`bs!%2Uc$+bqBGl3?j;JHMj~Kz}$nAs9du~!Oigo^< zZ*qXxA!T@r+F?Uv;GKkaIRs#IB#%nUaD>&Qj>=VQQGC?pujl=3-2FcNia%pt+e7y6 zj_+6fkvtJ^taxig@dbvXXXBf_dOJ-fE0|?TQr+j7M2&K&3doU0(keV{BB@{-_1pdl z@BaV%3||MqQ;wo zDP}KThyMTsf$*2Y?}H!kOaB0Y+8@FbXD)(%9qISx_SqUWzr4DLPkD0Nz+mrk^AorN z0)@Z|E9yN9#2RObw8YhI?^W&JJ7S7JMgb!%M^llERuOTE*t+X2^m~}8C ziFQUfXT~r+1$d|Y6f5>J_+$HN{>-|+z|R$FmeF4Vm!1{Zo)B(yO-d(Mis}hI^li9d zvBN7BU_m0i@>_uvdth}c2i)|ocf^hM)G_&mPdNv@8*E{u^m`x5x9x5F9?knc_|sSL zx9uC@Xl(pJV|rqbUGVdzmY=Ik&xDMfjpbZz%aFh%fq_QI`Mz@1B=DcZSq0Vih9#c; z{KmJpot(!LD{T({03cQhK4M7$yKV=M=}Z0!UHcdKE91w7t~^3*ptja6ZJH&!c-59j z*c`(mkVKJ?NZP=GxRHa+e>%Um4xRAp;lGP+z7hBm7uTBNes4-vov7&9`EG9|-0x~neb0=|F!q;>xQj^DJ0 z#m@%#Ikmki{{XPmotilpa=c2&zyPb)&HcNj zoxTR~RGuEav2*0OT-%0Gv?1Gr*x=_6T=?;4qwBgCi2O~UKZQJF z{{RUDGwN5iw<#=I&8o)1W-i5J)FM|Hc3W zJFd^}-;CCt35!+m=fWKV&I>ITEh^hij6_vpv(wRV#3|0uu|xqrteuv;YvDexXW}hx z=k|)e(mL0sG#-E9oUMujoif@jx ze9cS4Lr#K7n+%I^(A&NW_X1q;-ZSr+{5kPX$#icEy@V0l$#R!hP(owgEgj^xw}BLK zfgBQ%fs7DqrtTDyJp42jd_?5_(|>aR0ESD^o$tgSiyk@g-|Yu%G>9+W%SY31Wwq60 zjpKsq;qD{1l0b!lQc&4j!@C<2dxjaq1LuFW@5M`x82I_TF%`|M*8U!jRQ7NfIu+Z3PBKmp1aZ$bwW(Ok7Mw0E!t8E}EZ>D>n6BX3*Tgg(|{{UUnt>$?|mKR9?C`MQ|{lMcb@`49!k=Pnr=%w*T zhb{aqrQJlDb(v{(X;wkz&EQ#GQX8L_ z2b%CbbK*6&rFnaJf#5ex%-fuq2qz1YG8g3_bT&6MZKT%|ln4#DP|&X7!6%Fyo(CX$3aR72i5?p82gUtQQt;5ewKrEW$ne-+lp9_& zW4YWFeosbL9kOeLd#g*CZ1riCi+M|j1Z5Z=NgVnEj+Im6mZI0XwZ5k$5d$rhafxC% z53xezgUX&c9FI!JpWJsgard-7zwk%I{X61Mf#a9PejtlRwy=$@-s0tORwrX4#-MHt zU9jIN3fTEV;O9%>-|a#BB=}QPeJe-smA0{ZptZgBpo1#o1;j{35tt4RS-8gGk`KfB zK8x`K;uY4L;Xen(d3st8BwNEF`3Yjfa0j3Vf=5CC^slS_3Vy~{J_XRD@kfZX1AV9? 
[GIT binary patch data for src/vision/test/assets/000138.jpg (base85-encoded JPEG test asset) omitted]
z2Uf$7M|?LU(!QJcnG84D!fG?bJl0sFm6Va@28cdSJwZ4GV~#6YY5O~NB7Yfl{j&2< zi%Zj(J;rtw#?sOC!skCW(l8EspFvp5VXkUggwomRvnF=LAUGr{uK)sad$AmowQxrQ zyw}D4EY~b8Wd#J^~5i{`3~F44#Y;~39Qaz$}E--bL};FfJ_ z)5HSVp=B2KzF{IqBn418$sF)G;<2p$HT+ibj)8Nk>i##n1r>@z1aYD%Dc!^ zwcY;69v##CTjI?V#9k+9Z||>+)5~*pDRy5jXyZgscvTyjum(vyoB%3Z;W;FWe9(p2 z=${Ba5%`l((4p}Mk8Lgm;9yl2EC|r=u(m)u6QMl+02fT-f$yJ_emQ8?GI*wW z-ZEN!D|?gKhA>B`L0q_`qfI|ErixN_JfbUYN+Y%@WKres%CQ`OoYng+E5sU{OC&es zs~iH(K*{Jc_>b}{tI_n^X~IPyZ<6jf)q%kZ#AIZ4jN{`{@M?S^QFgUGKZo>M!V&9SS zuZL&*4}t8{YW(`dZ0C%GD$CDKGXA{&71-(euB&Gh6I)A}Eut|-h{^y8GD#Tc7!S9; z*B$GR@b$D;e-5wSWy`~A&l8Wlr5ZB0KE$8Sx=#*WO@C`IocMAD(U}Gx%~bOPXRqEJ zI&ytYO43Cy;r+*3qifdCS^b*KXb~8jZ1&gD1b8ICB?mt;fI(B}K35!1e%kqLsBRDwffo|JhkV<}CmK literal 0 HcmV?d00001 diff --git a/src/vision/test/test_vgg_ssd.py b/src/vision/test/test_vgg_ssd.py new file mode 100644 index 0000000..89468b4 --- /dev/null +++ b/src/vision/test/test_vgg_ssd.py @@ -0,0 +1,49 @@ +import tempfile + +import torch + +from ..ssd.vgg_ssd import create_vgg_ssd + + +def test_create_vgg_ssd(): + for num_classes in [2, 10, 21, 100]: + _ = create_vgg_ssd(num_classes) + + +def test_forward(): + for num_classes in [2]: + net = create_vgg_ssd(num_classes) + net.init() + net.eval() + x = torch.randn(2, 3, 300, 300) + confidences, locations = net.forward(x) + assert confidences.size() == torch.Size([2, 8732, num_classes]) + assert locations.size() == torch.Size([2, 8732, 4]) + assert confidences.nonzero().size(0) != 0 + assert locations.nonzero().size(0) != 0 + + +def test_save_model(): + net = create_vgg_ssd(10) + net.init() + with tempfile.TemporaryFile() as f: + net.save(f) + + +def test_save_load_model_consistency(): + net = create_vgg_ssd(20) + net.init() + model_path = tempfile.NamedTemporaryFile().name + net.save(model_path) + net_copy = create_vgg_ssd(20) + net_copy.load(model_path) + + net.eval() + net_copy.eval() + + for _ in range(1): + x = torch.randn(1, 3, 300, 300) + confidences1, locations1 = net.forward(x) + confidences2, locations2 = net_copy.forward(x) + assert (confidences1 == confidences2).long().sum() == confidences2.numel() + assert (locations1 == locations2).long().sum() == locations2.numel() diff --git a/src/vision/transforms/__init__.py b/src/vision/transforms/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/vision/transforms/transforms.py b/src/vision/transforms/transforms.py new file mode 100644 index 0000000..95b5ab4 --- /dev/null +++ b/src/vision/transforms/transforms.py @@ -0,0 +1,410 @@ +# from https://github.com/amdegroot/ssd.pytorch + + +import types + +import cv2 +import numpy as np +import torch +from numpy import random +from torchvision import transforms + + +def intersect(box_a, box_b): + max_xy = np.minimum(box_a[:, 2:], box_b[2:]) + min_xy = np.maximum(box_a[:, :2], box_b[:2]) + inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf) + return inter[:, 0] * inter[:, 1] + + +def jaccard_numpy(box_a, box_b): + """Compute the jaccard overlap of two sets of boxes. The jaccard overlap + is simply the intersection over union of two boxes. 
+ E.g.: + A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B) + Args: + box_a: Multiple bounding boxes, Shape: [num_boxes,4] + box_b: Single bounding box, Shape: [4] + Return: + jaccard overlap: Shape: [box_a.shape[0], box_a.shape[1]] + """ + inter = intersect(box_a, box_b) + area_a = ((box_a[:, 2] - box_a[:, 0]) * + (box_a[:, 3] - box_a[:, 1])) # [A,B] + area_b = ((box_b[2] - box_b[0]) * + (box_b[3] - box_b[1])) # [A,B] + union = area_a + area_b - inter + return inter / union # [A,B] + + +class Compose(object): + """Composes several augmentations together. + Args: + transforms (List[Transform]): list of transforms to compose. + Example: + >>> augmentations.Compose([ + >>> transforms.CenterCrop(10), + >>> transforms.ToTensor(), + >>> ]) + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, img, boxes=None, labels=None): + for t in self.transforms: + img, boxes, labels = t(img, boxes, labels) + return img, boxes, labels + + +class Lambda(object): + """Applies a lambda as a transform.""" + + def __init__(self, lambd): + assert isinstance(lambd, types.LambdaType) + self.lambd = lambd + + def __call__(self, img, boxes=None, labels=None): + return self.lambd(img, boxes, labels) + + +class ConvertFromInts(object): + def __call__(self, image, boxes=None, labels=None): + return image.astype(np.float32), boxes, labels + + +class SubtractMeans(object): + def __init__(self, mean): + self.mean = np.array(mean, dtype=np.float32) + + def __call__(self, image, boxes=None, labels=None): + image = image.astype(np.float32) + image -= self.mean + return image.astype(np.float32), boxes, labels + + +class ToAbsoluteCoords(object): + def __call__(self, image, boxes=None, labels=None): + height, width, channels = image.shape + boxes[:, 0] *= width + boxes[:, 2] *= width + boxes[:, 1] *= height + boxes[:, 3] *= height + + return image, boxes, labels + + +class ToPercentCoords(object): + def __call__(self, image, boxes=None, labels=None): + height, width, channels = image.shape + boxes[:, 0] /= width + boxes[:, 2] /= width + boxes[:, 1] /= height + boxes[:, 3] /= height + + return image, boxes, labels + + +class Resize(object): + def __init__(self, size=300): + self.size = size + + def __call__(self, image, boxes=None, labels=None): + image = cv2.resize(image, (self.size, + self.size)) + return image, boxes, labels + + +class RandomSaturation(object): + def __init__(self, lower=0.5, upper=1.5): + self.lower = lower + self.upper = upper + assert self.upper >= self.lower, "contrast upper must be >= lower." + assert self.lower >= 0, "contrast lower must be non-negative." 
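Editor's note: as a quick sanity check of jaccard_numpy defined earlier in this transforms hunk, the sketch below works one case by hand. It is not part of the patch and assumes jaccard_numpy and numpy are in scope.

import numpy as np

# box_a: two ground-truth boxes, box_b: one query box, all corner-form [x1, y1, x2, y2].
box_a = np.array([[0., 0., 10., 10.],
                  [5., 5., 15., 15.]])
box_b = np.array([0., 0., 10., 10.])

# First box equals box_b, so IoU = 1.0.
# Second box overlaps on a 5x5 patch: 25 / (100 + 100 - 25) = 1/7 ≈ 0.143.
print(jaccard_numpy(box_a, box_b))  # -> [1.0, 0.14285714]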
+ + def __call__(self, image, boxes=None, labels=None): + if random.randint(2): + image[:, :, 1] *= random.uniform(self.lower, self.upper) + + return image, boxes, labels + + +class RandomHue(object): + def __init__(self, delta=18.0): + assert delta >= 0.0 and delta <= 360.0 + self.delta = delta + + def __call__(self, image, boxes=None, labels=None): + if random.randint(2): + image[:, :, 0] += random.uniform(-self.delta, self.delta) + image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0 + image[:, :, 0][image[:, :, 0] < 0.0] += 360.0 + return image, boxes, labels + + +class RandomLightingNoise(object): + def __init__(self): + self.perms = ((0, 1, 2), (0, 2, 1), + (1, 0, 2), (1, 2, 0), + (2, 0, 1), (2, 1, 0)) + + def __call__(self, image, boxes=None, labels=None): + if random.randint(2): + swap = self.perms[random.randint(len(self.perms))] + shuffle = SwapChannels(swap) # shuffle channels + image = shuffle(image) + return image, boxes, labels + + +class ConvertColor(object): + def __init__(self, current, transform): + self.transform = transform + self.current = current + + def __call__(self, image, boxes=None, labels=None): + if self.current == 'BGR' and self.transform == 'HSV': + image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) + elif self.current == 'RGB' and self.transform == 'HSV': + image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) + elif self.current == 'BGR' and self.transform == 'RGB': + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + elif self.current == 'HSV' and self.transform == 'BGR': + image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR) + elif self.current == 'HSV' and self.transform == "RGB": + image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB) + else: + raise NotImplementedError + return image, boxes, labels + + +class RandomContrast(object): + def __init__(self, lower=0.5, upper=1.5): + self.lower = lower + self.upper = upper + assert self.upper >= self.lower, "contrast upper must be >= lower." + assert self.lower >= 0, "contrast lower must be non-negative." + + # expects float image + def __call__(self, image, boxes=None, labels=None): + if random.randint(2): + alpha = random.uniform(self.lower, self.upper) + image *= alpha + return image, boxes, labels + + +class RandomBrightness(object): + def __init__(self, delta=32): + assert delta >= 0.0 + assert delta <= 255.0 + self.delta = delta + + def __call__(self, image, boxes=None, labels=None): + if random.randint(2): + delta = random.uniform(-self.delta, self.delta) + image += delta + return image, boxes, labels + + +class ToCV2Image(object): + def __call__(self, tensor, boxes=None, labels=None): + return tensor.cpu().numpy().astype(np.float32).transpose((1, 2, 0)), boxes, labels + + +class ToTensor(object): + def __call__(self, cvimage, boxes=None, labels=None): + return torch.from_numpy(cvimage.astype(np.float32)).permute(2, 0, 1), boxes, labels + + +class RandomSampleCrop(object): + """Crop + Arguments: + img (Image): the image being input during training + boxes (Tensor): the original bounding boxes in pt form + labels (Tensor): the class labels for each bbox + mode (float tuple): the min and max jaccard overlaps + Return: + (img, boxes, classes) + img (Image): the cropped image + boxes (Tensor): the adjusted bounding boxes in pt form + labels (Tensor): the class labels for each bbox + """ + + def __init__(self): + self.sample_options = ( + # using entire original input image + None, + # sample a patch s.t. 
MIN jaccard w/ obj in .1,.3,.4,.7,.9 + (0.1, None), + (0.3, None), + (0.7, None), + (0.9, None), + # randomly sample a patch + (None, None), + ) + + def __call__(self, image, boxes=None, labels=None): + height, width, _ = image.shape + while True: + # randomly choose a mode + # mode = random.choice(self.sample_options) # throws numpy deprecation warning + mode = self.sample_options[random.randint(len(self.sample_options))] + + if mode is None: + return image, boxes, labels + + min_iou, max_iou = mode + if min_iou is None: + min_iou = float('-inf') + if max_iou is None: + max_iou = float('inf') + + # max trails (50) + for _ in range(50): + current_image = image + + w = random.uniform(0.3 * width, width) + h = random.uniform(0.3 * height, height) + + # aspect ratio constraint b/t .5 & 2 + if h / w < 0.5 or h / w > 2: + continue + + left = random.uniform(width - w) + top = random.uniform(height - h) + + # convert to integer rect x1,y1,x2,y2 + rect = np.array([int(left), int(top), int(left + w), int(top + h)]) + + # calculate IoU (jaccard overlap) b/t the cropped and gt boxes + overlap = jaccard_numpy(boxes, rect) + + # is min and max overlap constraint satisfied? if not try again + if overlap.min() < min_iou and max_iou < overlap.max(): + continue + + # cut the crop from the image + current_image = current_image[rect[1]:rect[3], rect[0]:rect[2], + :] + + # keep overlap with gt box IF center in sampled patch + centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0 + + # mask in all gt boxes that above and to the left of centers + m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1]) + + # mask in all gt boxes that under and to the right of centers + m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1]) + + # mask in that both m1 and m2 are true + mask = m1 * m2 + + # have any valid boxes? 
try again if not + if not mask.any(): + continue + + # take only matching gt boxes + current_boxes = boxes[mask, :].copy() + + # take only matching gt labels + current_labels = labels[mask] + + # should we use the box left and top corner or the crop's + current_boxes[:, :2] = np.maximum(current_boxes[:, :2], + rect[:2]) + # adjust to crop (by substracting crop's left,top) + current_boxes[:, :2] -= rect[:2] + + current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:], + rect[2:]) + # adjust to crop (by substracting crop's left,top) + current_boxes[:, 2:] -= rect[:2] + + return current_image, current_boxes, current_labels + + +class Expand(object): + def __init__(self, mean): + self.mean = mean + + def __call__(self, image, boxes, labels): + if random.randint(2): + return image, boxes, labels + + height, width, depth = image.shape + ratio = random.uniform(1, 4) + left = random.uniform(0, width * ratio - width) + top = random.uniform(0, height * ratio - height) + + expand_image = np.zeros( + (int(height * ratio), int(width * ratio), depth), + dtype=image.dtype) + expand_image[:, :, :] = self.mean + expand_image[int(top):int(top + height), + int(left):int(left + width)] = image + image = expand_image + + boxes = boxes.copy() + boxes[:, :2] += (int(left), int(top)) + boxes[:, 2:] += (int(left), int(top)) + + return image, boxes, labels + + +class RandomMirror(object): + def __call__(self, image, boxes, classes): + _, width, _ = image.shape + if random.randint(2): + image = image[:, ::-1] + boxes = boxes.copy() + boxes[:, 0::2] = width - boxes[:, 2::-2] + return image, boxes, classes + + +class SwapChannels(object): + """Transforms a tensorized image by swapping the channels in the order + specified in the swap tuple. + Args: + swaps (int triple): final order of channels + eg: (2, 1, 0) + """ + + def __init__(self, swaps): + self.swaps = swaps + + def __call__(self, image): + """ + Args: + image (Tensor): image tensor to be transformed + Return: + a tensor with channels swapped according to swap + """ + # if torch.is_tensor(image): + # image = image.data.cpu().numpy() + # else: + # image = np.array(image) + image = image[:, :, self.swaps] + return image + + +class PhotometricDistort(object): + def __init__(self): + self.pd = [ + RandomContrast(), # RGB + ConvertColor(current="RGB", transform='HSV'), # HSV + RandomSaturation(), # HSV + RandomHue(), # HSV + ConvertColor(current='HSV', transform='RGB'), # RGB + RandomContrast() # RGB + ] + self.rand_brightness = RandomBrightness() + self.rand_light_noise = RandomLightingNoise() + + def __call__(self, image, boxes, labels): + im = image.copy() + im, boxes, labels = self.rand_brightness(im, boxes, labels) + if random.randint(2): + distort = Compose(self.pd[:-1]) + else: + distort = Compose(self.pd[1:]) + im, boxes, labels = distort(im, boxes, labels) + return self.rand_light_noise(im, boxes, labels) diff --git a/src/vision/utils/__init__.py b/src/vision/utils/__init__.py new file mode 100644 index 0000000..0789bdb --- /dev/null +++ b/src/vision/utils/__init__.py @@ -0,0 +1 @@ +from .misc import * diff --git a/src/vision/utils/box_utils.py b/src/vision/utils/box_utils.py new file mode 100644 index 0000000..0f22bac --- /dev/null +++ b/src/vision/utils/box_utils.py @@ -0,0 +1,293 @@ +import collections +import itertools +import math +from typing import List + +import torch + +SSDBoxSizes = collections.namedtuple('SSDBoxSizes', ['min', 'max']) + +SSDSpec = collections.namedtuple('SSDSpec', ['feature_map_size', 'shrinkage', 'box_sizes', 'aspect_ratios']) + 
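Editor's note: the SSDSpec tuples above drive prior generation in generate_ssd_priors defined below. As a minimal sketch (not part of the patch; the names specs and num_priors are mine, and SSDSpec/SSDBoxSizes from the lines above are assumed in scope), the canonical 300x300 VGG-SSD spec list shown in the docstring below yields exactly the 8732 priors asserted in test_vgg_ssd.py earlier in this patch:

# Each feature-map cell gets 2 square priors plus 2 priors per aspect ratio.
specs = [
    SSDSpec(38, 8, SSDBoxSizes(30, 60), [2]),
    SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]),
    SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]),
    SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]),
    SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]),
    SSDSpec(1, 300, SSDBoxSizes(264, 315), [2]),
]
num_priors = sum(s.feature_map_size ** 2 * (2 + 2 * len(s.aspect_ratios)) for s in specs)
assert num_priors == 8732  # matches confidences.size() == [2, 8732, num_classes] in the test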
+ +def generate_ssd_priors(specs: List[SSDSpec], image_size, clamp=True) -> torch.Tensor: + """Generate SSD Prior Boxes. + + It returns the center, height and width of the priors. The values are relative to the image size + Args: + specs: SSDSpecs about the shapes of sizes of prior boxes. i.e. + specs = [ + SSDSpec(38, 8, SSDBoxSizes(30, 60), [2]), + SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]), + SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]), + SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]), + SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]), + SSDSpec(1, 300, SSDBoxSizes(264, 315), [2]) + ] + image_size: image size. + clamp: if true, clamp the values to make fall between [0.0, 1.0] + Returns: + priors (num_priors, 4): The prior boxes represented as [[center_x, center_y, w, h]]. All the values + are relative to the image size. + """ + priors = [] + for spec in specs: + scale = image_size / spec.shrinkage + for j, i in itertools.product(range(spec.feature_map_size), repeat=2): + x_center = (i + 0.5) / scale + y_center = (j + 0.5) / scale + + # small sized square box + size = spec.box_sizes.min + h = w = size / image_size + priors.append([ + x_center, + y_center, + w, + h + ]) + + # big sized square box + size = math.sqrt(spec.box_sizes.max * spec.box_sizes.min) + h = w = size / image_size + priors.append([ + x_center, + y_center, + w, + h + ]) + + # change h/w ratio of the small sized box + size = spec.box_sizes.min + h = w = size / image_size + for ratio in spec.aspect_ratios: + ratio = math.sqrt(ratio) + priors.append([ + x_center, + y_center, + w * ratio, + h / ratio + ]) + priors.append([ + x_center, + y_center, + w / ratio, + h * ratio + ]) + + priors = torch.tensor(priors) + if clamp: + torch.clamp(priors, 0.0, 1.0, out=priors) + return priors + + +def convert_locations_to_boxes(locations, priors, center_variance, + size_variance): + """Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w). + + The conversion: + $$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$ + $$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$ + We do it in the inverse direction here. + Args: + locations (batch_size, num_priors, 4): the regression output of SSD. It will contain the outputs as well. + priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes. + center_variance: a float used to change the scale of center. + size_variance: a float used to change of scale of size. + Returns: + boxes: priors: [[center_x, center_y, h, w]]. All the values + are relative to the image size. + """ + # priors can have one dimension less. 
+ if priors.dim() + 1 == locations.dim(): + priors = priors.unsqueeze(0) + return torch.cat([ + locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2], + torch.exp(locations[..., 2:] * size_variance) * priors[..., 2:] + ], dim=locations.dim() - 1) + + +def convert_boxes_to_locations(center_form_boxes, center_form_priors, center_variance, size_variance): + # priors can have one dimension less + if center_form_priors.dim() + 1 == center_form_boxes.dim(): + center_form_priors = center_form_priors.unsqueeze(0) + return torch.cat([ + (center_form_boxes[..., :2] - center_form_priors[..., :2]) / center_form_priors[..., 2:] / center_variance, + torch.log(center_form_boxes[..., 2:] / center_form_priors[..., 2:]) / size_variance + ], dim=center_form_boxes.dim() - 1) + + +def area_of(left_top, right_bottom) -> torch.Tensor: + """Compute the areas of rectangles given two corners. + + Args: + left_top (N, 2): left top corner. + right_bottom (N, 2): right bottom corner. + + Returns: + area (N): return the area. + """ + hw = torch.clamp(right_bottom - left_top, min=0.0) + return hw[..., 0] * hw[..., 1] + + +def iou_of(boxes0, boxes1, eps=1e-5): + """Return intersection-over-union (Jaccard index) of boxes. + + Args: + boxes0 (N, 4): ground truth boxes. + boxes1 (N or 1, 4): predicted boxes. + eps: a small number to avoid 0 as denominator. + Returns: + iou (N): IoU values. + """ + overlap_left_top = torch.max(boxes0[..., :2], boxes1[..., :2]) + overlap_right_bottom = torch.min(boxes0[..., 2:], boxes1[..., 2:]) + + overlap_area = area_of(overlap_left_top, overlap_right_bottom) + area0 = area_of(boxes0[..., :2], boxes0[..., 2:]) + area1 = area_of(boxes1[..., :2], boxes1[..., 2:]) + return overlap_area / (area0 + area1 - overlap_area + eps) + + +def assign_priors(gt_boxes, gt_labels, corner_form_priors, + iou_threshold): + """Assign ground truth boxes and targets to priors. + + Args: + gt_boxes (num_targets, 4): ground truth boxes. + gt_labels (num_targets): labels of targets. + priors (num_priors, 4): corner form priors + Returns: + boxes (num_priors, 4): real values for priors. + labels (num_priros): labels for priors. + """ + # size: num_priors x num_targets + ious = iou_of(gt_boxes.unsqueeze(0), corner_form_priors.unsqueeze(1)) + # size: num_priors + best_target_per_prior, best_target_per_prior_index = ious.max(1) + # size: num_targets + best_prior_per_target, best_prior_per_target_index = ious.max(0) + + for target_index, prior_index in enumerate(best_prior_per_target_index): + best_target_per_prior_index[prior_index] = target_index + # 2.0 is used to make sure every target has a prior assigned + best_target_per_prior.index_fill_(0, best_prior_per_target_index, 2) + # size: num_priors + labels = gt_labels[best_target_per_prior_index] + labels[best_target_per_prior < iou_threshold] = 0 # the backgournd id + boxes = gt_boxes[best_target_per_prior_index] + return boxes, labels + + +def hard_negative_mining(loss, labels, neg_pos_ratio): + """ + It used to suppress the presence of a large number of negative prediction. + It works on image level not batch level. + For any example/image, it keeps all the positive predictions and + cut the number of negative predictions to make sure the ratio + between the negative examples and positive examples is no more + the given ratio for an image. + + Args: + loss (N, num_priors): the loss for each example. + labels (N, num_priors): the labels. + neg_pos_ratio: the ratio between the negative examples and positive examples. 
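    Example (editor's illustration, not part of the original patch):
        >>> loss = torch.tensor([[5., 4., 3., 2., 1.]])
        >>> labels = torch.tensor([[1, 0, 0, 0, 0]])
        >>> hard_negative_mining(loss, labels, neg_pos_ratio=3)
        tensor([[ True,  True,  True,  True, False]])
        There is one positive, so the three hardest negatives (losses 4, 3
        and 2) are kept and the easiest negative is dropped.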
+ """ + pos_mask = labels > 0 + num_pos = pos_mask.long().sum(dim=1, keepdim=True) + num_neg = num_pos * neg_pos_ratio + + loss[pos_mask] = -math.inf + _, indexes = loss.sort(dim=1, descending=True) + _, orders = indexes.sort(dim=1) + neg_mask = orders < num_neg + return pos_mask | neg_mask + + +def center_form_to_corner_form(locations): + return torch.cat([locations[..., :2] - locations[..., 2:] / 2, + locations[..., :2] + locations[..., 2:] / 2], locations.dim() - 1) + + +def corner_form_to_center_form(boxes): + return torch.cat([ + (boxes[..., :2] + boxes[..., 2:]) / 2, + boxes[..., 2:] - boxes[..., :2] + ], boxes.dim() - 1) + + +def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200): + """ + + Args: + box_scores (N, 5): boxes in corner-form and probabilities. + iou_threshold: intersection over union threshold. + top_k: keep top_k results. If k <= 0, keep all the results. + candidate_size: only consider the candidates with the highest scores. + Returns: + picked: a list of indexes of the kept boxes + """ + scores = box_scores[:, -1] + boxes = box_scores[:, :-1] + picked = [] + _, indexes = scores.sort(descending=True) + indexes = indexes[:candidate_size] + while len(indexes) > 0: + current = indexes[0] + picked.append(current.item()) + if 0 < top_k == len(picked) or len(indexes) == 1: + break + current_box = boxes[current, :] + indexes = indexes[1:] + rest_boxes = boxes[indexes, :] + iou = iou_of( + rest_boxes, + current_box.unsqueeze(0), + ) + indexes = indexes[iou <= iou_threshold] + + return box_scores[picked, :] + + +def nms(box_scores, nms_method=None, score_threshold=None, iou_threshold=None, + sigma=0.5, top_k=-1, candidate_size=200): + if nms_method == "soft": + return soft_nms(box_scores, score_threshold, sigma, top_k) + else: + return hard_nms(box_scores, iou_threshold, top_k, candidate_size=candidate_size) + + +def soft_nms(box_scores, score_threshold, sigma=0.5, top_k=-1): + """Soft NMS implementation. + + References: + https://arxiv.org/abs/1704.04503 + https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/cython_nms.pyx + + Args: + box_scores (N, 5): boxes in corner-form and probabilities. + score_threshold: boxes with scores less than value are not considered. + sigma: the parameter in score re-computation. + scores[i] = scores[i] * exp(-(iou_i)^2 / simga) + top_k: keep top_k results. If k <= 0, keep all the results. + Returns: + picked_box_scores (K, 5): results of NMS. 
+ """ + picked_box_scores = [] + while box_scores.size(0) > 0: + max_score_index = torch.argmax(box_scores[:, 4]) + cur_box_prob = torch.tensor(box_scores[max_score_index, :]) + picked_box_scores.append(cur_box_prob) + if len(picked_box_scores) == top_k > 0 or box_scores.size(0) == 1: + break + cur_box = cur_box_prob[:-1] + box_scores[max_score_index, :] = box_scores[-1, :] + box_scores = box_scores[:-1, :] + ious = iou_of(cur_box.unsqueeze(0), box_scores[:, :-1]) + box_scores[:, -1] = box_scores[:, -1] * torch.exp(-(ious * ious) / sigma) + box_scores = box_scores[box_scores[:, -1] > score_threshold, :] + if len(picked_box_scores) > 0: + return torch.stack(picked_box_scores) + else: + return torch.tensor([]) diff --git a/src/vision/utils/box_utils_numpy.py b/src/vision/utils/box_utils_numpy.py new file mode 100644 index 0000000..25fc207 --- /dev/null +++ b/src/vision/utils/box_utils_numpy.py @@ -0,0 +1,238 @@ +import itertools +import math +from typing import List + +import numpy as np + +from .box_utils import SSDSpec + + +def generate_ssd_priors(specs: List[SSDSpec], image_size, clamp=True): + """Generate SSD Prior Boxes. + + It returns the center, height and width of the priors. The values are relative to the image size + Args: + specs: SSDSpecs about the shapes of sizes of prior boxes. i.e. + specs = [ + SSDSpec(38, 8, SSDBoxSizes(30, 60), [2]), + SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]), + SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]), + SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]), + SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]), + SSDSpec(1, 300, SSDBoxSizes(264, 315), [2]) + ] + image_size: image size. + clamp: if true, clamp the values to make fall between [0.0, 1.0] + Returns: + priors (num_priors, 4): The prior boxes represented as [[center_x, center_y, w, h]]. All the values + are relative to the image size. + """ + priors = [] + for spec in specs: + scale = image_size / spec.shrinkage + for j, i in itertools.product(range(spec.feature_map_size), repeat=2): + x_center = (i + 0.5) / scale + y_center = (j + 0.5) / scale + + # small sized square box + size = spec.box_sizes.min + h = w = size / image_size + priors.append([ + x_center, + y_center, + w, + h + ]) + + # big sized square box + size = math.sqrt(spec.box_sizes.max * spec.box_sizes.min) + h = w = size / image_size + priors.append([ + x_center, + y_center, + w, + h + ]) + + # change h/w ratio of the small sized box + size = spec.box_sizes.min + h = w = size / image_size + for ratio in spec.aspect_ratios: + ratio = math.sqrt(ratio) + priors.append([ + x_center, + y_center, + w * ratio, + h / ratio + ]) + priors.append([ + x_center, + y_center, + w / ratio, + h * ratio + ]) + + priors = np.array(priors, dtype=np.float32) + if clamp: + np.clip(priors, 0.0, 1.0, out=priors) + return priors + + +def convert_locations_to_boxes(locations, priors, center_variance, + size_variance): + """Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w). + + The conversion: + $$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$ + $$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$ + We do it in the inverse direction here. + Args: + locations (batch_size, num_priors, 4): the regression output of SSD. It will contain the outputs as well. + priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes. + center_variance: a float used to change the scale of center. + size_variance: a float used to change of scale of size. 
+ Returns: + boxes: priors: [[center_x, center_y, h, w]]. All the values + are relative to the image size. + """ + # priors can have one dimension less. + if len(priors.shape) + 1 == len(locations.shape): + priors = np.expand_dims(priors, 0) + return np.concatenate([ + locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2], + np.exp(locations[..., 2:] * size_variance) * priors[..., 2:] + ], axis=len(locations.shape) - 1) + + +def convert_boxes_to_locations(center_form_boxes, center_form_priors, center_variance, size_variance): + # priors can have one dimension less + if len(center_form_priors.shape) + 1 == len(center_form_boxes.shape): + center_form_priors = np.expand_dims(center_form_priors, 0) + return np.concatenate([ + (center_form_boxes[..., :2] - center_form_priors[..., :2]) / center_form_priors[..., 2:] / center_variance, + np.log(center_form_boxes[..., 2:] / center_form_priors[..., 2:]) / size_variance + ], axis=len(center_form_boxes.shape) - 1) + + +def area_of(left_top, right_bottom): + """Compute the areas of rectangles given two corners. + + Args: + left_top (N, 2): left top corner. + right_bottom (N, 2): right bottom corner. + + Returns: + area (N): return the area. + """ + hw = np.clip(right_bottom - left_top, 0.0, None) + return hw[..., 0] * hw[..., 1] + + +def iou_of(boxes0, boxes1, eps=1e-5): + """Return intersection-over-union (Jaccard index) of boxes. + + Args: + boxes0 (N, 4): ground truth boxes. + boxes1 (N or 1, 4): predicted boxes. + eps: a small number to avoid 0 as denominator. + Returns: + iou (N): IoU values. + """ + overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2]) + overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:]) + + overlap_area = area_of(overlap_left_top, overlap_right_bottom) + area0 = area_of(boxes0[..., :2], boxes0[..., 2:]) + area1 = area_of(boxes1[..., :2], boxes1[..., 2:]) + return overlap_area / (area0 + area1 - overlap_area + eps) + + +def center_form_to_corner_form(locations): + return np.concatenate([locations[..., :2] - locations[..., 2:] / 2, + locations[..., :2] + locations[..., 2:] / 2], len(locations.shape) - 1) + + +def corner_form_to_center_form(boxes): + return np.concatenate([ + (boxes[..., :2] + boxes[..., 2:]) / 2, + boxes[..., 2:] - boxes[..., :2] + ], len(boxes.shape) - 1) + + +def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200): + """ + + Args: + box_scores (N, 5): boxes in corner-form and probabilities. + iou_threshold: intersection over union threshold. + top_k: keep top_k results. If k <= 0, keep all the results. + candidate_size: only consider the candidates with the highest scores. 
+ Returns: + picked: a list of indexes of the kept boxes + """ + scores = box_scores[:, -1] + boxes = box_scores[:, :-1] + picked = [] + # _, indexes = scores.sort(descending=True) + indexes = np.argsort(scores) + # indexes = indexes[:candidate_size] + indexes = indexes[-candidate_size:] + while len(indexes) > 0: + # current = indexes[0] + current = indexes[-1] + picked.append(current) + if 0 < top_k == len(picked) or len(indexes) == 1: + break + current_box = boxes[current, :] + # indexes = indexes[1:] + indexes = indexes[:-1] + rest_boxes = boxes[indexes, :] + iou = iou_of( + rest_boxes, + np.expand_dims(current_box, axis=0), + ) + indexes = indexes[iou <= iou_threshold] + + return box_scores[picked, :] + +# def nms(box_scores, nms_method=None, score_threshold=None, iou_threshold=None, +# sigma=0.5, top_k=-1, candidate_size=200): +# if nms_method == "soft": +# return soft_nms(box_scores, score_threshold, sigma, top_k) +# else: +# return hard_nms(box_scores, iou_threshold, top_k, candidate_size=candidate_size) + +# +# def soft_nms(box_scores, score_threshold, sigma=0.5, top_k=-1): +# """Soft NMS implementation. +# +# References: +# https://arxiv.org/abs/1704.04503 +# https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/cython_nms.pyx +# +# Args: +# box_scores (N, 5): boxes in corner-form and probabilities. +# score_threshold: boxes with scores less than value are not considered. +# sigma: the parameter in score re-computation. +# scores[i] = scores[i] * exp(-(iou_i)^2 / simga) +# top_k: keep top_k results. If k <= 0, keep all the results. +# Returns: +# picked_box_scores (K, 5): results of NMS. +# """ +# picked_box_scores = [] +# while box_scores.size(0) > 0: +# max_score_index = torch.argmax(box_scores[:, 4]) +# cur_box_prob = torch.tensor(box_scores[max_score_index, :]) +# picked_box_scores.append(cur_box_prob) +# if len(picked_box_scores) == top_k > 0 or box_scores.size(0) == 1: +# break +# cur_box = cur_box_prob[:-1] +# box_scores[max_score_index, :] = box_scores[-1, :] +# box_scores = box_scores[:-1, :] +# ious = iou_of(cur_box.unsqueeze(0), box_scores[:, :-1]) +# box_scores[:, -1] = box_scores[:, -1] * torch.exp(-(ious * ious) / sigma) +# box_scores = box_scores[box_scores[:, -1] > score_threshold, :] +# if len(picked_box_scores) > 0: +# return torch.stack(picked_box_scores) +# else: +# return torch.tensor([]) diff --git a/src/vision/utils/measurements.py b/src/vision/utils/measurements.py new file mode 100644 index 0000000..5cc590c --- /dev/null +++ b/src/vision/utils/measurements.py @@ -0,0 +1,32 @@ +import numpy as np + + +def compute_average_precision(precision, recall): + """ + It computes average precision based on the definition of Pascal Competition. It computes the under curve area + of precision and recall. Recall follows the normal definition. Precision is a variant. + pascal_precision[i] = typical_precision[i:].max() + """ + # identical but faster version of new_precision[i] = old_precision[i:].max() + precision = np.concatenate([[0.0], precision, [0.0]]) + for i in range(len(precision) - 1, 0, -1): + precision[i - 1] = np.maximum(precision[i - 1], precision[i]) + + # find the index where the value changes + recall = np.concatenate([[0.0], recall, [1.0]]) + changing_points = np.where(recall[1:] != recall[:-1])[0] + + # compute under curve area + areas = (recall[changing_points + 1] - recall[changing_points]) * precision[changing_points + 1] + return areas.sum() + + +def compute_voc2007_average_precision(precision, recall): + ap = 0. 
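    # Editor's comment: VOC2007-style 11-point interpolation, as implemented below —
    # sample the maximum precision at recall thresholds 0.0, 0.1, ..., 1.0
    # and average the 11 values.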
+ for t in np.arange(0., 1.1, 0.1): + if np.sum(recall >= t) == 0: + p = 0 + else: + p = np.max(precision[recall >= t]) + ap = ap + p / 11. + return ap diff --git a/src/vision/utils/misc.py b/src/vision/utils/misc.py new file mode 100644 index 0000000..f67ee4b --- /dev/null +++ b/src/vision/utils/misc.py @@ -0,0 +1,46 @@ +import time + +import torch + + +def str2bool(s): + return s.lower() in ('true', '1') + + +class Timer: + def __init__(self): + self.clock = {} + + def start(self, key="default"): + self.clock[key] = time.time() + + def end(self, key="default"): + if key not in self.clock: + raise Exception(f"{key} is not in the clock.") + interval = time.time() - self.clock[key] + del self.clock[key] + return interval + + +def save_checkpoint(epoch, net_state_dict, optimizer_state_dict, best_score, checkpoint_path, model_path): + torch.save({ + 'epoch': epoch, + 'model': net_state_dict, + 'optimizer': optimizer_state_dict, + 'best_score': best_score + }, checkpoint_path) + torch.save(net_state_dict, model_path) + + +def load_checkpoint(checkpoint_path): + return torch.load(checkpoint_path) + + +def freeze_net_layers(net): + for param in net.parameters(): + param.requires_grad = False + + +def store_labels(path, labels): + with open(path, "w") as f: + f.write("\n".join(labels)) diff --git a/src/vision/utils/model_book.py b/src/vision/utils/model_book.py new file mode 100644 index 0000000..763b79b --- /dev/null +++ b/src/vision/utils/model_book.py @@ -0,0 +1,82 @@ +from collections import OrderedDict + +import torch.nn as nn + + +class ModelBook: + """Maintain the mapping between modules and their paths. + + Example: + book = ModelBook(model_ft) + for p, m in book.conv2d_modules(): + print('path:', p, 'num of filters:', m.out_channels) + assert m is book.get_module(p) + """ + + def __init__(self, model): + self._model = model + self._modules = OrderedDict() + self._paths = OrderedDict() + path = [] + self._construct(self._model, path) + + def _construct(self, module, path): + if not module._modules: + return + for name, m in module._modules.items(): + cur_path = tuple(path + [name]) + self._paths[m] = cur_path + self._modules[cur_path] = m + self._construct(m, path + [name]) + + def conv2d_modules(self): + return self.modules(nn.Conv2d) + + def linear_modules(self): + return self.modules(nn.Linear) + + def modules(self, module_type=None): + for p, m in self._modules.items(): + if not module_type or isinstance(m, module_type): + yield p, m + + def num_of_conv2d_modules(self): + return self.num_of_modules(nn.Conv2d) + + def num_of_conv2d_filters(self): + """Return the sum of out_channels of all conv2d layers. + + Here we treat the sub weight with size of [in_channels, h, w] as a single filter. + """ + num_filters = 0 + for _, m in self.conv2d_modules(): + num_filters += m.out_channels + return num_filters + + def num_of_linear_modules(self): + return self.num_of_modules(nn.Linear) + + def num_of_linear_filters(self): + num_filters = 0 + for _, m in self.linear_modules(): + num_filters += m.out_features + return num_filters + + def num_of_modules(self, module_type=None): + num = 0 + for p, m in self._modules.items(): + if not module_type or isinstance(m, module_type): + num += 1 + return num + + def get_module(self, path): + return self._modules.get(path) + + def get_path(self, module): + return self._paths.get(module) + + def update(self, path, module): + old_module = self._modules[path] + del self._paths[old_module] + self._paths[module] = path + self._modules[path] = module
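Editor's note: to close, a minimal usage sketch for the torch hard_nms added in box_utils.py above. This is an editorial addition, not part of the patch; it assumes the src/ directory added by this patch is on PYTHONPATH so that vision.utils.box_utils is importable.

import torch

from vision.utils.box_utils import hard_nms

# Corner-form boxes with a trailing score column, shape (N, 5).
box_scores = torch.tensor([
    [0.0, 0.0, 10.0, 10.0, 0.9],    # highest score, kept
    [1.0, 1.0, 11.0, 11.0, 0.8],    # IoU ≈ 0.68 with the first box, suppressed at threshold 0.5
    [20.0, 20.0, 30.0, 30.0, 0.7],  # no overlap, kept
])
kept = hard_nms(box_scores, iou_threshold=0.5)
print(kept)  # two rows: the 0.9 box and the 0.7 box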