import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import os
import torch.quantization
from torchvision.models.resnet import resnet18  # float ResNet-18 used by load_model below

# Set up warnings
import warnings
warnings.filterwarnings(
    action='ignore',
    category=DeprecationWarning,
    module=r'.*'
)
warnings.filterwarnings(
    action='default',
    module=r'torch.quantization'
)

"""
Define helper functions
"""

# Specify random seed for repeatable results
_ = torch.manual_seed(191009)

class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)
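# Quick illustration (not executed): AverageMeter keeps a count-weighted running
# average, e.g.
#   m = AverageMeter('Acc@1', ':6.2f')
#   m.update(75.0, n=32)   # batch of 32 samples at 75% top-1 accuracy
#   m.update(50.0, n=32)   # second batch at 50%
#   str(m)  ->  'Acc@1  50.00 ( 62.50)'   # latest value, running average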

def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
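# Small worked example (illustrative only):
#   output = torch.tensor([[0.1, 0.7, 0.2],
#                          [0.8, 0.05, 0.15]])   # logits for 2 samples, 3 classes
#   target = torch.tensor([1, 2])
#   accuracy(output, target, topk=(1, 2))  ->  [tensor([50.]), tensor([100.])]
#   (only sample 0 is right at top-1; sample 1's true class is its 2nd-highest score)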

def evaluate(model, criterion, data_loader):
    model.eval()
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    with torch.no_grad():
        for image, target in data_loader:
            output = model(image)
            loss = criterion(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            top1.update(acc1[0], image.size(0))
            top5.update(acc5[0], image.size(0))

    return top1, top5

def load_model(model_file):
    model = resnet18(pretrained=False)
    state_dict = torch.load(model_file)
    model.load_state_dict(state_dict)
    model.to("cpu")
    return model

def print_size_of_model(model):
    if isinstance(model, torch.jit.RecursiveScriptModule):
        torch.jit.save(model, "temp.p")
    else:
        torch.jit.save(torch.jit.script(model), "temp.p")
    print("Size (MB):", os.path.getsize("temp.p") / 1e6)
    os.remove("temp.p")
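# print_size_of_model serializes through TorchScript so that float, FX-converted,
# and already-scripted models are all measured the same way; the reported number
# is the size of the saved archive, which closely tracks the size of the weights.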

def prepare_data_loaders(data_path):

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    dataset = torchvision.datasets.ImageNet(
        data_path,
        split="train",
        transform=transforms.Compose([transforms.RandomResizedCrop(224),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.ToTensor(),
                                      normalize]))
    dataset_test = torchvision.datasets.ImageNet(
        data_path,
        split="val",
        transform=transforms.Compose([transforms.Resize(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      normalize]))

    train_sampler = torch.utils.data.RandomSampler(dataset)
    test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=train_batch_size,
        sampler=train_sampler)

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=eval_batch_size,
        sampler=test_sampler)

    return data_loader, data_loader_test
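# Note: torchvision.datasets.ImageNet expects data_path to already hold a local
# copy of ILSVRC2012 (the original archives plus the devkit, or the extracted
# train/ and val/ folders); it cannot download the data. train_batch_size and
# eval_batch_size are module-level globals defined below, which works because
# prepare_data_loaders is only called after they are set.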

data_path = '~/my_imagenet/'
saved_model_dir = '/data/home/amandaliu/cluster/pytorch/test/quantization/core/experimental/data/'
float_model_file = 'resnet18_pretrained_float.pth'

train_batch_size = 30
eval_batch_size = 50

data_loader, data_loader_test = prepare_data_loaders(data_path)
criterion = nn.CrossEntropyLoss()
float_model = load_model(saved_model_dir + float_model_file).to("cpu")
float_model.eval()

# deepcopy the model since we need to keep the original model around
import copy
model_to_quantize = copy.deepcopy(float_model)

model_to_quantize.eval()

"""
Prepare models
"""

# Note that this is temporary; we'll expose these functions to torch.quantization after the official release
from torch.quantization.quantize_fx import prepare_fx, convert_fx

def calibrate(model, data_loader):
    model.eval()
    with torch.no_grad():
        for image, target in data_loader:
            model(image)
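# Calibration runs representative data through the prepared (observed) model so
# the inserted observers can record activation statistics; convert_fx later uses
# those statistics to choose quantization parameters. No labels or gradients are
# needed, which is why the loop simply discards the model outputs.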

from torch.ao.quantization.experimental.qconfig import (
    uniform_qconfig_8bit,
    apot_weights_qconfig_8bit,
    apot_qconfig_8bit,
    uniform_qconfig_4bit,
    apot_weights_qconfig_4bit,
    apot_qconfig_4bit
)
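# These experimental qconfigs pair uniform observers with APoT (Additive
# Powers-of-Two) observers at 8-bit and 4-bit precision. In the APoT scheme,
# b is the total bitwidth and k the bitwidth of each additive power-of-two term
# (so b/k terms are summed); this is what the (b=8, k=2) / (b=4, k=2) labels in
# the evaluation sections below refer to.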

"""
Prepare full precision model
"""
full_precision_model = float_model

top1, top5 = evaluate(full_precision_model, criterion, data_loader_test)
print("Model #0 Evaluation accuracy on test dataset: %2.2f, %2.2f" % (top1.avg, top5.avg))

"""
Prepare a model for PTQ, applying the specified qconfig to torch.nn.Linear
"""
def prepare_ptq_linear(qconfig):
    qconfig_dict = {"object_type": [(torch.nn.Linear, qconfig)]}
    prepared_model = prepare_fx(copy.deepcopy(float_model), qconfig_dict)  # fuse modules and insert observers
    calibrate(prepared_model, data_loader_test)  # run calibration on sample data
    return prepared_model
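# The "object_type" entry in qconfig_dict scopes quantization to nn.Linear
# modules only; every other layer stays in floating point. prepare_fx rewrites
# a deep copy of the float model into an observed model, calibrate() populates
# its observers from the validation loader, and the caller can then either
# evaluate the observed model directly or pass it to convert_fx to obtain a
# quantized model, as done below.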
"""
Prepare model with uniform activation, uniform weight
b=8, k=2
"""

prepared_model = prepare_ptq_linear(uniform_qconfig_8bit)
quantized_model = convert_fx(prepared_model)  # convert the calibrated model to a quantized model
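# Optionally, the helper defined above can be used to compare serialized model
# sizes (only nn.Linear is quantized here, so the difference will be modest):
#   print_size_of_model(float_model)
#   print_size_of_model(quantized_model)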

top1, top5 = evaluate(quantized_model, criterion, data_loader_test)
print("Model #1 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))

"""
Prepare model with uniform activation, uniform weight
b=4, k=2
"""

prepared_model = prepare_ptq_linear(uniform_qconfig_4bit)
quantized_model = convert_fx(prepared_model)  # convert the calibrated model to a quantized model

top1, top5 = evaluate(quantized_model, criterion, data_loader_test)
print("Model #1 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))

"""
Prepare model with uniform activation, APoT weight
(b=8, k=2)
"""

prepared_model = prepare_ptq_linear(apot_weights_qconfig_8bit)

top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #2 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))

"""
Prepare model with uniform activation, APoT weight
(b=4, k=2)
"""

prepared_model = prepare_ptq_linear(apot_weights_qconfig_4bit)

top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #2 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))


"""
Prepare model with APoT activation and weight
(b=8, k=2)
"""

prepared_model = prepare_ptq_linear(apot_qconfig_8bit)

top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #3 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))

"""
Prepare model with APoT activation and weight
(b=4, k=2)
"""

prepared_model = prepare_ptq_linear(apot_qconfig_4bit)

top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #3 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))

"""
Prepare eager mode quantized model
"""

from torchvision.models.quantization.resnet import resnet18
eager_quantized_model = resnet18(pretrained=True, quantize=True).eval()
top1, top5 = evaluate(eager_quantized_model, criterion, data_loader_test)
print("Eager mode quantized model evaluation accuracy on test dataset: %2.2f, %2.2f" % (top1.avg, top5.avg))
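
# A scripted copy of any of the quantized models above could also be saved for
# later use via TorchScript, e.g. (illustrative; the file name is hypothetical):
#   torch.jit.save(torch.jit.script(eager_quantized_model),
#                  saved_model_dir + "resnet18_eager_quantized_scripted.pth")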