import torch


# transpose modes; the values match PIL.Image.FLIP_LEFT_RIGHT
# and PIL.Image.FLIP_TOP_BOTTOM
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1


class BBox(object):
    """
    This class represents a set of bounding boxes.
    The bounding boxes are represented as an Nx4 Tensor.
    In order to uniquely determine the bounding boxes with respect
    to an image, we also store the corresponding image dimensions.
    Each bounding box can also carry extra information specific to it,
    such as labels.
    """
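    # A minimal usage sketch ('labels' here is a hypothetical extra field):
    #
    #   boxes = BBox([[0, 0, 10, 10]], image_size=(32, 32), mode='xyxy')
    #   boxes.add_field('labels', torch.tensor([1]))
    #   flipped = boxes.transpose(FLIP_LEFT_RIGHT)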
    def __init__(self, bbox, image_size, mode='xyxy'):
        device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device('cpu')
        # as_tensor avoids copying (and a warning) when bbox is already a Tensor
        bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
        if bbox.ndimension() != 2:
            raise ValueError(
                "bbox should have 2 dimensions, got {}".format(bbox.ndimension()))
        if bbox.size(-1) != 4:
            raise ValueError(
                "last dimension of bbox should have a "
                "size of 4, got {}".format(bbox.size(-1)))
        if mode not in ('xyxy', 'xywh'):
            raise ValueError("mode should be 'xyxy' or 'xywh'")

        self.bbox = bbox
        self.size = image_size  # (image_width, image_height)
        self.mode = mode
        self.extra_fields = {}

    def add_field(self, field, field_data):
        self.extra_fields[field] = field_data

    def get_field(self, field):
        return self.extra_fields[field]

    def fields(self):
        return list(self.extra_fields.keys())

    def _copy_extra_fields(self, bbox):
        for k, v in bbox.extra_fields.items():
            self.extra_fields[k] = v

    def convert(self, mode):
        if mode not in ('xyxy', 'xywh'):
            raise ValueError("mode should be 'xyxy' or 'xywh'")
        if mode == self.mode:
            return self
        # _split always yields corner form, so the target mode alone
        # determines the conversion
        xmin, ymin, xmax, ymax = self._split()
        if mode == 'xyxy':
            bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
        else:
            bbox = torch.cat((xmin, ymin, xmax - xmin, ymax - ymin), dim=-1)
        bbox = BBox(bbox, self.size, mode=mode)
        bbox._copy_extra_fields(self)
        return bbox

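    # NOTE: regardless of the stored mode, _split returns xyxy-style corners
    # (xmin, ymin, xmax, ymax) as four Nx1 tensors; e.g. an 'xywh' box
    # (2, 3, 8, 7) splits into corners (2, 3, 10, 10)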
    def _split(self):
        if self.mode == 'xyxy':
            xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
            return xmin, ymin, xmax, ymax
        elif self.mode == 'xywh':
            xmin, ymin, w, h = self.bbox.split(1, dim=-1)
            return xmin, ymin, xmin + w, ymin + h
        else:
            raise RuntimeError('Should not be here')

    def resize(self, size, *args, **kwargs):
        """
        Returns a resized copy of this bounding box

        :param size: The requested size in pixels, as a 2-tuple:
            (width, height).
        """
        ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
        if ratios[0] == ratios[1]:
            # uniform scaling: every coordinate scales by the same factor,
            # which is valid in both 'xyxy' and 'xywh' modes
            ratio = ratios[0]
            scaled_box = self.bbox * ratio
            bbox = BBox(scaled_box, size, mode=self.mode)
            bbox._copy_extra_fields(self)
            return bbox

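        # aspect ratio changed: x and y scale independently, so work in
        # corner (xyxy) form; e.g. resizing a (10, 10) image to (20, 10)
        # doubles only the x coordinates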
        ratio_width, ratio_height = ratios
        xmin, ymin, xmax, ymax = self._split()
        scaled_xmin = xmin * ratio_width
        scaled_xmax = xmax * ratio_width
        scaled_ymin = ymin * ratio_height
        scaled_ymax = ymax * ratio_height
        scaled_box = torch.cat(
            (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1)
        bbox = BBox(scaled_box, size, mode='xyxy')
        bbox._copy_extra_fields(self)
        return bbox.convert(self.mode)

    def transpose(self, method):
        """
        Transpose bounding box (flip or rotate in 90 degree steps)

        :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT` or
            :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`; the remaining PIL
            transpose methods (ROTATE_90, ROTATE_180, ROTATE_270,
            TRANSPOSE and TRANSVERSE) are not implemented.
        """
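        # e.g. flipping (0, 0, 5, 5) left-right in a width-10 image yields
        # (5, 0, 10, 5): xmin and xmax mirror and swap roles, y is untouched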
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented")
        image_width, image_height = self.size
        xmin, ymin, xmax, ymax = self._split()
        if method == FLIP_LEFT_RIGHT:
            transposed_xmin = image_width - xmax
            transposed_xmax = image_width - xmin
            transposed_ymin = ymin
            transposed_ymax = ymax
        else:  # FLIP_TOP_BOTTOM
            transposed_xmin = xmin
            transposed_xmax = xmax
            transposed_ymin = image_height - ymax
            transposed_ymax = image_height - ymin

        transposed_boxes = torch.cat(
            (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1)
        bbox = BBox(transposed_boxes, self.size, mode='xyxy')
        bbox._copy_extra_fields(self)
        return bbox.convert(self.mode)

    def crop(self, box):
        """
        Crops the bounding boxes to a rectangular region of the image.
        The box is a 4-tuple defining the left, upper, right, and lower
        pixel coordinate.
        """
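        # e.g. with box=(2, 2, 8, 8) the crop window is 6x6, and a bounding
        # box (0, 0, 10, 10) is shifted and clamped to (0, 0, 6, 6)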
        xmin, ymin, xmax, ymax = self._split()
        w, h = box[2] - box[0], box[3] - box[1]
        cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
        cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
        cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
        cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)

        # TODO should empty boxes be filtered here? After clamping, a box
        # that falls entirely outside the crop window collapses to zero
        # width or height:
        # is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)

        cropped_box = torch.cat(
            (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1)
        bbox = BBox(cropped_box, (w, h), mode='xyxy')
        bbox._copy_extra_fields(self)
        return bbox.convert(self.mode)

    def __repr__(self):
        s = self.__class__.__name__ + '('
        s += 'num_boxes={}, '.format(self.bbox.size(0))
        s += 'image_width={}, '.format(self.size[0])
        s += 'image_height={}, '.format(self.size[1])
        s += 'mode={})'.format(self.mode)
        return s


if __name__ == '__main__':
    bbox = BBox([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
    s_bbox = bbox.resize((5, 5))
    print(s_bbox)
    print(s_bbox.bbox)  # both boxes halved: [0, 0, 5, 5] and [0, 0, 2.5, 2.5]

    t_bbox = bbox.transpose(FLIP_LEFT_RIGHT)
    print(t_bbox)
    print(t_bbox.bbox)  # mirrored: [0, 0, 10, 10] and [5, 0, 10, 5]
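
    # a quick sketch exercising the remaining operations; the expected
    # values in the comments assume the (10, 10) image defined above
    c_bbox = bbox.convert('xywh')
    print(c_bbox)
    print(c_bbox.bbox)  # same numbers as in 'xyxy' here, since both boxes
                        # start at the origin: [0, 0, 10, 10] and [0, 0, 5, 5]

    cr_bbox = bbox.crop((0, 0, 5, 5))
    print(cr_bbox)
    print(cr_bbox.bbox)  # clamped to the 5x5 window: [0, 0, 5, 5] twice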