
Commit 9778d26

Fixed floor_divide deprecation warnings seen in pytest output (#3672)
1 parent 3926c90 commit 9778d26
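
Note: around PyTorch 1.8, applying the `//` operator to tensors dispatches to `torch.floor_divide`, which emits a deprecation UserWarning (visible in pytest output); the warning points to `torch.div(..., rounding_mode='floor')` as the replacement, and that is the pattern applied throughout this commit. A minimal standalone before/after sketch (tensor values are made up, not part of the diff):

    import torch

    indices = torch.tensor([0, 4, 7, 12])

    # Old pattern: `//` on a tensor goes through torch.floor_divide and
    # raises a deprecation UserWarning under PyTorch ~1.8.
    videos_old = indices // 5

    # New pattern used in this commit: explicit floor rounding, no warning.
    videos_new = torch.div(indices, 5, rounding_mode='floor')

    assert torch.equal(videos_old, videos_new)  # identical for non-negative inputs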

5 files changed: +12 lines, -9 lines


test/test_datasets_samplers.py

Lines changed: 3 additions & 3 deletions
@@ -45,7 +45,7 @@ def test_random_clip_sampler(self):
             sampler = RandomClipSampler(video_clips, 3)
             self.assertEqual(len(sampler), 3 * 3)
             indices = torch.tensor(list(iter(sampler)))
-            videos = indices // 5
+            videos = torch.div(indices, 5, rounding_mode='floor')
             v_idxs, count = torch.unique(videos, return_counts=True)
             self.assertTrue(v_idxs.equal(torch.tensor([0, 1, 2])))
             self.assertTrue(count.equal(torch.tensor([3, 3, 3])))
@@ -62,7 +62,7 @@ def test_random_clip_sampler_unequal(self):
             indices.remove(0)
             indices.remove(1)
             indices = torch.tensor(indices) - 2
-            videos = indices // 5
+            videos = torch.div(indices, 5, rounding_mode='floor')
             v_idxs, count = torch.unique(videos, return_counts=True)
             self.assertTrue(v_idxs.equal(torch.tensor([0, 1])))
             self.assertTrue(count.equal(torch.tensor([3, 3])))
@@ -73,7 +73,7 @@ def test_uniform_clip_sampler(self):
             sampler = UniformClipSampler(video_clips, 3)
             self.assertEqual(len(sampler), 3 * 3)
             indices = torch.tensor(list(iter(sampler)))
-            videos = indices // 5
+            videos = torch.div(indices, 5, rounding_mode='floor')
             v_idxs, count = torch.unique(videos, return_counts=True)
             self.assertTrue(v_idxs.equal(torch.tensor([0, 1, 2])))
             self.assertTrue(count.equal(torch.tensor([3, 3, 3])))

torchvision/datasets/celeba.py

Lines changed: 2 additions & 1 deletion
@@ -104,7 +104,8 @@ def __init__(
         self.bbox = bbox.data[mask]
         self.landmarks_align = landmarks_align.data[mask]
         self.attr = attr.data[mask]
-        self.attr = (self.attr + 1) // 2  # map from {-1, 1} to {0, 1}
+        # map from {-1, 1} to {0, 1}
+        self.attr = torch.div(self.attr + 1, 2, rounding_mode='floor')
         self.attr_names = attr.header

     def _load_csv(
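
Note: CelebA stores attribute labels as ±1, and `(attr + 1) // 2` (now written with `torch.div`) maps -1 to 0 and +1 to 1. A quick illustration with made-up attribute values:

    import torch

    attr = torch.tensor([-1, 1, 1, -1])                       # raw ±1 labels
    mapped = torch.div(attr + 1, 2, rounding_mode='floor')
    print(mapped)                                             # tensor([0, 1, 1, 0])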

torchvision/models/detection/retinanet.py

Lines changed: 1 addition & 1 deletion
@@ -428,7 +428,7 @@ def postprocess_detections(self, head_outputs, anchors, image_shapes):
                 scores_per_level, idxs = scores_per_level.topk(num_topk)
                 topk_idxs = topk_idxs[idxs]

-                anchor_idxs = topk_idxs // num_classes
+                anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')
                 labels_per_level = topk_idxs % num_classes

                 boxes_per_level = self.box_coder.decode_single(box_regression_per_level[anchor_idxs],
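
Note: the per-level scores are flattened over (anchor, class) pairs before `topk`, so each selected index encodes both an anchor and a class; floor division by `num_classes` recovers the anchor, the modulo recovers the label. A small sketch of that decomposition, with invented values:

    import torch

    num_classes = 4
    topk_idxs = torch.tensor([0, 5, 11, 14])   # hypothetical anchor * num_classes + class indices

    anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')
    labels = topk_idxs % num_classes

    print(anchor_idxs)   # tensor([0, 1, 2, 3])
    print(labels)        # tensor([0, 1, 3, 2])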

torchvision/models/detection/roi_heads.py

Lines changed: 1 addition & 1 deletion
@@ -266,7 +266,7 @@ def heatmaps_to_keypoints(maps, rois):
         pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1)

         x_int = pos % w
-        y_int = (pos - x_int) // w
+        y_int = torch.div(pos - x_int, w, rounding_mode='floor')
         # assert (roi_map_probs[k, y_int, x_int] ==
         #  roi_map_probs[k, :, :].max())
         x = (x_int.float() + 0.5) * width_correction
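
Note: `heatmaps_to_keypoints` takes the argmax over a flattened H×W heatmap, so the flat position has to be split back into (row, column): `pos % w` gives the column and floor division by `w` gives the row. A brief sketch with a toy heatmap (values invented):

    import torch

    w = 3
    roi_map = torch.tensor([[0.1, 0.2, 0.0],
                            [0.0, 0.9, 0.3]])                      # toy 2x3 heatmap
    pos = roi_map.reshape(-1).argmax()                             # flat index 4

    x_int = pos % w                                                # column -> 1
    y_int = torch.div(pos - x_int, w, rounding_mode='floor')       # row -> 1
    print(x_int.item(), y_int.item())                              # 1 1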

torchvision/transforms/functional_tensor.py

Lines changed: 5 additions & 3 deletions
@@ -97,7 +97,7 @@ def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
             # factor should be forced to int for torch jit script
             # otherwise factor is a float and image // factor can produce different results
             factor = int((input_max + 1) // (output_max + 1))
-            image = image // factor
+            image = torch.div(image, factor, rounding_mode='floor')
             return image.to(dtype)
         else:
             # factor should be forced to int for torch jit script
@@ -908,11 +908,13 @@ def _scale_channel(img_chan):
     hist = torch.bincount(img_chan.view(-1), minlength=256)

     nonzero_hist = hist[hist != 0]
-    step = nonzero_hist[:-1].sum() // 255
+    step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode='floor')
     if step == 0:
         return img_chan

-    lut = (torch.cumsum(hist, 0) + (step // 2)) // step
+    lut = torch.div(
+        torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode='floor'),
+        step, rounding_mode='floor')
     lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255)

     return lut[img_chan.to(torch.int64)].to(torch.uint8)
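
Note: the `_scale_channel` change keeps histogram equalization entirely in integer arithmetic: `step` is the sum of the non-zero histogram bins (excluding the last one) floor-divided by 255, and the lookup table is the cumulative histogram, offset by half a step, floor-divided by `step`. A rough standalone sketch of the same LUT computation, assuming a single-channel uint8 image (the random input is invented):

    import torch

    img_chan = torch.randint(0, 256, (32, 32), dtype=torch.uint8)

    hist = torch.bincount(img_chan.view(-1).to(torch.int64), minlength=256)
    nonzero_hist = hist[hist != 0]
    step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode='floor')

    if step != 0:
        # Rounded cumulative histogram divided by the step size, shifted and clamped to [0, 255].
        lut = torch.div(
            torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode='floor'),
            step, rounding_mode='floor')
        lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255)
        equalized = lut[img_chan.to(torch.int64)].to(torch.uint8)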

0 commit comments
