diff --git a/references/similarity/test.py b/references/similarity/test.py
index a1e646111c8..8381e02e740 100644
--- a/references/similarity/test.py
+++ b/references/similarity/test.py
@@ -27,15 +27,15 @@ def test_pksampler(self):
 
         for _, labels in loader:
             bins = defaultdict(int)
-            for l in labels.tolist():
-                bins[l] += 1
+            for label in labels.tolist():
+                bins[label] += 1
 
             # Ensure that each batch has samples from exactly p classes
             self.assertEqual(len(bins), p)
 
             # Ensure that there are k samples from each class
-            for l in bins:
-                self.assertEqual(bins[l], k)
+            for b in bins:
+                self.assertEqual(bins[b], k)
 
 
 if __name__ == '__main__':
diff --git a/test/test_io.py b/test/test_io.py
index 7bc312a1d2c..fe624200645 100644
--- a/test/test_io.py
+++ b/test/test_io.py
@@ -59,6 +59,7 @@ def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None,
         yield f.name, data
     os.unlink(f.name)
 
+
 @unittest.skipIf(get_video_backend() != "pyav" and not io._HAS_VIDEO_OPT,
                  "video_reader backend not available")
 @unittest.skipIf(av is None, "PyAV unavailable")
@@ -108,10 +109,10 @@ def test_read_partial_video(self):
         with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
             pts, _ = io.read_video_timestamps(f_name)
             for start in range(5):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue(s_data.equal(lv))
 
             if get_video_backend() == "pyav":
@@ -127,10 +128,10 @@ def test_read_partial_video_bframes(self):
         with temp_video(100, 300, 300, 5, options=options) as (f_name, data):
             pts, _ = io.read_video_timestamps(f_name)
             for start in range(0, 80, 20):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue((s_data.float() - lv.float()).abs().max() < self.TOLERANCE)
 
             lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7])
@@ -201,10 +202,10 @@ def test_read_partial_video_pts_unit_sec(self):
             pts, _ = io.read_video_timestamps(f_name, pts_unit='sec')
 
             for start in range(5):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1], pts_unit='sec')
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1], pts_unit='sec')
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue(s_data.equal(lv))
 
             container = av.open(f_name)
diff --git a/torchvision/datasets/usps.py b/torchvision/datasets/usps.py
index 06f1fd0596f..ac4ae17b948 100644
--- a/torchvision/datasets/usps.py
+++ b/torchvision/datasets/usps.py
@@ -49,7 +49,7 @@ def __init__(self, root, train=True, transform=None, target_transform=None,
 
         import bz2
         with bz2.open(full_path) as fp:
-            raw_data = [l.decode().split() for l in fp.readlines()]
+            raw_data = [line.decode().split() for line in fp.readlines()]
             imgs = [[x.split(':')[-1] for x in data[1:]] for data in raw_data]
             imgs = np.asarray(imgs, dtype=np.float32).reshape((-1, 16, 16))
             imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8)
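A note on the references/similarity/test.py hunk above: the renamed loop is checking the PK-sampling invariant, i.e. every batch the loader yields contains exactly p distinct classes with k samples apiece. A minimal standalone sketch of that check, with a made-up labels tensor and made-up p/k values rather than anything taken from the real test:

    from collections import defaultdict

    import torch

    # Hypothetical batch from a PK sampler with p=2 classes and k=3 samples per class.
    p, k = 2, 3
    labels = torch.tensor([5, 5, 5, 9, 9, 9])

    bins = defaultdict(int)
    for label in labels.tolist():
        bins[label] += 1

    assert len(bins) == p                               # exactly p distinct classes
    assert all(count == k for count in bins.values())   # exactly k samples per class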
diff --git a/torchvision/models/detection/keypoint_rcnn.py b/torchvision/models/detection/keypoint_rcnn.py
index aeee558ca2a..257932fff9a 100644
--- a/torchvision/models/detection/keypoint_rcnn.py
+++ b/torchvision/models/detection/keypoint_rcnn.py
@@ -221,10 +221,10 @@ class KeypointRCNNHeads(nn.Sequential):
     def __init__(self, in_channels, layers):
         d = []
         next_feature = in_channels
-        for l in layers:
-            d.append(misc_nn_ops.Conv2d(next_feature, l, 3, stride=1, padding=1))
+        for out_channels in layers:
+            d.append(misc_nn_ops.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
             d.append(nn.ReLU(inplace=True))
-            next_feature = l
+            next_feature = out_channels
         super(KeypointRCNNHeads, self).__init__(*d)
         for m in self.children():
             if isinstance(m, misc_nn_ops.Conv2d):
diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py
index 6ba271409a9..cd1b23ab095 100644
--- a/torchvision/models/detection/roi_heads.py
+++ b/torchvision/models/detection/roi_heads.py
@@ -75,7 +75,7 @@ def maskrcnn_inference(x, labels):
 
     # select masks coresponding to the predicted classes
     num_masks = x.shape[0]
-    boxes_per_image = [l.shape[0] for l in labels]
+    boxes_per_image = [label.shape[0] for label in labels]
     labels = torch.cat(labels)
     index = torch.arange(num_masks, device=labels.device)
     mask_prob = mask_prob[index, labels][:, None]
@@ -112,7 +112,7 @@ def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs
     """
 
     discretization_size = mask_logits.shape[-1]
-    labels = [l[idxs] for l, idxs in zip(gt_labels, mask_matched_idxs)]
+    labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)]
     mask_targets = [
         project_masks_on_boxes(m, p, i, discretization_size)
         for m, p, i in zip(gt_masks, proposals, mask_matched_idxs)
diff --git a/torchvision/models/detection/rpn.py b/torchvision/models/detection/rpn.py
index bd149ee9061..27d315d3af7 100644
--- a/torchvision/models/detection/rpn.py
+++ b/torchvision/models/detection/rpn.py
@@ -195,9 +195,9 @@ def __init__(self, in_channels, num_anchors):
             in_channels, num_anchors * 4, kernel_size=1, stride=1
         )
 
-        for l in self.children():
-            torch.nn.init.normal_(l.weight, std=0.01)
-            torch.nn.init.constant_(l.bias, 0)
+        for layer in self.children():
+            torch.nn.init.normal_(layer.weight, std=0.01)
+            torch.nn.init.constant_(layer.bias, 0)
 
     def forward(self, x):
         # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
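A note on the keypoint_rcnn.py and rpn.py hunks above: the new names spell out the wiring that `l` obscured; each conv's out_channels becomes the next conv's in_channels, and each conv child is then given the same normal/constant init. A rough sketch of both patterns with plain torch.nn modules, using invented widths (the real values come from the `layers` argument and the backbone):

    import torch
    from torch import nn

    # Hypothetical widths; KeypointRCNNHeads receives these via `layers`.
    in_channels, layers = 256, (512, 512)

    d = []
    next_feature = in_channels
    for out_channels in layers:
        d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
        d.append(nn.ReLU(inplace=True))
        next_feature = out_channels  # this layer's output width feeds the next conv
    head = nn.Sequential(*d)

    # RPNHead-style init; the isinstance check is needed here because, unlike
    # RPNHead, this head also has parameter-free ReLU children.
    for layer in head.children():
        if isinstance(layer, nn.Conv2d):
            torch.nn.init.normal_(layer.weight, std=0.01)
            torch.nn.init.constant_(layer.bias, 0)

    x = torch.randn(1, in_channels, 14, 14)
    assert head(x).shape == (1, layers[-1], 14, 14)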
""" - index = int(torch.empty(1).uniform_(0., float(len(l))).item()) - return l[index] + index = int(torch.empty(1).uniform_(0., float(len(k))).item()) + return k[index] def resize(self, image, target): # type: (Tensor, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]] diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py index 60e5f428c88..3e266201450 100644 --- a/torchvision/ops/poolers.py +++ b/torchvision/ops/poolers.py @@ -9,6 +9,7 @@ from torch.jit.annotations import Optional, List, Dict, Tuple import torchvision + # copying result_idx_in_level to a specific index in result[] # is not supported by ONNX tracing yet. # _onnx_merge_levels() is an implementation supported by ONNX @@ -21,13 +22,13 @@ def _onnx_merge_levels(levels, unmerged_results): res = torch.zeros((levels.size(0), first_result.size(1), first_result.size(2), first_result.size(3)), dtype=dtype, device=device) - for l in range(len(unmerged_results)): - index = (levels == l).nonzero().view(-1, 1, 1, 1) + for level in range(len(unmerged_results)): + index = (levels == level).nonzero().view(-1, 1, 1, 1) index = index.expand(index.size(0), - unmerged_results[l].size(1), - unmerged_results[l].size(2), - unmerged_results[l].size(3)) - res = res.scatter(0, index, unmerged_results[l]) + unmerged_results[level].size(1), + unmerged_results[level].size(2), + unmerged_results[level].size(3)) + res = res.scatter(0, index, unmerged_results[level]) return res diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index d9d9b9863a7..d35225062ae 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -676,7 +676,7 @@ def adjust_hue(img, hue_factor): PIL Image: Hue adjusted image. """ if not(-0.5 <= hue_factor <= 0.5): - raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor)) + raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor)) if not _is_pil_image(img): raise TypeError('img should be PIL Image. Got {}'.format(type(img))) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index 02f28b33239..dfaafb59acd 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -807,8 +807,8 @@ def __init__(self, transformation_matrix, mean_vector): if mean_vector.size(0) != transformation_matrix.size(0): raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) + - " as any one of the dimensions of the transformation_matrix [{} x {}]" - .format(transformation_matrix.size())) + " as any one of the dimensions of the transformation_matrix [{}]" + .format(tuple(transformation_matrix.size()))) self.transformation_matrix = transformation_matrix self.mean_vector = mean_vector