Fix Python lint #2226

Merged: 1 commit, merged on May 18, 2020
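
The PR body is not shown here, but the diff itself tells the story: single-letter variables named l are renamed to descriptive names, two missing blank lines are added, and two broken format strings are repaired. Attributing the renames to flake8's E741 check ("ambiguous variable name 'l'") is my reading of the diff, not something the PR states. A minimal before/after sketch of the rename pattern:

labels = [[1, 2], [3], [4, 5, 6]]

# Before (would be flagged): boxes_per_image = [len(l) for l in labels]
# After (lint-clean), identical behaviour:
boxes_per_image = [len(label) for label in labels]
print(boxes_per_image)  # [2, 1, 3]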
8 changes: 4 additions & 4 deletions references/similarity/test.py
@@ -27,15 +27,15 @@ def test_pksampler(self):

         for _, labels in loader:
             bins = defaultdict(int)
-            for l in labels.tolist():
-                bins[l] += 1
+            for label in labels.tolist():
+                bins[label] += 1

             # Ensure that each batch has samples from exactly p classes
             self.assertEqual(len(bins), p)

             # Ensure that there are k samples from each class
-            for l in bins:
-                self.assertEqual(bins[l], k)
+            for b in bins:
+                self.assertEqual(bins[b], k)


 if __name__ == '__main__':
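
As a quick sanity check of the invariant this test asserts, a toy standalone run with made-up values of p and k:

from collections import defaultdict

p, k = 2, 3
batch_labels = [5, 5, 5, 9, 9, 9]       # one batch: p classes, k samples each

bins = defaultdict(int)
for label in batch_labels:
    bins[label] += 1

assert len(bins) == p                    # samples from exactly p classes
assert all(bins[b] == k for b in bins)   # k samples from each class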
25 changes: 13 additions & 12 deletions test/test_io.py
@@ -59,6 +59,7 @@ def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None,
         yield f.name, data
     os.unlink(f.name)

+
 @unittest.skipIf(get_video_backend() != "pyav" and not io._HAS_VIDEO_OPT,
                  "video_reader backend not available")
 @unittest.skipIf(av is None, "PyAV unavailable")
@@ -108,10 +109,10 @@ def test_read_partial_video(self):
         with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
             pts, _ = io.read_video_timestamps(f_name)
             for start in range(5):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue(s_data.equal(lv))

             if get_video_backend() == "pyav":
@@ -127,10 +128,10 @@ def test_read_partial_video_bframes(self):
         with temp_video(100, 300, 300, 5, options=options) as (f_name, data):
             pts, _ = io.read_video_timestamps(f_name)
             for start in range(0, 80, 20):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue((s_data.float() - lv.float()).abs().max() < self.TOLERANCE)

             lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7])
@@ -201,10 +202,10 @@ def test_read_partial_video_pts_unit_sec(self):
             pts, _ = io.read_video_timestamps(f_name, pts_unit='sec')

             for start in range(5):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1], pts_unit='sec')
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1], pts_unit='sec')
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue(s_data.equal(lv))

             container = av.open(f_name)
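
The loops above all assert the same slicing invariant; a minimal standalone sketch, assuming a local file clip.mp4 exists (the file name is made up):

import torchvision.io as io

pts, fps = io.read_video_timestamps('clip.mp4')
start, offset = 0, 3
# reading from pts[start] to pts[start + offset - 1] should yield exactly `offset` frames
frames, _, _ = io.read_video('clip.mp4', pts[start], pts[start + offset - 1])
assert len(frames) == offset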
2 changes: 1 addition & 1 deletion torchvision/datasets/usps.py
@@ -49,7 +49,7 @@ def __init__(self, root, train=True, transform=None, target_transform=None,

         import bz2
         with bz2.open(full_path) as fp:
-            raw_data = [l.decode().split() for l in fp.readlines()]
+            raw_data = [line.decode().split() for line in fp.readlines()]
             imgs = [[x.split(':')[-1] for x in data[1:]] for data in raw_data]
             imgs = np.asarray(imgs, dtype=np.float32).reshape((-1, 16, 16))
             imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8)
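
For reference, a small sketch of what the renamed comprehension does to a single decoded record; the byte string below is invented but follows the "<label> <index>:<value> ..." layout the surrounding code assumes:

raw_line = b'7 1:-0.5 2:0.25 3:1.0'

data = raw_line.decode().split()               # ['7', '1:-0.5', '2:0.25', '3:1.0']
pixels = [x.split(':')[-1] for x in data[1:]]  # keep only the value after ':'
print(data[0], pixels)                         # 7 ['-0.5', '0.25', '1.0']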
6 changes: 3 additions & 3 deletions torchvision/models/detection/keypoint_rcnn.py
@@ -221,10 +221,10 @@ class KeypointRCNNHeads(nn.Sequential):
     def __init__(self, in_channels, layers):
         d = []
         next_feature = in_channels
-        for l in layers:
-            d.append(misc_nn_ops.Conv2d(next_feature, l, 3, stride=1, padding=1))
+        for out_channels in layers:
+            d.append(misc_nn_ops.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
             d.append(nn.ReLU(inplace=True))
-            next_feature = l
+            next_feature = out_channels
         super(KeypointRCNNHeads, self).__init__(*d)
         for m in self.children():
             if isinstance(m, misc_nn_ops.Conv2d):
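
A standalone sketch of the construction pattern being touched here, using plain nn.Conv2d in place of misc_nn_ops.Conv2d and invented channel sizes:

import torch
from torch import nn

in_channels, layers = 256, (512, 512)
d = []
next_feature = in_channels
for out_channels in layers:
    # each entry of `layers` becomes a 3x3 conv (stride 1, padding 1) followed by ReLU
    d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
    d.append(nn.ReLU(inplace=True))
    next_feature = out_channels
head = nn.Sequential(*d)

print(head(torch.randn(1, 256, 14, 14)).shape)  # torch.Size([1, 512, 14, 14])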
4 changes: 2 additions & 2 deletions torchvision/models/detection/roi_heads.py
@@ -75,7 +75,7 @@ def maskrcnn_inference(x, labels):

     # select masks coresponding to the predicted classes
     num_masks = x.shape[0]
-    boxes_per_image = [l.shape[0] for l in labels]
+    boxes_per_image = [label.shape[0] for label in labels]
     labels = torch.cat(labels)
     index = torch.arange(num_masks, device=labels.device)
     mask_prob = mask_prob[index, labels][:, None]
@@ -112,7 +112,7 @@ def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs
     """

     discretization_size = mask_logits.shape[-1]
-    labels = [l[idxs] for l, idxs in zip(gt_labels, mask_matched_idxs)]
+    labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)]
     mask_targets = [
         project_masks_on_boxes(m, p, i, discretization_size)
         for m, p, i in zip(gt_masks, proposals, mask_matched_idxs)
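
A tiny worked example of the two renamed comprehensions, with invented tensors for a two-image batch:

import torch

gt_labels = [torch.tensor([1, 3]), torch.tensor([2])]
boxes_per_image = [label.shape[0] for label in gt_labels]   # [2, 1]

mask_matched_idxs = [torch.tensor([0, 0, 1]), torch.tensor([0])]
labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)]
print(boxes_per_image, labels)  # [2, 1] [tensor([1, 1, 3]), tensor([2])]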
6 changes: 3 additions & 3 deletions torchvision/models/detection/rpn.py
@@ -195,9 +195,9 @@ def __init__(self, in_channels, num_anchors):
             in_channels, num_anchors * 4, kernel_size=1, stride=1
         )

-        for l in self.children():
-            torch.nn.init.normal_(l.weight, std=0.01)
-            torch.nn.init.constant_(l.bias, 0)
+        for layer in self.children():
+            torch.nn.init.normal_(layer.weight, std=0.01)
+            torch.nn.init.constant_(layer.bias, 0)

     def forward(self, x):
         # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
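
The renamed loop initializes every child convolution of the RPN head; a minimal sketch of the same pattern on a toy module with invented channel counts:

import torch
from torch import nn

class TinyHead(nn.Module):
    def __init__(self):
        super().__init__()
        self.cls_logits = nn.Conv2d(8, 3, kernel_size=1, stride=1)
        self.bbox_pred = nn.Conv2d(8, 12, kernel_size=1, stride=1)
        # every child gets normal(std=0.01) weights and zero bias
        for layer in self.children():
            torch.nn.init.normal_(layer.weight, std=0.01)
            torch.nn.init.constant_(layer.bias, 0)

head = TinyHead()
print(float(head.bbox_pred.bias.abs().max()))  # 0.0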
6 changes: 3 additions & 3 deletions torchvision/models/detection/transform.py
@@ -111,15 +111,15 @@ def normalize(self, image):
         std = torch.as_tensor(self.image_std, dtype=dtype, device=device)
         return (image - mean[:, None, None]) / std[:, None, None]

-    def torch_choice(self, l):
+    def torch_choice(self, k):
         # type: (List[int]) -> int
         """
         Implements `random.choice` via torch ops so it can be compiled with
         TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803
         is fixed.
         """
-        index = int(torch.empty(1).uniform_(0., float(len(l))).item())
-        return l[index]
+        index = int(torch.empty(1).uniform_(0., float(len(k))).item())
+        return k[index]

     def resize(self, image, target):
         # type: (Tensor, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
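
A standalone sketch of the helper being renamed, which shows why it exists: random.choice cannot be compiled with TorchScript, so the index is drawn with torch ops instead (the size list below is made up):

import torch

def torch_choice(k):
    # uniform_(0, len(k)) followed by int() truncation gives an index in [0, len(k) - 1]
    index = int(torch.empty(1).uniform_(0., float(len(k))).item())
    return k[index]

min_sizes = [480, 512, 544, 576, 608]
print(torch_choice(min_sizes))  # one of the entries, chosen uniformly at random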
13 changes: 7 additions & 6 deletions torchvision/ops/poolers.py
@@ -9,6 +9,7 @@
 from torch.jit.annotations import Optional, List, Dict, Tuple
 import torchvision

+
 # copying result_idx_in_level to a specific index in result[]
 # is not supported by ONNX tracing yet.
 # _onnx_merge_levels() is an implementation supported by ONNX
@@ -21,13 +22,13 @@ def _onnx_merge_levels(levels, unmerged_results):
     res = torch.zeros((levels.size(0), first_result.size(1),
                        first_result.size(2), first_result.size(3)),
                       dtype=dtype, device=device)
-    for l in range(len(unmerged_results)):
-        index = (levels == l).nonzero().view(-1, 1, 1, 1)
+    for level in range(len(unmerged_results)):
+        index = (levels == level).nonzero().view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
-                             unmerged_results[l].size(1),
-                             unmerged_results[l].size(2),
-                             unmerged_results[l].size(3))
-        res = res.scatter(0, index, unmerged_results[l])
+                             unmerged_results[level].size(1),
+                             unmerged_results[level].size(2),
+                             unmerged_results[level].size(3))
+        res = res.scatter(0, index, unmerged_results[level])
     return res

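
To make the scatter-based merge concrete, here is a toy run with invented shapes: four RoIs assigned to two levels, one channel, 2x2 pooled output:

import torch

levels = torch.tensor([0, 1, 0, 1])                   # level assigned to each RoI
unmerged_results = [torch.full((2, 1, 2, 2), 10.),    # pooled outputs for level 0
                    torch.full((2, 1, 2, 2), 20.)]    # pooled outputs for level 1

res = torch.zeros((4, 1, 2, 2))
for level in range(len(unmerged_results)):
    index = (levels == level).nonzero().view(-1, 1, 1, 1)
    index = index.expand(index.size(0), 1, 2, 2)
    # rows of `res` belonging to this level are filled from its results, in order
    res = res.scatter(0, index, unmerged_results[level])

print(res[:, 0, 0, 0])  # tensor([10., 20., 10., 20.])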
2 changes: 1 addition & 1 deletion torchvision/transforms/functional.py
@@ -676,7 +676,7 @@ def adjust_hue(img, hue_factor):
         PIL Image: Hue adjusted image.
     """
     if not(-0.5 <= hue_factor <= 0.5):
-        raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))
+        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

     if not _is_pil_image(img):
         raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
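
This hunk is a genuine bug fix rather than a rename: str.format() silently ignores arguments that have no matching placeholder, so the old message never showed the offending value. A quick illustration:

bad_value = 0.7

print('hue_factor is not in [-0.5, 0.5].'.format(bad_value))
# -> hue_factor is not in [-0.5, 0.5].            (the value is silently dropped)

print('hue_factor ({}) is not in [-0.5, 0.5].'.format(bad_value))
# -> hue_factor (0.7) is not in [-0.5, 0.5].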
4 changes: 2 additions & 2 deletions torchvision/transforms/transforms.py
@@ -807,8 +807,8 @@ def __init__(self, transformation_matrix, mean_vector):

         if mean_vector.size(0) != transformation_matrix.size(0):
             raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
-                             " as any one of the dimensions of the transformation_matrix [{} x {}]"
-                             .format(transformation_matrix.size()))
+                             " as any one of the dimensions of the transformation_matrix [{}]"
+                             .format(tuple(transformation_matrix.size())))

         self.transformation_matrix = transformation_matrix
         self.mean_vector = mean_vector
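
Also a real fix: the old string had two placeholders but format() received a single torch.Size argument, so building the error message would itself raise an IndexError. A small illustration with an invented size:

import torch

size = torch.Size([3, 3])

# old message: two placeholders, one argument -> IndexError while formatting
# "[{} x {}]".format(size)

# new message: one placeholder, argument shown as a plain tuple
print("[{}]".format(tuple(size)))  # -> [(3, 3)]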