@@ -38,13 +38,13 @@ def horizontal_flip_bounding_box(
 
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     bounding_box[:, [0, 2]] = spatial_size[1] - bounding_box[:, [2, 0]]
 
     return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(shape)
+    ).reshape(shape)
 
 
 def horizontal_flip_video(video: torch.Tensor) -> torch.Tensor:
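
Note: this is the heart of the whole patch. `Tensor.view` never copies, so it raises a RuntimeError when the tensor's memory layout cannot support the requested shape, while `Tensor.reshape` returns a view when possible and silently falls back to a copy otherwise. A minimal sketch of the failure mode being avoided (not part of the patch; the tensors passing through these kernels are simply not guaranteed to be contiguous):

```python
import torch

t = torch.arange(6).reshape(2, 3).t()  # .t() makes the tensor non-contiguous

print(t.reshape(-1))  # ok: reshape copies when no zero-copy view exists
try:
    t.view(-1)        # view never copies, so this raises a RuntimeError
except RuntimeError as e:
    print(e)
```
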
@@ -75,13 +75,13 @@ def vertical_flip_bounding_box(
 
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     bounding_box[:, [1, 3]] = spatial_size[0] - bounding_box[:, [3, 1]]
 
     return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(shape)
+    ).reshape(shape)
 
 
 def vertical_flip_video(video: torch.Tensor) -> torch.Tensor:
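
Note: the fancy-indexing one-liners above (`[0, 2]` vs `[2, 0]`, and `[1, 3]` vs `[3, 1]`) encode both the mirroring `x' = W - x` (or `y' = H - y`) and the min/max swap that keeps the XYXY box well-formed. A small worked example with invented numbers:

```python
import torch

W = 100  # spatial_size[1]
boxes = torch.tensor([[10.0, 5.0, 30.0, 25.0]])  # XYXY

# x' = W - x, with xmin/xmax read in swapped order so xmin' <= xmax' still holds
boxes[:, [0, 2]] = W - boxes[:, [2, 0]]
print(boxes)  # tensor([[70.,  5., 90., 25.]])
```
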
@@ -123,7 +123,7 @@ def resize_image_tensor(
     extra_dims = image.shape[:-3]
 
     if image.numel() > 0:
-        image = image.view(-1, num_channels, old_height, old_width)
+        image = image.reshape(-1, num_channels, old_height, old_width)
 
         image = _FT.resize(
             image,
@@ -132,7 +132,7 @@ def resize_image_tensor(
             antialias=antialias,
         )
 
-    return image.view(extra_dims + (num_channels, new_height, new_width))
+    return image.reshape(extra_dims + (num_channels, new_height, new_width))
 
 
 @torch.jit.unused
@@ -168,7 +168,7 @@ def resize_bounding_box(
     new_height, new_width = _compute_resized_output_size(spatial_size, size=size, max_size=max_size)
     ratios = torch.tensor((new_width / old_width, new_height / old_height), device=bounding_box.device)
     return (
-        bounding_box.view(-1, 2, 2).mul(ratios).to(bounding_box.dtype).view(bounding_box.shape),
+        bounding_box.reshape(-1, 2, 2).mul(ratios).to(bounding_box.dtype).reshape(bounding_box.shape),
         (new_height, new_width),
     )
 
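
Note: the `(N, 4) -> (N, 2, 2)` round trip in `resize_bounding_box` exists so that the two-element `ratios` tensor can broadcast over both corners of every box in one multiply. Illustrative sketch with invented values:

```python
import torch

boxes = torch.tensor([[10.0, 20.0, 30.0, 60.0]])  # XYXY, shape (1, 4)
ratios = torch.tensor([2.0, 0.5])                 # (new_w / old_w, new_h / old_h)

# each box becomes [[x1, y1], [x2, y2]], so ratios broadcast over the last dim
scaled = boxes.reshape(-1, 2, 2).mul(ratios).reshape(boxes.shape)
print(scaled)  # tensor([[20., 10., 60., 30.]])
```
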
@@ -270,7 +270,7 @@ def affine_image_tensor(
 
     num_channels, height, width = image.shape[-3:]
     extra_dims = image.shape[:-3]
-    image = image.view(-1, num_channels, height, width)
+    image = image.reshape(-1, num_channels, height, width)
 
     angle, translate, shear, center = _affine_parse_args(angle, translate, scale, shear, interpolation, center)
 
@@ -283,7 +283,7 @@ def affine_image_tensor(
     matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
 
     output = _FT.affine(image, matrix, interpolation=interpolation.value, fill=fill)
-    return output.view(extra_dims + (num_channels, height, width))
+    return output.reshape(extra_dims + (num_channels, height, width))
 
 
 @torch.jit.unused
@@ -338,20 +338,20 @@ def _affine_bounding_box_xyxy(
             dtype=dtype,
             device=device,
         )
-        .view(2, 3)
+        .reshape(2, 3)
         .T
     )
     # 1) Let's transform bboxes into a tensor of 4 points (top-left, top-right, bottom-left, bottom-right corners).
     # Tensor of points has shape (N * 4, 3), where N is the number of bboxes
     # Single point structure is similar to
     # [(xmin, ymin, 1), (xmax, ymin, 1), (xmax, ymax, 1), (xmin, ymax, 1)]
-    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].view(-1, 2)
+    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
     points = torch.cat([points, torch.ones(points.shape[0], 1, device=points.device)], dim=-1)
     # 2) Now let's transform the points using affine matrix
     transformed_points = torch.matmul(points, transposed_affine_matrix)
     # 3) Reshape transformed points to [N boxes, 4 points, x/y coords]
     # and compute bounding box from 4 transformed points:
-    transformed_points = transformed_points.view(-1, 4, 2)
+    transformed_points = transformed_points.reshape(-1, 4, 2)
     out_bbox_mins, _ = torch.min(transformed_points, dim=1)
     out_bbox_maxs, _ = torch.max(transformed_points, dim=1)
     out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1)
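
Note: steps 1) to 3) in the comments above are the standard trick for pushing boxes through an affine map: transform the four corners in homogeneous coordinates, then take per-box min/max to recover an axis-aligned box. A self-contained sketch with an invented translation matrix:

```python
import torch

boxes = torch.tensor([[0.0, 0.0, 10.0, 20.0]])  # XYXY, shape (N, 4)

# 1) pick the 4 corners of each box and append ones -> homogeneous (N * 4, 3)
points = boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
points = torch.cat([points, torch.ones(points.shape[0], 1)], dim=-1)

# 2) apply a 2x3 affine matrix (here: translate by (5, 10)) as (N * 4, 3) @ (3, 2)
affine = torch.tensor([[1.0, 0.0, 5.0], [0.0, 1.0, 10.0]])
transformed = points @ affine.T

# 3) back to (N, 4, 2), then per-box min/max gives the enclosing box
transformed = transformed.reshape(-1, 4, 2)
mins, maxs = transformed.min(dim=1).values, transformed.max(dim=1).values
print(torch.cat([mins, maxs], dim=1))  # tensor([[ 5., 10., 15., 30.]])
```
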
@@ -396,15 +396,15 @@ def affine_bounding_box(
     original_shape = bounding_box.shape
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     out_bboxes, _ = _affine_bounding_box_xyxy(bounding_box, spatial_size, angle, translate, scale, shear, center)
 
     # out_bboxes should be of shape [N boxes, 4]
 
     return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(original_shape)
+    ).reshape(original_shape)
 
 
 def affine_mask(
@@ -539,7 +539,7 @@ def rotate_image_tensor(
 
     if image.numel() > 0:
         image = _FT.rotate(
-            image.view(-1, num_channels, height, width),
+            image.reshape(-1, num_channels, height, width),
             matrix,
             interpolation=interpolation.value,
             expand=expand,
@@ -549,7 +549,7 @@ def rotate_image_tensor(
     else:
         new_width, new_height = _FT._compute_affine_output_size(matrix, width, height) if expand else (width, height)
 
-    return image.view(extra_dims + (num_channels, new_height, new_width))
+    return image.reshape(extra_dims + (num_channels, new_height, new_width))
 
 
 @torch.jit.unused
@@ -585,7 +585,7 @@ def rotate_bounding_box(
     original_shape = bounding_box.shape
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     out_bboxes, spatial_size = _affine_bounding_box_xyxy(
         bounding_box,
@@ -601,7 +601,7 @@ def rotate_bounding_box(
     return (
         convert_format_bounding_box(
             out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-        ).view(original_shape),
+        ).reshape(original_shape),
         spatial_size,
     )
 
@@ -691,15 +691,15 @@ def _pad_with_scalar_fill(
 
     if image.numel() > 0:
         image = _FT.pad(
-            img=image.view(-1, num_channels, height, width), padding=padding, fill=fill, padding_mode=padding_mode
+            img=image.reshape(-1, num_channels, height, width), padding=padding, fill=fill, padding_mode=padding_mode
         )
         new_height, new_width = image.shape[-2:]
     else:
         left, right, top, bottom = _FT._parse_pad_padding(padding)
         new_height = height + top + bottom
         new_width = width + left + right
 
-    return image.view(extra_dims + (num_channels, new_height, new_width))
+    return image.reshape(extra_dims + (num_channels, new_height, new_width))
 
 
 # TODO: This should be removed once pytorch pad supports non-scalar padding values
@@ -714,7 +714,7 @@ def _pad_with_vector_fill(
 
     output = _pad_with_scalar_fill(image, padding, fill=0, padding_mode="constant")
     left, right, top, bottom = _parse_pad_padding(padding)
-    fill = torch.tensor(fill, dtype=image.dtype, device=image.device).view(-1, 1, 1)
+    fill = torch.tensor(fill, dtype=image.dtype, device=image.device).reshape(-1, 1, 1)
 
     if top > 0:
         output[..., :top, :] = fill
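
Note: the `(C,) -> (C, 1, 1)` reshape on the `fill` line is purely for broadcasting: each channel's fill value then spreads across the height/width slices assigned below it. Minimal sketch:

```python
import torch

output = torch.zeros(3, 4, 4)                           # (C, H, W)
fill = torch.tensor([1.0, 2.0, 3.0]).reshape(-1, 1, 1)  # (C, 1, 1)

output[..., :2, :] = fill    # each channel's padded rows get its own value
print(output[:, 0, 0])       # tensor([1., 2., 3.])
```
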
@@ -863,15 +863,15 @@ def perspective_image_tensor(
     shape = image.shape
 
     if image.ndim > 4:
-        image = image.view((-1,) + shape[-3:])
+        image = image.reshape((-1,) + shape[-3:])
         needs_unsquash = True
     else:
         needs_unsquash = False
 
     output = _FT.perspective(image, perspective_coeffs, interpolation=interpolation.value, fill=fill)
 
     if needs_unsquash:
-        output = output.view(shape)
+        output = output.reshape(shape)
 
     return output
 
@@ -898,7 +898,7 @@ def perspective_bounding_box(
     original_shape = bounding_box.shape
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     dtype = bounding_box.dtype if torch.is_floating_point(bounding_box) else torch.float32
     device = bounding_box.device
@@ -947,7 +947,7 @@ def perspective_bounding_box(
     # Tensor of points has shape (N * 4, 3), where N is the number of bboxes
     # Single point structure is similar to
     # [(xmin, ymin, 1), (xmax, ymin, 1), (xmax, ymax, 1), (xmin, ymax, 1)]
-    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].view(-1, 2)
+    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
     points = torch.cat([points, torch.ones(points.shape[0], 1, device=points.device)], dim=-1)
     # 2) Now let's transform the points using perspective matrices
     # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
@@ -959,7 +959,7 @@ def perspective_bounding_box(
 
     # 3) Reshape transformed points to [N boxes, 4 points, x/y coords]
     # and compute bounding box from 4 transformed points:
-    transformed_points = transformed_points.view(-1, 4, 2)
+    transformed_points = transformed_points.reshape(-1, 4, 2)
     out_bbox_mins, _ = torch.min(transformed_points, dim=1)
     out_bbox_maxs, _ = torch.max(transformed_points, dim=1)
     out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1).to(bounding_box.dtype)
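
Note: the formula in the `# 2)` comment is the usual projective division. A tiny numeric check of it (coefficients invented; `coeffs[6:8]` control the denominator):

```python
import torch

coeffs = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.01]  # identity + mild perspective

x, y = torch.tensor(10.0), torch.tensor(20.0)
denom = coeffs[6] * x + coeffs[7] * y + 1                    # 1.2
x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / denom  # 8.3333
y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / denom  # 16.6667
```
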
@@ -968,7 +968,7 @@ def perspective_bounding_box(
 
     return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(original_shape)
+    ).reshape(original_shape)
 
 
 def perspective_mask(
@@ -1027,15 +1027,15 @@ def elastic_image_tensor(
     shape = image.shape
 
     if image.ndim > 4:
-        image = image.view((-1,) + shape[-3:])
+        image = image.reshape((-1,) + shape[-3:])
         needs_unsquash = True
     else:
         needs_unsquash = False
 
     output = _FT.elastic_transform(image, displacement, interpolation=interpolation.value, fill=fill)
 
     if needs_unsquash:
-        output = output.view(shape)
+        output = output.reshape(shape)
 
     return output
 
@@ -1063,7 +1063,7 @@ def elastic_bounding_box(
     original_shape = bounding_box.shape
     bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
-    ).view(-1, 4)
+    ).reshape(-1, 4)
 
     # Question (vfdev-5): should we rely on good displacement shape and fetch image size from it
     # Or add spatial_size arg and check displacement shape
@@ -1075,21 +1075,21 @@ def elastic_bounding_box(
     inv_grid = id_grid - displacement
 
     # Get points from bboxes
-    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].view(-1, 2)
+    points = bounding_box[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
     index_x = torch.floor(points[:, 0] + 0.5).to(dtype=torch.long)
     index_y = torch.floor(points[:, 1] + 0.5).to(dtype=torch.long)
     # Transform points:
     t_size = torch.tensor(spatial_size[::-1], device=displacement.device, dtype=displacement.dtype)
     transformed_points = (inv_grid[0, index_y, index_x, :] + 1) * 0.5 * t_size - 0.5
 
-    transformed_points = transformed_points.view(-1, 4, 2)
+    transformed_points = transformed_points.reshape(-1, 4, 2)
     out_bbox_mins, _ = torch.min(transformed_points, dim=1)
     out_bbox_maxs, _ = torch.max(transformed_points, dim=1)
     out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1).to(bounding_box.dtype)
 
     return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
-    ).view(original_shape)
+    ).reshape(original_shape)
 
 
 def elastic_mask(