
Commit b23add1

ahirner authored and fmassa committed
Assertion macros compatible with pytorch master (#540)
1 parent cf1d46e commit b23add1

File tree

4 files changed, +13 -13 lines changed

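The change itself is mechanical: every two-argument AT_ASSERT(condition, message) call becomes AT_ASSERTM(condition, message). On PyTorch master at the time, the ATen assertion macros were reworked so that AT_ASSERT checks only a condition, while AT_ASSERTM carries an explicit error message, so the old two-argument AT_ASSERT calls no longer built against master. A minimal sketch of the pattern used after this commit, assuming those ATen macros (the check_cpu_inputs helper is hypothetical and not part of the commit):

    #include <ATen/ATen.h>

    // Hypothetical helper illustrating the assertion style used after this commit.
    void check_cpu_inputs(const at::Tensor& input, const at::Tensor& rois) {
      // AT_ASSERTM takes a condition plus a human-readable message and fails
      // with that message when the condition is false.
      AT_ASSERTM(!input.type().is_cuda(), "input must be a CPU tensor");
      AT_ASSERTM(!rois.type().is_cuda(), "rois must be a CPU tensor");
    }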

torchvision/csrc/cpu/ROIAlign_cpu.cpp

Lines changed: 2 additions & 2 deletions
@@ -223,8 +223,8 @@ at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
                                 const int pooled_height,
                                 const int pooled_width,
                                 const int sampling_ratio) {
-  AT_ASSERT(!input.type().is_cuda(), "input must be a CPU tensor");
-  AT_ASSERT(!rois.type().is_cuda(), "rois must be a CPU tensor");
+  AT_ASSERTM(!input.type().is_cuda(), "input must be a CPU tensor");
+  AT_ASSERTM(!rois.type().is_cuda(), "rois must be a CPU tensor");
 
   auto num_rois = rois.size(0);
   auto channels = input.size(1);

torchvision/csrc/cpu/nms_cpu.cpp

Lines changed: 3 additions & 3 deletions
@@ -5,9 +5,9 @@ template <typename scalar_t>
 at::Tensor nms_cpu_kernel(const at::Tensor& dets,
                           const at::Tensor& scores,
                           const float threshold) {
-  AT_ASSERT(!dets.type().is_cuda(), "dets must be a CPU tensor");
-  AT_ASSERT(!scores.type().is_cuda(), "scores must be a CPU tensor");
-  AT_ASSERT(dets.type() == scores.type(), "dets should have the same type as scores");
+  AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor");
+  AT_ASSERTM(!scores.type().is_cuda(), "scores must be a CPU tensor");
+  AT_ASSERTM(dets.type() == scores.type(), "dets should have the same type as scores");
 
   if (dets.numel() == 0)
     return torch::CPU(at::kLong).tensor();

torchvision/csrc/cuda/ROIAlign_cuda.cu

Lines changed: 4 additions & 4 deletions
@@ -258,8 +258,8 @@ at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
                                  const int pooled_height,
                                  const int pooled_width,
                                  const int sampling_ratio) {
-  AT_ASSERT(input.type().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERT(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
 
   auto num_rois = rois.size(0);
   auto channels = input.size(1);
@@ -308,8 +308,8 @@ at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
                                   const int height,
                                   const int width,
                                   const int sampling_ratio) {
-  AT_ASSERT(grad.type().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERT(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
 
   auto num_rois = rois.size(0);
   at::Tensor grad_input = grad.type().tensor({batch_size, channels, height, width}).zero_();

torchvision/csrc/cuda/ROIPool_cuda.cu

Lines changed: 4 additions & 4 deletions
@@ -110,8 +110,8 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input,
                                                         const float spatial_scale,
                                                         const int pooled_height,
                                                         const int pooled_width) {
-  AT_ASSERT(input.type().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERT(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
 
   auto num_rois = rois.size(0);
   auto channels = input.size(1);
@@ -162,8 +162,8 @@ at::Tensor ROIPool_backward_cuda(const at::Tensor& grad,
                                  const int channels,
                                  const int height,
                                  const int width) {
-  AT_ASSERT(grad.type().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERT(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
   // TODO add more checks
 
   auto num_rois = rois.size(0);
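For code that still needs to build against older ATen headers as well as master, a backward-compatibility shim is one option. This is a hypothetical sketch and not part of this commit; it assumes the older releases' two-argument AT_ASSERT and master's AT_ASSERTM:

    // Hypothetical compatibility shim (not part of this commit): if the ATen
    // headers predate AT_ASSERTM, fall back to the older two-argument AT_ASSERT.
    #ifndef AT_ASSERTM
    #define AT_ASSERTM(cond, msg) AT_ASSERT(cond, msg)
    #endif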
