diff --git a/extension/llm/custom_ops/op_sdpa.cpp b/extension/llm/custom_ops/op_sdpa.cpp index d23572d8d04..ecca294d499 100644 --- a/extension/llm/custom_ops/op_sdpa.cpp +++ b/extension/llm/custom_ops/op_sdpa.cpp @@ -594,46 +594,46 @@ bool validate_flash_attention_args( const Tensor& key, const Tensor& value, const optional& attn_mask) { - ET_LOG_MSG_AND_RETURN_IF_FALSE(query.dim() == 4, "query must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE(key.dim() == 4, "key must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE(value.dim() == 4, "value must be a 4D tensor"); + ET_LOG_MSG_AND_RETURN_UNLESS(query.dim() == 4, "query must be a 4D tensor"); + ET_LOG_MSG_AND_RETURN_UNLESS(key.dim() == 4, "key must be a 4D tensor"); + ET_LOG_MSG_AND_RETURN_UNLESS(value.dim() == 4, "value must be a 4D tensor"); // Sizes - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( (query.size(3) == value.size(3)) && (key.size(3) == value.size(3)), "scaled_dot_product_attention_flash_attention: Q/K/V should have the same head size"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( (query.scalar_type() == ScalarType::Float), "Query must be Float type"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( (query.scalar_type() == key.scalar_type()) && (query.scalar_type() == value.scalar_type()), "Key and Value must have the same data type as Query"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( !attn_mask.has_value() || attn_mask.value().dim() == 2, "Attention mask must be a 2D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( !attn_mask.has_value() || attn_mask.value().scalar_type() == query.scalar_type(), "Attention mask must be a 2D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( is_contiguous_dim_order(query.dim_order().data(), query.dim()), "key cache must be in contiguous dim order"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( is_contiguous_dim_order(key.dim_order().data(), key.dim()), "value cache must be in contiguous dim order"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( is_contiguous_dim_order(value.dim_order().data(), value.dim()), "value cache must be in contiguous dim order"); if (attn_mask.has_value()) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( is_contiguous_dim_order( attn_mask.value().dim_order().data(), attn_mask.value().dim()), "value cache must be in contiguous dim order"); @@ -647,21 +647,21 @@ bool validate_cache_params( const Tensor& v_cache, int64_t start_pos, int64_t seq_length) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( k_cache.dim() == 4, "kcache must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( v_cache.dim() == 4, "v_cache must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( start_pos < k_cache.size(1), "start_pos must be less than key cache at dim 1"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( start_pos < v_cache.size(1), "start_pos must be less than value cache at dim 1"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( (start_pos + seq_length) <= k_cache.size(1), "start_post + seq_length must be less than max seq length supported by key cache." 
"start pos: %" PRId64 ", seq_length: %" PRId64 @@ -671,7 +671,7 @@ bool validate_cache_params( seq_length, k_cache.size(1)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( (start_pos + seq_length) <= v_cache.size(1), "start_post + seq_length must be less than max seq length supported by key cache." "start pos: %" PRId64 ", seq_length: %" PRId64 @@ -682,11 +682,11 @@ bool validate_cache_params( v_cache.size(1)); // Make sure they are in contiguous dim order - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( is_contiguous_dim_order(k_cache.dim_order().data(), k_cache.dim()), "key cache must be in contiguous dim order"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( is_contiguous_dim_order(v_cache.dim_order().data(), v_cache.dim()), "value cache must be in contiguous dim order"); diff --git a/extension/llm/custom_ops/op_tile_crop.cpp b/extension/llm/custom_ops/op_tile_crop.cpp index 03777ea3e6e..0f82af656ea 100644 --- a/extension/llm/custom_ops/op_tile_crop.cpp +++ b/extension/llm/custom_ops/op_tile_crop.cpp @@ -19,12 +19,12 @@ bool check_tile_crop_out_args( const Tensor& in, int64_t tile_size, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 3)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, 4)); - ET_LOG_AND_RETURN_IF_FALSE(tile_size > 0); - ET_LOG_AND_RETURN_IF_FALSE(in.size(in.dim() - 1) % tile_size == 0); - ET_LOG_AND_RETURN_IF_FALSE(in.size(in.dim() - 2) % tile_size == 0); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(in, 3)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(out, 4)); + ET_LOG_AND_RETURN_UNLESS(tile_size > 0); + ET_LOG_AND_RETURN_UNLESS(in.size(in.dim() - 1) % tile_size == 0); + ET_LOG_AND_RETURN_UNLESS(in.size(in.dim() - 2) % tile_size == 0); return true; } diff --git a/extension/llm/custom_ops/op_update_cache.cpp b/extension/llm/custom_ops/op_update_cache.cpp index bbc0190dab1..e2cc7e87472 100644 --- a/extension/llm/custom_ops/op_update_cache.cpp +++ b/extension/llm/custom_ops/op_update_cache.cpp @@ -25,17 +25,17 @@ bool validate_cache_params( const Tensor& quantized_cache, int64_t start_pos, int64_t seq_length) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( quantized_cache.dim() == 4, "quantized cache must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( quantized_value.dim() == 4, "quantized_value must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( start_pos < quantized_cache.size(1), "start_pos must be less than cache size at dim 1"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( (start_pos + seq_length) <= quantized_cache.size(1), "start_post + seq_length must be less than max seq length supported by cache." 
"start pos: %" PRId64 ", seq_length: %" PRId64 @@ -46,12 +46,12 @@ bool validate_cache_params( quantized_cache.size(1)); // Make sure they are in contiguous dim order - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( is_contiguous_dim_order( quantized_cache.dim_order().data(), quantized_cache.dim()), "quantized cache must be in contiguous dim order"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( is_contiguous_dim_order( quantized_value.dim_order().data(), quantized_value.dim()), "quantized value must be in contiguous dim order"); diff --git a/extension/parallel/thread_parallel.cpp b/extension/parallel/thread_parallel.cpp index dfbb911d3a9..9ff4181429b 100644 --- a/extension/parallel/thread_parallel.cpp +++ b/extension/parallel/thread_parallel.cpp @@ -53,9 +53,9 @@ bool parallel_for( const int64_t end, const int64_t grain_size, const std::function& f) { - ET_LOG_AND_RETURN_IF_FALSE(begin >= 0 && end >= 0); - ET_LOG_AND_RETURN_IF_FALSE(end >= begin); - ET_LOG_AND_RETURN_IF_FALSE(grain_size > 0); + ET_LOG_AND_RETURN_UNLESS(begin >= 0 && end >= 0); + ET_LOG_AND_RETURN_UNLESS(end >= begin); + ET_LOG_AND_RETURN_UNLESS(grain_size > 0); int64_t num_tasks = 0, chunk_size = 0; std::tie(num_tasks, chunk_size) = calc_num_tasks_and_chunk_size(begin, end, grain_size); diff --git a/kernels/aten/cpu/op__empty_dim_order.cpp b/kernels/aten/cpu/op__empty_dim_order.cpp index e75963a9c4e..801f3cff327 100644 --- a/kernels/aten/cpu/op__empty_dim_order.cpp +++ b/kernels/aten/cpu/op__empty_dim_order.cpp @@ -44,13 +44,13 @@ inline bool _check__empty_out_dim_order( } // dim order size shall equal to input dim - ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == out.dim()); + ET_LOG_AND_RETURN_UNLESS(dim_order_ref.size() == out.dim()); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( is_channels_last_dim_order(dim_order_ref.data(), dim_order_ref.size()) || is_contiguous_dim_order(dim_order_ref.data(), dim_order_ref.size())); - ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim()); + ET_LOG_AND_RETURN_UNLESS(kMaxNumOfDimensions >= out.dim()); executorch::aten::StridesType target_strides[kMaxNumOfDimensions]; dim_order_to_stride_nocheck( out.sizes().data(), @@ -59,7 +59,7 @@ inline bool _check__empty_out_dim_order( target_strides); for (size_t i = 0; i < dim_order_ref.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]); + ET_LOG_AND_RETURN_UNLESS(target_strides[i] == out.strides()[i]); } return true; diff --git a/kernels/aten/cpu/op__to_dim_order_copy.cpp b/kernels/aten/cpu/op__to_dim_order_copy.cpp index 10793d24db5..1315f93d14a 100644 --- a/kernels/aten/cpu/op__to_dim_order_copy.cpp +++ b/kernels/aten/cpu/op__to_dim_order_copy.cpp @@ -47,7 +47,7 @@ bool check__to_dim_order_copy_args( executorch::aten::OptionalArrayRef dim_order, Tensor& out) { // Right now we only support blocking data transfer - ET_LOG_AND_RETURN_IF_FALSE(non_blocking == false); + ET_LOG_AND_RETURN_UNLESS(non_blocking == false); // dim_order is set, the target dim_order will be either contiguous or // channels_last memory format @@ -55,9 +55,9 @@ bool check__to_dim_order_copy_args( executorch::aten::ArrayRef dim_order_ref = dim_order.value(); // dim order size shall equal to input dim - ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == input.dim()); + ET_LOG_AND_RETURN_UNLESS(dim_order_ref.size() == input.dim()); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( is_channels_last_dim_order( dim_order.value().data(), dim_order.value().size()) || 
is_contiguous_dim_order( @@ -65,25 +65,25 @@ bool check__to_dim_order_copy_args( // Out Aten tensor shall have same memory format stride as dim_order const size_t kMaxNumOfDimensions = 16; - ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim()); + ET_LOG_AND_RETURN_UNLESS(kMaxNumOfDimensions >= out.dim()); executorch::aten::StridesType target_strides[kMaxNumOfDimensions]; dim_order_to_stride_nocheck( out.sizes().data(), dim_order_ref.data(), dim_order_ref.size(), target_strides); - ET_LOG_AND_RETURN_IF_FALSE(out.dim() == dim_order_ref.size()); + ET_LOG_AND_RETURN_UNLESS(out.dim() == dim_order_ref.size()); for (size_t i = 0; i < dim_order_ref.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]); + ET_LOG_AND_RETURN_UNLESS(target_strides[i] == out.strides()[i]); } } else { // dim_order is not set, preserve the dim order of input auto out_strides = out.strides(); auto input_strides = input.strides(); - ET_LOG_AND_RETURN_IF_FALSE(input_strides.size() == out_strides.size()); + ET_LOG_AND_RETURN_UNLESS(input_strides.size() == out_strides.size()); for (size_t i = 0; i < input_strides.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(input_strides[i] == out_strides[i]); + ET_LOG_AND_RETURN_UNLESS(input_strides[i] == out_strides[i]); } } return true; diff --git a/kernels/aten/cpu/util/copy_ops_util.cpp b/kernels/aten/cpu/util/copy_ops_util.cpp index 0fe5342ca39..254d4c71431 100644 --- a/kernels/aten/cpu/util/copy_ops_util.cpp +++ b/kernels/aten/cpu/util/copy_ops_util.cpp @@ -22,7 +22,7 @@ bool check__to_dim_order_copy_args( executorch::aten::OptionalArrayRef dim_order, Tensor& out) { // Right now we only support blocking data transfer - ET_LOG_AND_RETURN_IF_FALSE(non_blocking == false); + ET_LOG_AND_RETURN_UNLESS(non_blocking == false); // dim_order is set, the target dim_order will be either contiguous or // channels_last memory format @@ -30,9 +30,9 @@ bool check__to_dim_order_copy_args( executorch::aten::ArrayRef dim_order_ref = dim_order.value(); // dim order size shall equal to input dim - ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == input.dim()); + ET_LOG_AND_RETURN_UNLESS(dim_order_ref.size() == input.dim()); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( is_channels_last_dim_order( dim_order.value().data(), dim_order.value().size()) || is_contiguous_dim_order( @@ -40,25 +40,25 @@ bool check__to_dim_order_copy_args( // Out Aten tensor shall have same memory format stride as dim_order const size_t kMaxNumOfDimensions = 16; - ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim()); + ET_LOG_AND_RETURN_UNLESS(kMaxNumOfDimensions >= out.dim()); executorch::aten::StridesType target_strides[kMaxNumOfDimensions]; dim_order_to_stride_nocheck( out.sizes().data(), dim_order_ref.data(), dim_order_ref.size(), target_strides); - ET_LOG_AND_RETURN_IF_FALSE(out.dim() == dim_order_ref.size()); + ET_LOG_AND_RETURN_UNLESS(out.dim() == dim_order_ref.size()); for (size_t i = 0; i < dim_order_ref.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]); + ET_LOG_AND_RETURN_UNLESS(target_strides[i] == out.strides()[i]); } } else { // dim_order is not set, preserve the dim order of input auto out_strides = out.strides(); auto input_strides = input.strides(); - ET_LOG_AND_RETURN_IF_FALSE(input_strides.size() == out_strides.size()); + ET_LOG_AND_RETURN_UNLESS(input_strides.size() == out_strides.size()); for (size_t i = 0; i < input_strides.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(input_strides[i] == out_strides[i]); + 
ET_LOG_AND_RETURN_UNLESS(input_strides[i] == out_strides[i]); } } return true; diff --git a/kernels/optimized/cpu/op_bmm.cpp b/kernels/optimized/cpu/op_bmm.cpp index 21ae7dfca90..112c486f229 100644 --- a/kernels/optimized/cpu/op_bmm.cpp +++ b/kernels/optimized/cpu/op_bmm.cpp @@ -31,46 +31,46 @@ namespace { // Verifies that the parameters are valid. bool check_bmm_out_args(const Tensor& self, const Tensor& mat2, Tensor& out) { // Ensure dimensions is 3 for all input and out - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( self.dim() == mat2.dim(), "self.dim() %zd != mat2.dim() %zd", self.dim(), mat2.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( self.dim() == out.dim(), "self.dim() %zd != out.dim() %zd", self.dim(), out.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( self.dim() == 3, "self.dim() %zd != 3", self.dim()); // Ensure batch larger than or equals to 0 - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( self.size(0) >= 0, "self.size(0) %zd < 0", self.size(0)); // Ensure batches are the same - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( self.size(0) == mat2.size(0), "self.size(0) %zd != mat2.size(0) %zd", self.size(0), mat2.size(0)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( self.size(0) == out.size(0), "self.size(0) %zd != out.size(0) %zd", self.size(0), out.size(0)); // Ensure the out size is compatible with input tensors - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( mat2.size(2) == out.size(2), "mat2.size(2) %zd != out.size(2) %zd", mat2.size(2), out.size(2)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( self.size(1) == out.size(1), "self.size(1) %zd != out.size(1) %zd", self.size(1), out.size(1)); // Ensure that all tensors share a dtype - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, mat2, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(self, mat2, out)); return true; } diff --git a/kernels/portable/cpu/op__empty_dim_order.cpp b/kernels/portable/cpu/op__empty_dim_order.cpp index 59b791d611a..87c2710fc90 100644 --- a/kernels/portable/cpu/op__empty_dim_order.cpp +++ b/kernels/portable/cpu/op__empty_dim_order.cpp @@ -30,20 +30,20 @@ bool _check__empty_out_dim_order(OptionalIntArrayRef dim_order, Tensor& out) { // out tensor's dim order shall equal to input dim order IntArrayRef dim_order_ref = dim_order.value(); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( is_channels_last_dim_order( dim_order.value().data(), dim_order.value().size()) || is_contiguous_dim_order( dim_order.value().data(), dim_order.value().size())); // Out tensor shall have same dim order as dim_order - ET_LOG_AND_RETURN_IF_FALSE(out_dim_order.size() == dim_order_ref.size()); + ET_LOG_AND_RETURN_UNLESS(out_dim_order.size() == dim_order_ref.size()); for (size_t i = 0; i < dim_order_ref.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(out_dim_order[i] == dim_order_ref[i]); + ET_LOG_AND_RETURN_UNLESS(out_dim_order[i] == dim_order_ref[i]); } } else { // dim_order is not set, out tensor should be contiguous memory // format - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( is_contiguous_dim_order(out_dim_order.data(), out_dim_order.size())); } return true; diff --git a/kernels/portable/cpu/op_convolution_backward.cpp b/kernels/portable/cpu/op_convolution_backward.cpp index 7884ea0c44c..6e9d0618189 100644 --- a/kernels/portable/cpu/op_convolution_backward.cpp +++ b/kernels/portable/cpu/op_convolution_backward.cpp @@ 
-38,27 +38,27 @@ bool check_convolution_backward_args( Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( transposed == false, "Transposed Convolution Backward not supported yet"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( weight.dim() == 4, "Only 2D Convolution Backward supported for now"); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(weight, input)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(grad_output, input)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(weight, input)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(grad_output, input)); if (output_mask[0]) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(grad_input, input)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(grad_input, input)); } if (output_mask[1]) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(grad_weight, input)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(grad_weight, input)); } if (output_mask[2]) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(grad_bias, input)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(grad_bias, input)); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( check_convolution_args( input, weight, @@ -86,14 +86,14 @@ bool check_convolution_backward_args( output_sizes, &output_ndim); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( output_size_is_valid({output_sizes, output_ndim}, input.dim() - 2)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( grad_output.dim() == input.dim(), "grad_output should have same number of dimensions as input"); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensor_has_expected_size(grad_output, {output_sizes, output_ndim})); return true; diff --git a/kernels/portable/cpu/op_flip.cpp b/kernels/portable/cpu/op_flip.cpp index 41e99953c93..7d93d1ea1b7 100644 --- a/kernels/portable/cpu/op_flip.cpp +++ b/kernels/portable/cpu/op_flip.cpp @@ -16,7 +16,7 @@ namespace native { namespace { bool check_flip_args(const Tensor& in, IntArrayRef dims, const Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); return check_dim_list_is_valid(in, dims); } diff --git a/kernels/portable/cpu/op_linear_scratch_example.cpp b/kernels/portable/cpu/op_linear_scratch_example.cpp index b217e9ad942..9012a7d9e52 100644 --- a/kernels/portable/cpu/op_linear_scratch_example.cpp +++ b/kernels/portable/cpu/op_linear_scratch_example.cpp @@ -40,13 +40,13 @@ bool check_linear_scratch_example_args( const optional& bias, Tensor& out, Tensor& scratch) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( input.size(1) == weight.size(1), "Unexpected weight size 1"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( scratch.size(0) == input.size(0), "Unexpected scratch size 0"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( scratch.size(1) == weight.size(0), "Unexpected scratch size 1"); return true; diff --git a/kernels/portable/cpu/op_repeat.cpp b/kernels/portable/cpu/op_repeat.cpp index 8b64eefde31..44ea95298cc 100644 --- a/kernels/portable/cpu/op_repeat.cpp +++ b/kernels/portable/cpu/op_repeat.cpp @@ -21,9 +21,9 @@ bool calculate_output_size( const executorch::aten::ArrayRef& self_sizes, const executorch::aten::ArrayRef& repeats, Tensor::SizesType* out_sizes_ptr) { - ET_LOG_AND_RETURN_IF_FALSE(repeats.size() < kTensorDimensionLimit); + 
ET_LOG_AND_RETURN_UNLESS(repeats.size() < kTensorDimensionLimit); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( repeats.size() >= self_sizes.size(), "Repeats vector size is %zu must be >= self_sizes %zu.", repeats.size(), diff --git a/kernels/portable/cpu/op_repeat_interleave.cpp b/kernels/portable/cpu/op_repeat_interleave.cpp index c8a84e8c748..3144c53210c 100644 --- a/kernels/portable/cpu/op_repeat_interleave.cpp +++ b/kernels/portable/cpu/op_repeat_interleave.cpp @@ -18,26 +18,26 @@ bool check_repeat_interleave_args( int64_t output_size_value, int64_t repeats_sum, Tensor& out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( repeats.scalar_type() == ScalarType::Int || repeats.scalar_type() == ScalarType::Long, "repeats must be int or long"); - ET_LOG_MSG_AND_RETURN_IF_FALSE(repeats.dim() == 1, "repeats must be 1D"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS(repeats.dim() == 1, "repeats must be 1D"); + ET_LOG_MSG_AND_RETURN_UNLESS( output_size_value == repeats_sum, "output_size, if provided, must be equal to repeats.sum()"); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(repeats, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(repeats, out)); if (repeats.scalar_type() == ScalarType::Long) { const int64_t* const repeats_data = repeats.const_data_ptr(); for (size_t i = 0; i < repeats.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( repeats_data[i] >= 0, "repeats cannot be negative"); } } else { const int32_t* const repeats_data = repeats.const_data_ptr(); for (size_t i = 0; i < repeats.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( repeats_data[i] >= 0, "repeats cannot be negative"); } } diff --git a/kernels/portable/cpu/op_roll.cpp b/kernels/portable/cpu/op_roll.cpp index ee735758c52..82e6ea98344 100644 --- a/kernels/portable/cpu/op_roll.cpp +++ b/kernels/portable/cpu/op_roll.cpp @@ -19,15 +19,15 @@ bool check_roll_args( IntArrayRef shifts, IntArrayRef dims, const Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(in, 1)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(in, 1)); if (in.numel() > 0) { for (const auto& d : dims) { - ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(d, in.dim())); + ET_LOG_AND_RETURN_UNLESS(dim_is_valid(d, in.dim())); } } - ET_LOG_AND_RETURN_IF_FALSE(!shifts.empty()); - ET_LOG_AND_RETURN_IF_FALSE(shifts.size() == dims.size()); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(!shifts.empty()); + ET_LOG_AND_RETURN_UNLESS(shifts.size() == dims.size()); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); return true; } diff --git a/kernels/portable/cpu/op_topk.cpp b/kernels/portable/cpu/op_topk.cpp index 987e974bbf5..48a830d368f 100644 --- a/kernels/portable/cpu/op_topk.cpp +++ b/kernels/portable/cpu/op_topk.cpp @@ -22,13 +22,13 @@ bool check_topk_args( int64_t dim, Tensor& values, Tensor& indices) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, values)); - ET_LOG_AND_RETURN_IF_FALSE(indices.scalar_type() == ScalarType::Long); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, values)); + ET_LOG_AND_RETURN_UNLESS(indices.scalar_type() == ScalarType::Long); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); if (dim < 0) { dim += nonzero_dim(in); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( k >= 0 && k <= nonempty_size(in, dim), "selected 
index k out of range"); return true; } diff --git a/kernels/portable/cpu/op_zeros.cpp b/kernels/portable/cpu/op_zeros.cpp index e24324e55fd..18eb1d37708 100644 --- a/kernels/portable/cpu/op_zeros.cpp +++ b/kernels/portable/cpu/op_zeros.cpp @@ -23,9 +23,9 @@ namespace { bool check_sizes( executorch::aten::ArrayRef size_int64_t, executorch::aten::ArrayRef size_int32_t) { - ET_LOG_AND_RETURN_IF_FALSE(size_int64_t.size() == size_int32_t.size()); + ET_LOG_AND_RETURN_UNLESS(size_int64_t.size() == size_int32_t.size()); for (int i = 0; i < size_int64_t.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(((int64_t)size_int32_t[i] == size_int64_t[i])); + ET_LOG_AND_RETURN_UNLESS(((int64_t)size_int32_t[i] == size_int64_t[i])); } return true; diff --git a/kernels/portable/cpu/util/activation_ops_util.cpp b/kernels/portable/cpu/util/activation_ops_util.cpp index 499eeaf64d9..3ab35ae63b6 100644 --- a/kernels/portable/cpu/util/activation_ops_util.cpp +++ b/kernels/portable/cpu/util/activation_ops_util.cpp @@ -15,9 +15,9 @@ namespace torch { namespace executor { bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(in.scalar_type() != ScalarType::Bool); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(in.scalar_type() != ScalarType::Bool); + ET_LOG_MSG_AND_RETURN_UNLESS( approximate == "tanh" || approximate == "none", "Invalid approximation format: %.*s for gelu", static_cast(approximate.length()), @@ -26,21 +26,21 @@ bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out) { } bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, in.dim())); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(in)); + ET_LOG_AND_RETURN_UNLESS(dim_is_valid(dim, in.dim())); + ET_LOG_AND_RETURN_UNLESS(tensor_is_floating_type(in)); const size_t non_negative_dim = dim < 0 ? 
dim + in.dim() : dim; const size_t dim_size = in.size(non_negative_dim); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim_size % 2 == 0, "Halving dimension must be even, but dimension %zd is size %zd", non_negative_dim, dim_size); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensor_is_floating_type(out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_rank(in, out)); + ET_LOG_MSG_AND_RETURN_UNLESS( out.size(non_negative_dim) == dim_size / 2, "output tensor must have half the size of the input tensor along the specified dimension."); @@ -73,12 +73,12 @@ bool check_log_softmax_args( int64_t dim, bool half_to_float, Tensor& out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( !half_to_float, "half to float conversion is not supported on CPU"); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(in)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(out)); return true; } diff --git a/kernels/portable/cpu/util/advanced_index_util.cpp b/kernels/portable/cpu/util/advanced_index_util.cpp index cc205df0e43..bf4a2148986 100644 --- a/kernels/portable/cpu/util/advanced_index_util.cpp +++ b/kernels/portable/cpu/util/advanced_index_util.cpp @@ -24,7 +24,7 @@ bool check_indices_dtypes(TensorOptList indices) { if (indices[i].has_value()) { const Tensor& index = indices[i].value(); ScalarType ix_type = index.scalar_type(); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( ix_type == ScalarType::Long || ix_type == ScalarType::Int || ix_type == ScalarType::Byte || ix_type == ScalarType::Bool, "Index tensors should be Long, Int, Byte or Bool"); @@ -47,7 +47,7 @@ bool check_mask_indices(const Tensor& in, TensorOptList indices) { if (indices[i].has_value()) { const Tensor& index = indices[i].value(); if (is_mask_index(index)) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( index.dim() > 0, "Zero-dimensional mask index not allowed"); for (auto j = 0; j < index.dim(); j++) { if (index.size(j) != in.size(in_i + j)) { @@ -154,11 +154,11 @@ int64_t query_integral_index( } // namespace bool check_index_args(const Tensor& in, TensorOptList indices, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(check_indices_dtypes(indices)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(check_indices_dtypes(indices)); + ET_LOG_MSG_AND_RETURN_UNLESS( indices.size() <= in.dim(), "Indexing too many dimensions"); - ET_LOG_AND_RETURN_IF_FALSE(check_mask_indices(in, indices)); + ET_LOG_AND_RETURN_UNLESS(check_mask_indices(in, indices)); return true; } @@ -197,7 +197,7 @@ bool get_indices_broadcast_shape( } else if (rev_ix_sizes[0] == 1) { rev_ix_sizes[0] = len; } else if (len != 1 && rev_ix_sizes[0] != len) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( false, "Broadcast of mask index failed."); } } else { @@ -209,7 +209,7 @@ bool 
get_indices_broadcast_shape( } else if (rev_ix_sizes[j] == 1) { rev_ix_sizes[j] = rev_j_size; } else if (rev_j_size != 1 && rev_ix_sizes[j] != rev_j_size) { - ET_LOG_MSG_AND_RETURN_IF_FALSE(false, "Broadcast of index failed."); + ET_LOG_MSG_AND_RETURN_UNLESS(false, "Broadcast of index failed."); } } } @@ -290,11 +290,11 @@ bool get_index_out_target_size( size_t num_null_indices = get_num_null_indices(indices); size_t num_indexed_dims = get_num_indexed_dims(indices); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( num_null_indices + num_indexed_dims <= in.dim(), "Indexing too many dimensions"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.dim() + broadcast_ndim - num_indexed_dims <= kTensorDimensionLimit, "Out tensor would exceed number of allowed dimensions"); @@ -441,7 +441,7 @@ bool get_in_coord( if (index_val < 0) { index_val += in.size(i); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( index_val >= 0 && index_val < in.size(i), "Index %" PRId64 " is out of bounds for input dimension %zd with size %zd.", diff --git a/kernels/portable/cpu/util/copy_ops_util.cpp b/kernels/portable/cpu/util/copy_ops_util.cpp index 78b66b05f22..16e618462e3 100644 --- a/kernels/portable/cpu/util/copy_ops_util.cpp +++ b/kernels/portable/cpu/util/copy_ops_util.cpp @@ -43,17 +43,17 @@ bool check_as_strided_copy_args( ArrayRef stride, optional storage_offset, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_MSG_AND_RETURN_UNLESS( size.size() == stride.size(), "mismatch in length of strides and shape"); for (const auto& val : stride) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( val >= 0, "as_strided: Negative strides are not supported at the moment"); } int64_t offset = storage_offset.has_value() ? storage_offset.value() : 0; - ET_LOG_MSG_AND_RETURN_IF_FALSE(offset >= 0, "Negative storage offset"); + ET_LOG_MSG_AND_RETURN_UNLESS(offset >= 0, "Negative storage offset"); // Check that the requested storage is within bounds of input storage size_t storage_size_bytes = @@ -63,7 +63,7 @@ bool check_as_strided_copy_args( return true; } size_t new_storage_size_bytes = in.nbytes(); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( storage_size_bytes + storage_offset_bytes <= new_storage_size_bytes, "Requiring a storage size of %zd are out of bounds for storage of size %zd", storage_size_bytes + storage_offset_bytes, @@ -76,7 +76,7 @@ bool check_cat_args( int64_t dim, Tensor& out) { // Ensure the input tensors list is non-empty - ET_LOG_AND_RETURN_IF_FALSE(tensors.size() > 0); + ET_LOG_AND_RETURN_UNLESS(tensors.size() > 0); // Find the first non-empty tensor in the list to use as a reference size_t ref_i = 0; @@ -92,10 +92,10 @@ bool check_cat_args( // https://pytorch.org/docs/stable/generated/torch.cat.html for (size_t i = 0; i < tensors.size(); ++i) { // All input dtypes must be castable to the output dtype. - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( canCast(tensors[i].scalar_type(), out.scalar_type())); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dim_order(tensors[i], out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dim_order(tensors[i], out)); // Empty tensors have no shape constraints. if (tensors[i].numel() == 0) { @@ -103,21 +103,20 @@ bool check_cat_args( } // All input tensors must have the same number of dimensions. 
- ET_LOG_AND_RETURN_IF_FALSE( - tensor_is_rank(tensors[ref_i], tensors[i].dim())); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(tensors[ref_i], tensors[i].dim())); for (size_t d = 0; d < tensors[i].dim(); ++d) { if (d != dim) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(tensors[i], d, tensors[ref_i], d)); } } } // Ensure dim is in range. - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors[ref_i].numel() == 0 || tensors[ref_i].dim() > dim); - ET_LOG_AND_RETURN_IF_FALSE(dim >= 0); + ET_LOG_AND_RETURN_UNLESS(dim >= 0); return true; } @@ -159,23 +158,23 @@ bool check_expand_copy_args( Tensor& out) { (void)out; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( implicit == false, "This operator is not implemented for when implicit == true."); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( expand_sizes.size() >= input.sizes().size(), "The number of sizes provided (%zu) must at least be equal to the number of dimensions in the tensor (%zu)", expand_sizes.size(), input.sizes().size()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( expand_sizes.size() <= kTensorDimensionLimit, "The number of expanded dims (%zu) exceeds the configured maximum (%zu). Increase this limit.", expand_sizes.size(), kTensorDimensionLimit); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(input, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(input, out)); return true; } @@ -198,7 +197,7 @@ bool get_expand_copy_out_target_size( // -1 can use for replacing any corresponding dimension output_sizes[j] = self_sizes[i]; } else if (self_sizes[i] != 1) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( expand_sizes[j] == self_sizes[i], "The expanded size of the tensor (%zu) must match the existing size (%zu) at non-singleton dimension %zu.", (size_t)expand_sizes[j], @@ -211,7 +210,7 @@ bool get_expand_copy_out_target_size( while (j > 0) { --j; output_sizes[j] = expand_sizes[j]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( expand_sizes[j] >= 0, "The expanded size of the tensor (%zu) isn't allowed in a leading, non-existing dimension %zu", (size_t)expand_sizes[j], @@ -223,8 +222,8 @@ bool get_expand_copy_out_target_size( } bool check_permute_copy_args(const Tensor& in, IntArrayRef dims, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, dims.size())); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(in, dims.size())); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); // Make sure no dimensions are duplicated and all in the range [-in.dim(), // in.dim() - 1]. @@ -232,16 +231,16 @@ bool check_permute_copy_args(const Tensor& in, IntArrayRef dims, Tensor& out) { memset(dim_exist, false, sizeof(dim_exist)); for (int i = 0; i < dims.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dims[i])); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dims[i])); // Convert dimension to a non-negative number in the range // [0 .. in.dim() - 1]. size_t dim = dims[i] >= 0 ? dims[i] : in.dim() + dims[i]; // Internal check, since we have already validated this - ET_LOG_AND_RETURN_IF_FALSE(dim < kTensorDimensionLimit && dim >= 0); + ET_LOG_AND_RETURN_UNLESS(dim < kTensorDimensionLimit && dim >= 0); // Check that the dimension hasn't been seen previously. 
- ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim_exist[dim] == false, "duplicate dims are not allowed."); dim_exist[dim] = true; @@ -251,13 +250,13 @@ bool check_permute_copy_args(const Tensor& in, IntArrayRef dims, Tensor& out) { } bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.dim() > 0, "in must have at least one dimension; saw %zd", in.dim()); - ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, in.dim())); + ET_LOG_AND_RETURN_UNLESS(dim_is_valid(dim, in.dim())); const ssize_t dim_size = in.size(dim); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim_size == out.size(), "out tensorlist's length %zd must equal unbind dim %" PRId64 " size = %zd.", @@ -268,7 +267,7 @@ bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { // Validate each output. for (size_t i = 0; i < out.size(); ++i) { // All output dtypes must be the same. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out[i].scalar_type() == out[0].scalar_type(), "out[%zu] dtype %" PRId8 " != out[0] dtype %" PRId8, i, @@ -276,7 +275,7 @@ bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { static_cast(out[0].scalar_type())); // output tensor must have # of dims = in.dim() -1 - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out[i].dim() == (in.dim() - 1), "out[%zu] dim %zd != in dim %zd", i, @@ -286,7 +285,7 @@ bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { // Check the shape of the output. for (ssize_t d = 0, out_d = 0; d < in.dim(); ++d) { if (d != dim) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out[i].size(out_d) == in.size(d), "out[%zu].size(%zd) %zd != in.size(%zd) %zd", i, @@ -318,11 +317,11 @@ bool check_pixel_shuffle_args( const Tensor& in, int64_t upscale_factor, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(in, 3)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(out, 3)); - ET_LOG_AND_RETURN_IF_FALSE(upscale_factor > 0); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(in, 3)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(out, 3)); + ET_LOG_AND_RETURN_UNLESS(upscale_factor > 0); + ET_LOG_AND_RETURN_UNLESS( in.size(in.dim() - 3) % (upscale_factor * upscale_factor) == 0); return true; } @@ -331,12 +330,12 @@ bool check_pixel_unshuffle_args( const Tensor& in, int64_t downscale_factor, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(in, 3)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(out, 3)); - ET_LOG_AND_RETURN_IF_FALSE(downscale_factor > 0); - ET_LOG_AND_RETURN_IF_FALSE(in.size(in.dim() - 1) % downscale_factor == 0); - ET_LOG_AND_RETURN_IF_FALSE(in.size(in.dim() - 2) % downscale_factor == 0); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(in, 3)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(out, 3)); + ET_LOG_AND_RETURN_UNLESS(downscale_factor > 0); + ET_LOG_AND_RETURN_UNLESS(in.size(in.dim() - 1) % downscale_factor == 0); + ET_LOG_AND_RETURN_UNLESS(in.size(in.dim() - 2) % downscale_factor == 0); return true; } @@ 
-390,10 +389,10 @@ bool check_select_copy_out_args( int64_t dim, int64_t index, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(in, 1)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_dim_has_index(in, dim, index)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(in, 1)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); + ET_LOG_AND_RETURN_UNLESS(tensor_dim_has_index(in, dim, index)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); return true; } @@ -418,22 +417,22 @@ bool check_split_with_sizes_copy_args( executorch::aten::ArrayRef split_sizes, int64_t dim, TensorList out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(in, 1)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(in, 1)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( split_sizes.size() == out.size(), "Number of split sizes must match the number of output tensors"); int64_t sum = 0; for (int i = 0; i < split_sizes.size(); i++) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( split_sizes[i] >= 0, "All split sizes must be non negative."); sum += split_sizes[i]; } const ssize_t dim_size = in.size(dim); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( sum == dim_size, "Sum of split sizes does not match input size at given dim"); @@ -458,8 +457,8 @@ bool check_squeeze_copy_dim_args( const Tensor in, int64_t dim, const Tensor out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); return true; } @@ -495,18 +494,18 @@ bool check_squeeze_copy_dims_args( const Tensor in, const executorch::aten::ArrayRef dims, const Tensor out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); for (size_t i = 0; i < dims.size(); ++i) { const int64_t dim = dims[i] < 0 ? dims[i] + nonzero_dim(in) : dims[i]; - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); // Check that a dim does not appear twice in dims for (size_t j = 0; j < dims.size(); ++j) { if (i != j) { const int64_t dim_temp = dims[j] < 0 ? dims[j] + nonzero_dim(in) : dims[j]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim != dim_temp, "dim %" PRId64 " appears multiple times in dims!", dim); @@ -560,25 +559,25 @@ bool check_stack_args( int64_t dim, Tensor& out) { // Ensure the input tensors list is non-empty - ET_LOG_AND_RETURN_IF_FALSE(tensors.size() > 0); + ET_LOG_AND_RETURN_UNLESS(tensors.size() > 0); // All input tensors need to be of the same size // https://pytorch.org/docs/stable/generated/torch.stack.html for (size_t i = 0; i < tensors.size(); i++) { // All input dtypes must be castable to the output dtype. 
- ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( canCast(tensors[i].scalar_type(), out.scalar_type())); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(tensors[i], tensors[0].dim())); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(tensors[i], tensors[0].dim())); for (size_t d = 0; d < tensors[i].dim(); d++) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(tensors[i], d, tensors[0], d)); } } // The output tensor will have a dimension inserted, so dim should be between // 0 and ndim_of_inputs + 1 - ET_LOG_AND_RETURN_IF_FALSE(dim >= 0 && dim < tensors[0].dim() + 1); + ET_LOG_AND_RETURN_UNLESS(dim >= 0 && dim < tensors[0].dim() + 1); return true; } @@ -602,8 +601,8 @@ void get_stack_out_target_size( } bool check_tril_args(const Tensor& in, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(in, 2)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(in, 2)); return true; } @@ -612,22 +611,22 @@ bool check_split_copy_args( int64_t split_size, int64_t dim, TensorList out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( input.dim() > 0, "input must have at least one dimension; saw %zd", input.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim >= 0 && dim < input.dim(), "dim %" PRId64 " out of range [0,%zd)", dim, input.dim()); const ssize_t dim_size = input.size(dim); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( split_size >= 0, "split_size %" PRId64 " must be non-negative", split_size); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( split_size > 0 || dim_size == 0, "split_size is zero but input.size(%" PRId64 ") %zd is non-zero", dim, @@ -646,7 +645,7 @@ bool check_split_copy_args( // Note that this also handles the case where split_size == 0, avoiding a // division by zero in the other branch. When dim_size == 0 && split_size == // 0, core PyTorch expects 1 output element. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.size() == 1, "Unexpected out.size() %zu: should be 1 because split_size %" PRId64 " >= input.size(%" PRId64 ") %zd", @@ -657,7 +656,7 @@ bool check_split_copy_args( remainder = dim_size; } else { int64_t expected_out_len = (dim_size + split_size - 1) / split_size; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.size() == expected_out_len, "Unexpected out.size() %zu: ceil(input.size(%" PRId64 ")=%zd" @@ -676,7 +675,7 @@ bool check_split_copy_args( // Validate each output. for (size_t i = 0; i < out.size(); ++i) { // All output dtypes must be the same. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out[i].scalar_type() == out[0].scalar_type(), "out[%zu] dtype %" PRId8 " != out[0] dtype %" PRId8, i, @@ -684,7 +683,7 @@ bool check_split_copy_args( static_cast(out[0].scalar_type())); // All outputs must have the same number of dimensions as the input. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out[i].dim() == input.dim(), "out[%zu] dim %zd != input dim %zd", i, @@ -698,7 +697,7 @@ bool check_split_copy_args( if (i < out.size() - 1) { // All outputs except the final one: split dimension should be // split_size. 
- ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out[i].size(d) == split_size, "out[%zu].size(%zd) %zd != split_size %" PRId64, i, @@ -708,7 +707,7 @@ bool check_split_copy_args( } else { // The final output: split dimension should be the remainder of // split_size. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out[i].size(d) == remainder, "out[%zu].size(%zd) %zd != remainder %" PRId64, i, @@ -718,7 +717,7 @@ bool check_split_copy_args( } } else { // Non-split output dimensions must be the same as the input dimension. - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(out[i], d, input, d)); } } @@ -736,11 +735,11 @@ bool check_to_copy_args( (void)out; // Right now we only support blocking data transfer - ET_LOG_AND_RETURN_IF_FALSE(non_blocking == false); + ET_LOG_AND_RETURN_UNLESS(non_blocking == false); // Right now we only focus on contiguous memory, memory_format shall be // exec::aten::MemoryFormat::Contiguous or none. - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( !memory_format.has_value() || memory_format.value() == MemoryFormat::Contiguous); @@ -753,15 +752,15 @@ bool check__to_dim_order_copy_args( executorch::aten::OptionalArrayRef dim_order, Tensor& out) { // Right now we only support blocking data transfer - ET_LOG_AND_RETURN_IF_FALSE(non_blocking == false); + ET_LOG_AND_RETURN_UNLESS(non_blocking == false); if (dim_order.has_value()) { executorch::aten::ArrayRef dim_order_ref = dim_order.value(); // dim order size shall equal to input dim - ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == input.dim()); + ET_LOG_AND_RETURN_UNLESS(dim_order_ref.size() == input.dim()); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( is_channels_last_dim_order( dim_order.value().data(), dim_order.value().size()) || is_contiguous_dim_order( @@ -769,18 +768,18 @@ bool check__to_dim_order_copy_args( // Out tensor shall have same dim order as dim_order auto out_dim_order = out.dim_order(); - ET_LOG_AND_RETURN_IF_FALSE(out_dim_order.size() == dim_order_ref.size()); + ET_LOG_AND_RETURN_UNLESS(out_dim_order.size() == dim_order_ref.size()); for (size_t i = 0; i < dim_order_ref.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(out_dim_order[i] == dim_order_ref[i]); + ET_LOG_AND_RETURN_UNLESS(out_dim_order[i] == dim_order_ref[i]); } } else { // dim_order is not set, preserve the dim order of input // Out tensor shall have same dim order as input dim_order auto out_dim_order = out.dim_order(); auto input_dim_order = input.dim_order(); - ET_LOG_AND_RETURN_IF_FALSE(out_dim_order.size() == input_dim_order.size()); + ET_LOG_AND_RETURN_UNLESS(out_dim_order.size() == input_dim_order.size()); for (size_t i = 0; i < input_dim_order.size(); i++) { - ET_LOG_AND_RETURN_IF_FALSE(out_dim_order[i] == input_dim_order[i]); + ET_LOG_AND_RETURN_UNLESS(out_dim_order[i] == input_dim_order[i]); } } return true; @@ -790,19 +789,19 @@ bool check_unsqueeze_copy_args( const Tensor input, int64_t dim, const Tensor out) { - ET_LOG_AND_RETURN_IF_FALSE(dim >= 0); + ET_LOG_AND_RETURN_UNLESS(dim >= 0); // The input and out shall share same dtype - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(input, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(input, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(out, dim)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(out, dim)); // The shape of input and out shall obey the relationship: // 1. input.dim() == out.dim()-1 // 2. input.size(i) == out.size(i) for all i < dim // 3. 
input.size(i-1) == out.size(i) for all i >= dim // 4. out.size(dim) == 1 - ET_LOG_AND_RETURN_IF_FALSE(input.dim() == out.dim() - 1); + ET_LOG_AND_RETURN_UNLESS(input.dim() == out.dim() - 1); for (size_t d = 0; d < out.dim(); d++) { auto dim_normalized = dim; @@ -811,7 +810,7 @@ bool check_unsqueeze_copy_args( } if (d < dim_normalized) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( input.size(d) == out.size(d), "input.size(%zu) %zd != out.size(%zu) %zd | dim = %" PRId64, d, @@ -820,7 +819,7 @@ bool check_unsqueeze_copy_args( out.size(d), dim); } else if (d > dim_normalized) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( input.size(d - 1) == out.size(d), "input.size(%zu) %zd != out.size(%zu) %zd | dim = %" PRId64, d - 1, @@ -829,7 +828,7 @@ bool check_unsqueeze_copy_args( out.size(d), dim); } else { // d == dim - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.size(d) == 1, "out.size(%zu) %zd shall equal 1 | dim = %" PRId64, d, @@ -845,26 +844,26 @@ bool check_view_copy_args( const Tensor& self, executorch::aten::ArrayRef size_int64_t, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(size_int64_t.size() == out.sizes().size()); + ET_LOG_AND_RETURN_UNLESS(size_int64_t.size() == out.sizes().size()); // The input and out shall share same dtype and numel - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( self.numel() == out.numel(), "self.numel() %zd != out.numel() %zd", self.numel(), out.numel()); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(self, out)); // The size of out should equal target size. bool size_inferred = false; for (int i = 0; i < size_int64_t.size(); i++) { // If this value is -1 it implies that this dimension is inferred. 
if (size_int64_t[i] == -1) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( !size_inferred, "Multiple dimensions cannot be inferred."); size_inferred = true; } - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( ((int64_t)out.sizes()[i] == size_int64_t[i]) || (size_int64_t[i] == -1)); } @@ -880,7 +879,7 @@ bool get_view_copy_target_size( size_t out_numels_without_minus_1 = 1; int32_t minus_1_dim = -1; - ET_LOG_AND_RETURN_IF_FALSE(size_int64_t.size() == dim); + ET_LOG_AND_RETURN_UNLESS(size_int64_t.size() == dim); for (size_t i = 0; i < dim; ++i) { if (size_int64_t[i] != -1) { @@ -888,7 +887,7 @@ bool get_view_copy_target_size( out_numels_without_minus_1 = out_numels_without_minus_1 * size_int64_t[i]; } else { // TODO(kimishpatel): Add test to hit this line - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( minus_1_dim == -1, "At most one view copy dim can be -1."); minus_1_dim = i; } @@ -905,17 +904,17 @@ bool check_diagonal_copy_args( int64_t dim1, int64_t dim2, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(in, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim1)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim2)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(in, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim1)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim2)); if (dim1 < 0) { dim1 += nonzero_dim(in); } if (dim2 < 0) { dim2 += nonzero_dim(in); } - ET_LOG_AND_RETURN_IF_FALSE(dim1 != dim2); + ET_LOG_AND_RETURN_UNLESS(dim1 != dim2); return true; } diff --git a/kernels/portable/cpu/util/distance_util.cpp b/kernels/portable/cpu/util/distance_util.cpp index f8dc2f71216..bda6ab38462 100644 --- a/kernels/portable/cpu/util/distance_util.cpp +++ b/kernels/portable/cpu/util/distance_util.cpp @@ -12,9 +12,9 @@ namespace torch { namespace executor { bool check_pdist_args(const Tensor& in, double p, const Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(in, 2)); + ET_LOG_MSG_AND_RETURN_UNLESS( p >= 0, "pdist only supports non-negative p values"); return true; } @@ -34,17 +34,17 @@ bool check_cdist_args( double p, optional compute_mode, const Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(x1, x2)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(x1, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(x1, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(x2, 2)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(x1, x2)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(x1, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(x1, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_greater_or_equal_to(x2, 2)); + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(x1, x1.dim() - 1, x2, x2.dim() - 1)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( p >= 0, "cdist only supports non-negative p values"); if (compute_mode.has_value()) { int64_t mode = compute_mode.value(); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( mode >= 0 && mode <= 2, "possible modes: 0, 1, 2, but was: %" PRId64, mode); diff --git 
a/kernels/portable/cpu/util/index_util.cpp b/kernels/portable/cpu/util/index_util.cpp index fb54980bb47..7732c1b2195 100644 --- a/kernels/portable/cpu/util/index_util.cpp +++ b/kernels/portable/cpu/util/index_util.cpp @@ -18,13 +18,13 @@ bool check_gather_args( const Tensor& index, bool sparse_grad, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); + ET_LOG_MSG_AND_RETURN_UNLESS( index.scalar_type() == ScalarType::Long, "Expected dypte int64 for index"); if (index.numel() != 0) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( nonzero_dim(in) == nonzero_dim(index), "self and index should have the same dimensionality when index is not empty " "except for the case when one has dimension 0 and the other has dimension 1"); @@ -37,7 +37,7 @@ bool check_gather_args( for (size_t d = 0; d < nonzero_dim(in); ++d) { if (d != dim) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( nonempty_size(index, d) <= nonempty_size(in, d), "size of dimension %zd of index should be smaller than the size of that dimension of input if dimension %zd != dim %zd", d, @@ -47,7 +47,7 @@ bool check_gather_args( } const long* index_data = index.const_data_ptr(); for (size_t i = 0; i < index.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( index_data[i] >= 0 && index_data[i] < nonempty_size(in, dim), "Index is out of bounds for dimension %zd with size %zd", (size_t)dim, @@ -62,22 +62,22 @@ bool check_index_select_args( int64_t dim, const Tensor& index, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); dim = dim < 0 ? 
dim + nonzero_dim(in) : dim; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( nonempty_size(in, dim) > 0, "index_select: Indexing axis dim should be positive"); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_MSG_AND_RETURN_UNLESS( index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "Expected index to have type of Long or Int, but found %s", toString(index.scalar_type())); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_smaller_or_equal_to(index, 1)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_smaller_or_equal_to(index, 1)); if (index.dim() > 0 && in.dim() == 0) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( index.numel() == 1, "index_select: Index to scalar must have exactly 1 value"); } @@ -85,7 +85,7 @@ bool check_index_select_args( if (index.scalar_type() == ScalarType::Long) { const int64_t* const index_ptr = index.const_data_ptr(); for (size_t i = 0; i < index.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( index_ptr[i] >= 0 && index_ptr[i] < nonempty_size(in, dim), "index[%zu] = %" PRId64 " is out of range [0, %zd)", i, @@ -95,7 +95,7 @@ bool check_index_select_args( } else { const int32_t* const index_ptr = index.const_data_ptr(); for (size_t i = 0; i < index.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( index_ptr[i] >= 0 && index_ptr[i] < nonempty_size(in, dim), "index[%zu] = %" PRId32 " is out of range [0, %zd)", i, @@ -126,12 +126,12 @@ void get_index_select_out_target_size( bool check_nonzero_args(const Tensor& in, const Tensor& out) { (void)in; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.scalar_type() == ScalarType::Long, "Expected out to be a Long tensor but received %" PRId8, static_cast(out.scalar_type())); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.dim() == 2, "Expected out to be a 2d tensor received %zd", ssize_t(out.dim())); @@ -145,18 +145,18 @@ bool check_scatter_add_args( const Tensor& index, const Tensor& src, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, src)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(self, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(self, src)); + ET_LOG_MSG_AND_RETURN_UNLESS( index.scalar_type() == ScalarType::Long, "Expected dypte int64 for index"); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(self, dim)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(self, dim)); if (index.numel() == 0) { return true; } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( nonzero_dim(self) == nonzero_dim(src) && nonzero_dim(self) == nonzero_dim(index), "self, index and src should have same number of dimensions."); @@ -167,12 +167,12 @@ bool check_scatter_add_args( } for (size_t d = 0; d < nonzero_dim(self); ++d) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( nonempty_size(index, d) <= nonempty_size(src, d), "size of dimension %zd of index should be smaller than the size of that dimension of src", d); if (d != dim) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( nonempty_size(index, d) <= nonempty_size(self, d), "size of dimension %zd of index should be smaller than the size of that dimension of self if dimension %zd != dim %zd", d, @@ -182,7 +182,7 @@ bool 
check_scatter_add_args( } const long* index_data = index.const_data_ptr(); for (size_t i = 0; i < index.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( index_data[i] >= 0 && index_data[i] < nonempty_size(self, dim), "Index is out of bounds for dimension %zd with size %zd", (size_t)dim, @@ -222,13 +222,13 @@ bool check_select_scatter_args( * 3. dim and index values are valid given the input tensor */ - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, output)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, output)); // The dim planed to be selected on shall exist in input - ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, in.dim())); + ET_LOG_AND_RETURN_UNLESS(dim_is_valid(dim, in.dim())); // The index shall be valid in the given dimenson - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( index >= 0 && index < in.size(dim), "index %" PRId64 " out of range [-%zd,%zd) at in.size( %" PRId64 ")", index, @@ -239,7 +239,7 @@ bool check_select_scatter_args( // The src.dim() shall be one lower than in.dim() since src needs to fit // into the selected data on one dim of input // https://pytorch.org/docs/stable/generated/torch.select_scatter.html - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.dim() == src.dim() + 1, "in.dim() %zd != src.dim() + 1 %zd", in.dim(), @@ -251,9 +251,9 @@ bool check_select_scatter_args( for (ssize_t d = 0; d < in.dim() - 1; d++) { if (d < dim) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, d, src, d)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_size_at_dims(in, d, src, d)); } else { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(in, d + 1, src, d)); } } diff --git a/kernels/portable/cpu/util/kernel_ops_util.cpp b/kernels/portable/cpu/util/kernel_ops_util.cpp index 2e267b57715..e255038c692 100644 --- a/kernels/portable/cpu/util/kernel_ops_util.cpp +++ b/kernels/portable/cpu/util/kernel_ops_util.cpp @@ -26,21 +26,21 @@ bool param_array_is_valid( bool allow_empty) { auto size = array.size(); if (allow_empty) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( size == 0 || size == 1 || size == length, "Expected %s to have size 0, 1 or %zu but got %zd", name, length, size); } else { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( size == 1 || size == length, "Expected %s to have size 1 or %zu but got %zd", name, length, size); } - ET_LOG_AND_RETURN_IF_FALSE(int_array_all_ge(array, min_val)); + ET_LOG_AND_RETURN_UNLESS(int_array_all_ge(array, min_val)); return true; } @@ -115,7 +115,7 @@ bool output_padding_is_valid( IntArrayRef stride, IntArrayRef dilation, size_t kernel_ndim) { - ET_LOG_AND_RETURN_IF_FALSE(param_array_is_valid( + ET_LOG_AND_RETURN_UNLESS(param_array_is_valid( "output_padding", output_padding, /*min_val=*/0, @@ -126,7 +126,7 @@ bool output_padding_is_valid( const int64_t op_i = val_at(output_padding, i); const int64_t s_i = val_at(stride, i); const int64_t d_i = val_at(dilation, i); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( op_i < s_i || op_i < d_i, "output padding must be smaller than either stride or dilation"); } @@ -246,12 +246,12 @@ void calculate_kernel_output_sizes( } bool check_arange_args(double start, double end, double step, Tensor& out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.dim() == 1, "out should be a 1-d tensor, but got a %zu-d tensor", out.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( 
(step > 0 && (end >= start)) || (step < 0 && (end <= start)), "upper bound and larger bound inconsistent with step sign"); @@ -267,25 +267,25 @@ bool check_avg_pool2d_args( const bool count_include_pad, const executorch::aten::optional& divisor_override, const Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(in)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( (in.dim() == 3 && in.size(0) > 0 && in.size(1) > 0 && in.size(2) > 0) || (in.dim() == 4 && in.size(1) > 0 && in.size(2) > 0 && in.size(3) > 0), "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input"); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( kernel_size_is_valid(kernel_size, /*kernel_ndim=*/2)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( stride_is_valid(kernel_size, /*kernel_ndim=*/2, /*allow_empty=*/true)); - ET_LOG_AND_RETURN_IF_FALSE(padding_is_valid( + ET_LOG_AND_RETURN_UNLESS(padding_is_valid( padding, kernel_size, /*kernel_ndim=*/2, /*enforce_half_kernel=*/true)); if (divisor_override.has_value()) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( divisor_override.value() != 0, "divisor_override must be non-zero, but found %" PRId64, divisor_override.value()); @@ -327,23 +327,23 @@ bool check_convolution_args( IntArrayRef output_padding, int64_t groups, const Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, weight, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(in)); + ET_LOG_AND_RETURN_UNLESS( tensor_is_default_or_channels_last_dim_order(weight)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.dim() == 3 || in.dim() == 4, "Expect input tensor to be 3-D or 4-D, but got, %zu.", static_cast(in.dim())); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight, in.dim())); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, in.dim())); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(weight, in.dim())); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(out, in.dim())); if (bias.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(bias.value(), 1)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(bias.value(), 1)); + ET_LOG_MSG_AND_RETURN_UNLESS( bias.value().size(0) == transposed ? 
groups * weight.size(1) : weight.size(0), "bias length must equal number of output channels, but got %zd", @@ -359,24 +359,24 @@ bool check_convolution_args( kernel_size[0] = weight.size(2); kernel_size[1] = weight.size(3); } - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( stride_is_valid(stride, kernel_ndim, /*allow_empty=*/false)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( padding_is_valid(padding, {kernel_size, kernel_ndim}, kernel_ndim)); - ET_LOG_AND_RETURN_IF_FALSE(dilation_is_valid(dilation, kernel_ndim)); + ET_LOG_AND_RETURN_UNLESS(dilation_is_valid(dilation, kernel_ndim)); if (transposed) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( output_padding_is_valid(output_padding, stride, dilation, kernel_ndim)); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( weight.size(0) >= groups, "Given groups=%" PRId64 ", expected weight to be at least %" PRId64 " at dimension 0, but got weight.size(0) = %zd instead", groups, groups, weight.size(0)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( weight.size(0) % groups == 0, "Given groups=%" PRId64 ", expected weight to be divisible by %" PRId64 " at dimension 0, but got weight.size(0) = %zd instead", @@ -385,7 +385,7 @@ bool check_convolution_args( weight.size(0)); if (!transposed) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.size(1) == groups * weight.size(1), "Given groups=%" PRId64 " and weight.size(1) = %zd, expected input to have %" PRId64 @@ -395,7 +395,7 @@ bool check_convolution_args( groups * weight.size(1), in.size(1)); } else { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.size(1) == weight.size(0), "input channels must match weight.size(0) in transposed convolution"); } @@ -453,10 +453,10 @@ bool check_cumsum_args( int64_t dim, optional dtype, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, in.dim())); + ET_LOG_AND_RETURN_UNLESS(dim_is_valid(dim, in.dim())); if (dtype.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(dtype.value() == out.scalar_type()); + ET_LOG_AND_RETURN_UNLESS(dtype.value() == out.scalar_type()); } return true; @@ -471,27 +471,27 @@ bool check_max_pool2d_with_indices_args( bool ceil_mode, Tensor& out, Tensor& indices) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_MSG_AND_RETURN_UNLESS( indices.scalar_type() == ScalarType::Long, "Expected indices to have type of Long, but found %s", toString(indices.scalar_type())); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(in)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( (in.dim() == 3 && in.size(0) > 0 && in.size(1) > 0 && in.size(2) > 0) || (in.dim() == 4 && in.size(1) > 0 && in.size(2) > 0 && in.size(3) > 0), "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input"); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( kernel_size_is_valid(kernel_size, /*kernel_ndim=*/2)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( stride_is_valid(kernel_size, /*kernel_ndim=*/2, /*allow_empty=*/true)); - ET_LOG_AND_RETURN_IF_FALSE(padding_is_valid( + ET_LOG_AND_RETURN_UNLESS(padding_is_valid( padding, 
kernel_size, /*kernel_ndim=*/2, /*enforce_half_kernel=*/true)); - ET_LOG_AND_RETURN_IF_FALSE(dilation_is_valid(kernel_size, /*kernel_ndim=*/2)); + ET_LOG_AND_RETURN_UNLESS(dilation_is_valid(kernel_size, /*kernel_ndim=*/2)); return true; } @@ -526,8 +526,8 @@ bool check_masked_fill_args( Tensor& out) { (void)value; - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(mask.scalar_type() == ScalarType::Bool); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(mask.scalar_type() == ScalarType::Bool); return true; } @@ -539,14 +539,14 @@ bool check_constant_pad_args( Tensor& out) { (void)value; - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_rank(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( pad.size() % 2 == 0, "Padding array must be a multiple of 2"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( pad.size() / 2 <= in.dim(), "Padding array contains too many elements"); return true; @@ -578,20 +578,20 @@ bool check_embedding_args( const Tensor& indices, const Tensor& out) { // Ensure weight is 2-D. It could be empty. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( weight.dim() == 2, "weight.dim() %zd != 2", weight.dim()); // Ensure out is k+1 dimension tensor where k is the indices.dim() // out's first k dimension shall be same as indices, and the last dim shall // equal weight's last dim - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.dim() == indices.dim() + 1, "out.dim() %zd != indices.dim() %zd + 1", out.dim(), indices.dim()); // Ensure dtype is the same for out and weight - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(weight, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(weight, out)); return true; } @@ -618,7 +618,7 @@ bool check_alpha_type( const ScalarType common_type) { // Verify that alpha type is compatible with common type, // as used by ops such as add and sub. 
- ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( canCast(alpha_type, common_type) || (common_type == ScalarType::Bool && isIntegralType(alpha_type, true))); diff --git a/kernels/portable/cpu/util/matmul_ops_util.cpp b/kernels/portable/cpu/util/matmul_ops_util.cpp index 2ff99724caa..ec8be6b0c07 100644 --- a/kernels/portable/cpu/util/matmul_ops_util.cpp +++ b/kernels/portable/cpu/util/matmul_ops_util.cpp @@ -23,27 +23,27 @@ bool check_addmm_args( const Scalar& beta, const Scalar& alpha, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(mat1, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(mat2, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(mat1, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(mat2, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(out, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mat1, mat2)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, mat1, mat2)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(mat1, 1, mat2, 0)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_size_at_dims(mat1, 1, mat2, 0)); return true; } bool check_bmm_args(const Tensor& in, const Tensor& mat2, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 3)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(mat2, 3)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, 3)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(in, 3)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(mat2, 3)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(out, 3)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mat2, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, mat2, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 0, mat2, 0)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 2, mat2, 1)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_size_at_dims(in, 0, mat2, 0)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_size_at_dims(in, 2, mat2, 1)); return true; } @@ -60,25 +60,25 @@ void get_bmm_out_target_size( } bool check_mm_args(const Tensor& in, const Tensor& mat2, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(mat2, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(in, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(mat2, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(out, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mat2, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, mat2, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 1, mat2, 0)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_size_at_dims(in, 1, mat2, 0)); return true; } bool check_linear_args(const Tensor& in, const Tensor& mat2, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(in.dim() == out.dim()); - ET_LOG_AND_RETURN_IF_FALSE(in.dim() >= 2); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(mat2, 2)); + ET_LOG_AND_RETURN_UNLESS(in.dim() == out.dim()); + ET_LOG_AND_RETURN_UNLESS(in.dim() >= 2); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(mat2, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mat2, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, mat2, out)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(in, in.dim() - 1, mat2, 1)); return true; diff --git 
a/kernels/portable/cpu/util/normalization_ops_util.cpp b/kernels/portable/cpu/util/normalization_ops_util.cpp index 684417f448a..db11e94f93d 100644 --- a/kernels/portable/cpu/util/normalization_ops_util.cpp +++ b/kernels/portable/cpu/util/normalization_ops_util.cpp @@ -28,44 +28,42 @@ bool check_batch_norm_args( Tensor& var_out) { // All tensors must be the same dtype if (weight.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight.value())); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, weight.value())); } if (bias.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, bias.value())); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, bias.value())); } if (running_mean.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE( - tensors_have_same_dtype(in, running_mean.value())); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, running_mean.value())); } if (running_mean.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE( - tensors_have_same_dtype(in, running_var.value())); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, running_var.value())); } - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mean_out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, var_out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, mean_out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, var_out)); size_t C_dim = in.dim() >= 1 ? 1 : 0; // All parameter tensors must be of dim 1 and have length equal to the // channels dim of in if (weight.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight.value(), 1)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(weight.value(), 1)); + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(weight.value(), 0, in, C_dim)); } if (bias.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(bias.value(), 1)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(bias.value(), 1)); + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(bias.value(), 0, in, C_dim)); } if (running_mean.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(running_mean.value(), 1)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(running_mean.value(), 1)); + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(running_mean.value(), 0, in, C_dim)); } if (running_var.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(running_var.value(), 1)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(running_var.value(), 1)); + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(running_var.value(), 0, in, C_dim)); } @@ -81,15 +79,15 @@ bool check_layer_norm_args( Tensor& mean_out, Tensor& rstd_out) { size_t ndim = normalized_shape.size(); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( ndim >= 1, "Expected normalized_shape to be at least 1-dimensional, i.e., containing at least one element."); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.dim() >= ndim, "Expected input tensor to have rank >= the length of normalized_shape."); size_t shift = in.dim() - ndim; for (size_t d = 0; d < ndim; ++d) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.size(d + shift) == normalized_shape[d], "Expected normalized_shape to match the sizes of input's rightmost dimensions."); } @@ -99,18 +97,18 @@ bool check_layer_norm_args( } if 
(weight.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight.value())); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, weight.value())); + ET_LOG_AND_RETURN_UNLESS( tensor_has_expected_size(weight.value(), {shape, ndim})); } if (bias.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, bias.value())); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, bias.value())); + ET_LOG_AND_RETURN_UNLESS( tensor_has_expected_size(bias.value(), {shape, ndim})); } - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mean_out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, rstd_out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, mean_out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, rstd_out)); return true; } @@ -141,32 +139,32 @@ bool check_group_norm_args( Tensor& out, Tensor& mean_out, Tensor& rstd_out) { - ET_LOG_AND_RETURN_IF_FALSE(in.size(0) == N); - ET_LOG_AND_RETURN_IF_FALSE(in.size(1) == C); - ET_LOG_AND_RETURN_IF_FALSE(in.numel() == N * C * HxW); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(in.size(0) == N); + ET_LOG_AND_RETURN_UNLESS(in.size(1) == C); + ET_LOG_AND_RETURN_UNLESS(in.numel() == N * C * HxW); + ET_LOG_MSG_AND_RETURN_UNLESS( group > 0, "Expected number of groups to be greater than 0"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( C % group == 0, "Expected number of channels in input to be divisible by number of groups"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( !weight.has_value() || (weight.value().dim() == 1 && weight.value().size(0) == C), "Expected weight to be a vector of size equal to the number of channels in input"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( !bias.has_value() || (bias.value().dim() == 1 && bias.value().size(0) == C), "Expected bias to be a vector of size equal to the number of channels in input"); if (weight.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight.value())); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, weight.value())); } if (bias.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, bias.value())); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, bias.value())); } - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mean_out)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, rstd_out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, mean_out)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, rstd_out)); return true; } diff --git a/kernels/portable/cpu/util/padding_util.cpp b/kernels/portable/cpu/util/padding_util.cpp index 251c7f1c44b..40e77d08e62 100644 --- a/kernels/portable/cpu/util/padding_util.cpp +++ b/kernels/portable/cpu/util/padding_util.cpp @@ -21,14 +21,14 @@ bool check_padding_args( executorch::aten::ArrayRef padding, Tensor& out, bool reflection) { - ET_LOG_AND_RETURN_IF_FALSE(padding.size() == 2 * n); - ET_LOG_AND_RETURN_IF_FALSE(in.dim() == n + 1 || in.dim() == n + 2); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(padding.size() == 2 * n); + ET_LOG_AND_RETURN_UNLESS(in.dim() == n + 1 || in.dim() == n + 2); + 
ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); for (size_t i = 1; i <= n; ++i) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( in.size(in.dim() - i) + padding[2 * i - 2] + padding[2 * i - 1] >= 0); if (reflection) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( padding[2 * i - 2] < in.size(in.dim() - i) && padding[2 * i - 1] < in.size(in.dim() - i)); } diff --git a/kernels/portable/cpu/util/reduce_util.cpp b/kernels/portable/cpu/util/reduce_util.cpp index 65140fc6643..42714c11122 100644 --- a/kernels/portable/cpu/util/reduce_util.cpp +++ b/kernels/portable/cpu/util/reduce_util.cpp @@ -42,16 +42,16 @@ ET_NODISCARD bool check_dim_list_is_valid( memset(dim_exist, false, sizeof(dim_exist)); for (const auto& d : reduce_dims) { if (in.dim() == 0) { - ET_LOG_AND_RETURN_IF_FALSE(d == 0 || d == -1); + ET_LOG_AND_RETURN_UNLESS(d == 0 || d == -1); } else { - ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(d, in.dim())); + ET_LOG_AND_RETURN_UNLESS(dim_is_valid(d, in.dim())); } const size_t non_neg_d = _normalize_non_neg_d(d, in.dim()); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( non_neg_d < kTensorDimensionLimit && non_neg_d >= 0); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim_exist[non_neg_d] == false, "dim %zd appears multiple times in the list of dims", non_neg_d); @@ -339,11 +339,11 @@ bool check_reduction_args( optional dtype, Tensor& out) { if (dtype.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(dtype.value() == out.scalar_type()); + ET_LOG_AND_RETURN_UNLESS(dtype.value() == out.scalar_type()); } - ET_LOG_AND_RETURN_IF_FALSE(check_dim_list_is_valid(in, dim_list)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); + ET_LOG_AND_RETURN_UNLESS(check_dim_list_is_valid(in, dim_list)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(in)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(out)); return true; } @@ -360,24 +360,24 @@ bool check_reduction_args_single_dim( Tensor& out, bool allow_empty_dim) { if (dtype.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(dtype.value() == out.scalar_type()); + ET_LOG_AND_RETURN_UNLESS(dtype.value() == out.scalar_type()); } if (in.dim() == 0) { if (dim.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(dim.value() == 0 || dim.value() == -1); + ET_LOG_AND_RETURN_UNLESS(dim.value() == 0 || dim.value() == -1); } return true; } if (dim.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim.value(), in.dim())); + ET_LOG_AND_RETURN_UNLESS(dim_is_valid(dim.value(), in.dim())); if (!allow_empty_dim) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_non_empty_dim(in, dim.value())); + ET_LOG_AND_RETURN_UNLESS(tensor_has_non_empty_dim(in, dim.value())); } } - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(in)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_or_channels_last_dim_order(out)); return true; } @@ -388,16 +388,16 @@ bool check_mean_dim_args( bool keepdim, optional dtype, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( check_reduction_args(in, dim_list, keepdim, dtype, out)); if (dtype) { ET_LOG(Info, "dtype is %hhd", static_cast(dtype.value())); - ET_LOG_AND_RETURN_IF_FALSE(torch::executor::isFloatingType(dtype.value())); - 
ET_LOG_AND_RETURN_IF_FALSE(out.scalar_type() == dtype.value()); + ET_LOG_AND_RETURN_UNLESS(torch::executor::isFloatingType(dtype.value())); + ET_LOG_AND_RETURN_UNLESS(out.scalar_type() == dtype.value()); } else { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(in)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(out)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_floating_type(in)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_floating_type(out)); } return true; @@ -408,9 +408,9 @@ bool check_amin_amax_args( ArrayRef dim_list, bool keepdim, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( check_reduction_args(in, dim_list, keepdim, {}, out)); - ET_LOG_AND_RETURN_IF_FALSE(in.scalar_type() == out.scalar_type()); + ET_LOG_AND_RETURN_UNLESS(in.scalar_type() == out.scalar_type()); return true; } @@ -420,10 +420,10 @@ bool check_argmin_argmax_args( optional dim, bool keepdim, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( check_reduction_args_single_dim(in, dim, keepdim, {}, out)); - ET_LOG_AND_RETURN_IF_FALSE(out.scalar_type() == ScalarType::Long); + ET_LOG_AND_RETURN_UNLESS(out.scalar_type() == ScalarType::Long); return true; } @@ -434,13 +434,13 @@ bool check_min_max_args( bool keepdim, Tensor& max, Tensor& max_indices) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( check_reduction_args_single_dim(in, dim, keepdim, {}, max)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, max)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_shape(max, max_indices)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, max)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_shape(max, max_indices)); + ET_LOG_AND_RETURN_UNLESS( tensor_is_default_or_channels_last_dim_order(max_indices)); - ET_LOG_AND_RETURN_IF_FALSE(max_indices.scalar_type() == ScalarType::Long); + ET_LOG_AND_RETURN_UNLESS(max_indices.scalar_type() == ScalarType::Long); return true; } @@ -450,11 +450,11 @@ bool check_prod_out_args( optional dtype, Tensor& out) { if (dtype.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(dtype.value() == out.scalar_type()); + ET_LOG_AND_RETURN_UNLESS(dtype.value() == out.scalar_type()); } else if (isIntegralType(in.scalar_type(), /*includeBool*/ true)) { - ET_LOG_AND_RETURN_IF_FALSE(out.scalar_type() == ScalarType::Long); + ET_LOG_AND_RETURN_UNLESS(out.scalar_type() == ScalarType::Long); } else { - ET_LOG_AND_RETURN_IF_FALSE(out.scalar_type() == in.scalar_type()); + ET_LOG_AND_RETURN_UNLESS(out.scalar_type() == in.scalar_type()); } return true; diff --git a/kernels/portable/cpu/util/repeat_util.cpp b/kernels/portable/cpu/util/repeat_util.cpp index d373a86c16c..37147a68d69 100644 --- a/kernels/portable/cpu/util/repeat_util.cpp +++ b/kernels/portable/cpu/util/repeat_util.cpp @@ -25,7 +25,7 @@ bool check_repeat_args( executorch::aten::ArrayRef repeats, Tensor& out) { // Ensure the self tensors list is non-empty. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( repeats.size() >= self.dim(), "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor"); @@ -34,11 +34,11 @@ bool check_repeat_args( for (auto repeat : repeats) { all_non_negative = all_non_negative && (repeat >= 0); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( all_non_negative, "Trying to create tensor with negative dimension"); /// Check if out.size() is legal. 
- ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.dim() == repeats.size(), "The dimension of out shall equal size of repeats, but now is %zd and %zd", out.dim(), @@ -47,12 +47,12 @@ bool check_repeat_args( // Right now we only support the tensors whose dimension is no greater than // kTensorDimensionLimit. Only check out tensor because the number of // dimension of out tensor shall have more than or equal to self tensor - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( out.dim() <= kTensorDimensionLimit, "The dimension of input and output should not be larger than %zd", kTensorDimensionLimit); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(out, self)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(out, self)); // We pad one to the beginning of self.size() to make its length equal // repeats, and called it reformat_self_size. We then make point-to-point mul @@ -66,7 +66,7 @@ bool check_repeat_args( reformat_self_size[out.dim() - 1 - i] = self.size(self.dim() - 1 - i); } for (size_t i = 0; i < repeats.size(); i++) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( reformat_self_size[i] * repeats[i] == out.size(i), "Expect out size at dimension %zu is %" PRId64 ", but now is %zd", i, diff --git a/kernels/portable/cpu/util/slice_util.cpp b/kernels/portable/cpu/util/slice_util.cpp index a948a370de2..67ed7c07589 100644 --- a/kernels/portable/cpu/util/slice_util.cpp +++ b/kernels/portable/cpu/util/slice_util.cpp @@ -21,16 +21,16 @@ bool check_narrow_copy_args( int64_t start, int64_t lenth, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_MSG_AND_RETURN_IF_FALSE(lenth >= 0, "lenth must be non-negative"); - ET_LOG_AND_RETURN_IF_FALSE(start >= -in.size(dim)); - ET_LOG_AND_RETURN_IF_FALSE(start <= in.size(dim)); + ET_LOG_AND_RETURN_UNLESS(in.dim() > 0); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); + ET_LOG_MSG_AND_RETURN_UNLESS(lenth >= 0, "lenth must be non-negative"); + ET_LOG_AND_RETURN_UNLESS(start >= -in.size(dim)); + ET_LOG_AND_RETURN_UNLESS(start <= in.size(dim)); if (start < 0) { start += in.size(dim); } - ET_LOG_AND_RETURN_IF_FALSE(start + lenth <= in.size(dim)); + ET_LOG_AND_RETURN_UNLESS(start + lenth <= in.size(dim)); return true; } @@ -53,10 +53,10 @@ bool check_slice_copy_args( int64_t dim, int64_t step, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(in.dim() > 0); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim)); + ET_LOG_MSG_AND_RETURN_UNLESS( step > 0, "slice step must be greater than zero"); return true; } @@ -77,19 +77,19 @@ bool check_slice_scatter_args( int64_t num_values, int64_t step, Tensor output) { - ET_LOG_AND_RETURN_IF_FALSE(input.dim() > 0); + ET_LOG_AND_RETURN_UNLESS(input.dim() > 0); // Check dim. 
The dim planed to be selected on shall exist in input - ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, input.dim())); + ET_LOG_AND_RETURN_UNLESS(dim_is_valid(dim, input.dim())); // Input and output tensors should be the same shape and dtype - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_shape_and_dtype(input, output)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_shape_and_dtype(input, output)); // The input.dim() shall equal to src.dim() - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(input, src)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_rank(input, src)); // Check step. Step must be greater than zero - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( step > 0, "slice step must be greater than zero"); // The size of src tensor should follow these rules: @@ -97,10 +97,10 @@ bool check_slice_scatter_args( // - src.size(dim) shall equal to num_values for (size_t d = 0; d < input.dim() - 1; d++) { if (d != dim) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(input, d, src, d)); } else { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( src.size(d) == num_values, "input.size(%zu) %zd != num_values %" PRId64 " | dim = %" PRId64 ")", d, diff --git a/kernels/portable/cpu/util/transpose_util.h b/kernels/portable/cpu/util/transpose_util.h index 453446fd842..053410c9a26 100644 --- a/kernels/portable/cpu/util/transpose_util.h +++ b/kernels/portable/cpu/util/transpose_util.h @@ -136,8 +136,8 @@ void transpose_tensors( } inline bool check_t_copy_args(const Tensor& in, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_smaller_or_equal_to(in, 2)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_rank_smaller_or_equal_to(in, 2)); return true; } @@ -146,9 +146,9 @@ inline bool check_transpose_copy_args( int64_t dim0, int64_t dim1, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim0)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim1)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim0)); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(in, dim1)); return true; } diff --git a/kernels/portable/cpu/util/upsample_util.cpp b/kernels/portable/cpu/util/upsample_util.cpp index ff3220f9883..931cf1cc61e 100644 --- a/kernels/portable/cpu/util/upsample_util.cpp +++ b/kernels/portable/cpu/util/upsample_util.cpp @@ -17,21 +17,20 @@ bool check_upsample_2d_common_args( const executorch::aten::OptionalArrayRef& output_size, const executorch::aten::OptionalArrayRef& scale_factors, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_AND_RETURN_IF_FALSE(in.dim() == 4); - ET_LOG_AND_RETURN_IF_FALSE(out.dim() == 4); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_dim_order(in)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_dim_order(out)); - ET_LOG_AND_RETURN_IF_FALSE( - output_size.has_value() ^ scale_factors.has_value()); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out)); + ET_LOG_AND_RETURN_UNLESS(in.dim() == 4); + ET_LOG_AND_RETURN_UNLESS(out.dim() == 4); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_dim_order(in)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_default_dim_order(out)); + ET_LOG_AND_RETURN_UNLESS(output_size.has_value() ^ scale_factors.has_value()); if (scale_factors.has_value()) { - 
ET_LOG_AND_RETURN_IF_FALSE(scale_factors.value().size() == 2); - ET_LOG_AND_RETURN_IF_FALSE(scale_factors.value()[0] > 0); - ET_LOG_AND_RETURN_IF_FALSE(scale_factors.value()[1] > 0); + ET_LOG_AND_RETURN_UNLESS(scale_factors.value().size() == 2); + ET_LOG_AND_RETURN_UNLESS(scale_factors.value()[0] > 0); + ET_LOG_AND_RETURN_UNLESS(scale_factors.value()[1] > 0); } else if (output_size.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(output_size.value().size() == 2); - ET_LOG_AND_RETURN_IF_FALSE(output_size.value()[0] > 0); - ET_LOG_AND_RETURN_IF_FALSE(output_size.value()[1] > 0); + ET_LOG_AND_RETURN_UNLESS(output_size.value().size() == 2); + ET_LOG_AND_RETURN_UNLESS(output_size.value()[0] > 0); + ET_LOG_AND_RETURN_UNLESS(output_size.value()[1] > 0); } return true; diff --git a/kernels/prim_ops/et_view.cpp b/kernels/prim_ops/et_view.cpp index 66aa9ac87e2..b9616150385 100644 --- a/kernels/prim_ops/et_view.cpp +++ b/kernels/prim_ops/et_view.cpp @@ -32,19 +32,19 @@ bool get_view_target_size( executorch::aten::ArrayRef size, int64_t dim, executorch::aten::SizesType* out_size) { - ET_LOG_AND_RETURN_IF_FALSE(size.size() == dim); + ET_LOG_AND_RETURN_UNLESS(size.size() == dim); int minus1_dim = -1; int n_zero = 0; int64_t numel_without_minus_1 = 1; for (int i = 0; i < dim; i++) { if (size[i] == -1) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( minus1_dim == -1, "At most one view dim can be -1."); minus1_dim = i; } else { // The size[i] must be non-negative now, but we check size[i] >= -1 // in case code is reordered in the future. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( size[i] >= -1, "Negative sizes are not allowed."); numel_without_minus_1 *= size[i]; @@ -56,7 +56,7 @@ bool get_view_target_size( } } if (minus1_dim >= 0) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( n_zero == 0, "Cannot infer dimension size if there is a zero dim."); out_size[minus1_dim] = self.numel() / numel_without_minus_1; } diff --git a/kernels/quantized/cpu/op_mixed_linear.cpp b/kernels/quantized/cpu/op_mixed_linear.cpp index d09d0bdd5e1..9961b64946a 100644 --- a/kernels/quantized/cpu/op_mixed_linear.cpp +++ b/kernels/quantized/cpu/op_mixed_linear.cpp @@ -22,40 +22,40 @@ bool check_quantized_mixed_linear_args( const executorch::aten::optional& opt_weight_zero_points, const executorch::aten::optional dtype, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight, 2)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(in, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(weight, 2)); + ET_LOG_AND_RETURN_UNLESS( tensor_is_rank(weight_scales, 1) || tensor_is_rank(weight_scales, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(out, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 1, weight, 1)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_size_at_dims(in, 1, weight, 1)); + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(weight_scales, 0, weight, 0)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 1, weight, 1)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_size_at_dims(in, 1, weight, 1)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight_scales)); + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, weight_scales)); if (dtype.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE(out.scalar_type() == dtype.value()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + 
ET_LOG_AND_RETURN_UNLESS(out.scalar_type() == dtype.value()); + ET_LOG_MSG_AND_RETURN_UNLESS( dtype.value() == ScalarType::Float || dtype.value() == ScalarType::Half, "dtype must be Float or Half"); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( weight.scalar_type() == ScalarType::Char, "weight dtype must be int8"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.scalar_type() == ScalarType::Float || in.scalar_type() == ScalarType::Half, "input dtype must be Float or Half"); if (opt_weight_zero_points.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_shape(opt_weight_zero_points.value(), weight_scales)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_dtype(opt_weight_zero_points.value(), in)); } // Support for non-null zero points is not implemented yet. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( !opt_weight_zero_points.has_value(), "zero points not supported yet."); return true; } diff --git a/kernels/quantized/cpu/op_mixed_mm.cpp b/kernels/quantized/cpu/op_mixed_mm.cpp index 044e110bf5c..17728b75d8c 100644 --- a/kernels/quantized/cpu/op_mixed_mm.cpp +++ b/kernels/quantized/cpu/op_mixed_mm.cpp @@ -21,32 +21,32 @@ bool check_quantized_mixed_mm_args( const Tensor& weight_scales, const executorch::aten::optional& opt_weight_zero_points, Tensor& out) { - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight_scales, 1)); - ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(in, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(weight, 2)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(weight_scales, 1)); + ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(out, 2)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 1, weight, 0)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_size_at_dims(in, 1, weight, 0)); + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_size_at_dims(weight_scales, 0, weight, 0)); - ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight_scales, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, weight_scales, out)); + ET_LOG_MSG_AND_RETURN_UNLESS( weight.scalar_type() == ScalarType::Char, "weight dtype must be int8"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( in.scalar_type() == ScalarType::Float || in.scalar_type() == ScalarType::Half, "input dtype must be Float or Half"); if (opt_weight_zero_points.has_value()) { - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_shape(opt_weight_zero_points.value(), weight_scales)); - ET_LOG_AND_RETURN_IF_FALSE( + ET_LOG_AND_RETURN_UNLESS( tensors_have_same_dtype(opt_weight_zero_points.value(), in)); } // Support for non-null zero points is not implemented yet. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( !opt_weight_zero_points.has_value(), "zero points not supported yet."); return true; } diff --git a/runtime/core/exec_aten/util/tensor_util.h b/runtime/core/exec_aten/util/tensor_util.h index 5ae85b7c8b9..5753abda894 100644 --- a/runtime/core/exec_aten/util/tensor_util.h +++ b/runtime/core/exec_aten/util/tensor_util.h @@ -429,7 +429,7 @@ namespace runtime { * upper_bound - 1, inclusive. 
*/ inline bool dim_is_valid(int64_t dim, int64_t upper_bound) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim >= -upper_bound && dim < upper_bound, "Dimension %" PRId64 " is out of range. Dimension should be between %" PRId64 " and %" PRId64 @@ -466,7 +466,7 @@ inline ssize_t nonempty_size( inline bool tensor_can_cast_to( executorch::aten::Tensor a, executorch::aten::ScalarType dtype) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( torch::executor::canCast(a.scalar_type(), dtype), "Tensor of dtype %s cannot cast to dtype %s", torch::executor::toString(a.scalar_type()), @@ -476,7 +476,7 @@ inline bool tensor_can_cast_to( } inline bool tensor_is_bool_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( t.scalar_type() == executorch::aten::ScalarType::Bool, "Expected to find bool type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -487,7 +487,7 @@ inline bool tensor_is_bool_type(executorch::aten::Tensor t) { inline bool tensor_is_type( executorch::aten::Tensor t, executorch::aten::ScalarType dtype) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( t.scalar_type() == dtype, "Expected to find %s type, but tensor has type %s", torch::executor::toString(dtype), @@ -499,7 +499,7 @@ inline bool tensor_is_type( inline bool tensor_is_integral_type( executorch::aten::Tensor t, bool includeBool = false) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( torch::executor::isIntegralType(t.scalar_type(), includeBool), "Expected to find a integral type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -508,7 +508,7 @@ inline bool tensor_is_integral_type( } inline bool tensor_is_floating_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( torch::executor::isFloatingType(t.scalar_type()), "Expected to find a floating type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -517,7 +517,7 @@ inline bool tensor_is_floating_type(executorch::aten::Tensor t) { } inline bool tensor_is_real_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( torch::executor::isRealType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -526,7 +526,7 @@ inline bool tensor_is_real_type(executorch::aten::Tensor t) { } inline bool tensor_is_realh_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( torch::executor::isRealHType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -535,7 +535,7 @@ inline bool tensor_is_realh_type(executorch::aten::Tensor t) { } inline bool tensor_is_realhbf16_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( executorch::runtime::isRealHBF16Type(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -544,7 +544,7 @@ inline bool tensor_is_realhbf16_type(executorch::aten::Tensor t) { } inline bool tensor_is_realhb_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( torch::executor::isRealHBType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -553,7 +553,7 @@ inline bool 
tensor_is_realhb_type(executorch::aten::Tensor t) { } inline bool tensor_is_realhbbf16_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( executorch::runtime::isRealHBBF16Type(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -562,7 +562,7 @@ inline bool tensor_is_realhbbf16_type(executorch::aten::Tensor t) { } inline bool tensor_is_complex_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( torch::executor::isComplexType(t.scalar_type()), "Expected to find a complex type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -571,7 +571,7 @@ inline bool tensor_is_complex_type(executorch::aten::Tensor t) { } inline bool tensor_is_bits_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( torch::executor::isBitsType(t.scalar_type()), "Expected to find a bits type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -582,7 +582,7 @@ inline bool tensor_is_bits_type(executorch::aten::Tensor t) { inline bool tensors_have_same_dtype( executorch::aten::Tensor a, executorch::aten::Tensor b) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( a.scalar_type() == b.scalar_type(), ET_TENSOR_CHECK_PREFIX__ ": dtype={%s, %s}", torch::executor::toString(a.scalar_type()), @@ -594,7 +594,7 @@ inline bool tensors_have_same_dtype( executorch::aten::Tensor a, executorch::aten::Tensor b, executorch::aten::Tensor c) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( a.scalar_type() == b.scalar_type() && b.scalar_type() == c.scalar_type(), ET_TENSOR_CHECK_PREFIX__ ": dtype={%s, %s, %s}", torch::executor::toString(a.scalar_type()), @@ -604,7 +604,7 @@ inline bool tensors_have_same_dtype( } inline bool tensor_is_rank(executorch::aten::Tensor t, size_t rank) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( t.dim() == rank, "Expected tensor.dim() to be %zu, but got %zu", static_cast(rank), @@ -616,7 +616,7 @@ inline bool tensor_is_rank(executorch::aten::Tensor t, size_t rank) { inline bool tensor_has_rank_greater_or_equal_to( executorch::aten::Tensor t, size_t rank) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( t.dim() >= rank, "Expected tensor.dim() to be >= %zu, but got %zu", static_cast(rank), @@ -628,7 +628,7 @@ inline bool tensor_has_rank_greater_or_equal_to( inline bool tensor_has_rank_smaller_or_equal_to( executorch::aten::Tensor t, size_t rank) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( t.dim() <= rank, "Expected tensor.dim() to be <= %zu, but got %zu", static_cast(rank), @@ -639,12 +639,12 @@ inline bool tensor_has_rank_smaller_or_equal_to( inline bool tensor_has_dim(executorch::aten::Tensor t, int64_t d) { if (t.dim() == 0) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( d == 0 || d == -1, "dim must be 0 or -1 for 0-dim tensor, got %" PRId64, d); } else { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( d > 0 ? 
d < t.dim() : t.dim() + d >= 0, "%zu-dim tensor does not have dim at index %zu", static_cast(t.dim()), @@ -655,8 +655,8 @@ inline bool tensor_has_dim(executorch::aten::Tensor t, int64_t d) { inline bool tensor_has_non_empty_dim(executorch::aten::Tensor t, int64_t d) { const size_t udim = ET_NORMALIZE_IX(d, t.dim()); - ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(t, d)); - ET_LOG_AND_RETURN_IF_FALSE(t.size(udim) != 0); + ET_LOG_AND_RETURN_UNLESS(tensor_has_dim(t, d)); + ET_LOG_AND_RETURN_UNLESS(t.size(udim) != 0); return true; } @@ -670,7 +670,7 @@ tensor_dim_has_index(executorch::aten::Tensor t, int64_t d, int64_t ix) { // Dimension must have been already checked by tensor_has_dim ET_CHECK(d >= 0 && d < t.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( ix >= -t.size(d) && ix < t.size(d), "index %" PRId64 " out of range [-%zu,%zu) at dimension %" PRId64 ")", ix, @@ -685,17 +685,17 @@ inline bool tensors_have_same_size_at_dims( size_t dim_a, executorch::aten::Tensor b, size_t dim_b) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim_a < a.dim(), "Cannot retrieve dim %zu from tensor with dim %zu", static_cast(dim_a), static_cast(a.dim())); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( dim_b < b.dim(), "Cannot retrieve dim %zu from tensor with dim %zu", static_cast(dim_b), static_cast(b.dim())); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( a.size(dim_a) == b.size(dim_b), ET_TENSOR_CHECK_PREFIX__ ": a.size(%zu) = %zu does not match b.size(%zu) = %zu", @@ -870,13 +870,13 @@ inline bool tensor_is_contiguous(executorch::aten::Tensor t) { if (strides.size() == 0) { return true; } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( strides[strides.size() - 1] == 1, "Tensor is not contiguous; the stride of the last dimension must be 1, " "but got %zu", static_cast(strides[strides.size() - 1])); for (int i = strides.size() - 1; i > 0; --i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( strides[i - 1] == strides[i] * sizes[i], "Tensor is not contiguous; the stride of dim %zu should be equal to " "strides[%zu] * sizes[%zu] = %zu, but found %zu", @@ -892,7 +892,7 @@ inline bool tensor_is_contiguous(executorch::aten::Tensor t) { inline bool tensors_have_same_rank( executorch::aten::Tensor a, executorch::aten::Tensor b) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( a.dim() == b.dim(), ET_TENSOR_CHECK_PREFIX__ ": rank={%zd, %zd}", ssize_t(a.dim()), diff --git a/runtime/core/exec_aten/util/tensor_util_aten.cpp b/runtime/core/exec_aten/util/tensor_util_aten.cpp index d768f66d05f..15226ee37e3 100644 --- a/runtime/core/exec_aten/util/tensor_util_aten.cpp +++ b/runtime/core/exec_aten/util/tensor_util_aten.cpp @@ -35,7 +35,7 @@ Error get_dim_order( bool tensor_has_valid_dim_order(at::Tensor t) { executorch::aten::DimOrderType dim_order[kTensorDimensionLimit]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( get_dim_order(t, dim_order, t.dim()) == Error::Ok, "Failed to retrieve dim order from tensor!"); @@ -55,7 +55,7 @@ bool tensor_has_valid_dim_order(at::Tensor t) { inline bool tensor_is_default_or_channels_last_dim_order(at::Tensor t) { executorch::aten::DimOrderType dim_order[kTensorDimensionLimit]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( get_dim_order(t, dim_order, t.dim()) == Error::Ok, "Failed to retrieve dim order from tensor!"); @@ -86,7 +86,7 @@ bool tensors_have_same_dim_order( executorch::aten::DimOrderType 
first_dim_order[kTensorDimensionLimit]; executorch::aten::DimOrderType other_dim_order[kTensorDimensionLimit]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( get_dim_order(tensor_list[0], first_dim_order, tensor_list[0].dim()) == Error::Ok, "Failed to retrieve dim order from 1st input tensor!"); @@ -97,7 +97,7 @@ bool tensors_have_same_dim_order( is_channels_last_dim_order(first_dim_order, tensor_list[0].dim()); for (size_t i = 1; i < tensor_list.size(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( get_dim_order(tensor_list[i], other_dim_order, tensor_list[i].dim()) == Error::Ok, "Failed to retrieve dim order from %zd-th input tensor!", @@ -109,7 +109,7 @@ bool tensors_have_same_dim_order( is_channels_last_dim_order(other_dim_order, tensor_list[i].dim()); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( all_contiguous || all_channels_last, "%zd input tensors have different dim orders", tensor_list.size()); diff --git a/runtime/core/exec_aten/util/tensor_util_portable.cpp b/runtime/core/exec_aten/util/tensor_util_portable.cpp index 3350445db73..8ec546313ea 100644 --- a/runtime/core/exec_aten/util/tensor_util_portable.cpp +++ b/runtime/core/exec_aten/util/tensor_util_portable.cpp @@ -125,7 +125,7 @@ bool tensors_have_same_dim_order( tensor_list[i].dim_order().size()); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_LOG_MSG_AND_RETURN_UNLESS( all_contiguous || all_channels_last, "%zd input tensors have different dim orders", tensor_list.size());
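For reference, the call sites in this patch all follow the same pattern: a bool-returning checker validates one precondition per macro invocation and returns early on the first failure. The sketch below illustrates that pattern only; the macro bodies here are an assumption for illustration (the real definitions live in the ExecuTorch runtime headers and may differ), and check_square_matrix is a hypothetical helper, not part of this change. It assumes ET_LOG and executorch::aten::Tensor are available from the usual ExecuTorch includes.

// Assumed expansion of the renamed macros: log the failed condition via
// ET_LOG(Error, ...) and return false from the enclosing bool checker.
#define ET_LOG_AND_RETURN_UNLESS(cond)          \
  do {                                          \
    if (!(cond)) {                              \
      ET_LOG(Error, "Check failed: %s", #cond); \
      return false;                             \
    }                                           \
  } while (false)

#define ET_LOG_MSG_AND_RETURN_UNLESS(cond, fmt, ...)                  \
  do {                                                                \
    if (!(cond)) {                                                    \
      ET_LOG(Error, "Check failed (%s): " fmt, #cond, ##__VA_ARGS__); \
      return false;                                                    \
    }                                                                 \
  } while (false)

// Hypothetical checker in the style of the call sites above: returns true only
// when every precondition holds, logging the first failure and bailing out.
bool check_square_matrix(const executorch::aten::Tensor& in) {
  ET_LOG_AND_RETURN_UNLESS(in.dim() == 2);
  ET_LOG_MSG_AND_RETURN_UNLESS(
      in.size(0) == in.size(1),
      "Expected a square matrix, but got %zd x %zd",
      in.size(0),
      in.size(1));
  return true;
}

At a call site the new spelling reads as "log and return unless this holds", so the condition states what must be true for the check to pass rather than being negated through IF_FALSE; the runtime behavior of the checks is otherwise unchanged.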