
roll out s/IF_FALSE/UNLESS/ for ET_LOG macros #8318


Closed · wants to merge 4 commits
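This PR mechanically renames the ET_LOG_*_AND_RETURN_IF_FALSE check macros to ET_LOG_*_AND_RETURN_UNLESS at their call sites; the arguments and behavior are unchanged. The macro definitions themselves are not part of this diff, so what follows is only a minimal sketch of the contract the call sites rely on (log an error, optionally with a printf-style message, and return false from the enclosing function unless the condition holds). The SKETCH_* names are hypothetical and do not appear in ExecuTorch.

// Minimal sketch, assuming only the behavior implied by the call sites below.
// Not the real ExecuTorch definitions.
#include <cstdint>
#include <cstdio>

// Variant without a custom message: logs the failed condition itself.
#define SKETCH_LOG_AND_RETURN_UNLESS(cond)               \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #cond); \
      return false;                                      \
    }                                                    \
  } while (0)

// Variant with a printf-style message (relies on the GCC/Clang ##__VA_ARGS__
// extension so the trailing argument list may be empty).
#define SKETCH_LOG_MSG_AND_RETURN_UNLESS(cond, fmt, ...)              \
  do {                                                                \
    if (!(cond)) {                                                    \
      std::fprintf(stderr, "Check failed: " fmt "\n", ##__VA_ARGS__); \
      return false;                                                   \
    }                                                                 \
  } while (0)

// Example call site mirroring the pattern in this diff: reads as
// "log the message and return unless dim == 4".
bool sketch_validate_rank(int64_t dim) {
  SKETCH_LOG_MSG_AND_RETURN_UNLESS(
      dim == 4, "expected a 4D tensor, got %lld dims", static_cast<long long>(dim));
  return true;
}

The new spelling reads as a positive precondition ("return unless this holds") rather than a double negative ("return if this is false"), which is the motivation implied by the s/IF_FALSE/UNLESS/ in the title.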
40 changes: 20 additions & 20 deletions extension/llm/custom_ops/op_sdpa.cpp
@@ -594,46 +594,46 @@ bool validate_flash_attention_args(
const Tensor& key,
const Tensor& value,
const optional<Tensor>& attn_mask) {
ET_LOG_MSG_AND_RETURN_IF_FALSE(query.dim() == 4, "query must be a 4D tensor");
ET_LOG_MSG_AND_RETURN_IF_FALSE(key.dim() == 4, "key must be a 4D tensor");
ET_LOG_MSG_AND_RETURN_IF_FALSE(value.dim() == 4, "value must be a 4D tensor");
ET_LOG_MSG_AND_RETURN_UNLESS(query.dim() == 4, "query must be a 4D tensor");
ET_LOG_MSG_AND_RETURN_UNLESS(key.dim() == 4, "key must be a 4D tensor");
ET_LOG_MSG_AND_RETURN_UNLESS(value.dim() == 4, "value must be a 4D tensor");

// Sizes
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
(query.size(3) == value.size(3)) && (key.size(3) == value.size(3)),
"scaled_dot_product_attention_flash_attention: Q/K/V should have the same head size");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
(query.scalar_type() == ScalarType::Float), "Query must be Float type");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
(query.scalar_type() == key.scalar_type()) &&
(query.scalar_type() == value.scalar_type()),
"Key and Value must have the same data type as Query");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
!attn_mask.has_value() || attn_mask.value().dim() == 2,
"Attention mask must be a 2D tensor");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
!attn_mask.has_value() ||
attn_mask.value().scalar_type() == query.scalar_type(),
"Attention mask must be a 2D tensor");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
is_contiguous_dim_order(query.dim_order().data(), query.dim()),
"key cache must be in contiguous dim order");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
is_contiguous_dim_order(key.dim_order().data(), key.dim()),
"value cache must be in contiguous dim order");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
is_contiguous_dim_order(value.dim_order().data(), value.dim()),
"value cache must be in contiguous dim order");

if (attn_mask.has_value()) {
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
is_contiguous_dim_order(
attn_mask.value().dim_order().data(), attn_mask.value().dim()),
"value cache must be in contiguous dim order");
@@ -647,21 +647,21 @@ bool validate_cache_params(
const Tensor& v_cache,
int64_t start_pos,
int64_t seq_length) {
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
k_cache.dim() == 4, "kcache must be a 4D tensor");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
v_cache.dim() == 4, "v_cache must be a 4D tensor");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
start_pos < k_cache.size(1),
"start_pos must be less than key cache at dim 1");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
start_pos < v_cache.size(1),
"start_pos must be less than value cache at dim 1");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
(start_pos + seq_length) <= k_cache.size(1),
"start_post + seq_length must be less than max seq length supported by key cache."
"start pos: %" PRId64 ", seq_length: %" PRId64
@@ -671,7 +671,7 @@ bool validate_cache_params(
seq_length,
k_cache.size(1));

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
(start_pos + seq_length) <= v_cache.size(1),
"start_post + seq_length must be less than max seq length supported by key cache."
"start pos: %" PRId64 ", seq_length: %" PRId64
@@ -682,11 +682,11 @@ bool validate_cache_params(
v_cache.size(1));

// Make sure they are in contiguous dim order
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
is_contiguous_dim_order(k_cache.dim_order().data(), k_cache.dim()),
"key cache must be in contiguous dim order");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
is_contiguous_dim_order(v_cache.dim_order().data(), v_cache.dim()),
"value cache must be in contiguous dim order");

12 changes: 6 additions & 6 deletions extension/llm/custom_ops/op_tile_crop.cpp
@@ -19,12 +19,12 @@ bool check_tile_crop_out_args(
const Tensor& in,
int64_t tile_size,
Tensor& out) {
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 3));
ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, 4));
ET_LOG_AND_RETURN_IF_FALSE(tile_size > 0);
ET_LOG_AND_RETURN_IF_FALSE(in.size(in.dim() - 1) % tile_size == 0);
ET_LOG_AND_RETURN_IF_FALSE(in.size(in.dim() - 2) % tile_size == 0);
ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(in, out));
ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(in, 3));
ET_LOG_AND_RETURN_UNLESS(tensor_is_rank(out, 4));
ET_LOG_AND_RETURN_UNLESS(tile_size > 0);
ET_LOG_AND_RETURN_UNLESS(in.size(in.dim() - 1) % tile_size == 0);
ET_LOG_AND_RETURN_UNLESS(in.size(in.dim() - 2) % tile_size == 0);
return true;
}

12 changes: 6 additions & 6 deletions extension/llm/custom_ops/op_update_cache.cpp
@@ -25,17 +25,17 @@ bool validate_cache_params(
const Tensor& quantized_cache,
int64_t start_pos,
int64_t seq_length) {
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
quantized_cache.dim() == 4, "quantized cache must be a 4D tensor");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
quantized_value.dim() == 4, "quantized_value must be a 4D tensor");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
start_pos < quantized_cache.size(1),
"start_pos must be less than cache size at dim 1");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
(start_pos + seq_length) <= quantized_cache.size(1),
"start_post + seq_length must be less than max seq length supported by cache."
"start pos: %" PRId64 ", seq_length: %" PRId64
@@ -46,12 +46,12 @@ bool validate_cache_params(
quantized_cache.size(1));

// Make sure they are in contiguous dim order
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
is_contiguous_dim_order(
quantized_cache.dim_order().data(), quantized_cache.dim()),
"quantized cache must be in contiguous dim order");

ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
is_contiguous_dim_order(
quantized_value.dim_order().data(), quantized_value.dim()),
"quantized value must be in contiguous dim order");
6 changes: 3 additions & 3 deletions extension/parallel/thread_parallel.cpp
@@ -53,9 +53,9 @@ bool parallel_for(
const int64_t end,
const int64_t grain_size,
const std::function<void(int64_t, int64_t)>& f) {
ET_LOG_AND_RETURN_IF_FALSE(begin >= 0 && end >= 0);
ET_LOG_AND_RETURN_IF_FALSE(end >= begin);
ET_LOG_AND_RETURN_IF_FALSE(grain_size > 0);
ET_LOG_AND_RETURN_UNLESS(begin >= 0 && end >= 0);
ET_LOG_AND_RETURN_UNLESS(end >= begin);
ET_LOG_AND_RETURN_UNLESS(grain_size > 0);
int64_t num_tasks = 0, chunk_size = 0;
std::tie(num_tasks, chunk_size) =
calc_num_tasks_and_chunk_size(begin, end, grain_size);
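The parallel_for change above only touches its argument checks, but a usage sketch may help read them. The declaration matches the signature in this diff; the chunking semantics (the callback receives disjoint [begin, end) sub-ranges of roughly grain_size elements, possibly on multiple threads) are an assumption based on the at::parallel_for convention, and scale_in_parallel is a hypothetical caller.

// Hedged usage sketch for the guarded parallel_for above. The declaration
// mirrors the signature in this diff; in real code it would come from the
// extension's thread_parallel header rather than being re-declared here.
#include <cstdint>
#include <functional>
#include <vector>

bool parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const std::function<void(int64_t, int64_t)>& f);

// Hypothetical caller: scales a buffer in chunks of roughly 1024 elements.
// Assumes the callback is handed [range_begin, range_end) sub-ranges of the
// overall [0, size) range.
bool scale_in_parallel(std::vector<float>& data, float factor) {
  return parallel_for(
      0,
      static_cast<int64_t>(data.size()),
      /*grain_size=*/1024,
      [&](int64_t range_begin, int64_t range_end) {
        for (int64_t i = range_begin; i < range_end; ++i) {
          data[i] *= factor;
        }
      });
}

If begin or end were negative, end were less than begin, or grain_size were not positive, the renamed ET_LOG_AND_RETURN_UNLESS checks above would log and make parallel_for return false, which is why the caller propagates the bool.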
8 changes: 4 additions & 4 deletions kernels/aten/cpu/op__empty_dim_order.cpp
@@ -44,13 +44,13 @@ inline bool _check__empty_out_dim_order(
}

// dim order size shall equal to input dim
ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == out.dim());
ET_LOG_AND_RETURN_UNLESS(dim_order_ref.size() == out.dim());

ET_LOG_AND_RETURN_IF_FALSE(
ET_LOG_AND_RETURN_UNLESS(
is_channels_last_dim_order(dim_order_ref.data(), dim_order_ref.size()) ||
is_contiguous_dim_order(dim_order_ref.data(), dim_order_ref.size()));

ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim());
ET_LOG_AND_RETURN_UNLESS(kMaxNumOfDimensions >= out.dim());
executorch::aten::StridesType target_strides[kMaxNumOfDimensions];
dim_order_to_stride_nocheck(
out.sizes().data(),
@@ -59,7 +59,7 @@ inline bool _check__empty_out_dim_order(
target_strides);

for (size_t i = 0; i < dim_order_ref.size(); i++) {
ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
ET_LOG_AND_RETURN_UNLESS(target_strides[i] == out.strides()[i]);
}

return true;
16 changes: 8 additions & 8 deletions kernels/aten/cpu/op__to_dim_order_copy.cpp
@@ -47,43 +47,43 @@ bool check__to_dim_order_copy_args(
executorch::aten::OptionalArrayRef<int64_t> dim_order,
Tensor& out) {
// Right now we only support blocking data transfer
ET_LOG_AND_RETURN_IF_FALSE(non_blocking == false);
ET_LOG_AND_RETURN_UNLESS(non_blocking == false);

// dim_order is set, the target dim_order will be either contiguous or
// channels_last memory format
if (dim_order.has_value()) {
executorch::aten::ArrayRef<int64_t> dim_order_ref = dim_order.value();

// dim order size shall equal to input dim
ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == input.dim());
ET_LOG_AND_RETURN_UNLESS(dim_order_ref.size() == input.dim());

ET_LOG_AND_RETURN_IF_FALSE(
ET_LOG_AND_RETURN_UNLESS(
is_channels_last_dim_order(
dim_order.value().data(), dim_order.value().size()) ||
is_contiguous_dim_order(
dim_order.value().data(), dim_order.value().size()));

// Out Aten tensor shall have same memory format stride as dim_order
const size_t kMaxNumOfDimensions = 16;
ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim());
ET_LOG_AND_RETURN_UNLESS(kMaxNumOfDimensions >= out.dim());
executorch::aten::StridesType target_strides[kMaxNumOfDimensions];
dim_order_to_stride_nocheck(
out.sizes().data(),
dim_order_ref.data(),
dim_order_ref.size(),
target_strides);
ET_LOG_AND_RETURN_IF_FALSE(out.dim() == dim_order_ref.size());
ET_LOG_AND_RETURN_UNLESS(out.dim() == dim_order_ref.size());
for (size_t i = 0; i < dim_order_ref.size(); i++) {
ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
ET_LOG_AND_RETURN_UNLESS(target_strides[i] == out.strides()[i]);
}

} else { // dim_order is not set, preserve the dim order of input

auto out_strides = out.strides();
auto input_strides = input.strides();
ET_LOG_AND_RETURN_IF_FALSE(input_strides.size() == out_strides.size());
ET_LOG_AND_RETURN_UNLESS(input_strides.size() == out_strides.size());
for (size_t i = 0; i < input_strides.size(); i++) {
ET_LOG_AND_RETURN_IF_FALSE(input_strides[i] == out_strides[i]);
ET_LOG_AND_RETURN_UNLESS(input_strides[i] == out_strides[i]);
}
}
return true;
16 changes: 8 additions & 8 deletions kernels/aten/cpu/util/copy_ops_util.cpp
@@ -22,43 +22,43 @@ bool check__to_dim_order_copy_args(
executorch::aten::OptionalArrayRef<int64_t> dim_order,
Tensor& out) {
// Right now we only support blocking data transfer
ET_LOG_AND_RETURN_IF_FALSE(non_blocking == false);
ET_LOG_AND_RETURN_UNLESS(non_blocking == false);

// dim_order is set, the target dim_order will be either contiguous or
// channels_last memory format
if (dim_order.has_value()) {
executorch::aten::ArrayRef<int64_t> dim_order_ref = dim_order.value();

// dim order size shall equal to input dim
ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == input.dim());
ET_LOG_AND_RETURN_UNLESS(dim_order_ref.size() == input.dim());

ET_LOG_AND_RETURN_IF_FALSE(
ET_LOG_AND_RETURN_UNLESS(
is_channels_last_dim_order(
dim_order.value().data(), dim_order.value().size()) ||
is_contiguous_dim_order(
dim_order.value().data(), dim_order.value().size()));

// Out Aten tensor shall have same memory format stride as dim_order
const size_t kMaxNumOfDimensions = 16;
ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim());
ET_LOG_AND_RETURN_UNLESS(kMaxNumOfDimensions >= out.dim());
executorch::aten::StridesType target_strides[kMaxNumOfDimensions];
dim_order_to_stride_nocheck(
out.sizes().data(),
dim_order_ref.data(),
dim_order_ref.size(),
target_strides);
ET_LOG_AND_RETURN_IF_FALSE(out.dim() == dim_order_ref.size());
ET_LOG_AND_RETURN_UNLESS(out.dim() == dim_order_ref.size());
for (size_t i = 0; i < dim_order_ref.size(); i++) {
ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
ET_LOG_AND_RETURN_UNLESS(target_strides[i] == out.strides()[i]);
}

} else { // dim_order is not set, preserve the dim order of input

auto out_strides = out.strides();
auto input_strides = input.strides();
ET_LOG_AND_RETURN_IF_FALSE(input_strides.size() == out_strides.size());
ET_LOG_AND_RETURN_UNLESS(input_strides.size() == out_strides.size());
for (size_t i = 0; i < input_strides.size(); i++) {
ET_LOG_AND_RETURN_IF_FALSE(input_strides[i] == out_strides[i]);
ET_LOG_AND_RETURN_UNLESS(input_strides[i] == out_strides[i]);
}
}
return true;
18 changes: 9 additions & 9 deletions kernels/optimized/cpu/op_bmm.cpp
@@ -31,46 +31,46 @@ namespace {
// Verifies that the parameters are valid.
bool check_bmm_out_args(const Tensor& self, const Tensor& mat2, Tensor& out) {
// Ensure dimensions is 3 for all input and out
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
self.dim() == mat2.dim(),
"self.dim() %zd != mat2.dim() %zd",
self.dim(),
mat2.dim());
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
self.dim() == out.dim(),
"self.dim() %zd != out.dim() %zd",
self.dim(),
out.dim());
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
self.dim() == 3, "self.dim() %zd != 3", self.dim());
// Ensure batch larger than or equals to 0
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
self.size(0) >= 0, "self.size(0) %zd < 0", self.size(0));
// Ensure batches are the same
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
self.size(0) == mat2.size(0),
"self.size(0) %zd != mat2.size(0) %zd",
self.size(0),
mat2.size(0));
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
self.size(0) == out.size(0),
"self.size(0) %zd != out.size(0) %zd",
self.size(0),
out.size(0));
// Ensure the out size is compatible with input tensors
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
mat2.size(2) == out.size(2),
"mat2.size(2) %zd != out.size(2) %zd",
mat2.size(2),
out.size(2));
ET_LOG_MSG_AND_RETURN_IF_FALSE(
ET_LOG_MSG_AND_RETURN_UNLESS(
self.size(1) == out.size(1),
"self.size(1) %zd != out.size(1) %zd",
self.size(1),
out.size(1));

// Ensure that all tensors share a dtype
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, mat2, out));
ET_LOG_AND_RETURN_UNLESS(tensors_have_same_dtype(self, mat2, out));

return true;
}
8 changes: 4 additions & 4 deletions kernels/portable/cpu/op__empty_dim_order.cpp
@@ -30,20 +30,20 @@ bool _check__empty_out_dim_order(OptionalIntArrayRef dim_order, Tensor& out) {
// out tensor's dim order shall equal to input dim order
IntArrayRef dim_order_ref = dim_order.value();

ET_LOG_AND_RETURN_IF_FALSE(
ET_LOG_AND_RETURN_UNLESS(
is_channels_last_dim_order(
dim_order.value().data(), dim_order.value().size()) ||
is_contiguous_dim_order(
dim_order.value().data(), dim_order.value().size()));

// Out tensor shall have same dim order as dim_order
ET_LOG_AND_RETURN_IF_FALSE(out_dim_order.size() == dim_order_ref.size());
ET_LOG_AND_RETURN_UNLESS(out_dim_order.size() == dim_order_ref.size());
for (size_t i = 0; i < dim_order_ref.size(); i++) {
ET_LOG_AND_RETURN_IF_FALSE(out_dim_order[i] == dim_order_ref[i]);
ET_LOG_AND_RETURN_UNLESS(out_dim_order[i] == dim_order_ref[i]);
}
} else { // dim_order is not set, out tensor should be contiguous memory
// format
ET_LOG_AND_RETURN_IF_FALSE(
ET_LOG_AND_RETURN_UNLESS(
is_contiguous_dim_order(out_dim_order.data(), out_dim_order.size()));
}
return true;
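Not part of this PR, but relevant to "rolling out" a rename like this: if out-of-tree code still spells the old names, a temporary shim can keep both spellings working until every call site has migrated. The sketch below is hypothetical; it assumes the new *_UNLESS macros are already in scope via the appropriate ExecuTorch header (not shown in this diff) and uses #ifndef guards so it is a no-op wherever the old names are still defined upstream.

// Hypothetical migration shim -- not part of this PR. Assumes the *_UNLESS
// macros are already visible via the relevant ExecuTorch header (not shown
// in this diff). Delete once all call sites use the new names.
#ifndef ET_LOG_AND_RETURN_IF_FALSE
#define ET_LOG_AND_RETURN_IF_FALSE(cond) ET_LOG_AND_RETURN_UNLESS(cond)
#endif

#ifndef ET_LOG_MSG_AND_RETURN_IF_FALSE
#define ET_LOG_MSG_AND_RETURN_IF_FALSE(cond, ...) \
  ET_LOG_MSG_AND_RETURN_UNLESS(cond, __VA_ARGS__)
#endif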