-
Notifications
You must be signed in to change notification settings - Fork 12.7k
SYCL: Initial set_rows kernel implementation #14562
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from all commits
Commits
Show all changes
6 commits
Select commit
Hold shift + click to select a range
857067a
SYCL: Initial set_rows kernel implementation
qnixsynapse f8ff536
Revert max_threads to 256
qnixsynapse 7036546
Refactor set_rows and address review comments
qnixsynapse 74a5fc8
Deduplicate conversion function
qnixsynapse bab2b3b
Remove guard before kernel launch and refactor
qnixsynapse 1ed8c7c
Fix and add back SFINAE
qnixsynapse File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,131 @@ | ||
#include "set_rows.hpp" | ||
|
||
namespace utils { | ||
template<typename T> | ||
static constexpr bool is_arithmetic_v() { | ||
return std::is_arithmetic_v<T> || std::is_same_v<T, sycl::half> || std::is_same_v<T, sycl::ext::oneapi::bfloat16>; | ||
} | ||
} | ||
template<typename TIn, typename TOut> | ||
static inline std::enable_if_t<utils::is_arithmetic_v<TIn>() && utils::is_arithmetic_v<TOut>(), void> | ||
convert (const char* src, char* dst) { | ||
auto src_val = *reinterpret_cast<const TIn*>(src); | ||
auto dst_val = sycl::vec<TIn, 1>(src_val).template convert<TOut, sycl::rounding_mode::automatic>()[0]; | ||
*reinterpret_cast<TOut*>(dst) = dst_val;; | ||
} | ||
|
||
template<typename TIn, typename TOut> | ||
static void k_set_rows( | ||
const char * __restrict__ src0, const int64_t * __restrict__ src1, char * __restrict__ dst, | ||
const int64_t ne00, const int64_t ne01, const int64_t ne11, const int64_t ne12, | ||
const size_t nb01, const size_t nb02, const size_t nb03, | ||
const size_t nb10, const size_t nb11, const size_t nb12, | ||
const size_t nb1, const size_t nb2, const size_t nb3, | ||
const size_t src_type_size, const size_t dst_type_size, | ||
const sycl::nd_item<3> & item_ct1) { | ||
|
||
const int i03 = item_ct1.get_group(0); | ||
const int i02 = item_ct1.get_group(1); | ||
const int i01 = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1); // Row index | ||
|
||
if (i01 >= ne01) { | ||
return; | ||
} | ||
|
||
const int i12 = i03 % ne12; | ||
const int i11 = i02 % ne11; | ||
const int i10 = i01; | ||
|
||
const int64_t dst_row = *(const int64_t *)((const char *)src1 + calculate_offset<3>({nb10, nb11, nb12}, {i10, i11, i12})); | ||
|
||
const char * src0_row = src0 + calculate_offset<3>({nb01, nb02, nb03}, {i01, i02, i03}); | ||
char * dst_row_ptr = dst + dst_row*nb1 + i02*nb2 + i03*nb3; | ||
|
||
for (int col = item_ct1.get_local_id(0); col < ne00; col += item_ct1.get_local_range(0)) { | ||
const char * src_elem = src0_row + col * src_type_size; | ||
char * dst_elem = dst_row_ptr + col * dst_type_size; | ||
convert<TIn, TOut>(src_elem, dst_elem); | ||
} | ||
} | ||
|
||
template<typename TIn, typename TOut> | ||
static void set_rows_sycl( | ||
const char * src0_d, const int64_t * src1_d, char * dst_d, | ||
const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, | ||
const int64_t ne11, const int64_t ne12, const size_t nb01, const size_t nb02, const size_t nb03, | ||
const size_t nb10, const size_t nb11, const size_t nb12, | ||
const size_t nb1, const size_t nb2, const size_t nb3, | ||
const size_t src_type_size, const size_t dst_type_size, | ||
queue_ptr stream) { | ||
|
||
constexpr int max_threads_per_row = 64; // KEEPING 64 for now | ||
const int threads_per_row = std::min((int)ne00, max_threads_per_row); | ||
|
||
constexpr int max_threads_per_block = 64; | ||
const int rows_per_block = std::max(1, max_threads_per_block / threads_per_row); | ||
|
||
const sycl::range<3> block_size(1, rows_per_block, threads_per_row); | ||
const sycl::range<3> grid_size(ne03, ne02, (ne01 + rows_per_block - 1) / rows_per_block); | ||
|
||
sycl_parallel_for( | ||
stream, | ||
sycl::nd_range<3>(grid_size * block_size, block_size), | ||
[=](sycl::nd_item<3> item_ct1) { | ||
k_set_rows<TIn, TOut>( | ||
src0_d, src1_d, dst_d, | ||
ne00, ne01, ne11, ne12, | ||
nb01, nb02, nb03, | ||
nb10, nb11, nb12, | ||
nb1, nb2, nb3, | ||
src_type_size, dst_type_size, | ||
item_ct1 | ||
); | ||
} | ||
); | ||
} | ||
|
||
|
||
// Backend entry point for GGML_OP_SET_ROWS: scatters the F32 rows of src0
// into dst at the I64 row indices held in src1, converting to dst's type.
void ggml_sycl_op_set_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
    scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
    const ggml_tensor * src0 = dst->src[0]; // rows to scatter
    const ggml_tensor * src1 = dst->src[1]; // destination row indices

    // Use the locals for consistency (the original mixed dst->src[i] and src0/src1).
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_I64);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t * src1_dd = static_cast<const int64_t *>(src1->data);

    dpct::queue_ptr stream = ctx.stream();
    switch (dst->type) {
        case GGML_TYPE_F32:
            set_rows_sycl<float, float>(
                (const char *)src0->data, src1_dd, (char *)dst->data,
                ne00, ne01, ne02, ne03,
                ne11, ne12,
                nb01, nb02, nb03,
                nb10, nb11, nb12,
                nb1, nb2, nb3,
                sizeof(float), sizeof(float),
                stream
            );
            break;
        case GGML_TYPE_F16:
            // F16 output needs device fp16 support; abort early if absent.
            dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
            set_rows_sycl<float, sycl::half>(
                (const char *)src0->data, src1_dd, (char *)dst->data,
                ne00, ne01, ne02, ne03,
                ne11, ne12,
                nb01, nb02, nb03,
                nb10, nb11, nb12,
                nb1, nb2, nb3,
                sizeof(float), sizeof(sycl::half),
                stream
            );
            break;
        default:
            GGML_ABORT("Unsupported tensor type!");
            break;
    }
}
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,8 @@ | ||
#ifndef GGML_SYCL_SET_ROWS_HPP
#define GGML_SYCL_SET_ROWS_HPP

#include "common.hpp"

// SYCL implementation of GGML_OP_SET_ROWS: scatters rows of dst->src[0]
// into dst at the row indices held in dst->src[1] (see set_rows.cpp).
void ggml_sycl_op_set_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst);

#endif // GGML_SYCL_SET_ROWS_HPP
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
As this is, in essence, an element-wise operation, we can have a 1-dimensional kernel here, and the helper function I had mentioned will take care of ascertaining the position along these dimensions.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks. I'll try testing with a 1D kernel today.
Edit: It seems possible but do we really need a 1D kernel for scatter row shuffle/reorder kernel?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
So the argument is that all our element-wise-like kernels (activations, conversions, etc.) can, for the most part, be condensed into just one function as follows, since element-wise functions are simply one-to-one mappings, whatever the operation:
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The PR was merged before I could attempt this. I will open a new PR with the 1D kernel. (Performance is almost the same as the cpy kernel.)