diff --git a/kernels/portable/cpu/util/dtype_util.h b/kernels/portable/cpu/util/dtype_util.h
index 167d30611be..2286ca50bee 100644
--- a/kernels/portable/cpu/util/dtype_util.h
+++ b/kernels/portable/cpu/util/dtype_util.h
@@ -228,7 +228,7 @@ enum class SupportedTensorDtypes {
 namespace internal {
 
 template <typename CTYPE_COMPUTE, const char* op_name>
-load_to_compute_fn<CTYPE_COMPUTE> get_load_to_compute_fn_impl(
+load_to_compute_fn<CTYPE_COMPUTE> get_load_to_compute_fn(
     const Tensor& t,
     SupportedTensorDtypes dtypes) {
   switch (dtypes) {
@@ -252,7 +252,7 @@ load_to_compute_fn<CTYPE_COMPUTE> get_load_to_compute_fn_impl(
 }
 
 template <typename CTYPE_COMPUTE, const char* op_name>
-store_compute_to_tensor_fn<CTYPE_COMPUTE> get_store_compute_to_tensor_fn_impl(
+store_compute_to_tensor_fn<CTYPE_COMPUTE> get_store_compute_to_tensor_fn(
     const Tensor& t,
     SupportedTensorDtypes dtypes) {
   switch (dtypes) {
@@ -285,41 +285,6 @@ store_compute_to_tensor_fn<CTYPE_COMPUTE> get_store_compute_to_tensor_fn_impl(
   return nullptr;
 }
-#ifndef EXECUTORCH_SELECTIVE_BUILD_DTYPE
-constexpr const char kGenericElementwiseOpName[] = "generic_elementwise_op";
-#endif // EXECUTORCH_SELECTIVE_BUILD_DTYPE
-
-template <typename CTYPE_COMPUTE, const char* op_name>
-load_to_compute_fn<CTYPE_COMPUTE> get_load_to_compute_fn(
-    const Tensor& t,
-    SupportedTensorDtypes dtypes) {
-  // NOTE: Selective build relies on the operator name being passed
-  // here. When it's *not* active, using the same operator name
-  // everywhere saves on size because we don't require a new template
-  // instantiation for every operator.
-  return get_load_to_compute_fn_impl<
-      CTYPE_COMPUTE,
-#ifdef EXECUTORCH_SELECTIVE_BUILD_DTYPE
-      op_name
-#else // EXECUTORCH_SELECTIVE_BUILD_DTYPE
-      kGenericElementwiseOpName
-#endif // EXECUTORCH_SELECTIVE_BUILD_DTYPE
-      >(t, dtypes);
-}
-
-template <typename CTYPE_COMPUTE, const char* op_name>
-store_compute_to_tensor_fn<CTYPE_COMPUTE> get_store_compute_to_tensor_fn(
-    const Tensor& t,
-    SupportedTensorDtypes dtypes) {
-  return get_store_compute_to_tensor_fn_impl<
-      CTYPE_COMPUTE,
-#ifdef EXECUTORCH_SELECTIVE_BUILD_DTYPE
-      op_name
-#else // EXECUTORCH_SELECTIVE_BUILD_DTYPE
-      kGenericElementwiseOpName
-#endif // EXECUTORCH_SELECTIVE_BUILD_DTYPE
-      >(t, dtypes);
-}
 
 bool check_tensor_dtype(
     const Tensor t,
     SupportedTensorDtypes dtypes,
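
Note (reviewer addition, not part of the diff): with the _impl indirection gone, callers instantiate the surviving entry points directly, passing their per-operator name as the non-type template argument. A minimal caller-side sketch is below; the operator-name constant, the float compute type, and the function/parameter names (resolve_elementwise_fns, in, out) are illustrative assumptions, not code from this change.

    // Hypothetical usage sketch; op_name, in, out, and float are placeholder
    // choices, not taken from the diff above.
    namespace {
    // Each operator supplies its own name so that dtype selective build can
    // key dtype coverage off the operator being built.
    constexpr const char op_name[] = "my_op.out";
    } // namespace

    void resolve_elementwise_fns(
        const Tensor& in,
        const Tensor& out,
        SupportedTensorDtypes dtypes) {
      // Loader: converts one element of `in` to the compute type (float here).
      auto load = internal::get_load_to_compute_fn<float, op_name>(in, dtypes);
      // Store: converts a compute-type value back to `out`'s element type.
      auto store =
          internal::get_store_compute_to_tensor_fn<float, op_name>(out, dtypes);
      (void)load;
      (void)store;
    }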