@@ -115,7 +115,7 @@ fn u64_by_u64_div_rem(duo: u64, div: u64) -> (u64, u64) {
 // microarchitecture can multiply and divide. We decide to be optimistic and assume `trifecta` is
 // faster if the target pointer width is at least 64.
 #[cfg(all(
-    not(all(feature = "asm", target_arch = "x86_64")),
+    not(all(not(feature = "no-asm"), target_arch = "x86_64")),
     not(any(target_pointer_width = "16", target_pointer_width = "32"))
 ))]
 impl_trifecta!(
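(Note: the same feature inversion repeats in every hunk below. The x86 asm fast paths were previously opt-in through an `asm` feature and are now on by default, opted out through a `no-asm` feature. A minimal, self-contained sketch of the resulting complementary gating, with illustrative function names that are not part of the crate:)

```rust
// Sketch of the opt-out gating this commit introduces (names are
// illustrative). With default features on x86_64 the first item is
// compiled; building with `--features no-asm` selects the second.
#[cfg(all(not(feature = "no-asm"), target_arch = "x86_64"))]
fn div_rem_demo(duo: u64, div: u64) -> (u64, u64) {
    // Stand-in for the asm-accelerated path.
    (duo / div, duo % div)
}

#[cfg(not(all(not(feature = "no-asm"), target_arch = "x86_64")))]
fn div_rem_demo(duo: u64, div: u64) -> (u64, u64) {
    // Stand-in for the pure-Rust fallback (`trifecta`, `delegate`, ...).
    (duo / div, duo % div)
}

fn main() {
    assert_eq!(div_rem_demo(100, 7), (14, 2));
}
```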
@@ -131,7 +131,7 @@ impl_trifecta!(
 // If the pointer width is less than 64, then the target architecture almost certainly does not have
 // the fast 64 to 128 bit widening multiplication needed for `trifecta` to be faster.
 #[cfg(all(
-    not(all(feature = "asm", target_arch = "x86_64")),
+    not(all(not(feature = "no-asm"), target_arch = "x86_64")),
     any(target_pointer_width = "16", target_pointer_width = "32")
 ))]
 impl_delegate!(
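(The "widening multiplication" that comment refers to is the 64 × 64 → 128 bit product; a quick illustration, not part of the diff:)

```rust
// 64x64 -> 128 bit widening multiply. `trifecta` is only a win on targets
// whose hardware computes this product quickly.
fn widening_mul(a: u64, b: u64) -> u128 {
    (a as u128) * (b as u128)
}

fn main() {
    assert_eq!(widening_mul(u64::MAX, 2), u64::MAX as u128 * 2);
}
```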
@@ -152,7 +152,7 @@ impl_delegate!(
 ///
 /// If the quotient does not fit in a `u64`, a floating point exception occurs.
 /// If `div == 0`, then a division by zero exception occurs.
-#[cfg(all(feature = "asm", target_arch = "x86_64"))]
+#[cfg(all(not(feature = "no-asm"), target_arch = "x86_64"))]
 #[inline]
 unsafe fn u128_by_u64_div_rem(duo: u128, div: u64) -> (u64, u64) {
     let duo_lo = duo as u64;
@@ -174,7 +174,7 @@ unsafe fn u128_by_u64_div_rem(duo: u128, div: u64) -> (u64, u64) {
 }
 
 // use `asymmetric` instead of `trifecta` on x86_64
-#[cfg(all(feature = "asm", target_arch = "x86_64"))]
+#[cfg(all(not(feature = "no-asm"), target_arch = "x86_64"))]
 impl_asymmetric!(
     u128_div_rem,
     zero_div_fn,
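(The body of `u128_by_u64_div_rem` is elided between the two hunks above. For readers unfamiliar with the instruction, here is a hedged sketch of what such a routine can look like using modern `core::arch::asm` syntax; it is an illustration under those assumptions, not the crate's exact code:)

```rust
use core::arch::asm;

/// Sketch of a 128-by-64 division using the x86_64 `div` instruction,
/// in the spirit of `u128_by_u64_div_rem`.
///
/// # Safety
/// `div` must be nonzero and the quotient must fit in a `u64`;
/// otherwise the CPU raises a divide exception, as documented above.
#[cfg(target_arch = "x86_64")]
unsafe fn u128_by_u64_div_rem_sketch(duo: u128, div: u64) -> (u64, u64) {
    let duo_lo = duo as u64;
    let duo_hi = (duo >> 64) as u64;
    let quo: u64;
    let rem: u64;
    // `div r64` divides the 128-bit value in RDX:RAX by the operand,
    // leaving the quotient in RAX and the remainder in RDX.
    asm!(
        "div {0}",
        in(reg) div,
        inlateout("rax") duo_lo => quo,
        inlateout("rdx") duo_hi => rem,
        options(pure, nomem, nostack)
    );
    (quo, rem)
}

fn main() {
    #[cfg(target_arch = "x86_64")]
    {
        // (2^64 + 5) / 3: the quotient fits in a u64, so this is safe.
        let duo = (1u128 << 64) + 5;
        let (q, r) = unsafe { u128_by_u64_div_rem_sketch(duo, 3) };
        assert_eq!(q as u128 * 3 + r as u128, duo);
    }
}
```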
@@ -203,7 +203,7 @@ fn u32_by_u32_div_rem(duo: u32, div: u32) -> (u32, u32) {
 // When not on x86 and the pointer width is not 64, use `delegate` since the division size is larger
 // than register size.
 #[cfg(all(
-    not(all(feature = "asm", target_arch = "x86")),
+    not(all(not(feature = "no-asm"), target_arch = "x86")),
     not(target_pointer_width = "64")
 ))]
 impl_delegate!(
@@ -220,7 +220,7 @@ impl_delegate!(
 
 // When not on x86 and the pointer width is 64, use `binary_long`.
 #[cfg(all(
-    not(all(feature = "asm", target_arch = "x86")),
+    not(all(not(feature = "no-asm"), target_arch = "x86")),
     target_pointer_width = "64"
 ))]
 impl_binary_long!(
@@ -238,7 +238,7 @@ impl_binary_long!(
 ///
 /// If the quotient does not fit in a `u32`, a floating point exception occurs.
 /// If `div == 0`, then a division by zero exception occurs.
-#[cfg(all(feature = "asm", target_arch = "x86"))]
+#[cfg(all(not(feature = "no-asm"), target_arch = "x86"))]
 #[inline]
 unsafe fn u64_by_u32_div_rem(duo: u64, div: u32) -> (u32, u32) {
     let duo_lo = duo as u32;
@@ -260,7 +260,7 @@ unsafe fn u64_by_u32_div_rem(duo: u64, div: u32) -> (u32, u32) {
 }
 
 // use `asymmetric` instead of `delegate` on x86
-#[cfg(all(feature = "asm", target_arch = "x86"))]
+#[cfg(all(not(feature = "no-asm"), target_arch = "x86"))]
 impl_asymmetric!(
     u64_div_rem,
     zero_div_fn,
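(Usage note: after this commit the asm paths are enabled by default. Assuming the `no-asm` Cargo feature wiring shown in these hunks, a downstream build that wants only the pure-Rust implementations would opt out by enabling that feature, e.g. `cargo build --features compiler_builtins/no-asm`.)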